blob: d6a232a300ad802ad76d2ab9c91a9a1a3784cbd0 [file] [log] [blame]
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001/*
Ajit Khaparded2145cd2011-03-16 08:20:46 +00002 * Copyright (C) 2005 - 2011 Emulex
Sathya Perla6b7c5b92009-03-11 23:32:03 -07003 * All rights reserved.
4 *
5 * This program is free software; you can redistribute it and/or
6 * modify it under the terms of the GNU General Public License version 2
7 * as published by the Free Software Foundation. The full GNU General
8 * Public License is included in this distribution in the file called COPYING.
9 *
10 * Contact Information:
Ajit Khaparded2145cd2011-03-16 08:20:46 +000011 * linux-drivers@emulex.com
Sathya Perla6b7c5b92009-03-11 23:32:03 -070012 *
Ajit Khaparded2145cd2011-03-16 08:20:46 +000013 * Emulex
14 * 3333 Susan Street
15 * Costa Mesa, CA 92626
Sathya Perla6b7c5b92009-03-11 23:32:03 -070016 */
17
Paul Gortmaker70c71602011-05-22 16:47:17 -040018#include <linux/prefetch.h>
Sathya Perla6b7c5b92009-03-11 23:32:03 -070019#include "be.h"
Sathya Perla8788fdc2009-07-27 22:52:03 +000020#include "be_cmds.h"
Stephen Hemminger65f71b82009-03-27 00:25:24 -070021#include <asm/div64.h>
Sathya Perla6b7c5b92009-03-11 23:32:03 -070022
23MODULE_VERSION(DRV_VER);
24MODULE_DEVICE_TABLE(pci, be_dev_ids);
25MODULE_DESCRIPTION(DRV_DESC " " DRV_VER);
26MODULE_AUTHOR("ServerEngines Corporation");
27MODULE_LICENSE("GPL");
28
Sathya Perla2e588f82011-03-11 02:49:26 +000029static ushort rx_frag_size = 2048;
Sarveshwar Bandiba343c72010-03-31 02:56:12 +000030static unsigned int num_vfs;
Sathya Perla2e588f82011-03-11 02:49:26 +000031module_param(rx_frag_size, ushort, S_IRUGO);
Sarveshwar Bandiba343c72010-03-31 02:56:12 +000032module_param(num_vfs, uint, S_IRUGO);
Sathya Perla6b7c5b92009-03-11 23:32:03 -070033MODULE_PARM_DESC(rx_frag_size, "Size of a fragment that holds rcvd data.");
Sarveshwar Bandiba343c72010-03-31 02:56:12 +000034MODULE_PARM_DESC(num_vfs, "Number of PCI VFs to initialize");
Sathya Perla6b7c5b92009-03-11 23:32:03 -070035
Sathya Perla6b7c5b92009-03-11 23:32:03 -070036static DEFINE_PCI_DEVICE_TABLE(be_dev_ids) = {
Ajit Khapardec4ca2372009-05-18 15:38:55 -070037 { PCI_DEVICE(BE_VENDOR_ID, BE_DEVICE_ID1) },
Ajit Khaparde59fd5d82009-10-29 01:11:06 -070038 { PCI_DEVICE(BE_VENDOR_ID, BE_DEVICE_ID2) },
Ajit Khapardec4ca2372009-05-18 15:38:55 -070039 { PCI_DEVICE(BE_VENDOR_ID, OC_DEVICE_ID1) },
40 { PCI_DEVICE(BE_VENDOR_ID, OC_DEVICE_ID2) },
Sathya Perlafe6d2a32010-11-21 23:25:50 +000041 { PCI_DEVICE(EMULEX_VENDOR_ID, OC_DEVICE_ID3)},
Mammatha Edhala12f4d0a2011-05-18 03:26:22 +000042 { PCI_DEVICE(EMULEX_VENDOR_ID, OC_DEVICE_ID4)},
Sathya Perla6b7c5b92009-03-11 23:32:03 -070043 { 0 }
44};
45MODULE_DEVICE_TABLE(pci, be_dev_ids);
Ajit Khaparde7c185272010-07-29 06:16:33 +000046/* UE Status Low CSR */
Joe Perches42c8b112011-07-09 02:56:56 -070047static const char * const ue_status_low_desc[] = {
Ajit Khaparde7c185272010-07-29 06:16:33 +000048 "CEV",
49 "CTX",
50 "DBUF",
51 "ERX",
52 "Host",
53 "MPU",
54 "NDMA",
55 "PTC ",
56 "RDMA ",
57 "RXF ",
58 "RXIPS ",
59 "RXULP0 ",
60 "RXULP1 ",
61 "RXULP2 ",
62 "TIM ",
63 "TPOST ",
64 "TPRE ",
65 "TXIPS ",
66 "TXULP0 ",
67 "TXULP1 ",
68 "UC ",
69 "WDMA ",
70 "TXULP2 ",
71 "HOST1 ",
72 "P0_OB_LINK ",
73 "P1_OB_LINK ",
74 "HOST_GPIO ",
75 "MBOX ",
76 "AXGMAC0",
77 "AXGMAC1",
78 "JTAG",
79 "MPU_INTPEND"
80};
81/* UE Status High CSR */
Joe Perches42c8b112011-07-09 02:56:56 -070082static const char * const ue_status_hi_desc[] = {
Ajit Khaparde7c185272010-07-29 06:16:33 +000083 "LPCMEMHOST",
84 "MGMT_MAC",
85 "PCS0ONLINE",
86 "MPU_IRAM",
87 "PCS1ONLINE",
88 "PCTL0",
89 "PCTL1",
90 "PMEM",
91 "RR",
92 "TXPB",
93 "RXPP",
94 "XAUI",
95 "TXP",
96 "ARM",
97 "IPC",
98 "HOST2",
99 "HOST3",
100 "HOST4",
101 "HOST5",
102 "HOST6",
103 "HOST7",
104 "HOST8",
105 "HOST9",
Joe Perches42c8b112011-07-09 02:56:56 -0700106 "NETC",
Ajit Khaparde7c185272010-07-29 06:16:33 +0000107 "Unknown",
108 "Unknown",
109 "Unknown",
110 "Unknown",
111 "Unknown",
112 "Unknown",
113 "Unknown",
114 "Unknown"
115};
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700116
Sathya Perla752961a2011-10-24 02:45:03 +0000117/* Is BE in a multi-channel mode */
118static inline bool be_is_mc(struct be_adapter *adapter) {
119 return (adapter->function_mode & FLEX10_MODE ||
120 adapter->function_mode & VNIC_MODE ||
121 adapter->function_mode & UMC_ENABLED);
122}
123
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700124static void be_queue_free(struct be_adapter *adapter, struct be_queue_info *q)
125{
126 struct be_dma_mem *mem = &q->dma_mem;
127 if (mem->va)
Ivan Vecera2b7bceb2011-02-02 08:05:12 +0000128 dma_free_coherent(&adapter->pdev->dev, mem->size, mem->va,
129 mem->dma);
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700130}
131
132static int be_queue_alloc(struct be_adapter *adapter, struct be_queue_info *q,
133 u16 len, u16 entry_size)
134{
135 struct be_dma_mem *mem = &q->dma_mem;
136
137 memset(q, 0, sizeof(*q));
138 q->len = len;
139 q->entry_size = entry_size;
140 mem->size = len * entry_size;
Ivan Vecera2b7bceb2011-02-02 08:05:12 +0000141 mem->va = dma_alloc_coherent(&adapter->pdev->dev, mem->size, &mem->dma,
142 GFP_KERNEL);
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700143 if (!mem->va)
144 return -1;
145 memset(mem->va, 0, mem->size);
146 return 0;
147}
148
Sathya Perla8788fdc2009-07-27 22:52:03 +0000149static void be_intr_set(struct be_adapter *adapter, bool enable)
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700150{
Sathya Perladb3ea782011-08-22 19:41:52 +0000151 u32 reg, enabled;
Sathya Perla5f0b8492009-07-27 22:52:56 +0000152
Sathya Perlacf588472010-02-14 21:22:01 +0000153 if (adapter->eeh_err)
154 return;
155
Sathya Perladb3ea782011-08-22 19:41:52 +0000156 pci_read_config_dword(adapter->pdev, PCICFG_MEMBAR_CTRL_INT_CTRL_OFFSET,
157 &reg);
158 enabled = reg & MEMBAR_CTRL_INT_CTRL_HOSTINTR_MASK;
159
Sathya Perla5f0b8492009-07-27 22:52:56 +0000160 if (!enabled && enable)
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700161 reg |= MEMBAR_CTRL_INT_CTRL_HOSTINTR_MASK;
Sathya Perla5f0b8492009-07-27 22:52:56 +0000162 else if (enabled && !enable)
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700163 reg &= ~MEMBAR_CTRL_INT_CTRL_HOSTINTR_MASK;
Sathya Perla5f0b8492009-07-27 22:52:56 +0000164 else
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700165 return;
Sathya Perla5f0b8492009-07-27 22:52:56 +0000166
Sathya Perladb3ea782011-08-22 19:41:52 +0000167 pci_write_config_dword(adapter->pdev,
168 PCICFG_MEMBAR_CTRL_INT_CTRL_OFFSET, reg);
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700169}
170
Sathya Perla8788fdc2009-07-27 22:52:03 +0000171static void be_rxq_notify(struct be_adapter *adapter, u16 qid, u16 posted)
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700172{
173 u32 val = 0;
174 val |= qid & DB_RQ_RING_ID_MASK;
175 val |= posted << DB_RQ_NUM_POSTED_SHIFT;
Sathya Perlaf3eb62d2010-06-29 00:11:17 +0000176
177 wmb();
Sathya Perla8788fdc2009-07-27 22:52:03 +0000178 iowrite32(val, adapter->db + DB_RQ_OFFSET);
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700179}
180
Sathya Perla8788fdc2009-07-27 22:52:03 +0000181static void be_txq_notify(struct be_adapter *adapter, u16 qid, u16 posted)
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700182{
183 u32 val = 0;
184 val |= qid & DB_TXULP_RING_ID_MASK;
185 val |= (posted & DB_TXULP_NUM_POSTED_MASK) << DB_TXULP_NUM_POSTED_SHIFT;
Sathya Perlaf3eb62d2010-06-29 00:11:17 +0000186
187 wmb();
Sathya Perla8788fdc2009-07-27 22:52:03 +0000188 iowrite32(val, adapter->db + DB_TXULP1_OFFSET);
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700189}
190
Sathya Perla8788fdc2009-07-27 22:52:03 +0000191static void be_eq_notify(struct be_adapter *adapter, u16 qid,
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700192 bool arm, bool clear_int, u16 num_popped)
193{
194 u32 val = 0;
195 val |= qid & DB_EQ_RING_ID_MASK;
Sathya Perlafe6d2a32010-11-21 23:25:50 +0000196 val |= ((qid & DB_EQ_RING_ID_EXT_MASK) <<
197 DB_EQ_RING_ID_EXT_MASK_SHIFT);
Sathya Perlacf588472010-02-14 21:22:01 +0000198
199 if (adapter->eeh_err)
200 return;
201
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700202 if (arm)
203 val |= 1 << DB_EQ_REARM_SHIFT;
204 if (clear_int)
205 val |= 1 << DB_EQ_CLR_SHIFT;
206 val |= 1 << DB_EQ_EVNT_SHIFT;
207 val |= num_popped << DB_EQ_NUM_POPPED_SHIFT;
Sathya Perla8788fdc2009-07-27 22:52:03 +0000208 iowrite32(val, adapter->db + DB_EQ_OFFSET);
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700209}
210
Sathya Perla8788fdc2009-07-27 22:52:03 +0000211void be_cq_notify(struct be_adapter *adapter, u16 qid, bool arm, u16 num_popped)
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700212{
213 u32 val = 0;
214 val |= qid & DB_CQ_RING_ID_MASK;
Sathya Perlafe6d2a32010-11-21 23:25:50 +0000215 val |= ((qid & DB_CQ_RING_ID_EXT_MASK) <<
216 DB_CQ_RING_ID_EXT_MASK_SHIFT);
Sathya Perlacf588472010-02-14 21:22:01 +0000217
218 if (adapter->eeh_err)
219 return;
220
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700221 if (arm)
222 val |= 1 << DB_CQ_REARM_SHIFT;
223 val |= num_popped << DB_CQ_NUM_POPPED_SHIFT;
Sathya Perla8788fdc2009-07-27 22:52:03 +0000224 iowrite32(val, adapter->db + DB_CQ_OFFSET);
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700225}
226
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700227static int be_mac_addr_set(struct net_device *netdev, void *p)
228{
229 struct be_adapter *adapter = netdev_priv(netdev);
230 struct sockaddr *addr = p;
231 int status = 0;
232
Ajit Khapardeca9e4982009-11-29 17:56:26 +0000233 if (!is_valid_ether_addr(addr->sa_data))
234 return -EADDRNOTAVAIL;
235
Sarveshwar Bandiba343c72010-03-31 02:56:12 +0000236 /* MAC addr configuration will be done in hardware for VFs
237 * by their corresponding PFs. Just copy to netdev addr here
238 */
239 if (!be_physfn(adapter))
240 goto netdev_addr;
241
Ajit Khapardef8617e02011-02-11 13:36:37 +0000242 status = be_cmd_pmac_del(adapter, adapter->if_handle,
243 adapter->pmac_id, 0);
Sathya Perlaa65027e2009-08-17 00:58:04 +0000244 if (status)
245 return status;
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700246
Sathya Perlaa65027e2009-08-17 00:58:04 +0000247 status = be_cmd_pmac_add(adapter, (u8 *)addr->sa_data,
Ajit Khapardef8617e02011-02-11 13:36:37 +0000248 adapter->if_handle, &adapter->pmac_id, 0);
Sarveshwar Bandiba343c72010-03-31 02:56:12 +0000249netdev_addr:
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700250 if (!status)
251 memcpy(netdev->dev_addr, addr->sa_data, netdev->addr_len);
252
253 return status;
254}
255
Ajit Khaparde89a88ab2011-05-16 07:36:18 +0000256static void populate_be2_stats(struct be_adapter *adapter)
257{
Sathya Perlaac124ff2011-07-25 19:10:14 +0000258 struct be_hw_stats_v0 *hw_stats = hw_stats_from_cmd(adapter);
259 struct be_pmem_stats *pmem_sts = &hw_stats->pmem;
260 struct be_rxf_stats_v0 *rxf_stats = &hw_stats->rxf;
Ajit Khaparde89a88ab2011-05-16 07:36:18 +0000261 struct be_port_rxf_stats_v0 *port_stats =
Sathya Perlaac124ff2011-07-25 19:10:14 +0000262 &rxf_stats->port[adapter->port_num];
263 struct be_drv_stats *drvs = &adapter->drv_stats;
Ajit Khaparde89a88ab2011-05-16 07:36:18 +0000264
Sathya Perlaac124ff2011-07-25 19:10:14 +0000265 be_dws_le_to_cpu(hw_stats, sizeof(*hw_stats));
Ajit Khaparde89a88ab2011-05-16 07:36:18 +0000266 drvs->rx_pause_frames = port_stats->rx_pause_frames;
267 drvs->rx_crc_errors = port_stats->rx_crc_errors;
268 drvs->rx_control_frames = port_stats->rx_control_frames;
269 drvs->rx_in_range_errors = port_stats->rx_in_range_errors;
270 drvs->rx_frame_too_long = port_stats->rx_frame_too_long;
271 drvs->rx_dropped_runt = port_stats->rx_dropped_runt;
272 drvs->rx_ip_checksum_errs = port_stats->rx_ip_checksum_errs;
273 drvs->rx_tcp_checksum_errs = port_stats->rx_tcp_checksum_errs;
274 drvs->rx_udp_checksum_errs = port_stats->rx_udp_checksum_errs;
275 drvs->rxpp_fifo_overflow_drop = port_stats->rx_fifo_overflow;
276 drvs->rx_dropped_tcp_length = port_stats->rx_dropped_tcp_length;
277 drvs->rx_dropped_too_small = port_stats->rx_dropped_too_small;
278 drvs->rx_dropped_too_short = port_stats->rx_dropped_too_short;
279 drvs->rx_out_range_errors = port_stats->rx_out_range_errors;
Sathya Perlaac124ff2011-07-25 19:10:14 +0000280 drvs->rx_input_fifo_overflow_drop = port_stats->rx_input_fifo_overflow;
Ajit Khaparde89a88ab2011-05-16 07:36:18 +0000281 drvs->rx_dropped_header_too_small =
282 port_stats->rx_dropped_header_too_small;
Sathya Perlaac124ff2011-07-25 19:10:14 +0000283 drvs->rx_address_match_errors = port_stats->rx_address_match_errors;
Ajit Khaparde89a88ab2011-05-16 07:36:18 +0000284 drvs->rx_alignment_symbol_errors =
285 port_stats->rx_alignment_symbol_errors;
286
287 drvs->tx_pauseframes = port_stats->tx_pauseframes;
288 drvs->tx_controlframes = port_stats->tx_controlframes;
289
290 if (adapter->port_num)
Sathya Perlaac124ff2011-07-25 19:10:14 +0000291 drvs->jabber_events = rxf_stats->port1_jabber_events;
Ajit Khaparde89a88ab2011-05-16 07:36:18 +0000292 else
Sathya Perlaac124ff2011-07-25 19:10:14 +0000293 drvs->jabber_events = rxf_stats->port0_jabber_events;
Ajit Khaparde89a88ab2011-05-16 07:36:18 +0000294 drvs->rx_drops_no_pbuf = rxf_stats->rx_drops_no_pbuf;
295 drvs->rx_drops_no_txpb = rxf_stats->rx_drops_no_txpb;
296 drvs->rx_drops_no_erx_descr = rxf_stats->rx_drops_no_erx_descr;
297 drvs->rx_drops_invalid_ring = rxf_stats->rx_drops_invalid_ring;
298 drvs->forwarded_packets = rxf_stats->forwarded_packets;
299 drvs->rx_drops_mtu = rxf_stats->rx_drops_mtu;
Sathya Perlaac124ff2011-07-25 19:10:14 +0000300 drvs->rx_drops_no_tpre_descr = rxf_stats->rx_drops_no_tpre_descr;
301 drvs->rx_drops_too_many_frags = rxf_stats->rx_drops_too_many_frags;
Ajit Khaparde89a88ab2011-05-16 07:36:18 +0000302 adapter->drv_stats.eth_red_drops = pmem_sts->eth_red_drops;
303}
304
305static void populate_be3_stats(struct be_adapter *adapter)
306{
Sathya Perlaac124ff2011-07-25 19:10:14 +0000307 struct be_hw_stats_v1 *hw_stats = hw_stats_from_cmd(adapter);
308 struct be_pmem_stats *pmem_sts = &hw_stats->pmem;
309 struct be_rxf_stats_v1 *rxf_stats = &hw_stats->rxf;
Ajit Khaparde89a88ab2011-05-16 07:36:18 +0000310 struct be_port_rxf_stats_v1 *port_stats =
Sathya Perlaac124ff2011-07-25 19:10:14 +0000311 &rxf_stats->port[adapter->port_num];
312 struct be_drv_stats *drvs = &adapter->drv_stats;
Ajit Khaparde89a88ab2011-05-16 07:36:18 +0000313
Sathya Perlaac124ff2011-07-25 19:10:14 +0000314 be_dws_le_to_cpu(hw_stats, sizeof(*hw_stats));
Ajit Khaparde89a88ab2011-05-16 07:36:18 +0000315 drvs->rx_pause_frames = port_stats->rx_pause_frames;
316 drvs->rx_crc_errors = port_stats->rx_crc_errors;
317 drvs->rx_control_frames = port_stats->rx_control_frames;
318 drvs->rx_in_range_errors = port_stats->rx_in_range_errors;
319 drvs->rx_frame_too_long = port_stats->rx_frame_too_long;
320 drvs->rx_dropped_runt = port_stats->rx_dropped_runt;
321 drvs->rx_ip_checksum_errs = port_stats->rx_ip_checksum_errs;
322 drvs->rx_tcp_checksum_errs = port_stats->rx_tcp_checksum_errs;
323 drvs->rx_udp_checksum_errs = port_stats->rx_udp_checksum_errs;
324 drvs->rx_dropped_tcp_length = port_stats->rx_dropped_tcp_length;
325 drvs->rx_dropped_too_small = port_stats->rx_dropped_too_small;
326 drvs->rx_dropped_too_short = port_stats->rx_dropped_too_short;
327 drvs->rx_out_range_errors = port_stats->rx_out_range_errors;
328 drvs->rx_dropped_header_too_small =
329 port_stats->rx_dropped_header_too_small;
330 drvs->rx_input_fifo_overflow_drop =
331 port_stats->rx_input_fifo_overflow_drop;
Sathya Perlaac124ff2011-07-25 19:10:14 +0000332 drvs->rx_address_match_errors = port_stats->rx_address_match_errors;
Ajit Khaparde89a88ab2011-05-16 07:36:18 +0000333 drvs->rx_alignment_symbol_errors =
334 port_stats->rx_alignment_symbol_errors;
Sathya Perlaac124ff2011-07-25 19:10:14 +0000335 drvs->rxpp_fifo_overflow_drop = port_stats->rxpp_fifo_overflow_drop;
Ajit Khaparde89a88ab2011-05-16 07:36:18 +0000336 drvs->tx_pauseframes = port_stats->tx_pauseframes;
337 drvs->tx_controlframes = port_stats->tx_controlframes;
338 drvs->jabber_events = port_stats->jabber_events;
339 drvs->rx_drops_no_pbuf = rxf_stats->rx_drops_no_pbuf;
340 drvs->rx_drops_no_txpb = rxf_stats->rx_drops_no_txpb;
341 drvs->rx_drops_no_erx_descr = rxf_stats->rx_drops_no_erx_descr;
342 drvs->rx_drops_invalid_ring = rxf_stats->rx_drops_invalid_ring;
343 drvs->forwarded_packets = rxf_stats->forwarded_packets;
344 drvs->rx_drops_mtu = rxf_stats->rx_drops_mtu;
Sathya Perlaac124ff2011-07-25 19:10:14 +0000345 drvs->rx_drops_no_tpre_descr = rxf_stats->rx_drops_no_tpre_descr;
346 drvs->rx_drops_too_many_frags = rxf_stats->rx_drops_too_many_frags;
Ajit Khaparde89a88ab2011-05-16 07:36:18 +0000347 adapter->drv_stats.eth_red_drops = pmem_sts->eth_red_drops;
348}
349
Selvin Xavier005d5692011-05-16 07:36:35 +0000350static void populate_lancer_stats(struct be_adapter *adapter)
351{
Ajit Khaparde89a88ab2011-05-16 07:36:18 +0000352
Selvin Xavier005d5692011-05-16 07:36:35 +0000353 struct be_drv_stats *drvs = &adapter->drv_stats;
Sathya Perlaac124ff2011-07-25 19:10:14 +0000354 struct lancer_pport_stats *pport_stats =
355 pport_stats_from_cmd(adapter);
356
357 be_dws_le_to_cpu(pport_stats, sizeof(*pport_stats));
358 drvs->rx_pause_frames = pport_stats->rx_pause_frames_lo;
359 drvs->rx_crc_errors = pport_stats->rx_crc_errors_lo;
360 drvs->rx_control_frames = pport_stats->rx_control_frames_lo;
Selvin Xavier005d5692011-05-16 07:36:35 +0000361 drvs->rx_in_range_errors = pport_stats->rx_in_range_errors;
Sathya Perlaac124ff2011-07-25 19:10:14 +0000362 drvs->rx_frame_too_long = pport_stats->rx_frames_too_long_lo;
Selvin Xavier005d5692011-05-16 07:36:35 +0000363 drvs->rx_dropped_runt = pport_stats->rx_dropped_runt;
364 drvs->rx_ip_checksum_errs = pport_stats->rx_ip_checksum_errors;
365 drvs->rx_tcp_checksum_errs = pport_stats->rx_tcp_checksum_errors;
366 drvs->rx_udp_checksum_errs = pport_stats->rx_udp_checksum_errors;
367 drvs->rx_dropped_tcp_length =
368 pport_stats->rx_dropped_invalid_tcp_length;
369 drvs->rx_dropped_too_small = pport_stats->rx_dropped_too_small;
370 drvs->rx_dropped_too_short = pport_stats->rx_dropped_too_short;
371 drvs->rx_out_range_errors = pport_stats->rx_out_of_range_errors;
372 drvs->rx_dropped_header_too_small =
373 pport_stats->rx_dropped_header_too_small;
374 drvs->rx_input_fifo_overflow_drop = pport_stats->rx_fifo_overflow;
375 drvs->rx_address_match_errors = pport_stats->rx_address_match_errors;
Sathya Perlaac124ff2011-07-25 19:10:14 +0000376 drvs->rx_alignment_symbol_errors = pport_stats->rx_symbol_errors_lo;
Selvin Xavier005d5692011-05-16 07:36:35 +0000377 drvs->rxpp_fifo_overflow_drop = pport_stats->rx_fifo_overflow;
Sathya Perlaac124ff2011-07-25 19:10:14 +0000378 drvs->tx_pauseframes = pport_stats->tx_pause_frames_lo;
379 drvs->tx_controlframes = pport_stats->tx_control_frames_lo;
Selvin Xavier005d5692011-05-16 07:36:35 +0000380 drvs->jabber_events = pport_stats->rx_jabbers;
Selvin Xavier005d5692011-05-16 07:36:35 +0000381 drvs->rx_drops_invalid_ring = pport_stats->rx_drops_invalid_queue;
Sathya Perlaac124ff2011-07-25 19:10:14 +0000382 drvs->forwarded_packets = pport_stats->num_forwards_lo;
383 drvs->rx_drops_mtu = pport_stats->rx_drops_mtu_lo;
Selvin Xavier005d5692011-05-16 07:36:35 +0000384 drvs->rx_drops_too_many_frags =
Sathya Perlaac124ff2011-07-25 19:10:14 +0000385 pport_stats->rx_drops_too_many_frags_lo;
Selvin Xavier005d5692011-05-16 07:36:35 +0000386}
Ajit Khaparde89a88ab2011-05-16 07:36:18 +0000387
Sathya Perla09c1c682011-08-22 19:41:53 +0000388static void accumulate_16bit_val(u32 *acc, u16 val)
389{
390#define lo(x) (x & 0xFFFF)
391#define hi(x) (x & 0xFFFF0000)
392 bool wrapped = val < lo(*acc);
393 u32 newacc = hi(*acc) + val;
394
395 if (wrapped)
396 newacc += 65536;
397 ACCESS_ONCE(*acc) = newacc;
398}
399
Ajit Khaparde89a88ab2011-05-16 07:36:18 +0000400void be_parse_stats(struct be_adapter *adapter)
401{
Sathya Perlaac124ff2011-07-25 19:10:14 +0000402 struct be_erx_stats_v1 *erx = be_erx_stats_from_cmd(adapter);
403 struct be_rx_obj *rxo;
404 int i;
405
Selvin Xavier005d5692011-05-16 07:36:35 +0000406 if (adapter->generation == BE_GEN3) {
407 if (lancer_chip(adapter))
408 populate_lancer_stats(adapter);
409 else
410 populate_be3_stats(adapter);
411 } else {
Ajit Khaparde89a88ab2011-05-16 07:36:18 +0000412 populate_be2_stats(adapter);
Selvin Xavier005d5692011-05-16 07:36:35 +0000413 }
Sathya Perlaac124ff2011-07-25 19:10:14 +0000414
415 /* as erx_v1 is longer than v0, ok to use v1 defn for v0 access */
Sathya Perla09c1c682011-08-22 19:41:53 +0000416 for_all_rx_queues(adapter, rxo, i) {
417 /* below erx HW counter can actually wrap around after
418 * 65535. Driver accumulates a 32-bit value
419 */
420 accumulate_16bit_val(&rx_stats(rxo)->rx_drops_no_frags,
421 (u16)erx->rx_drops_no_fragments[rxo->q.id]);
422 }
Ajit Khaparde89a88ab2011-05-16 07:36:18 +0000423}
424
Sathya Perlaab1594e2011-07-25 19:10:15 +0000425static struct rtnl_link_stats64 *be_get_stats64(struct net_device *netdev,
426 struct rtnl_link_stats64 *stats)
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700427{
Sathya Perlaab1594e2011-07-25 19:10:15 +0000428 struct be_adapter *adapter = netdev_priv(netdev);
Ajit Khaparde89a88ab2011-05-16 07:36:18 +0000429 struct be_drv_stats *drvs = &adapter->drv_stats;
Sathya Perla3abcded2010-10-03 22:12:27 -0700430 struct be_rx_obj *rxo;
Sathya Perla3c8def92011-06-12 20:01:58 +0000431 struct be_tx_obj *txo;
Sathya Perlaab1594e2011-07-25 19:10:15 +0000432 u64 pkts, bytes;
433 unsigned int start;
Sathya Perla3abcded2010-10-03 22:12:27 -0700434 int i;
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700435
Sathya Perla3abcded2010-10-03 22:12:27 -0700436 for_all_rx_queues(adapter, rxo, i) {
Sathya Perlaab1594e2011-07-25 19:10:15 +0000437 const struct be_rx_stats *rx_stats = rx_stats(rxo);
438 do {
439 start = u64_stats_fetch_begin_bh(&rx_stats->sync);
440 pkts = rx_stats(rxo)->rx_pkts;
441 bytes = rx_stats(rxo)->rx_bytes;
442 } while (u64_stats_fetch_retry_bh(&rx_stats->sync, start));
443 stats->rx_packets += pkts;
444 stats->rx_bytes += bytes;
445 stats->multicast += rx_stats(rxo)->rx_mcast_pkts;
446 stats->rx_dropped += rx_stats(rxo)->rx_drops_no_skbs +
447 rx_stats(rxo)->rx_drops_no_frags;
Sathya Perla3abcded2010-10-03 22:12:27 -0700448 }
449
Sathya Perla3c8def92011-06-12 20:01:58 +0000450 for_all_tx_queues(adapter, txo, i) {
Sathya Perlaab1594e2011-07-25 19:10:15 +0000451 const struct be_tx_stats *tx_stats = tx_stats(txo);
452 do {
453 start = u64_stats_fetch_begin_bh(&tx_stats->sync);
454 pkts = tx_stats(txo)->tx_pkts;
455 bytes = tx_stats(txo)->tx_bytes;
456 } while (u64_stats_fetch_retry_bh(&tx_stats->sync, start));
457 stats->tx_packets += pkts;
458 stats->tx_bytes += bytes;
Sathya Perla3c8def92011-06-12 20:01:58 +0000459 }
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700460
461 /* bad pkts received */
Sathya Perlaab1594e2011-07-25 19:10:15 +0000462 stats->rx_errors = drvs->rx_crc_errors +
Ajit Khaparde89a88ab2011-05-16 07:36:18 +0000463 drvs->rx_alignment_symbol_errors +
464 drvs->rx_in_range_errors +
465 drvs->rx_out_range_errors +
466 drvs->rx_frame_too_long +
467 drvs->rx_dropped_too_small +
468 drvs->rx_dropped_too_short +
469 drvs->rx_dropped_header_too_small +
470 drvs->rx_dropped_tcp_length +
Sathya Perlaab1594e2011-07-25 19:10:15 +0000471 drvs->rx_dropped_runt;
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700472
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700473 /* detailed rx errors */
Sathya Perlaab1594e2011-07-25 19:10:15 +0000474 stats->rx_length_errors = drvs->rx_in_range_errors +
Ajit Khaparde89a88ab2011-05-16 07:36:18 +0000475 drvs->rx_out_range_errors +
476 drvs->rx_frame_too_long;
Sathya Perla68110862009-06-10 02:21:16 +0000477
Sathya Perlaab1594e2011-07-25 19:10:15 +0000478 stats->rx_crc_errors = drvs->rx_crc_errors;
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700479
480 /* frame alignment errors */
Sathya Perlaab1594e2011-07-25 19:10:15 +0000481 stats->rx_frame_errors = drvs->rx_alignment_symbol_errors;
Sathya Perla68110862009-06-10 02:21:16 +0000482
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700483 /* receiver fifo overrun */
484 /* drops_no_pbuf is no per i/f, it's per BE card */
Sathya Perlaab1594e2011-07-25 19:10:15 +0000485 stats->rx_fifo_errors = drvs->rxpp_fifo_overflow_drop +
Ajit Khaparde89a88ab2011-05-16 07:36:18 +0000486 drvs->rx_input_fifo_overflow_drop +
487 drvs->rx_drops_no_pbuf;
Sathya Perlaab1594e2011-07-25 19:10:15 +0000488 return stats;
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700489}
490
Sathya Perlaea172a02011-08-02 19:57:42 +0000491void be_link_status_update(struct be_adapter *adapter, u32 link_status)
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700492{
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700493 struct net_device *netdev = adapter->netdev;
494
Sathya Perlaea172a02011-08-02 19:57:42 +0000495 /* when link status changes, link speed must be re-queried from card */
496 adapter->link_speed = -1;
497 if ((link_status & LINK_STATUS_MASK) == LINK_UP) {
498 netif_carrier_on(netdev);
499 dev_info(&adapter->pdev->dev, "%s: Link up\n", netdev->name);
500 } else {
501 netif_carrier_off(netdev);
502 dev_info(&adapter->pdev->dev, "%s: Link down\n", netdev->name);
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700503 }
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700504}
505
Sathya Perla3c8def92011-06-12 20:01:58 +0000506static void be_tx_stats_update(struct be_tx_obj *txo,
Ajit Khaparde91992e42010-02-19 13:57:12 +0000507 u32 wrb_cnt, u32 copied, u32 gso_segs, bool stopped)
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700508{
Sathya Perla3c8def92011-06-12 20:01:58 +0000509 struct be_tx_stats *stats = tx_stats(txo);
510
Sathya Perlaab1594e2011-07-25 19:10:15 +0000511 u64_stats_update_begin(&stats->sync);
Sathya Perlaac124ff2011-07-25 19:10:14 +0000512 stats->tx_reqs++;
513 stats->tx_wrbs += wrb_cnt;
514 stats->tx_bytes += copied;
515 stats->tx_pkts += (gso_segs ? gso_segs : 1);
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700516 if (stopped)
Sathya Perlaac124ff2011-07-25 19:10:14 +0000517 stats->tx_stops++;
Sathya Perlaab1594e2011-07-25 19:10:15 +0000518 u64_stats_update_end(&stats->sync);
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700519}
520
521/* Determine number of WRB entries needed to xmit data in an skb */
Sathya Perlafe6d2a32010-11-21 23:25:50 +0000522static u32 wrb_cnt_for_skb(struct be_adapter *adapter, struct sk_buff *skb,
523 bool *dummy)
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700524{
David S. Millerebc8d2a2009-06-09 01:01:31 -0700525 int cnt = (skb->len > skb->data_len);
526
527 cnt += skb_shinfo(skb)->nr_frags;
528
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700529 /* to account for hdr wrb */
530 cnt++;
Sathya Perlafe6d2a32010-11-21 23:25:50 +0000531 if (lancer_chip(adapter) || !(cnt & 1)) {
532 *dummy = false;
533 } else {
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700534 /* add a dummy to make it an even num */
535 cnt++;
536 *dummy = true;
Sathya Perlafe6d2a32010-11-21 23:25:50 +0000537 }
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700538 BUG_ON(cnt > BE_MAX_TX_FRAG_COUNT);
539 return cnt;
540}
541
542static inline void wrb_fill(struct be_eth_wrb *wrb, u64 addr, int len)
543{
544 wrb->frag_pa_hi = upper_32_bits(addr);
545 wrb->frag_pa_lo = addr & 0xFFFFFFFF;
546 wrb->frag_len = len & ETH_WRB_FRAG_LEN_MASK;
547}
548
Somnath Koturcc4ce022010-10-21 07:11:14 -0700549static void wrb_fill_hdr(struct be_adapter *adapter, struct be_eth_hdr_wrb *hdr,
550 struct sk_buff *skb, u32 wrb_cnt, u32 len)
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700551{
Somnath Koturcc4ce022010-10-21 07:11:14 -0700552 u8 vlan_prio = 0;
553 u16 vlan_tag = 0;
554
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700555 memset(hdr, 0, sizeof(*hdr));
556
557 AMAP_SET_BITS(struct amap_eth_hdr_wrb, crc, hdr, 1);
558
Ajit Khaparde49e4b8472010-06-14 04:56:07 +0000559 if (skb_is_gso(skb)) {
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700560 AMAP_SET_BITS(struct amap_eth_hdr_wrb, lso, hdr, 1);
561 AMAP_SET_BITS(struct amap_eth_hdr_wrb, lso_mss,
562 hdr, skb_shinfo(skb)->gso_size);
Sathya Perlafe6d2a32010-11-21 23:25:50 +0000563 if (skb_is_gso_v6(skb) && !lancer_chip(adapter))
Ajit Khaparde49e4b8472010-06-14 04:56:07 +0000564 AMAP_SET_BITS(struct amap_eth_hdr_wrb, lso6, hdr, 1);
Sathya Perlafe6d2a32010-11-21 23:25:50 +0000565 if (lancer_chip(adapter) && adapter->sli_family ==
566 LANCER_A0_SLI_FAMILY) {
567 AMAP_SET_BITS(struct amap_eth_hdr_wrb, ipcs, hdr, 1);
568 if (is_tcp_pkt(skb))
569 AMAP_SET_BITS(struct amap_eth_hdr_wrb,
570 tcpcs, hdr, 1);
571 else if (is_udp_pkt(skb))
572 AMAP_SET_BITS(struct amap_eth_hdr_wrb,
573 udpcs, hdr, 1);
574 }
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700575 } else if (skb->ip_summed == CHECKSUM_PARTIAL) {
576 if (is_tcp_pkt(skb))
577 AMAP_SET_BITS(struct amap_eth_hdr_wrb, tcpcs, hdr, 1);
578 else if (is_udp_pkt(skb))
579 AMAP_SET_BITS(struct amap_eth_hdr_wrb, udpcs, hdr, 1);
580 }
581
Ajit Khaparde4c5102f2011-07-12 22:10:01 -0700582 if (vlan_tx_tag_present(skb)) {
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700583 AMAP_SET_BITS(struct amap_eth_hdr_wrb, vlan, hdr, 1);
Somnath Koturcc4ce022010-10-21 07:11:14 -0700584 vlan_tag = vlan_tx_tag_get(skb);
585 vlan_prio = (vlan_tag & VLAN_PRIO_MASK) >> VLAN_PRIO_SHIFT;
586 /* If vlan priority provided by OS is NOT in available bmap */
587 if (!(adapter->vlan_prio_bmap & (1 << vlan_prio)))
588 vlan_tag = (vlan_tag & ~VLAN_PRIO_MASK) |
589 adapter->recommended_prio;
590 AMAP_SET_BITS(struct amap_eth_hdr_wrb, vlan_tag, hdr, vlan_tag);
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700591 }
592
593 AMAP_SET_BITS(struct amap_eth_hdr_wrb, event, hdr, 1);
594 AMAP_SET_BITS(struct amap_eth_hdr_wrb, complete, hdr, 1);
595 AMAP_SET_BITS(struct amap_eth_hdr_wrb, num_wrb, hdr, wrb_cnt);
596 AMAP_SET_BITS(struct amap_eth_hdr_wrb, len, hdr, len);
597}
598
Ivan Vecera2b7bceb2011-02-02 08:05:12 +0000599static void unmap_tx_frag(struct device *dev, struct be_eth_wrb *wrb,
Sathya Perla7101e112010-03-22 20:41:12 +0000600 bool unmap_single)
601{
602 dma_addr_t dma;
603
604 be_dws_le_to_cpu(wrb, sizeof(*wrb));
605
606 dma = (u64)wrb->frag_pa_hi << 32 | (u64)wrb->frag_pa_lo;
FUJITA Tomonorib681ee72010-04-04 21:40:18 +0000607 if (wrb->frag_len) {
Sathya Perla7101e112010-03-22 20:41:12 +0000608 if (unmap_single)
Ivan Vecera2b7bceb2011-02-02 08:05:12 +0000609 dma_unmap_single(dev, dma, wrb->frag_len,
610 DMA_TO_DEVICE);
Sathya Perla7101e112010-03-22 20:41:12 +0000611 else
Ivan Vecera2b7bceb2011-02-02 08:05:12 +0000612 dma_unmap_page(dev, dma, wrb->frag_len, DMA_TO_DEVICE);
Sathya Perla7101e112010-03-22 20:41:12 +0000613 }
614}
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700615
Sathya Perla3c8def92011-06-12 20:01:58 +0000616static int make_tx_wrbs(struct be_adapter *adapter, struct be_queue_info *txq,
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700617 struct sk_buff *skb, u32 wrb_cnt, bool dummy_wrb)
618{
Sathya Perla7101e112010-03-22 20:41:12 +0000619 dma_addr_t busaddr;
620 int i, copied = 0;
Ivan Vecera2b7bceb2011-02-02 08:05:12 +0000621 struct device *dev = &adapter->pdev->dev;
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700622 struct sk_buff *first_skb = skb;
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700623 struct be_eth_wrb *wrb;
624 struct be_eth_hdr_wrb *hdr;
Sathya Perla7101e112010-03-22 20:41:12 +0000625 bool map_single = false;
626 u16 map_head;
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700627
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700628 hdr = queue_head_node(txq);
629 queue_head_inc(txq);
Sathya Perla7101e112010-03-22 20:41:12 +0000630 map_head = txq->head;
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700631
David S. Millerebc8d2a2009-06-09 01:01:31 -0700632 if (skb->len > skb->data_len) {
Eric Dumazete743d312010-04-14 15:59:40 -0700633 int len = skb_headlen(skb);
Ivan Vecera2b7bceb2011-02-02 08:05:12 +0000634 busaddr = dma_map_single(dev, skb->data, len, DMA_TO_DEVICE);
635 if (dma_mapping_error(dev, busaddr))
Sathya Perla7101e112010-03-22 20:41:12 +0000636 goto dma_err;
637 map_single = true;
David S. Millerebc8d2a2009-06-09 01:01:31 -0700638 wrb = queue_head_node(txq);
639 wrb_fill(wrb, busaddr, len);
640 be_dws_cpu_to_le(wrb, sizeof(*wrb));
641 queue_head_inc(txq);
642 copied += len;
643 }
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700644
David S. Millerebc8d2a2009-06-09 01:01:31 -0700645 for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
Eric Dumazet9e903e02011-10-18 21:00:24 +0000646 const struct skb_frag_struct *frag =
David S. Millerebc8d2a2009-06-09 01:01:31 -0700647 &skb_shinfo(skb)->frags[i];
Ian Campbellb061b392011-08-29 23:18:23 +0000648 busaddr = skb_frag_dma_map(dev, frag, 0,
Eric Dumazet9e903e02011-10-18 21:00:24 +0000649 skb_frag_size(frag), DMA_TO_DEVICE);
Ivan Vecera2b7bceb2011-02-02 08:05:12 +0000650 if (dma_mapping_error(dev, busaddr))
Sathya Perla7101e112010-03-22 20:41:12 +0000651 goto dma_err;
David S. Millerebc8d2a2009-06-09 01:01:31 -0700652 wrb = queue_head_node(txq);
Eric Dumazet9e903e02011-10-18 21:00:24 +0000653 wrb_fill(wrb, busaddr, skb_frag_size(frag));
David S. Millerebc8d2a2009-06-09 01:01:31 -0700654 be_dws_cpu_to_le(wrb, sizeof(*wrb));
655 queue_head_inc(txq);
Eric Dumazet9e903e02011-10-18 21:00:24 +0000656 copied += skb_frag_size(frag);
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700657 }
658
659 if (dummy_wrb) {
660 wrb = queue_head_node(txq);
661 wrb_fill(wrb, 0, 0);
662 be_dws_cpu_to_le(wrb, sizeof(*wrb));
663 queue_head_inc(txq);
664 }
665
Somnath Koturcc4ce022010-10-21 07:11:14 -0700666 wrb_fill_hdr(adapter, hdr, first_skb, wrb_cnt, copied);
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700667 be_dws_cpu_to_le(hdr, sizeof(*hdr));
668
669 return copied;
Sathya Perla7101e112010-03-22 20:41:12 +0000670dma_err:
671 txq->head = map_head;
672 while (copied) {
673 wrb = queue_head_node(txq);
Ivan Vecera2b7bceb2011-02-02 08:05:12 +0000674 unmap_tx_frag(dev, wrb, map_single);
Sathya Perla7101e112010-03-22 20:41:12 +0000675 map_single = false;
676 copied -= wrb->frag_len;
677 queue_head_inc(txq);
678 }
679 return 0;
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700680}
681
Stephen Hemminger613573252009-08-31 19:50:58 +0000682static netdev_tx_t be_xmit(struct sk_buff *skb,
Sathya Perlab31c50a2009-09-17 10:30:13 -0700683 struct net_device *netdev)
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700684{
685 struct be_adapter *adapter = netdev_priv(netdev);
Sathya Perla3c8def92011-06-12 20:01:58 +0000686 struct be_tx_obj *txo = &adapter->tx_obj[skb_get_queue_mapping(skb)];
687 struct be_queue_info *txq = &txo->q;
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700688 u32 wrb_cnt = 0, copied = 0;
689 u32 start = txq->head;
690 bool dummy_wrb, stopped = false;
691
Sathya Perlafe6d2a32010-11-21 23:25:50 +0000692 wrb_cnt = wrb_cnt_for_skb(adapter, skb, &dummy_wrb);
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700693
Sathya Perla3c8def92011-06-12 20:01:58 +0000694 copied = make_tx_wrbs(adapter, txq, skb, wrb_cnt, dummy_wrb);
Ajit Khapardec190e3c2009-09-04 03:12:29 +0000695 if (copied) {
696 /* record the sent skb in the sent_skb table */
Sathya Perla3c8def92011-06-12 20:01:58 +0000697 BUG_ON(txo->sent_skb_list[start]);
698 txo->sent_skb_list[start] = skb;
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700699
Ajit Khapardec190e3c2009-09-04 03:12:29 +0000700 /* Ensure txq has space for the next skb; Else stop the queue
701 * *BEFORE* ringing the tx doorbell, so that we serialze the
702 * tx compls of the current transmit which'll wake up the queue
703 */
Sathya Perla7101e112010-03-22 20:41:12 +0000704 atomic_add(wrb_cnt, &txq->used);
Ajit Khapardec190e3c2009-09-04 03:12:29 +0000705 if ((BE_MAX_TX_FRAG_COUNT + atomic_read(&txq->used)) >=
706 txq->len) {
Sathya Perla3c8def92011-06-12 20:01:58 +0000707 netif_stop_subqueue(netdev, skb_get_queue_mapping(skb));
Ajit Khapardec190e3c2009-09-04 03:12:29 +0000708 stopped = true;
709 }
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700710
Ajit Khapardec190e3c2009-09-04 03:12:29 +0000711 be_txq_notify(adapter, txq->id, wrb_cnt);
712
Sathya Perla3c8def92011-06-12 20:01:58 +0000713 be_tx_stats_update(txo, wrb_cnt, copied,
Ajit Khaparde91992e42010-02-19 13:57:12 +0000714 skb_shinfo(skb)->gso_segs, stopped);
Ajit Khapardec190e3c2009-09-04 03:12:29 +0000715 } else {
716 txq->head = start;
717 dev_kfree_skb_any(skb);
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700718 }
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700719 return NETDEV_TX_OK;
720}
721
722static int be_change_mtu(struct net_device *netdev, int new_mtu)
723{
724 struct be_adapter *adapter = netdev_priv(netdev);
725 if (new_mtu < BE_MIN_MTU ||
Ajit Khaparde34a89b82010-02-09 01:32:43 +0000726 new_mtu > (BE_MAX_JUMBO_FRAME_SIZE -
727 (ETH_HLEN + ETH_FCS_LEN))) {
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700728 dev_info(&adapter->pdev->dev,
729 "MTU must be between %d and %d bytes\n",
Ajit Khaparde34a89b82010-02-09 01:32:43 +0000730 BE_MIN_MTU,
731 (BE_MAX_JUMBO_FRAME_SIZE - (ETH_HLEN + ETH_FCS_LEN)));
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700732 return -EINVAL;
733 }
734 dev_info(&adapter->pdev->dev, "MTU changed from %d to %d bytes\n",
735 netdev->mtu, new_mtu);
736 netdev->mtu = new_mtu;
737 return 0;
738}
739
740/*
Ajit Khaparde82903e42010-02-09 01:34:57 +0000741 * A max of 64 (BE_NUM_VLANS_SUPPORTED) vlans can be configured in BE.
742 * If the user configures more, place BE in vlan promiscuous mode.
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700743 */
Ajit Khaparde1da87b72010-07-23 01:51:22 +0000744static int be_vid_config(struct be_adapter *adapter, bool vf, u32 vf_num)
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700745{
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700746 u16 vtag[BE_NUM_VLANS_SUPPORTED];
747 u16 ntags = 0, i;
Ajit Khaparde82903e42010-02-09 01:34:57 +0000748 int status = 0;
Ajit Khaparde1da87b72010-07-23 01:51:22 +0000749 u32 if_handle;
750
751 if (vf) {
752 if_handle = adapter->vf_cfg[vf_num].vf_if_handle;
753 vtag[0] = cpu_to_le16(adapter->vf_cfg[vf_num].vf_vlan_tag);
754 status = be_cmd_vlan_config(adapter, if_handle, vtag, 1, 1, 0);
755 }
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700756
Sathya Perlac0e64ef2011-08-02 19:57:43 +0000757 /* No need to further configure vids if in promiscuous mode */
758 if (adapter->promiscuous)
759 return 0;
760
Ajit Khaparde82903e42010-02-09 01:34:57 +0000761 if (adapter->vlans_added <= adapter->max_vlans) {
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700762 /* Construct VLAN Table to give to HW */
Jesse Grossb7381272010-10-20 13:56:02 +0000763 for (i = 0; i < VLAN_N_VID; i++) {
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700764 if (adapter->vlan_tag[i]) {
765 vtag[ntags] = cpu_to_le16(i);
766 ntags++;
767 }
768 }
Sathya Perlab31c50a2009-09-17 10:30:13 -0700769 status = be_cmd_vlan_config(adapter, adapter->if_handle,
770 vtag, ntags, 1, 0);
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700771 } else {
Sathya Perlab31c50a2009-09-17 10:30:13 -0700772 status = be_cmd_vlan_config(adapter, adapter->if_handle,
773 NULL, 0, 1, 1);
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700774 }
Ajit Khaparde1da87b72010-07-23 01:51:22 +0000775
Sathya Perlab31c50a2009-09-17 10:30:13 -0700776 return status;
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700777}
778
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700779static void be_vlan_add_vid(struct net_device *netdev, u16 vid)
780{
781 struct be_adapter *adapter = netdev_priv(netdev);
782
Ajit Khaparde1da87b72010-07-23 01:51:22 +0000783 adapter->vlans_added++;
Sarveshwar Bandiba343c72010-03-31 02:56:12 +0000784 if (!be_physfn(adapter))
785 return;
786
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700787 adapter->vlan_tag[vid] = 1;
Ajit Khaparde82903e42010-02-09 01:34:57 +0000788 if (adapter->vlans_added <= (adapter->max_vlans + 1))
Ajit Khaparde1da87b72010-07-23 01:51:22 +0000789 be_vid_config(adapter, false, 0);
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700790}
791
792static void be_vlan_rem_vid(struct net_device *netdev, u16 vid)
793{
794 struct be_adapter *adapter = netdev_priv(netdev);
795
Ajit Khaparde1da87b72010-07-23 01:51:22 +0000796 adapter->vlans_added--;
Ajit Khaparde1da87b72010-07-23 01:51:22 +0000797
Sarveshwar Bandiba343c72010-03-31 02:56:12 +0000798 if (!be_physfn(adapter))
799 return;
800
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700801 adapter->vlan_tag[vid] = 0;
Ajit Khaparde82903e42010-02-09 01:34:57 +0000802 if (adapter->vlans_added <= adapter->max_vlans)
Ajit Khaparde1da87b72010-07-23 01:51:22 +0000803 be_vid_config(adapter, false, 0);
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700804}
805
Sathya Perlaa54769f2011-10-24 02:45:00 +0000806static void be_set_rx_mode(struct net_device *netdev)
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700807{
808 struct be_adapter *adapter = netdev_priv(netdev);
809
810 if (netdev->flags & IFF_PROMISC) {
Sathya Perla5b8821b2011-08-02 19:57:44 +0000811 be_cmd_rx_filter(adapter, IFF_PROMISC, ON);
Sathya Perla24307ee2009-06-18 00:09:25 +0000812 adapter->promiscuous = true;
813 goto done;
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700814 }
Sathya Perla24307ee2009-06-18 00:09:25 +0000815
Lucas De Marchi25985ed2011-03-30 22:57:33 -0300816 /* BE was previously in promiscuous mode; disable it */
Sathya Perla24307ee2009-06-18 00:09:25 +0000817 if (adapter->promiscuous) {
818 adapter->promiscuous = false;
Sathya Perla5b8821b2011-08-02 19:57:44 +0000819 be_cmd_rx_filter(adapter, IFF_PROMISC, OFF);
Sathya Perlac0e64ef2011-08-02 19:57:43 +0000820
821 if (adapter->vlans_added)
822 be_vid_config(adapter, false, 0);
Sathya Perla24307ee2009-06-18 00:09:25 +0000823 }
824
Sathya Perlae7b909a2009-11-22 22:01:10 +0000825 /* Enable multicast promisc if num configured exceeds what we support */
Jiri Pirko4cd24ea2010-02-08 04:30:35 +0000826 if (netdev->flags & IFF_ALLMULTI ||
Sathya Perla5b8821b2011-08-02 19:57:44 +0000827 netdev_mc_count(netdev) > BE_MAX_MC) {
828 be_cmd_rx_filter(adapter, IFF_ALLMULTI, ON);
Sathya Perla24307ee2009-06-18 00:09:25 +0000829 goto done;
830 }
831
Sathya Perla5b8821b2011-08-02 19:57:44 +0000832 be_cmd_rx_filter(adapter, IFF_MULTICAST, ON);
Sathya Perla24307ee2009-06-18 00:09:25 +0000833done:
834 return;
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700835}
836
Sarveshwar Bandiba343c72010-03-31 02:56:12 +0000837static int be_set_vf_mac(struct net_device *netdev, int vf, u8 *mac)
838{
839 struct be_adapter *adapter = netdev_priv(netdev);
840 int status;
841
842 if (!adapter->sriov_enabled)
843 return -EPERM;
844
845 if (!is_valid_ether_addr(mac) || (vf >= num_vfs))
846 return -EINVAL;
847
Ajit Khaparde64600ea2010-07-23 01:50:34 +0000848 if (adapter->vf_cfg[vf].vf_pmac_id != BE_INVALID_PMAC_ID)
849 status = be_cmd_pmac_del(adapter,
850 adapter->vf_cfg[vf].vf_if_handle,
Ajit Khapardef8617e02011-02-11 13:36:37 +0000851 adapter->vf_cfg[vf].vf_pmac_id, vf + 1);
Sarveshwar Bandiba343c72010-03-31 02:56:12 +0000852
Ajit Khaparde64600ea2010-07-23 01:50:34 +0000853 status = be_cmd_pmac_add(adapter, mac,
854 adapter->vf_cfg[vf].vf_if_handle,
Ajit Khapardef8617e02011-02-11 13:36:37 +0000855 &adapter->vf_cfg[vf].vf_pmac_id, vf + 1);
Ajit Khaparde64600ea2010-07-23 01:50:34 +0000856
857 if (status)
Sarveshwar Bandiba343c72010-03-31 02:56:12 +0000858 dev_err(&adapter->pdev->dev, "MAC %pM set on VF %d Failed\n",
859 mac, vf);
Ajit Khaparde64600ea2010-07-23 01:50:34 +0000860 else
861 memcpy(adapter->vf_cfg[vf].vf_mac_addr, mac, ETH_ALEN);
862
Sarveshwar Bandiba343c72010-03-31 02:56:12 +0000863 return status;
864}
865
Ajit Khaparde64600ea2010-07-23 01:50:34 +0000866static int be_get_vf_config(struct net_device *netdev, int vf,
867 struct ifla_vf_info *vi)
868{
869 struct be_adapter *adapter = netdev_priv(netdev);
870
871 if (!adapter->sriov_enabled)
872 return -EPERM;
873
874 if (vf >= num_vfs)
875 return -EINVAL;
876
877 vi->vf = vf;
Ajit Khapardee1d18732010-07-23 01:52:13 +0000878 vi->tx_rate = adapter->vf_cfg[vf].vf_tx_rate;
Ajit Khaparde1da87b72010-07-23 01:51:22 +0000879 vi->vlan = adapter->vf_cfg[vf].vf_vlan_tag;
Ajit Khaparde64600ea2010-07-23 01:50:34 +0000880 vi->qos = 0;
881 memcpy(&vi->mac, adapter->vf_cfg[vf].vf_mac_addr, ETH_ALEN);
882
883 return 0;
884}
885
Ajit Khaparde1da87b72010-07-23 01:51:22 +0000886static int be_set_vf_vlan(struct net_device *netdev,
887 int vf, u16 vlan, u8 qos)
888{
889 struct be_adapter *adapter = netdev_priv(netdev);
890 int status = 0;
891
892 if (!adapter->sriov_enabled)
893 return -EPERM;
894
895 if ((vf >= num_vfs) || (vlan > 4095))
896 return -EINVAL;
897
898 if (vlan) {
899 adapter->vf_cfg[vf].vf_vlan_tag = vlan;
900 adapter->vlans_added++;
901 } else {
902 adapter->vf_cfg[vf].vf_vlan_tag = 0;
903 adapter->vlans_added--;
904 }
905
906 status = be_vid_config(adapter, true, vf);
907
908 if (status)
909 dev_info(&adapter->pdev->dev,
910 "VLAN %d config on VF %d failed\n", vlan, vf);
911 return status;
912}
913
Ajit Khapardee1d18732010-07-23 01:52:13 +0000914static int be_set_vf_tx_rate(struct net_device *netdev,
915 int vf, int rate)
916{
917 struct be_adapter *adapter = netdev_priv(netdev);
918 int status = 0;
919
920 if (!adapter->sriov_enabled)
921 return -EPERM;
922
923 if ((vf >= num_vfs) || (rate < 0))
924 return -EINVAL;
925
926 if (rate > 10000)
927 rate = 10000;
928
929 adapter->vf_cfg[vf].vf_tx_rate = rate;
Ajit Khaparde856c4012011-02-11 13:32:32 +0000930 status = be_cmd_set_qos(adapter, rate / 10, vf + 1);
Ajit Khapardee1d18732010-07-23 01:52:13 +0000931
932 if (status)
933 dev_info(&adapter->pdev->dev,
934 "tx rate %d on VF %d failed\n", rate, vf);
935 return status;
936}
937
Sathya Perlaac124ff2011-07-25 19:10:14 +0000938static void be_rx_eqd_update(struct be_adapter *adapter, struct be_rx_obj *rxo)
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700939{
Sathya Perlaac124ff2011-07-25 19:10:14 +0000940 struct be_eq_obj *rx_eq = &rxo->rx_eq;
941 struct be_rx_stats *stats = rx_stats(rxo);
Sathya Perla4097f662009-03-24 16:40:13 -0700942 ulong now = jiffies;
Sathya Perlaac124ff2011-07-25 19:10:14 +0000943 ulong delta = now - stats->rx_jiffies;
Sathya Perlaab1594e2011-07-25 19:10:15 +0000944 u64 pkts;
945 unsigned int start, eqd;
Sathya Perlaac124ff2011-07-25 19:10:14 +0000946
947 if (!rx_eq->enable_aic)
948 return;
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700949
Sathya Perla4097f662009-03-24 16:40:13 -0700950 /* Wrapped around */
Sathya Perla3abcded2010-10-03 22:12:27 -0700951 if (time_before(now, stats->rx_jiffies)) {
952 stats->rx_jiffies = now;
Sathya Perla4097f662009-03-24 16:40:13 -0700953 return;
954 }
955
Sathya Perlaac124ff2011-07-25 19:10:14 +0000956 /* Update once a second */
957 if (delta < HZ)
Sathya Perla4097f662009-03-24 16:40:13 -0700958 return;
959
Sathya Perlaab1594e2011-07-25 19:10:15 +0000960 do {
961 start = u64_stats_fetch_begin_bh(&stats->sync);
962 pkts = stats->rx_pkts;
963 } while (u64_stats_fetch_retry_bh(&stats->sync, start));
964
Eric Dumazet68c3e5a2011-08-09 06:23:07 +0000965 stats->rx_pps = (unsigned long)(pkts - stats->rx_pkts_prev) / (delta / HZ);
Sathya Perlaab1594e2011-07-25 19:10:15 +0000966 stats->rx_pkts_prev = pkts;
Sathya Perla3abcded2010-10-03 22:12:27 -0700967 stats->rx_jiffies = now;
Sathya Perlaac124ff2011-07-25 19:10:14 +0000968 eqd = stats->rx_pps / 110000;
969 eqd = eqd << 3;
970 if (eqd > rx_eq->max_eqd)
971 eqd = rx_eq->max_eqd;
972 if (eqd < rx_eq->min_eqd)
973 eqd = rx_eq->min_eqd;
974 if (eqd < 10)
975 eqd = 0;
976 if (eqd != rx_eq->cur_eqd) {
977 be_cmd_modify_eqd(adapter, rx_eq->q.id, eqd);
978 rx_eq->cur_eqd = eqd;
979 }
Sathya Perla4097f662009-03-24 16:40:13 -0700980}
981
Sathya Perla3abcded2010-10-03 22:12:27 -0700982static void be_rx_stats_update(struct be_rx_obj *rxo,
Sathya Perla2e588f82011-03-11 02:49:26 +0000983 struct be_rx_compl_info *rxcp)
Sathya Perla4097f662009-03-24 16:40:13 -0700984{
Sathya Perlaac124ff2011-07-25 19:10:14 +0000985 struct be_rx_stats *stats = rx_stats(rxo);
Sathya Perla4097f662009-03-24 16:40:13 -0700986
Sathya Perlaab1594e2011-07-25 19:10:15 +0000987 u64_stats_update_begin(&stats->sync);
Sathya Perla3abcded2010-10-03 22:12:27 -0700988 stats->rx_compl++;
Sathya Perla2e588f82011-03-11 02:49:26 +0000989 stats->rx_bytes += rxcp->pkt_size;
Sathya Perla3abcded2010-10-03 22:12:27 -0700990 stats->rx_pkts++;
Sathya Perla2e588f82011-03-11 02:49:26 +0000991 if (rxcp->pkt_type == BE_MULTICAST_PACKET)
Sathya Perla3abcded2010-10-03 22:12:27 -0700992 stats->rx_mcast_pkts++;
Sathya Perla2e588f82011-03-11 02:49:26 +0000993 if (rxcp->err)
Sathya Perlaac124ff2011-07-25 19:10:14 +0000994 stats->rx_compl_err++;
Sathya Perlaab1594e2011-07-25 19:10:15 +0000995 u64_stats_update_end(&stats->sync);
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700996}
997
Sathya Perla2e588f82011-03-11 02:49:26 +0000998static inline bool csum_passed(struct be_rx_compl_info *rxcp)
Ajit Khaparde728a9972009-04-13 15:41:22 -0700999{
Padmanabh Ratnakar19fad862011-03-07 03:08:16 +00001000 /* L4 checksum is not reliable for non TCP/UDP packets.
1001 * Also ignore ipcksm for ipv6 pkts */
Sathya Perla2e588f82011-03-11 02:49:26 +00001002 return (rxcp->tcpf || rxcp->udpf) && rxcp->l4_csum &&
1003 (rxcp->ip_csum || rxcp->ipv6);
Ajit Khaparde728a9972009-04-13 15:41:22 -07001004}
1005
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001006static struct be_rx_page_info *
Sathya Perla3abcded2010-10-03 22:12:27 -07001007get_rx_page_info(struct be_adapter *adapter,
1008 struct be_rx_obj *rxo,
1009 u16 frag_idx)
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001010{
1011 struct be_rx_page_info *rx_page_info;
Sathya Perla3abcded2010-10-03 22:12:27 -07001012 struct be_queue_info *rxq = &rxo->q;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001013
Sathya Perla3abcded2010-10-03 22:12:27 -07001014 rx_page_info = &rxo->page_info_tbl[frag_idx];
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001015 BUG_ON(!rx_page_info->page);
1016
Ajit Khaparde205859a2010-02-09 01:34:21 +00001017 if (rx_page_info->last_page_user) {
Ivan Vecera2b7bceb2011-02-02 08:05:12 +00001018 dma_unmap_page(&adapter->pdev->dev,
1019 dma_unmap_addr(rx_page_info, bus),
1020 adapter->big_page_size, DMA_FROM_DEVICE);
Ajit Khaparde205859a2010-02-09 01:34:21 +00001021 rx_page_info->last_page_user = false;
1022 }
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001023
1024 atomic_dec(&rxq->used);
1025 return rx_page_info;
1026}
1027
1028/* Throwaway the data in the Rx completion */
1029static void be_rx_compl_discard(struct be_adapter *adapter,
Sathya Perla3abcded2010-10-03 22:12:27 -07001030 struct be_rx_obj *rxo,
Sathya Perla2e588f82011-03-11 02:49:26 +00001031 struct be_rx_compl_info *rxcp)
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001032{
Sathya Perla3abcded2010-10-03 22:12:27 -07001033 struct be_queue_info *rxq = &rxo->q;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001034 struct be_rx_page_info *page_info;
Sathya Perla2e588f82011-03-11 02:49:26 +00001035 u16 i, num_rcvd = rxcp->num_rcvd;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001036
Padmanabh Ratnakare80d9da2011-03-07 03:07:58 +00001037 for (i = 0; i < num_rcvd; i++) {
Sathya Perla2e588f82011-03-11 02:49:26 +00001038 page_info = get_rx_page_info(adapter, rxo, rxcp->rxq_idx);
Padmanabh Ratnakare80d9da2011-03-07 03:07:58 +00001039 put_page(page_info->page);
1040 memset(page_info, 0, sizeof(*page_info));
Sathya Perla2e588f82011-03-11 02:49:26 +00001041 index_inc(&rxcp->rxq_idx, rxq->len);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001042 }
1043}
1044
1045/*
1046 * skb_fill_rx_data forms a complete skb for an ether frame
1047 * indicated by rxcp.
1048 */
Sathya Perla3abcded2010-10-03 22:12:27 -07001049static void skb_fill_rx_data(struct be_adapter *adapter, struct be_rx_obj *rxo,
Sathya Perla2e588f82011-03-11 02:49:26 +00001050 struct sk_buff *skb, struct be_rx_compl_info *rxcp)
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001051{
Sathya Perla3abcded2010-10-03 22:12:27 -07001052 struct be_queue_info *rxq = &rxo->q;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001053 struct be_rx_page_info *page_info;
Sathya Perla2e588f82011-03-11 02:49:26 +00001054 u16 i, j;
1055 u16 hdr_len, curr_frag_len, remaining;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001056 u8 *start;
1057
Sathya Perla2e588f82011-03-11 02:49:26 +00001058 page_info = get_rx_page_info(adapter, rxo, rxcp->rxq_idx);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001059 start = page_address(page_info->page) + page_info->page_offset;
1060 prefetch(start);
1061
1062 /* Copy data in the first descriptor of this completion */
Sathya Perla2e588f82011-03-11 02:49:26 +00001063 curr_frag_len = min(rxcp->pkt_size, rx_frag_size);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001064
1065 /* Copy the header portion into skb_data */
Sathya Perla2e588f82011-03-11 02:49:26 +00001066 hdr_len = min(BE_HDR_LEN, curr_frag_len);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001067 memcpy(skb->data, start, hdr_len);
1068 skb->len = curr_frag_len;
1069 if (curr_frag_len <= BE_HDR_LEN) { /* tiny packet */
1070 /* Complete packet has now been moved to data */
1071 put_page(page_info->page);
1072 skb->data_len = 0;
1073 skb->tail += curr_frag_len;
1074 } else {
1075 skb_shinfo(skb)->nr_frags = 1;
Ian Campbellb061b392011-08-29 23:18:23 +00001076 skb_frag_set_page(skb, 0, page_info->page);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001077 skb_shinfo(skb)->frags[0].page_offset =
1078 page_info->page_offset + hdr_len;
Eric Dumazet9e903e02011-10-18 21:00:24 +00001079 skb_frag_size_set(&skb_shinfo(skb)->frags[0], curr_frag_len - hdr_len);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001080 skb->data_len = curr_frag_len - hdr_len;
Eric Dumazetbdb28a92011-10-13 06:31:02 +00001081 skb->truesize += rx_frag_size;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001082 skb->tail += hdr_len;
1083 }
Ajit Khaparde205859a2010-02-09 01:34:21 +00001084 page_info->page = NULL;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001085
Sathya Perla2e588f82011-03-11 02:49:26 +00001086 if (rxcp->pkt_size <= rx_frag_size) {
1087 BUG_ON(rxcp->num_rcvd != 1);
1088 return;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001089 }
1090
1091 /* More frags present for this completion */
Sathya Perla2e588f82011-03-11 02:49:26 +00001092 index_inc(&rxcp->rxq_idx, rxq->len);
1093 remaining = rxcp->pkt_size - curr_frag_len;
1094 for (i = 1, j = 0; i < rxcp->num_rcvd; i++) {
1095 page_info = get_rx_page_info(adapter, rxo, rxcp->rxq_idx);
1096 curr_frag_len = min(remaining, rx_frag_size);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001097
Ajit Khapardebd46cb62009-06-26 02:51:07 +00001098 /* Coalesce all frags from the same physical page in one slot */
1099 if (page_info->page_offset == 0) {
1100 /* Fresh page */
1101 j++;
Ian Campbellb061b392011-08-29 23:18:23 +00001102 skb_frag_set_page(skb, j, page_info->page);
Ajit Khapardebd46cb62009-06-26 02:51:07 +00001103 skb_shinfo(skb)->frags[j].page_offset =
1104 page_info->page_offset;
Eric Dumazet9e903e02011-10-18 21:00:24 +00001105 skb_frag_size_set(&skb_shinfo(skb)->frags[j], 0);
Ajit Khapardebd46cb62009-06-26 02:51:07 +00001106 skb_shinfo(skb)->nr_frags++;
1107 } else {
1108 put_page(page_info->page);
1109 }
1110
Eric Dumazet9e903e02011-10-18 21:00:24 +00001111 skb_frag_size_add(&skb_shinfo(skb)->frags[j], curr_frag_len);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001112 skb->len += curr_frag_len;
1113 skb->data_len += curr_frag_len;
Eric Dumazetbdb28a92011-10-13 06:31:02 +00001114 skb->truesize += rx_frag_size;
Sathya Perla2e588f82011-03-11 02:49:26 +00001115 remaining -= curr_frag_len;
1116 index_inc(&rxcp->rxq_idx, rxq->len);
Ajit Khaparde205859a2010-02-09 01:34:21 +00001117 page_info->page = NULL;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001118 }
Ajit Khapardebd46cb62009-06-26 02:51:07 +00001119 BUG_ON(j > MAX_SKB_FRAGS);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001120}
1121
Ajit Khaparde5be93b92009-07-21 12:36:19 -07001122/* Process the RX completion indicated by rxcp when GRO is disabled */
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001123static void be_rx_compl_process(struct be_adapter *adapter,
Sathya Perla3abcded2010-10-03 22:12:27 -07001124 struct be_rx_obj *rxo,
Sathya Perla2e588f82011-03-11 02:49:26 +00001125 struct be_rx_compl_info *rxcp)
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001126{
Michał Mirosław6332c8d2011-04-07 02:43:48 +00001127 struct net_device *netdev = adapter->netdev;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001128 struct sk_buff *skb;
Sathya Perla89420422010-02-17 01:35:26 +00001129
Michał Mirosław6332c8d2011-04-07 02:43:48 +00001130 skb = netdev_alloc_skb_ip_align(netdev, BE_HDR_LEN);
Sathya Perlaa058a632010-02-17 01:34:22 +00001131 if (unlikely(!skb)) {
Sathya Perlaac124ff2011-07-25 19:10:14 +00001132 rx_stats(rxo)->rx_drops_no_skbs++;
Sathya Perla3abcded2010-10-03 22:12:27 -07001133 be_rx_compl_discard(adapter, rxo, rxcp);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001134 return;
1135 }
1136
Sathya Perla2e588f82011-03-11 02:49:26 +00001137 skb_fill_rx_data(adapter, rxo, skb, rxcp);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001138
Michał Mirosław6332c8d2011-04-07 02:43:48 +00001139 if (likely((netdev->features & NETIF_F_RXCSUM) && csum_passed(rxcp)))
Ajit Khaparde728a9972009-04-13 15:41:22 -07001140 skb->ip_summed = CHECKSUM_UNNECESSARY;
Somnath Koturc6ce2f42010-10-25 01:11:58 +00001141 else
1142 skb_checksum_none_assert(skb);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001143
Michał Mirosław6332c8d2011-04-07 02:43:48 +00001144 skb->protocol = eth_type_trans(skb, netdev);
Ajit Khaparde4b972912011-04-06 18:07:43 +00001145 if (adapter->netdev->features & NETIF_F_RXHASH)
1146 skb->rxhash = rxcp->rss_hash;
1147
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001148
Jiri Pirko343e43c2011-08-25 02:50:51 +00001149 if (rxcp->vlanf)
Ajit Khaparde4c5102f2011-07-12 22:10:01 -07001150 __vlan_hwaccel_put_tag(skb, rxcp->vlan_tag);
1151
1152 netif_receive_skb(skb);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001153}
1154
Ajit Khaparde5be93b92009-07-21 12:36:19 -07001155/* Process the RX completion indicated by rxcp when GRO is enabled */
1156static void be_rx_compl_process_gro(struct be_adapter *adapter,
Sathya Perla3abcded2010-10-03 22:12:27 -07001157 struct be_rx_obj *rxo,
Sathya Perla2e588f82011-03-11 02:49:26 +00001158 struct be_rx_compl_info *rxcp)
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001159{
1160 struct be_rx_page_info *page_info;
Ajit Khaparde5be93b92009-07-21 12:36:19 -07001161 struct sk_buff *skb = NULL;
Sathya Perla3abcded2010-10-03 22:12:27 -07001162 struct be_queue_info *rxq = &rxo->q;
1163 struct be_eq_obj *eq_obj = &rxo->rx_eq;
Sathya Perla2e588f82011-03-11 02:49:26 +00001164 u16 remaining, curr_frag_len;
1165 u16 i, j;
Ajit Khaparde3968fa12011-02-20 11:41:53 +00001166
Ajit Khaparde5be93b92009-07-21 12:36:19 -07001167 skb = napi_get_frags(&eq_obj->napi);
1168 if (!skb) {
Sathya Perla3abcded2010-10-03 22:12:27 -07001169 be_rx_compl_discard(adapter, rxo, rxcp);
Ajit Khaparde5be93b92009-07-21 12:36:19 -07001170 return;
1171 }
1172
Sathya Perla2e588f82011-03-11 02:49:26 +00001173 remaining = rxcp->pkt_size;
1174 for (i = 0, j = -1; i < rxcp->num_rcvd; i++) {
1175 page_info = get_rx_page_info(adapter, rxo, rxcp->rxq_idx);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001176
1177 curr_frag_len = min(remaining, rx_frag_size);
1178
Ajit Khapardebd46cb62009-06-26 02:51:07 +00001179 /* Coalesce all frags from the same physical page in one slot */
1180 if (i == 0 || page_info->page_offset == 0) {
1181 /* First frag or Fresh page */
1182 j++;
Ian Campbellb061b392011-08-29 23:18:23 +00001183 skb_frag_set_page(skb, j, page_info->page);
Ajit Khaparde5be93b92009-07-21 12:36:19 -07001184 skb_shinfo(skb)->frags[j].page_offset =
1185 page_info->page_offset;
Eric Dumazet9e903e02011-10-18 21:00:24 +00001186 skb_frag_size_set(&skb_shinfo(skb)->frags[j], 0);
Ajit Khapardebd46cb62009-06-26 02:51:07 +00001187 } else {
1188 put_page(page_info->page);
1189 }
Eric Dumazet9e903e02011-10-18 21:00:24 +00001190 skb_frag_size_add(&skb_shinfo(skb)->frags[j], curr_frag_len);
Eric Dumazetbdb28a92011-10-13 06:31:02 +00001191 skb->truesize += rx_frag_size;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001192 remaining -= curr_frag_len;
Sathya Perla2e588f82011-03-11 02:49:26 +00001193 index_inc(&rxcp->rxq_idx, rxq->len);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001194 memset(page_info, 0, sizeof(*page_info));
1195 }
Ajit Khapardebd46cb62009-06-26 02:51:07 +00001196 BUG_ON(j > MAX_SKB_FRAGS);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001197
Ajit Khaparde5be93b92009-07-21 12:36:19 -07001198 skb_shinfo(skb)->nr_frags = j + 1;
Sathya Perla2e588f82011-03-11 02:49:26 +00001199 skb->len = rxcp->pkt_size;
1200 skb->data_len = rxcp->pkt_size;
Ajit Khaparde5be93b92009-07-21 12:36:19 -07001201 skb->ip_summed = CHECKSUM_UNNECESSARY;
Ajit Khaparde4b972912011-04-06 18:07:43 +00001202 if (adapter->netdev->features & NETIF_F_RXHASH)
1203 skb->rxhash = rxcp->rss_hash;
Ajit Khaparde5be93b92009-07-21 12:36:19 -07001204
Jiri Pirko343e43c2011-08-25 02:50:51 +00001205 if (rxcp->vlanf)
Ajit Khaparde4c5102f2011-07-12 22:10:01 -07001206 __vlan_hwaccel_put_tag(skb, rxcp->vlan_tag);
1207
1208 napi_gro_frags(&eq_obj->napi);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001209}
1210
Sathya Perla2e588f82011-03-11 02:49:26 +00001211static void be_parse_rx_compl_v1(struct be_adapter *adapter,
1212 struct be_eth_rx_compl *compl,
1213 struct be_rx_compl_info *rxcp)
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001214{
Sathya Perla2e588f82011-03-11 02:49:26 +00001215 rxcp->pkt_size =
1216 AMAP_GET_BITS(struct amap_eth_rx_compl_v1, pktsize, compl);
1217 rxcp->vlanf = AMAP_GET_BITS(struct amap_eth_rx_compl_v1, vtp, compl);
1218 rxcp->err = AMAP_GET_BITS(struct amap_eth_rx_compl_v1, err, compl);
1219 rxcp->tcpf = AMAP_GET_BITS(struct amap_eth_rx_compl_v1, tcpf, compl);
Padmanabh Ratnakar9ecb42f2011-03-15 14:57:09 -07001220 rxcp->udpf = AMAP_GET_BITS(struct amap_eth_rx_compl_v1, udpf, compl);
Sathya Perla2e588f82011-03-11 02:49:26 +00001221 rxcp->ip_csum =
1222 AMAP_GET_BITS(struct amap_eth_rx_compl_v1, ipcksm, compl);
1223 rxcp->l4_csum =
1224 AMAP_GET_BITS(struct amap_eth_rx_compl_v1, l4_cksm, compl);
1225 rxcp->ipv6 =
1226 AMAP_GET_BITS(struct amap_eth_rx_compl_v1, ip_version, compl);
1227 rxcp->rxq_idx =
1228 AMAP_GET_BITS(struct amap_eth_rx_compl_v1, fragndx, compl);
1229 rxcp->num_rcvd =
1230 AMAP_GET_BITS(struct amap_eth_rx_compl_v1, numfrags, compl);
1231 rxcp->pkt_type =
1232 AMAP_GET_BITS(struct amap_eth_rx_compl_v1, cast_enc, compl);
Ajit Khaparde4b972912011-04-06 18:07:43 +00001233 rxcp->rss_hash =
1234 AMAP_GET_BITS(struct amap_eth_rx_compl_v1, rsshash, rxcp);
Sathya Perla15d72182011-03-21 20:49:26 +00001235 if (rxcp->vlanf) {
1236 rxcp->vtm = AMAP_GET_BITS(struct amap_eth_rx_compl_v1, vtm,
David S. Miller3c709f82011-05-11 14:26:15 -04001237 compl);
1238 rxcp->vlan_tag = AMAP_GET_BITS(struct amap_eth_rx_compl_v1, vlan_tag,
1239 compl);
Sathya Perla15d72182011-03-21 20:49:26 +00001240 }
Sathya Perla12004ae2011-08-02 19:57:46 +00001241 rxcp->port = AMAP_GET_BITS(struct amap_eth_rx_compl_v1, port, compl);
Sathya Perla2e588f82011-03-11 02:49:26 +00001242}
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001243
Sathya Perla2e588f82011-03-11 02:49:26 +00001244static void be_parse_rx_compl_v0(struct be_adapter *adapter,
1245 struct be_eth_rx_compl *compl,
1246 struct be_rx_compl_info *rxcp)
1247{
1248 rxcp->pkt_size =
1249 AMAP_GET_BITS(struct amap_eth_rx_compl_v0, pktsize, compl);
1250 rxcp->vlanf = AMAP_GET_BITS(struct amap_eth_rx_compl_v0, vtp, compl);
1251 rxcp->err = AMAP_GET_BITS(struct amap_eth_rx_compl_v0, err, compl);
1252 rxcp->tcpf = AMAP_GET_BITS(struct amap_eth_rx_compl_v0, tcpf, compl);
Padmanabh Ratnakar9ecb42f2011-03-15 14:57:09 -07001253 rxcp->udpf = AMAP_GET_BITS(struct amap_eth_rx_compl_v0, udpf, compl);
Sathya Perla2e588f82011-03-11 02:49:26 +00001254 rxcp->ip_csum =
1255 AMAP_GET_BITS(struct amap_eth_rx_compl_v0, ipcksm, compl);
1256 rxcp->l4_csum =
1257 AMAP_GET_BITS(struct amap_eth_rx_compl_v0, l4_cksm, compl);
1258 rxcp->ipv6 =
1259 AMAP_GET_BITS(struct amap_eth_rx_compl_v0, ip_version, compl);
1260 rxcp->rxq_idx =
1261 AMAP_GET_BITS(struct amap_eth_rx_compl_v0, fragndx, compl);
1262 rxcp->num_rcvd =
1263 AMAP_GET_BITS(struct amap_eth_rx_compl_v0, numfrags, compl);
1264 rxcp->pkt_type =
1265 AMAP_GET_BITS(struct amap_eth_rx_compl_v0, cast_enc, compl);
Ajit Khaparde4b972912011-04-06 18:07:43 +00001266 rxcp->rss_hash =
1267 AMAP_GET_BITS(struct amap_eth_rx_compl_v0, rsshash, rxcp);
Sathya Perla15d72182011-03-21 20:49:26 +00001268 if (rxcp->vlanf) {
1269 rxcp->vtm = AMAP_GET_BITS(struct amap_eth_rx_compl_v0, vtm,
David S. Miller3c709f82011-05-11 14:26:15 -04001270 compl);
1271 rxcp->vlan_tag = AMAP_GET_BITS(struct amap_eth_rx_compl_v0, vlan_tag,
1272 compl);
Sathya Perla15d72182011-03-21 20:49:26 +00001273 }
Sathya Perla12004ae2011-08-02 19:57:46 +00001274 rxcp->port = AMAP_GET_BITS(struct amap_eth_rx_compl_v0, port, compl);
Sathya Perla2e588f82011-03-11 02:49:26 +00001275}
1276
1277static struct be_rx_compl_info *be_rx_compl_get(struct be_rx_obj *rxo)
1278{
1279 struct be_eth_rx_compl *compl = queue_tail_node(&rxo->cq);
1280 struct be_rx_compl_info *rxcp = &rxo->rxcp;
1281 struct be_adapter *adapter = rxo->adapter;
1282
1283 /* For checking the valid bit it is Ok to use either definition as the
1284 * valid bit is at the same position in both v0 and v1 Rx compl */
1285 if (compl->dw[offsetof(struct amap_eth_rx_compl_v1, valid) / 32] == 0)
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001286 return NULL;
1287
Sathya Perlaf3eb62d2010-06-29 00:11:17 +00001288 rmb();
Sathya Perla2e588f82011-03-11 02:49:26 +00001289 be_dws_le_to_cpu(compl, sizeof(*compl));
1290
1291 if (adapter->be3_native)
1292 be_parse_rx_compl_v1(adapter, compl, rxcp);
1293 else
1294 be_parse_rx_compl_v0(adapter, compl, rxcp);
1295
Sathya Perla15d72182011-03-21 20:49:26 +00001296 if (rxcp->vlanf) {
1297 /* vlanf could be wrongly set in some cards.
1298 * ignore if vtm is not set */
Sathya Perla752961a2011-10-24 02:45:03 +00001299 if ((adapter->function_mode & FLEX10_MODE) && !rxcp->vtm)
Sathya Perla15d72182011-03-21 20:49:26 +00001300 rxcp->vlanf = 0;
Sathya Perla2e588f82011-03-11 02:49:26 +00001301
Sathya Perla15d72182011-03-21 20:49:26 +00001302 if (!lancer_chip(adapter))
David S. Miller3c709f82011-05-11 14:26:15 -04001303 rxcp->vlan_tag = swab16(rxcp->vlan_tag);
Sathya Perla2e588f82011-03-11 02:49:26 +00001304
Somnath Kotur939cf302011-08-18 21:51:49 -07001305 if (adapter->pvid == (rxcp->vlan_tag & VLAN_VID_MASK) &&
David S. Miller3c709f82011-05-11 14:26:15 -04001306 !adapter->vlan_tag[rxcp->vlan_tag])
Sathya Perla15d72182011-03-21 20:49:26 +00001307 rxcp->vlanf = 0;
1308 }
Sathya Perla2e588f82011-03-11 02:49:26 +00001309
1310 /* As the compl has been parsed, reset it; we wont touch it again */
1311 compl->dw[offsetof(struct amap_eth_rx_compl_v1, valid) / 32] = 0;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001312
Sathya Perla3abcded2010-10-03 22:12:27 -07001313 queue_tail_inc(&rxo->cq);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001314 return rxcp;
1315}
1316
Eric Dumazet1829b082011-03-01 05:48:12 +00001317static inline struct page *be_alloc_pages(u32 size, gfp_t gfp)
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001318{
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001319 u32 order = get_order(size);
Eric Dumazet1829b082011-03-01 05:48:12 +00001320
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001321 if (order > 0)
Eric Dumazet1829b082011-03-01 05:48:12 +00001322 gfp |= __GFP_COMP;
1323 return alloc_pages(gfp, order);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001324}
1325
1326/*
1327 * Allocate a page, split it to fragments of size rx_frag_size and post as
1328 * receive buffers to BE
1329 */
Eric Dumazet1829b082011-03-01 05:48:12 +00001330static void be_post_rx_frags(struct be_rx_obj *rxo, gfp_t gfp)
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001331{
Sathya Perla3abcded2010-10-03 22:12:27 -07001332 struct be_adapter *adapter = rxo->adapter;
1333 struct be_rx_page_info *page_info_tbl = rxo->page_info_tbl;
Sathya Perla26d92f92010-01-21 22:52:08 -08001334 struct be_rx_page_info *page_info = NULL, *prev_page_info = NULL;
Sathya Perla3abcded2010-10-03 22:12:27 -07001335 struct be_queue_info *rxq = &rxo->q;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001336 struct page *pagep = NULL;
1337 struct be_eth_rx_d *rxd;
1338 u64 page_dmaaddr = 0, frag_dmaaddr;
1339 u32 posted, page_offset = 0;
1340
Sathya Perla3abcded2010-10-03 22:12:27 -07001341 page_info = &rxo->page_info_tbl[rxq->head];
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001342 for (posted = 0; posted < MAX_RX_POST && !page_info->page; posted++) {
1343 if (!pagep) {
Eric Dumazet1829b082011-03-01 05:48:12 +00001344 pagep = be_alloc_pages(adapter->big_page_size, gfp);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001345 if (unlikely(!pagep)) {
Sathya Perlaac124ff2011-07-25 19:10:14 +00001346 rx_stats(rxo)->rx_post_fail++;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001347 break;
1348 }
Ivan Vecera2b7bceb2011-02-02 08:05:12 +00001349 page_dmaaddr = dma_map_page(&adapter->pdev->dev, pagep,
1350 0, adapter->big_page_size,
1351 DMA_FROM_DEVICE);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001352 page_info->page_offset = 0;
1353 } else {
1354 get_page(pagep);
1355 page_info->page_offset = page_offset + rx_frag_size;
1356 }
1357 page_offset = page_info->page_offset;
1358 page_info->page = pagep;
FUJITA Tomonorifac6da52010-04-01 16:53:22 +00001359 dma_unmap_addr_set(page_info, bus, page_dmaaddr);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001360 frag_dmaaddr = page_dmaaddr + page_info->page_offset;
1361
1362 rxd = queue_head_node(rxq);
1363 rxd->fragpa_lo = cpu_to_le32(frag_dmaaddr & 0xFFFFFFFF);
1364 rxd->fragpa_hi = cpu_to_le32(upper_32_bits(frag_dmaaddr));
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001365
1366 /* Any space left in the current big page for another frag? */
1367 if ((page_offset + rx_frag_size + rx_frag_size) >
1368 adapter->big_page_size) {
1369 pagep = NULL;
1370 page_info->last_page_user = true;
1371 }
Sathya Perla26d92f92010-01-21 22:52:08 -08001372
1373 prev_page_info = page_info;
1374 queue_head_inc(rxq);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001375 page_info = &page_info_tbl[rxq->head];
1376 }
1377 if (pagep)
Sathya Perla26d92f92010-01-21 22:52:08 -08001378 prev_page_info->last_page_user = true;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001379
1380 if (posted) {
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001381 atomic_add(posted, &rxq->used);
Sathya Perla8788fdc2009-07-27 22:52:03 +00001382 be_rxq_notify(adapter, rxq->id, posted);
Sathya Perlaea1dae12009-03-19 23:56:20 -07001383 } else if (atomic_read(&rxq->used) == 0) {
1384 /* Let be_worker replenish when memory is available */
Sathya Perla3abcded2010-10-03 22:12:27 -07001385 rxo->rx_post_starved = true;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001386 }
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001387}
1388
Sathya Perla5fb379e2009-06-18 00:02:59 +00001389static struct be_eth_tx_compl *be_tx_compl_get(struct be_queue_info *tx_cq)
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001390{
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001391 struct be_eth_tx_compl *txcp = queue_tail_node(tx_cq);
1392
1393 if (txcp->dw[offsetof(struct amap_eth_tx_compl, valid) / 32] == 0)
1394 return NULL;
1395
Sathya Perlaf3eb62d2010-06-29 00:11:17 +00001396 rmb();
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001397 be_dws_le_to_cpu(txcp, sizeof(*txcp));
1398
1399 txcp->dw[offsetof(struct amap_eth_tx_compl, valid) / 32] = 0;
1400
1401 queue_tail_inc(tx_cq);
1402 return txcp;
1403}
1404
Sathya Perla3c8def92011-06-12 20:01:58 +00001405static u16 be_tx_compl_process(struct be_adapter *adapter,
1406 struct be_tx_obj *txo, u16 last_index)
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001407{
Sathya Perla3c8def92011-06-12 20:01:58 +00001408 struct be_queue_info *txq = &txo->q;
Alexander Duycka73b7962009-12-02 16:48:18 +00001409 struct be_eth_wrb *wrb;
Sathya Perla3c8def92011-06-12 20:01:58 +00001410 struct sk_buff **sent_skbs = txo->sent_skb_list;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001411 struct sk_buff *sent_skb;
Sathya Perlaec43b1a2010-03-22 20:41:34 +00001412 u16 cur_index, num_wrbs = 1; /* account for hdr wrb */
1413 bool unmap_skb_hdr = true;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001414
Sathya Perlaec43b1a2010-03-22 20:41:34 +00001415 sent_skb = sent_skbs[txq->tail];
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001416 BUG_ON(!sent_skb);
Sathya Perlaec43b1a2010-03-22 20:41:34 +00001417 sent_skbs[txq->tail] = NULL;
1418
1419 /* skip header wrb */
Alexander Duycka73b7962009-12-02 16:48:18 +00001420 queue_tail_inc(txq);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001421
Sathya Perlaec43b1a2010-03-22 20:41:34 +00001422 do {
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001423 cur_index = txq->tail;
Alexander Duycka73b7962009-12-02 16:48:18 +00001424 wrb = queue_tail_node(txq);
Ivan Vecera2b7bceb2011-02-02 08:05:12 +00001425 unmap_tx_frag(&adapter->pdev->dev, wrb,
1426 (unmap_skb_hdr && skb_headlen(sent_skb)));
Sathya Perlaec43b1a2010-03-22 20:41:34 +00001427 unmap_skb_hdr = false;
1428
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001429 num_wrbs++;
1430 queue_tail_inc(txq);
Sathya Perlaec43b1a2010-03-22 20:41:34 +00001431 } while (cur_index != last_index);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001432
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001433 kfree_skb(sent_skb);
Padmanabh Ratnakar4d586b82011-05-10 05:13:57 +00001434 return num_wrbs;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001435}
1436
Sathya Perla859b1e42009-08-10 03:43:51 +00001437static inline struct be_eq_entry *event_get(struct be_eq_obj *eq_obj)
1438{
1439 struct be_eq_entry *eqe = queue_tail_node(&eq_obj->q);
1440
1441 if (!eqe->evt)
1442 return NULL;
1443
Sathya Perlaf3eb62d2010-06-29 00:11:17 +00001444 rmb();
Sathya Perla859b1e42009-08-10 03:43:51 +00001445 eqe->evt = le32_to_cpu(eqe->evt);
1446 queue_tail_inc(&eq_obj->q);
1447 return eqe;
1448}
1449
1450static int event_handle(struct be_adapter *adapter,
Sathya Perla3c8def92011-06-12 20:01:58 +00001451 struct be_eq_obj *eq_obj,
1452 bool rearm)
Sathya Perla859b1e42009-08-10 03:43:51 +00001453{
1454 struct be_eq_entry *eqe;
1455 u16 num = 0;
1456
1457 while ((eqe = event_get(eq_obj)) != NULL) {
1458 eqe->evt = 0;
1459 num++;
1460 }
1461
1462 /* Deal with any spurious interrupts that come
1463 * without events
1464 */
Sathya Perla3c8def92011-06-12 20:01:58 +00001465 if (!num)
1466 rearm = true;
1467
1468 be_eq_notify(adapter, eq_obj->q.id, rearm, true, num);
Sathya Perla859b1e42009-08-10 03:43:51 +00001469 if (num)
1470 napi_schedule(&eq_obj->napi);
1471
1472 return num;
1473}
1474
1475/* Just read and notify events without processing them.
1476 * Used at the time of destroying event queues */
1477static void be_eq_clean(struct be_adapter *adapter,
1478 struct be_eq_obj *eq_obj)
1479{
1480 struct be_eq_entry *eqe;
1481 u16 num = 0;
1482
1483 while ((eqe = event_get(eq_obj)) != NULL) {
1484 eqe->evt = 0;
1485 num++;
1486 }
1487
1488 if (num)
1489 be_eq_notify(adapter, eq_obj->q.id, false, true, num);
1490}
1491
Sathya Perla3abcded2010-10-03 22:12:27 -07001492static void be_rx_q_clean(struct be_adapter *adapter, struct be_rx_obj *rxo)
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001493{
1494 struct be_rx_page_info *page_info;
Sathya Perla3abcded2010-10-03 22:12:27 -07001495 struct be_queue_info *rxq = &rxo->q;
1496 struct be_queue_info *rx_cq = &rxo->cq;
Sathya Perla2e588f82011-03-11 02:49:26 +00001497 struct be_rx_compl_info *rxcp;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001498 u16 tail;
1499
1500 /* First cleanup pending rx completions */
Sathya Perla3abcded2010-10-03 22:12:27 -07001501 while ((rxcp = be_rx_compl_get(rxo)) != NULL) {
1502 be_rx_compl_discard(adapter, rxo, rxcp);
Sathya Perla64642812010-12-01 01:04:17 +00001503 be_cq_notify(adapter, rx_cq->id, false, 1);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001504 }
1505
1506 /* Then free posted rx buffer that were not used */
1507 tail = (rxq->head + rxq->len - atomic_read(&rxq->used)) % rxq->len;
Sathya Perlacdab23b2009-08-10 03:43:23 +00001508 for (; atomic_read(&rxq->used) > 0; index_inc(&tail, rxq->len)) {
Sathya Perla3abcded2010-10-03 22:12:27 -07001509 page_info = get_rx_page_info(adapter, rxo, tail);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001510 put_page(page_info->page);
1511 memset(page_info, 0, sizeof(*page_info));
1512 }
1513 BUG_ON(atomic_read(&rxq->used));
Sathya Perla482c9e72011-06-29 23:33:17 +00001514 rxq->tail = rxq->head = 0;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001515}
1516
Sathya Perla3c8def92011-06-12 20:01:58 +00001517static void be_tx_compl_clean(struct be_adapter *adapter,
1518 struct be_tx_obj *txo)
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001519{
Sathya Perla3c8def92011-06-12 20:01:58 +00001520 struct be_queue_info *tx_cq = &txo->cq;
1521 struct be_queue_info *txq = &txo->q;
Sathya Perlaa8e91792009-08-10 03:42:43 +00001522 struct be_eth_tx_compl *txcp;
Padmanabh Ratnakar4d586b82011-05-10 05:13:57 +00001523 u16 end_idx, cmpl = 0, timeo = 0, num_wrbs = 0;
Sathya Perla3c8def92011-06-12 20:01:58 +00001524 struct sk_buff **sent_skbs = txo->sent_skb_list;
Sathya Perlab03388d2010-02-18 00:37:17 +00001525 struct sk_buff *sent_skb;
1526 bool dummy_wrb;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001527
Sathya Perlaa8e91792009-08-10 03:42:43 +00001528 /* Wait for a max of 200ms for all the tx-completions to arrive. */
1529 do {
1530 while ((txcp = be_tx_compl_get(tx_cq))) {
1531 end_idx = AMAP_GET_BITS(struct amap_eth_tx_compl,
1532 wrb_index, txcp);
Sathya Perla3c8def92011-06-12 20:01:58 +00001533 num_wrbs += be_tx_compl_process(adapter, txo, end_idx);
Sathya Perlaa8e91792009-08-10 03:42:43 +00001534 cmpl++;
1535 }
1536 if (cmpl) {
1537 be_cq_notify(adapter, tx_cq->id, false, cmpl);
Padmanabh Ratnakar4d586b82011-05-10 05:13:57 +00001538 atomic_sub(num_wrbs, &txq->used);
Sathya Perlaa8e91792009-08-10 03:42:43 +00001539 cmpl = 0;
Padmanabh Ratnakar4d586b82011-05-10 05:13:57 +00001540 num_wrbs = 0;
Sathya Perlaa8e91792009-08-10 03:42:43 +00001541 }
1542
1543 if (atomic_read(&txq->used) == 0 || ++timeo > 200)
1544 break;
1545
1546 mdelay(1);
1547 } while (true);
1548
1549 if (atomic_read(&txq->used))
1550 dev_err(&adapter->pdev->dev, "%d pending tx-completions\n",
1551 atomic_read(&txq->used));
Sathya Perlab03388d2010-02-18 00:37:17 +00001552
1553 /* free posted tx for which compls will never arrive */
1554 while (atomic_read(&txq->used)) {
1555 sent_skb = sent_skbs[txq->tail];
1556 end_idx = txq->tail;
1557 index_adv(&end_idx,
Sathya Perlafe6d2a32010-11-21 23:25:50 +00001558 wrb_cnt_for_skb(adapter, sent_skb, &dummy_wrb) - 1,
1559 txq->len);
Sathya Perla3c8def92011-06-12 20:01:58 +00001560 num_wrbs = be_tx_compl_process(adapter, txo, end_idx);
Padmanabh Ratnakar4d586b82011-05-10 05:13:57 +00001561 atomic_sub(num_wrbs, &txq->used);
Sathya Perlab03388d2010-02-18 00:37:17 +00001562 }
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001563}
1564
Sathya Perla5fb379e2009-06-18 00:02:59 +00001565static void be_mcc_queues_destroy(struct be_adapter *adapter)
1566{
1567 struct be_queue_info *q;
Sathya Perla5fb379e2009-06-18 00:02:59 +00001568
Sathya Perla8788fdc2009-07-27 22:52:03 +00001569 q = &adapter->mcc_obj.q;
Sathya Perla5fb379e2009-06-18 00:02:59 +00001570 if (q->created)
Sathya Perla8788fdc2009-07-27 22:52:03 +00001571 be_cmd_q_destroy(adapter, q, QTYPE_MCCQ);
Sathya Perla5fb379e2009-06-18 00:02:59 +00001572 be_queue_free(adapter, q);
1573
Sathya Perla8788fdc2009-07-27 22:52:03 +00001574 q = &adapter->mcc_obj.cq;
Sathya Perla5fb379e2009-06-18 00:02:59 +00001575 if (q->created)
Sathya Perla8788fdc2009-07-27 22:52:03 +00001576 be_cmd_q_destroy(adapter, q, QTYPE_CQ);
Sathya Perla5fb379e2009-06-18 00:02:59 +00001577 be_queue_free(adapter, q);
1578}
1579
1580/* Must be called only after TX qs are created as MCC shares TX EQ */
1581static int be_mcc_queues_create(struct be_adapter *adapter)
1582{
1583 struct be_queue_info *q, *cq;
Sathya Perla5fb379e2009-06-18 00:02:59 +00001584
1585 /* Alloc MCC compl queue */
Sathya Perla8788fdc2009-07-27 22:52:03 +00001586 cq = &adapter->mcc_obj.cq;
Sathya Perla5fb379e2009-06-18 00:02:59 +00001587 if (be_queue_alloc(adapter, cq, MCC_CQ_LEN,
Sathya Perlaefd2e402009-07-27 22:53:10 +00001588 sizeof(struct be_mcc_compl)))
Sathya Perla5fb379e2009-06-18 00:02:59 +00001589 goto err;
1590
1591 /* Ask BE to create MCC compl queue; share TX's eq */
Sathya Perla8788fdc2009-07-27 22:52:03 +00001592 if (be_cmd_cq_create(adapter, cq, &adapter->tx_eq.q, false, true, 0))
Sathya Perla5fb379e2009-06-18 00:02:59 +00001593 goto mcc_cq_free;
1594
1595 /* Alloc MCC queue */
Sathya Perla8788fdc2009-07-27 22:52:03 +00001596 q = &adapter->mcc_obj.q;
Sathya Perla5fb379e2009-06-18 00:02:59 +00001597 if (be_queue_alloc(adapter, q, MCC_Q_LEN, sizeof(struct be_mcc_wrb)))
1598 goto mcc_cq_destroy;
1599
1600 /* Ask BE to create MCC queue */
Sathya Perla8788fdc2009-07-27 22:52:03 +00001601 if (be_cmd_mccq_create(adapter, q, cq))
Sathya Perla5fb379e2009-06-18 00:02:59 +00001602 goto mcc_q_free;
1603
1604 return 0;
1605
1606mcc_q_free:
1607 be_queue_free(adapter, q);
1608mcc_cq_destroy:
Sathya Perla8788fdc2009-07-27 22:52:03 +00001609 be_cmd_q_destroy(adapter, cq, QTYPE_CQ);
Sathya Perla5fb379e2009-06-18 00:02:59 +00001610mcc_cq_free:
1611 be_queue_free(adapter, cq);
1612err:
1613 return -1;
1614}
1615
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001616static void be_tx_queues_destroy(struct be_adapter *adapter)
1617{
1618 struct be_queue_info *q;
Sathya Perla3c8def92011-06-12 20:01:58 +00001619 struct be_tx_obj *txo;
1620 u8 i;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001621
Sathya Perla3c8def92011-06-12 20:01:58 +00001622 for_all_tx_queues(adapter, txo, i) {
1623 q = &txo->q;
1624 if (q->created)
1625 be_cmd_q_destroy(adapter, q, QTYPE_TXQ);
1626 be_queue_free(adapter, q);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001627
Sathya Perla3c8def92011-06-12 20:01:58 +00001628 q = &txo->cq;
1629 if (q->created)
1630 be_cmd_q_destroy(adapter, q, QTYPE_CQ);
1631 be_queue_free(adapter, q);
1632 }
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001633
Sathya Perla859b1e42009-08-10 03:43:51 +00001634 /* Clear any residual events */
1635 be_eq_clean(adapter, &adapter->tx_eq);
1636
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001637 q = &adapter->tx_eq.q;
1638 if (q->created)
Sathya Perla8788fdc2009-07-27 22:52:03 +00001639 be_cmd_q_destroy(adapter, q, QTYPE_EQ);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001640 be_queue_free(adapter, q);
1641}
1642
Sathya Perladafc0fe2011-10-24 02:45:02 +00001643static int be_num_txqs_want(struct be_adapter *adapter)
1644{
1645 if ((num_vfs && adapter->sriov_enabled) ||
Sathya Perla752961a2011-10-24 02:45:03 +00001646 be_is_mc(adapter) ||
Sathya Perladafc0fe2011-10-24 02:45:02 +00001647 lancer_chip(adapter) || !be_physfn(adapter) ||
1648 adapter->generation == BE_GEN2)
1649 return 1;
1650 else
1651 return MAX_TX_QS;
1652}
1653
Sathya Perla3c8def92011-06-12 20:01:58 +00001654/* One TX event queue is shared by all TX compl qs */
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001655static int be_tx_queues_create(struct be_adapter *adapter)
1656{
1657 struct be_queue_info *eq, *q, *cq;
Sathya Perla3c8def92011-06-12 20:01:58 +00001658 struct be_tx_obj *txo;
1659 u8 i;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001660
Sathya Perladafc0fe2011-10-24 02:45:02 +00001661 adapter->num_tx_qs = be_num_txqs_want(adapter);
1662 if (adapter->num_tx_qs != MAX_TX_QS)
1663 netif_set_real_num_tx_queues(adapter->netdev,
1664 adapter->num_tx_qs);
1665
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001666 adapter->tx_eq.max_eqd = 0;
1667 adapter->tx_eq.min_eqd = 0;
1668 adapter->tx_eq.cur_eqd = 96;
1669 adapter->tx_eq.enable_aic = false;
Sathya Perla3c8def92011-06-12 20:01:58 +00001670
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001671 eq = &adapter->tx_eq.q;
Sathya Perla3c8def92011-06-12 20:01:58 +00001672 if (be_queue_alloc(adapter, eq, EVNT_Q_LEN,
1673 sizeof(struct be_eq_entry)))
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001674 return -1;
1675
Sathya Perla8788fdc2009-07-27 22:52:03 +00001676 if (be_cmd_eq_create(adapter, eq, adapter->tx_eq.cur_eqd))
Sathya Perla3c8def92011-06-12 20:01:58 +00001677 goto err;
Padmanabh Ratnakarecd62102011-04-03 01:54:11 +00001678 adapter->tx_eq.eq_idx = adapter->eq_next_idx++;
Sathya Perlafe6d2a32010-11-21 23:25:50 +00001679
Sathya Perla3c8def92011-06-12 20:01:58 +00001680 for_all_tx_queues(adapter, txo, i) {
1681 cq = &txo->cq;
1682 if (be_queue_alloc(adapter, cq, TX_CQ_LEN,
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001683 sizeof(struct be_eth_tx_compl)))
Sathya Perla3c8def92011-06-12 20:01:58 +00001684 goto err;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001685
Sathya Perla3c8def92011-06-12 20:01:58 +00001686 if (be_cmd_cq_create(adapter, cq, eq, false, false, 3))
1687 goto err;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001688
Sathya Perla3c8def92011-06-12 20:01:58 +00001689 q = &txo->q;
1690 if (be_queue_alloc(adapter, q, TX_Q_LEN,
1691 sizeof(struct be_eth_wrb)))
1692 goto err;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001693
Sathya Perla3c8def92011-06-12 20:01:58 +00001694 if (be_cmd_txq_create(adapter, q, cq))
1695 goto err;
1696 }
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001697 return 0;
1698
Sathya Perla3c8def92011-06-12 20:01:58 +00001699err:
1700 be_tx_queues_destroy(adapter);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001701 return -1;
1702}
1703
1704static void be_rx_queues_destroy(struct be_adapter *adapter)
1705{
1706 struct be_queue_info *q;
Sathya Perla3abcded2010-10-03 22:12:27 -07001707 struct be_rx_obj *rxo;
1708 int i;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001709
Sathya Perla3abcded2010-10-03 22:12:27 -07001710 for_all_rx_queues(adapter, rxo, i) {
Sathya Perla482c9e72011-06-29 23:33:17 +00001711 be_queue_free(adapter, &rxo->q);
Sathya Perla89420422010-02-17 01:35:26 +00001712
Sathya Perla3abcded2010-10-03 22:12:27 -07001713 q = &rxo->cq;
1714 if (q->created)
1715 be_cmd_q_destroy(adapter, q, QTYPE_CQ);
1716 be_queue_free(adapter, q);
1717
Sathya Perla3abcded2010-10-03 22:12:27 -07001718 q = &rxo->rx_eq.q;
Sathya Perla482c9e72011-06-29 23:33:17 +00001719 if (q->created)
Sathya Perla3abcded2010-10-03 22:12:27 -07001720 be_cmd_q_destroy(adapter, q, QTYPE_EQ);
Sathya Perla3abcded2010-10-03 22:12:27 -07001721 be_queue_free(adapter, q);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001722 }
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001723}
1724
Sathya Perlaac6a0c42011-03-21 20:49:25 +00001725static u32 be_num_rxqs_want(struct be_adapter *adapter)
1726{
Sathya Perlac814fd32011-06-26 20:41:25 +00001727 if ((adapter->function_caps & BE_FUNCTION_CAPS_RSS) &&
Sathya Perla752961a2011-10-24 02:45:03 +00001728 !adapter->sriov_enabled && be_physfn(adapter) &&
1729 !be_is_mc(adapter)) {
Sathya Perlaac6a0c42011-03-21 20:49:25 +00001730 return 1 + MAX_RSS_QS; /* one default non-RSS queue */
1731 } else {
1732 dev_warn(&adapter->pdev->dev,
1733 "No support for multiple RX queues\n");
1734 return 1;
1735 }
1736}
1737
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001738static int be_rx_queues_create(struct be_adapter *adapter)
1739{
1740 struct be_queue_info *eq, *q, *cq;
Sathya Perla3abcded2010-10-03 22:12:27 -07001741 struct be_rx_obj *rxo;
1742 int rc, i;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001743
Sathya Perlaac6a0c42011-03-21 20:49:25 +00001744 adapter->num_rx_qs = min(be_num_rxqs_want(adapter),
1745 msix_enabled(adapter) ?
1746 adapter->num_msix_vec - 1 : 1);
1747 if (adapter->num_rx_qs != MAX_RX_QS)
1748 dev_warn(&adapter->pdev->dev,
1749 "Can create only %d RX queues", adapter->num_rx_qs);
1750
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001751 adapter->big_page_size = (1 << get_order(rx_frag_size)) * PAGE_SIZE;
Sathya Perla3abcded2010-10-03 22:12:27 -07001752 for_all_rx_queues(adapter, rxo, i) {
1753 rxo->adapter = adapter;
1754 rxo->rx_eq.max_eqd = BE_MAX_EQD;
1755 rxo->rx_eq.enable_aic = true;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001756
Sathya Perla3abcded2010-10-03 22:12:27 -07001757 /* EQ */
1758 eq = &rxo->rx_eq.q;
1759 rc = be_queue_alloc(adapter, eq, EVNT_Q_LEN,
1760 sizeof(struct be_eq_entry));
1761 if (rc)
1762 goto err;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001763
Sathya Perla3abcded2010-10-03 22:12:27 -07001764 rc = be_cmd_eq_create(adapter, eq, rxo->rx_eq.cur_eqd);
1765 if (rc)
1766 goto err;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001767
Padmanabh Ratnakarecd62102011-04-03 01:54:11 +00001768 rxo->rx_eq.eq_idx = adapter->eq_next_idx++;
Sathya Perlafe6d2a32010-11-21 23:25:50 +00001769
Sathya Perla3abcded2010-10-03 22:12:27 -07001770 /* CQ */
1771 cq = &rxo->cq;
1772 rc = be_queue_alloc(adapter, cq, RX_CQ_LEN,
1773 sizeof(struct be_eth_rx_compl));
1774 if (rc)
1775 goto err;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001776
Sathya Perla3abcded2010-10-03 22:12:27 -07001777 rc = be_cmd_cq_create(adapter, cq, eq, false, false, 3);
1778 if (rc)
1779 goto err;
Sathya Perla482c9e72011-06-29 23:33:17 +00001780
1781 /* Rx Q - will be created in be_open() */
Sathya Perla3abcded2010-10-03 22:12:27 -07001782 q = &rxo->q;
1783 rc = be_queue_alloc(adapter, q, RX_Q_LEN,
1784 sizeof(struct be_eth_rx_d));
1785 if (rc)
1786 goto err;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001787
Sathya Perla3abcded2010-10-03 22:12:27 -07001788 }
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001789
1790 return 0;
Sathya Perla3abcded2010-10-03 22:12:27 -07001791err:
1792 be_rx_queues_destroy(adapter);
1793 return -1;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001794}
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001795
Sathya Perlafe6d2a32010-11-21 23:25:50 +00001796static bool event_peek(struct be_eq_obj *eq_obj)
Sathya Perlab628bde2009-08-17 00:58:26 +00001797{
Sathya Perlafe6d2a32010-11-21 23:25:50 +00001798 struct be_eq_entry *eqe = queue_tail_node(&eq_obj->q);
1799 if (!eqe->evt)
1800 return false;
1801 else
1802 return true;
Sathya Perlab628bde2009-08-17 00:58:26 +00001803}
1804
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001805static irqreturn_t be_intx(int irq, void *dev)
1806{
1807 struct be_adapter *adapter = dev;
Sathya Perla3abcded2010-10-03 22:12:27 -07001808 struct be_rx_obj *rxo;
Sathya Perlafe6d2a32010-11-21 23:25:50 +00001809 int isr, i, tx = 0 , rx = 0;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001810
Sathya Perlafe6d2a32010-11-21 23:25:50 +00001811 if (lancer_chip(adapter)) {
1812 if (event_peek(&adapter->tx_eq))
Sathya Perla3c8def92011-06-12 20:01:58 +00001813 tx = event_handle(adapter, &adapter->tx_eq, false);
Sathya Perlafe6d2a32010-11-21 23:25:50 +00001814 for_all_rx_queues(adapter, rxo, i) {
1815 if (event_peek(&rxo->rx_eq))
Sathya Perla3c8def92011-06-12 20:01:58 +00001816 rx |= event_handle(adapter, &rxo->rx_eq, true);
Sathya Perlafe6d2a32010-11-21 23:25:50 +00001817 }
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001818
Sathya Perlafe6d2a32010-11-21 23:25:50 +00001819 if (!(tx || rx))
1820 return IRQ_NONE;
Sathya Perla3abcded2010-10-03 22:12:27 -07001821
Sathya Perlafe6d2a32010-11-21 23:25:50 +00001822 } else {
1823 isr = ioread32(adapter->csr + CEV_ISR0_OFFSET +
1824 (adapter->tx_eq.q.id / 8) * CEV_ISR_SIZE);
1825 if (!isr)
1826 return IRQ_NONE;
1827
Padmanabh Ratnakarecd62102011-04-03 01:54:11 +00001828 if ((1 << adapter->tx_eq.eq_idx & isr))
Sathya Perla3c8def92011-06-12 20:01:58 +00001829 event_handle(adapter, &adapter->tx_eq, false);
Sathya Perlafe6d2a32010-11-21 23:25:50 +00001830
1831 for_all_rx_queues(adapter, rxo, i) {
Padmanabh Ratnakarecd62102011-04-03 01:54:11 +00001832 if ((1 << rxo->rx_eq.eq_idx & isr))
Sathya Perla3c8def92011-06-12 20:01:58 +00001833 event_handle(adapter, &rxo->rx_eq, true);
Sathya Perlafe6d2a32010-11-21 23:25:50 +00001834 }
Sathya Perla3abcded2010-10-03 22:12:27 -07001835 }
Sathya Perlac001c212009-07-01 01:06:07 +00001836
Sathya Perla8788fdc2009-07-27 22:52:03 +00001837 return IRQ_HANDLED;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001838}
1839
1840static irqreturn_t be_msix_rx(int irq, void *dev)
1841{
Sathya Perla3abcded2010-10-03 22:12:27 -07001842 struct be_rx_obj *rxo = dev;
1843 struct be_adapter *adapter = rxo->adapter;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001844
Sathya Perla3c8def92011-06-12 20:01:58 +00001845 event_handle(adapter, &rxo->rx_eq, true);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001846
1847 return IRQ_HANDLED;
1848}
1849
Sathya Perla5fb379e2009-06-18 00:02:59 +00001850static irqreturn_t be_msix_tx_mcc(int irq, void *dev)
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001851{
1852 struct be_adapter *adapter = dev;
1853
Sathya Perla3c8def92011-06-12 20:01:58 +00001854 event_handle(adapter, &adapter->tx_eq, false);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001855
1856 return IRQ_HANDLED;
1857}
1858
Sathya Perla2e588f82011-03-11 02:49:26 +00001859static inline bool do_gro(struct be_rx_compl_info *rxcp)
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001860{
Sathya Perla2e588f82011-03-11 02:49:26 +00001861 return (rxcp->tcpf && !rxcp->err) ? true : false;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001862}
1863
stephen hemminger49b05222010-10-21 07:50:48 +00001864static int be_poll_rx(struct napi_struct *napi, int budget)
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001865{
1866 struct be_eq_obj *rx_eq = container_of(napi, struct be_eq_obj, napi);
Sathya Perla3abcded2010-10-03 22:12:27 -07001867 struct be_rx_obj *rxo = container_of(rx_eq, struct be_rx_obj, rx_eq);
1868 struct be_adapter *adapter = rxo->adapter;
1869 struct be_queue_info *rx_cq = &rxo->cq;
Sathya Perla2e588f82011-03-11 02:49:26 +00001870 struct be_rx_compl_info *rxcp;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001871 u32 work_done;
1872
Sathya Perlaac124ff2011-07-25 19:10:14 +00001873 rx_stats(rxo)->rx_polls++;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001874 for (work_done = 0; work_done < budget; work_done++) {
Sathya Perla3abcded2010-10-03 22:12:27 -07001875 rxcp = be_rx_compl_get(rxo);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001876 if (!rxcp)
1877 break;
1878
Sathya Perla12004ae2011-08-02 19:57:46 +00001879 /* Is it a flush compl that has no data */
1880 if (unlikely(rxcp->num_rcvd == 0))
1881 goto loop_continue;
1882
1883 /* Discard compl with partial DMA Lancer B0 */
1884 if (unlikely(!rxcp->pkt_size)) {
Padmanabh Ratnakar009dd872011-05-10 05:12:17 +00001885 be_rx_compl_discard(adapter, rxo, rxcp);
Sathya Perla12004ae2011-08-02 19:57:46 +00001886 goto loop_continue;
Sathya Perla64642812010-12-01 01:04:17 +00001887 }
Padmanabh Ratnakar009dd872011-05-10 05:12:17 +00001888
Sathya Perla12004ae2011-08-02 19:57:46 +00001889 /* On BE drop pkts that arrive due to imperfect filtering in
1890 * promiscuous mode on some skews
1891 */
1892 if (unlikely(rxcp->port != adapter->port_num &&
1893 !lancer_chip(adapter))) {
1894 be_rx_compl_discard(adapter, rxo, rxcp);
1895 goto loop_continue;
1896 }
1897
1898 if (do_gro(rxcp))
1899 be_rx_compl_process_gro(adapter, rxo, rxcp);
1900 else
1901 be_rx_compl_process(adapter, rxo, rxcp);
1902loop_continue:
Sathya Perla2e588f82011-03-11 02:49:26 +00001903 be_rx_stats_update(rxo, rxcp);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001904 }
1905
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001906 /* Refill the queue */
Sathya Perla857c9902011-08-22 19:41:51 +00001907 if (work_done && atomic_read(&rxo->q.used) < RX_FRAGS_REFILL_WM)
Eric Dumazet1829b082011-03-01 05:48:12 +00001908 be_post_rx_frags(rxo, GFP_ATOMIC);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001909
1910 /* All consumed */
1911 if (work_done < budget) {
1912 napi_complete(napi);
Sathya Perla8788fdc2009-07-27 22:52:03 +00001913 be_cq_notify(adapter, rx_cq->id, true, work_done);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001914 } else {
1915 /* More to be consumed; continue with interrupts disabled */
Sathya Perla8788fdc2009-07-27 22:52:03 +00001916 be_cq_notify(adapter, rx_cq->id, false, work_done);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001917 }
1918 return work_done;
1919}
1920
Sathya Perlaf31e50a2010-03-02 03:56:39 -08001921/* As TX and MCC share the same EQ check for both TX and MCC completions.
1922 * For TX/MCC we don't honour budget; consume everything
1923 */
1924static int be_poll_tx_mcc(struct napi_struct *napi, int budget)
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001925{
Sathya Perlaf31e50a2010-03-02 03:56:39 -08001926 struct be_eq_obj *tx_eq = container_of(napi, struct be_eq_obj, napi);
1927 struct be_adapter *adapter =
1928 container_of(tx_eq, struct be_adapter, tx_eq);
Sathya Perla3c8def92011-06-12 20:01:58 +00001929 struct be_tx_obj *txo;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001930 struct be_eth_tx_compl *txcp;
Sathya Perla3c8def92011-06-12 20:01:58 +00001931 int tx_compl, mcc_compl, status = 0;
1932 u8 i;
1933 u16 num_wrbs;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001934
Sathya Perla3c8def92011-06-12 20:01:58 +00001935 for_all_tx_queues(adapter, txo, i) {
1936 tx_compl = 0;
1937 num_wrbs = 0;
1938 while ((txcp = be_tx_compl_get(&txo->cq))) {
1939 num_wrbs += be_tx_compl_process(adapter, txo,
1940 AMAP_GET_BITS(struct amap_eth_tx_compl,
1941 wrb_index, txcp));
1942 tx_compl++;
1943 }
1944 if (tx_compl) {
1945 be_cq_notify(adapter, txo->cq.id, true, tx_compl);
1946
1947 atomic_sub(num_wrbs, &txo->q.used);
1948
1949 /* As Tx wrbs have been freed up, wake up netdev queue
1950 * if it was stopped due to lack of tx wrbs. */
1951 if (__netif_subqueue_stopped(adapter->netdev, i) &&
1952 atomic_read(&txo->q.used) < txo->q.len / 2) {
1953 netif_wake_subqueue(adapter->netdev, i);
1954 }
1955
Sathya Perlaab1594e2011-07-25 19:10:15 +00001956 u64_stats_update_begin(&tx_stats(txo)->sync_compl);
Sathya Perlaac124ff2011-07-25 19:10:14 +00001957 tx_stats(txo)->tx_compl += tx_compl;
Sathya Perlaab1594e2011-07-25 19:10:15 +00001958 u64_stats_update_end(&tx_stats(txo)->sync_compl);
Sathya Perla3c8def92011-06-12 20:01:58 +00001959 }
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001960 }
1961
Sathya Perlaf31e50a2010-03-02 03:56:39 -08001962 mcc_compl = be_process_mcc(adapter, &status);
1963
Sathya Perlaf31e50a2010-03-02 03:56:39 -08001964 if (mcc_compl) {
1965 struct be_mcc_obj *mcc_obj = &adapter->mcc_obj;
1966 be_cq_notify(adapter, mcc_obj->cq.id, true, mcc_compl);
1967 }
1968
Sathya Perla3c8def92011-06-12 20:01:58 +00001969 napi_complete(napi);
Sathya Perla5fb379e2009-06-18 00:02:59 +00001970
Sathya Perla3c8def92011-06-12 20:01:58 +00001971 be_eq_notify(adapter, tx_eq->q.id, true, false, 0);
Sathya Perlaab1594e2011-07-25 19:10:15 +00001972 adapter->drv_stats.tx_events++;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001973 return 1;
1974}
1975
Ajit Khaparded053de92010-09-03 06:23:30 +00001976void be_detect_dump_ue(struct be_adapter *adapter)
Ajit Khaparde7c185272010-07-29 06:16:33 +00001977{
1978 u32 ue_status_lo, ue_status_hi, ue_status_lo_mask, ue_status_hi_mask;
1979 u32 i;
1980
1981 pci_read_config_dword(adapter->pdev,
1982 PCICFG_UE_STATUS_LOW, &ue_status_lo);
1983 pci_read_config_dword(adapter->pdev,
1984 PCICFG_UE_STATUS_HIGH, &ue_status_hi);
1985 pci_read_config_dword(adapter->pdev,
1986 PCICFG_UE_STATUS_LOW_MASK, &ue_status_lo_mask);
1987 pci_read_config_dword(adapter->pdev,
1988 PCICFG_UE_STATUS_HI_MASK, &ue_status_hi_mask);
1989
1990 ue_status_lo = (ue_status_lo & (~ue_status_lo_mask));
1991 ue_status_hi = (ue_status_hi & (~ue_status_hi_mask));
1992
Ajit Khaparded053de92010-09-03 06:23:30 +00001993 if (ue_status_lo || ue_status_hi) {
1994 adapter->ue_detected = true;
Ajit Khaparde7acc2082011-02-11 13:38:17 +00001995 adapter->eeh_err = true;
Ajit Khaparded053de92010-09-03 06:23:30 +00001996 dev_err(&adapter->pdev->dev, "UE Detected!!\n");
1997 }
1998
Ajit Khaparde7c185272010-07-29 06:16:33 +00001999 if (ue_status_lo) {
2000 for (i = 0; ue_status_lo; ue_status_lo >>= 1, i++) {
2001 if (ue_status_lo & 1)
2002 dev_err(&adapter->pdev->dev,
2003 "UE: %s bit set\n", ue_status_low_desc[i]);
2004 }
2005 }
2006 if (ue_status_hi) {
2007 for (i = 0; ue_status_hi; ue_status_hi >>= 1, i++) {
2008 if (ue_status_hi & 1)
2009 dev_err(&adapter->pdev->dev,
2010 "UE: %s bit set\n", ue_status_hi_desc[i]);
2011 }
2012 }
2013
2014}
2015
Sathya Perlaea1dae12009-03-19 23:56:20 -07002016static void be_worker(struct work_struct *work)
2017{
2018 struct be_adapter *adapter =
2019 container_of(work, struct be_adapter, work.work);
Sathya Perla3abcded2010-10-03 22:12:27 -07002020 struct be_rx_obj *rxo;
2021 int i;
Sathya Perlaea1dae12009-03-19 23:56:20 -07002022
Sathya Perla16da8252011-03-21 20:49:27 +00002023 if (!adapter->ue_detected && !lancer_chip(adapter))
2024 be_detect_dump_ue(adapter);
2025
Somnath Koturf203af72010-10-25 23:01:03 +00002026 /* when interrupts are not yet enabled, just reap any pending
2027 * mcc completions */
2028 if (!netif_running(adapter->netdev)) {
2029 int mcc_compl, status = 0;
2030
2031 mcc_compl = be_process_mcc(adapter, &status);
2032
2033 if (mcc_compl) {
2034 struct be_mcc_obj *mcc_obj = &adapter->mcc_obj;
2035 be_cq_notify(adapter, mcc_obj->cq.id, false, mcc_compl);
2036 }
Ajit Khaparde9b037f32011-02-11 13:38:29 +00002037
Somnath Koturf203af72010-10-25 23:01:03 +00002038 goto reschedule;
2039 }
2040
Selvin Xavier005d5692011-05-16 07:36:35 +00002041 if (!adapter->stats_cmd_sent) {
2042 if (lancer_chip(adapter))
2043 lancer_cmd_get_pport_stats(adapter,
2044 &adapter->stats_cmd);
2045 else
2046 be_cmd_get_stats(adapter, &adapter->stats_cmd);
2047 }
Sathya Perla3c8def92011-06-12 20:01:58 +00002048
Sathya Perla3abcded2010-10-03 22:12:27 -07002049 for_all_rx_queues(adapter, rxo, i) {
Sathya Perla3abcded2010-10-03 22:12:27 -07002050 be_rx_eqd_update(adapter, rxo);
2051
2052 if (rxo->rx_post_starved) {
2053 rxo->rx_post_starved = false;
Eric Dumazet1829b082011-03-01 05:48:12 +00002054 be_post_rx_frags(rxo, GFP_KERNEL);
Sathya Perla3abcded2010-10-03 22:12:27 -07002055 }
Sathya Perlaea1dae12009-03-19 23:56:20 -07002056 }
2057
Somnath Koturf203af72010-10-25 23:01:03 +00002058reschedule:
Ivan Vecerae74fbd032011-04-21 00:20:04 +00002059 adapter->work_counter++;
Sathya Perlaea1dae12009-03-19 23:56:20 -07002060 schedule_delayed_work(&adapter->work, msecs_to_jiffies(1000));
2061}
2062
Sathya Perla8d56ff12009-11-22 22:02:26 +00002063static void be_msix_disable(struct be_adapter *adapter)
2064{
Sathya Perlaac6a0c42011-03-21 20:49:25 +00002065 if (msix_enabled(adapter)) {
Sathya Perla8d56ff12009-11-22 22:02:26 +00002066 pci_disable_msix(adapter->pdev);
Sathya Perlaac6a0c42011-03-21 20:49:25 +00002067 adapter->num_msix_vec = 0;
Sathya Perla3abcded2010-10-03 22:12:27 -07002068 }
2069}
2070
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002071static void be_msix_enable(struct be_adapter *adapter)
2072{
Sathya Perla3abcded2010-10-03 22:12:27 -07002073#define BE_MIN_MSIX_VECTORS (1 + 1) /* Rx + Tx */
Sathya Perlaac6a0c42011-03-21 20:49:25 +00002074 int i, status, num_vec;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002075
Sathya Perlaac6a0c42011-03-21 20:49:25 +00002076 num_vec = be_num_rxqs_want(adapter) + 1;
Sathya Perla3abcded2010-10-03 22:12:27 -07002077
Sathya Perlaac6a0c42011-03-21 20:49:25 +00002078 for (i = 0; i < num_vec; i++)
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002079 adapter->msix_entries[i].entry = i;
2080
Sathya Perlaac6a0c42011-03-21 20:49:25 +00002081 status = pci_enable_msix(adapter->pdev, adapter->msix_entries, num_vec);
Sathya Perla3abcded2010-10-03 22:12:27 -07002082 if (status == 0) {
2083 goto done;
2084 } else if (status >= BE_MIN_MSIX_VECTORS) {
Sathya Perlaac6a0c42011-03-21 20:49:25 +00002085 num_vec = status;
Sathya Perla3abcded2010-10-03 22:12:27 -07002086 if (pci_enable_msix(adapter->pdev, adapter->msix_entries,
Sathya Perlaac6a0c42011-03-21 20:49:25 +00002087 num_vec) == 0)
Sathya Perla3abcded2010-10-03 22:12:27 -07002088 goto done;
Sathya Perla3abcded2010-10-03 22:12:27 -07002089 }
2090 return;
2091done:
Sathya Perlaac6a0c42011-03-21 20:49:25 +00002092 adapter->num_msix_vec = num_vec;
2093 return;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002094}
2095
Sathya Perlaf9449ab2011-10-24 02:45:01 +00002096static int be_sriov_enable(struct be_adapter *adapter)
Sarveshwar Bandiba343c72010-03-31 02:56:12 +00002097{
Sarveshwar Bandi344dbf12010-07-09 01:43:55 +00002098 be_check_sriov_fn_type(adapter);
Ajit Khaparde6dedec82010-07-29 06:15:32 +00002099#ifdef CONFIG_PCI_IOV
Sarveshwar Bandiba343c72010-03-31 02:56:12 +00002100 if (be_physfn(adapter) && num_vfs) {
Ajit Khaparde81be8f02011-04-06 18:08:17 +00002101 int status, pos;
2102 u16 nvfs;
2103
2104 pos = pci_find_ext_capability(adapter->pdev,
2105 PCI_EXT_CAP_ID_SRIOV);
2106 pci_read_config_word(adapter->pdev,
2107 pos + PCI_SRIOV_TOTAL_VF, &nvfs);
2108
2109 if (num_vfs > nvfs) {
2110 dev_info(&adapter->pdev->dev,
2111 "Device supports %d VFs and not %d\n",
2112 nvfs, num_vfs);
2113 num_vfs = nvfs;
2114 }
Ajit Khaparde6dedec82010-07-29 06:15:32 +00002115
Sarveshwar Bandiba343c72010-03-31 02:56:12 +00002116 status = pci_enable_sriov(adapter->pdev, num_vfs);
2117 adapter->sriov_enabled = status ? false : true;
Sathya Perlaf9449ab2011-10-24 02:45:01 +00002118
2119 if (adapter->sriov_enabled) {
2120 adapter->vf_cfg = kcalloc(num_vfs,
2121 sizeof(struct be_vf_cfg),
2122 GFP_KERNEL);
2123 if (!adapter->vf_cfg)
2124 return -ENOMEM;
2125 }
Sarveshwar Bandiba343c72010-03-31 02:56:12 +00002126 }
2127#endif
Sathya Perlaf9449ab2011-10-24 02:45:01 +00002128 return 0;
Sarveshwar Bandiba343c72010-03-31 02:56:12 +00002129}
2130
2131static void be_sriov_disable(struct be_adapter *adapter)
2132{
2133#ifdef CONFIG_PCI_IOV
2134 if (adapter->sriov_enabled) {
2135 pci_disable_sriov(adapter->pdev);
Sathya Perlaf9449ab2011-10-24 02:45:01 +00002136 kfree(adapter->vf_cfg);
Sarveshwar Bandiba343c72010-03-31 02:56:12 +00002137 adapter->sriov_enabled = false;
2138 }
2139#endif
2140}
2141
Sathya Perlafe6d2a32010-11-21 23:25:50 +00002142static inline int be_msix_vec_get(struct be_adapter *adapter,
2143 struct be_eq_obj *eq_obj)
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002144{
Padmanabh Ratnakarecd62102011-04-03 01:54:11 +00002145 return adapter->msix_entries[eq_obj->eq_idx].vector;
Sathya Perlab628bde2009-08-17 00:58:26 +00002146}
2147
2148static int be_request_irq(struct be_adapter *adapter,
2149 struct be_eq_obj *eq_obj,
Sathya Perla3abcded2010-10-03 22:12:27 -07002150 void *handler, char *desc, void *context)
Sathya Perlab628bde2009-08-17 00:58:26 +00002151{
2152 struct net_device *netdev = adapter->netdev;
2153 int vec;
2154
2155 sprintf(eq_obj->desc, "%s-%s", netdev->name, desc);
Sathya Perlafe6d2a32010-11-21 23:25:50 +00002156 vec = be_msix_vec_get(adapter, eq_obj);
Sathya Perla3abcded2010-10-03 22:12:27 -07002157 return request_irq(vec, handler, 0, eq_obj->desc, context);
Sathya Perlab628bde2009-08-17 00:58:26 +00002158}
2159
Sathya Perla3abcded2010-10-03 22:12:27 -07002160static void be_free_irq(struct be_adapter *adapter, struct be_eq_obj *eq_obj,
2161 void *context)
Sathya Perlab628bde2009-08-17 00:58:26 +00002162{
Sathya Perlafe6d2a32010-11-21 23:25:50 +00002163 int vec = be_msix_vec_get(adapter, eq_obj);
Sathya Perla3abcded2010-10-03 22:12:27 -07002164 free_irq(vec, context);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002165}
2166
2167static int be_msix_register(struct be_adapter *adapter)
2168{
Sathya Perla3abcded2010-10-03 22:12:27 -07002169 struct be_rx_obj *rxo;
2170 int status, i;
2171 char qname[10];
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002172
Sathya Perla3abcded2010-10-03 22:12:27 -07002173 status = be_request_irq(adapter, &adapter->tx_eq, be_msix_tx_mcc, "tx",
2174 adapter);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002175 if (status)
2176 goto err;
2177
Sathya Perla3abcded2010-10-03 22:12:27 -07002178 for_all_rx_queues(adapter, rxo, i) {
2179 sprintf(qname, "rxq%d", i);
2180 status = be_request_irq(adapter, &rxo->rx_eq, be_msix_rx,
2181 qname, rxo);
2182 if (status)
2183 goto err_msix;
2184 }
Sathya Perlab628bde2009-08-17 00:58:26 +00002185
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002186 return 0;
Sathya Perlab628bde2009-08-17 00:58:26 +00002187
Sathya Perla3abcded2010-10-03 22:12:27 -07002188err_msix:
2189 be_free_irq(adapter, &adapter->tx_eq, adapter);
2190
2191 for (i--, rxo = &adapter->rx_obj[i]; i >= 0; i--, rxo--)
2192 be_free_irq(adapter, &rxo->rx_eq, rxo);
2193
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002194err:
2195 dev_warn(&adapter->pdev->dev,
2196 "MSIX Request IRQ failed - err %d\n", status);
Sathya Perlaac6a0c42011-03-21 20:49:25 +00002197 be_msix_disable(adapter);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002198 return status;
2199}
2200
2201static int be_irq_register(struct be_adapter *adapter)
2202{
2203 struct net_device *netdev = adapter->netdev;
2204 int status;
2205
Sathya Perlaac6a0c42011-03-21 20:49:25 +00002206 if (msix_enabled(adapter)) {
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002207 status = be_msix_register(adapter);
2208 if (status == 0)
2209 goto done;
Sarveshwar Bandiba343c72010-03-31 02:56:12 +00002210 /* INTx is not supported for VF */
2211 if (!be_physfn(adapter))
2212 return status;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002213 }
2214
2215 /* INTx */
2216 netdev->irq = adapter->pdev->irq;
2217 status = request_irq(netdev->irq, be_intx, IRQF_SHARED, netdev->name,
2218 adapter);
2219 if (status) {
2220 dev_err(&adapter->pdev->dev,
2221 "INTx request IRQ failed - err %d\n", status);
2222 return status;
2223 }
2224done:
2225 adapter->isr_registered = true;
2226 return 0;
2227}
2228
2229static void be_irq_unregister(struct be_adapter *adapter)
2230{
2231 struct net_device *netdev = adapter->netdev;
Sathya Perla3abcded2010-10-03 22:12:27 -07002232 struct be_rx_obj *rxo;
2233 int i;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002234
2235 if (!adapter->isr_registered)
2236 return;
2237
2238 /* INTx */
Sathya Perlaac6a0c42011-03-21 20:49:25 +00002239 if (!msix_enabled(adapter)) {
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002240 free_irq(netdev->irq, adapter);
2241 goto done;
2242 }
2243
2244 /* MSIx */
Sathya Perla3abcded2010-10-03 22:12:27 -07002245 be_free_irq(adapter, &adapter->tx_eq, adapter);
2246
2247 for_all_rx_queues(adapter, rxo, i)
2248 be_free_irq(adapter, &rxo->rx_eq, rxo);
2249
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002250done:
2251 adapter->isr_registered = false;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002252}
2253
Sathya Perla482c9e72011-06-29 23:33:17 +00002254static void be_rx_queues_clear(struct be_adapter *adapter)
2255{
2256 struct be_queue_info *q;
2257 struct be_rx_obj *rxo;
2258 int i;
2259
2260 for_all_rx_queues(adapter, rxo, i) {
2261 q = &rxo->q;
2262 if (q->created) {
2263 be_cmd_rxq_destroy(adapter, q);
2264 /* After the rxq is invalidated, wait for a grace time
2265 * of 1ms for all dma to end and the flush compl to
2266 * arrive
2267 */
2268 mdelay(1);
2269 be_rx_q_clean(adapter, rxo);
2270 }
2271
2272 /* Clear any residual events */
2273 q = &rxo->rx_eq.q;
2274 if (q->created)
2275 be_eq_clean(adapter, &rxo->rx_eq);
2276 }
2277}
2278
Sathya Perla889cd4b2010-05-30 23:33:45 +00002279static int be_close(struct net_device *netdev)
2280{
2281 struct be_adapter *adapter = netdev_priv(netdev);
Sathya Perla3abcded2010-10-03 22:12:27 -07002282 struct be_rx_obj *rxo;
Sathya Perla3c8def92011-06-12 20:01:58 +00002283 struct be_tx_obj *txo;
Sathya Perla889cd4b2010-05-30 23:33:45 +00002284 struct be_eq_obj *tx_eq = &adapter->tx_eq;
Sathya Perla3abcded2010-10-03 22:12:27 -07002285 int vec, i;
Sathya Perla889cd4b2010-05-30 23:33:45 +00002286
Sathya Perla889cd4b2010-05-30 23:33:45 +00002287 be_async_mcc_disable(adapter);
2288
Sathya Perlafe6d2a32010-11-21 23:25:50 +00002289 if (!lancer_chip(adapter))
2290 be_intr_set(adapter, false);
Sathya Perla889cd4b2010-05-30 23:33:45 +00002291
Padmanabh Ratnakar63fcb272011-03-07 03:09:17 +00002292 for_all_rx_queues(adapter, rxo, i)
2293 napi_disable(&rxo->rx_eq.napi);
2294
2295 napi_disable(&tx_eq->napi);
2296
2297 if (lancer_chip(adapter)) {
Padmanabh Ratnakar63fcb272011-03-07 03:09:17 +00002298 be_cq_notify(adapter, adapter->mcc_obj.cq.id, false, 0);
2299 for_all_rx_queues(adapter, rxo, i)
2300 be_cq_notify(adapter, rxo->cq.id, false, 0);
Sathya Perla3c8def92011-06-12 20:01:58 +00002301 for_all_tx_queues(adapter, txo, i)
2302 be_cq_notify(adapter, txo->cq.id, false, 0);
Padmanabh Ratnakar63fcb272011-03-07 03:09:17 +00002303 }
2304
Sathya Perlaac6a0c42011-03-21 20:49:25 +00002305 if (msix_enabled(adapter)) {
Sathya Perlafe6d2a32010-11-21 23:25:50 +00002306 vec = be_msix_vec_get(adapter, tx_eq);
Sathya Perla889cd4b2010-05-30 23:33:45 +00002307 synchronize_irq(vec);
Sathya Perla3abcded2010-10-03 22:12:27 -07002308
2309 for_all_rx_queues(adapter, rxo, i) {
Sathya Perlafe6d2a32010-11-21 23:25:50 +00002310 vec = be_msix_vec_get(adapter, &rxo->rx_eq);
Sathya Perla3abcded2010-10-03 22:12:27 -07002311 synchronize_irq(vec);
2312 }
Sathya Perla889cd4b2010-05-30 23:33:45 +00002313 } else {
2314 synchronize_irq(netdev->irq);
2315 }
2316 be_irq_unregister(adapter);
2317
Sathya Perla889cd4b2010-05-30 23:33:45 +00002318 /* Wait for all pending tx completions to arrive so that
2319 * all tx skbs are freed.
2320 */
Sathya Perla3c8def92011-06-12 20:01:58 +00002321 for_all_tx_queues(adapter, txo, i)
2322 be_tx_compl_clean(adapter, txo);
Sathya Perla889cd4b2010-05-30 23:33:45 +00002323
Sathya Perla482c9e72011-06-29 23:33:17 +00002324 be_rx_queues_clear(adapter);
2325 return 0;
2326}
2327
2328static int be_rx_queues_setup(struct be_adapter *adapter)
2329{
2330 struct be_rx_obj *rxo;
2331 int rc, i;
2332 u8 rsstable[MAX_RSS_QS];
2333
2334 for_all_rx_queues(adapter, rxo, i) {
2335 rc = be_cmd_rxq_create(adapter, &rxo->q, rxo->cq.id,
2336 rx_frag_size, BE_MAX_JUMBO_FRAME_SIZE,
2337 adapter->if_handle,
2338 (i > 0) ? 1 : 0/* rss enable */, &rxo->rss_id);
2339 if (rc)
2340 return rc;
2341 }
2342
2343 if (be_multi_rxq(adapter)) {
2344 for_all_rss_queues(adapter, rxo, i)
2345 rsstable[i] = rxo->rss_id;
2346
2347 rc = be_cmd_rss_config(adapter, rsstable,
2348 adapter->num_rx_qs - 1);
2349 if (rc)
2350 return rc;
2351 }
2352
2353 /* First time posting */
2354 for_all_rx_queues(adapter, rxo, i) {
2355 be_post_rx_frags(rxo, GFP_KERNEL);
2356 napi_enable(&rxo->rx_eq.napi);
2357 }
Sathya Perla889cd4b2010-05-30 23:33:45 +00002358 return 0;
2359}
2360
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002361static int be_open(struct net_device *netdev)
2362{
2363 struct be_adapter *adapter = netdev_priv(netdev);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002364 struct be_eq_obj *tx_eq = &adapter->tx_eq;
Sathya Perla3abcded2010-10-03 22:12:27 -07002365 struct be_rx_obj *rxo;
Sathya Perla3abcded2010-10-03 22:12:27 -07002366 int status, i;
Sathya Perla5fb379e2009-06-18 00:02:59 +00002367
Sathya Perla482c9e72011-06-29 23:33:17 +00002368 status = be_rx_queues_setup(adapter);
2369 if (status)
2370 goto err;
2371
Sathya Perla5fb379e2009-06-18 00:02:59 +00002372 napi_enable(&tx_eq->napi);
2373
2374 be_irq_register(adapter);
2375
Sathya Perlafe6d2a32010-11-21 23:25:50 +00002376 if (!lancer_chip(adapter))
2377 be_intr_set(adapter, true);
Sathya Perla5fb379e2009-06-18 00:02:59 +00002378
2379 /* The evt queues are created in unarmed state; arm them */
Sathya Perla3abcded2010-10-03 22:12:27 -07002380 for_all_rx_queues(adapter, rxo, i) {
2381 be_eq_notify(adapter, rxo->rx_eq.q.id, true, false, 0);
2382 be_cq_notify(adapter, rxo->cq.id, true, 0);
2383 }
Sathya Perla8788fdc2009-07-27 22:52:03 +00002384 be_eq_notify(adapter, tx_eq->q.id, true, false, 0);
Sathya Perla5fb379e2009-06-18 00:02:59 +00002385
Sathya Perla7a1e9b22010-02-17 01:35:11 +00002386 /* Now that interrupts are on we can process async mcc */
2387 be_async_mcc_enable(adapter);
2388
Sathya Perla889cd4b2010-05-30 23:33:45 +00002389 return 0;
2390err:
2391 be_close(adapter->netdev);
2392 return -EIO;
Sathya Perla5fb379e2009-06-18 00:02:59 +00002393}
2394
Ajit Khaparde71d8d1b2009-12-03 06:16:59 +00002395static int be_setup_wol(struct be_adapter *adapter, bool enable)
2396{
2397 struct be_dma_mem cmd;
2398 int status = 0;
2399 u8 mac[ETH_ALEN];
2400
2401 memset(mac, 0, ETH_ALEN);
2402
2403 cmd.size = sizeof(struct be_cmd_req_acpi_wol_magic_config);
Ivan Vecera2b7bceb2011-02-02 08:05:12 +00002404 cmd.va = dma_alloc_coherent(&adapter->pdev->dev, cmd.size, &cmd.dma,
2405 GFP_KERNEL);
Ajit Khaparde71d8d1b2009-12-03 06:16:59 +00002406 if (cmd.va == NULL)
2407 return -1;
2408 memset(cmd.va, 0, cmd.size);
2409
2410 if (enable) {
2411 status = pci_write_config_dword(adapter->pdev,
2412 PCICFG_PM_CONTROL_OFFSET, PCICFG_PM_CONTROL_MASK);
2413 if (status) {
2414 dev_err(&adapter->pdev->dev,
Frans Pop2381a552010-03-24 07:57:36 +00002415 "Could not enable Wake-on-lan\n");
Ivan Vecera2b7bceb2011-02-02 08:05:12 +00002416 dma_free_coherent(&adapter->pdev->dev, cmd.size, cmd.va,
2417 cmd.dma);
Ajit Khaparde71d8d1b2009-12-03 06:16:59 +00002418 return status;
2419 }
2420 status = be_cmd_enable_magic_wol(adapter,
2421 adapter->netdev->dev_addr, &cmd);
2422 pci_enable_wake(adapter->pdev, PCI_D3hot, 1);
2423 pci_enable_wake(adapter->pdev, PCI_D3cold, 1);
2424 } else {
2425 status = be_cmd_enable_magic_wol(adapter, mac, &cmd);
2426 pci_enable_wake(adapter->pdev, PCI_D3hot, 0);
2427 pci_enable_wake(adapter->pdev, PCI_D3cold, 0);
2428 }
2429
Ivan Vecera2b7bceb2011-02-02 08:05:12 +00002430 dma_free_coherent(&adapter->pdev->dev, cmd.size, cmd.va, cmd.dma);
Ajit Khaparde71d8d1b2009-12-03 06:16:59 +00002431 return status;
2432}
2433
Ajit Khaparde6d87f5c2010-08-25 00:32:33 +00002434/*
2435 * Generate a seed MAC address from the PF MAC Address using jhash.
2436 * MAC Address for VFs are assigned incrementally starting from the seed.
2437 * These addresses are programmed in the ASIC by the PF and the VF driver
2438 * queries for the MAC address during its probe.
2439 */
2440static inline int be_vf_eth_addr_config(struct be_adapter *adapter)
2441{
Sathya Perlaf9449ab2011-10-24 02:45:01 +00002442 u32 vf;
Sathya Perla3abcded2010-10-03 22:12:27 -07002443 int status = 0;
Ajit Khaparde6d87f5c2010-08-25 00:32:33 +00002444 u8 mac[ETH_ALEN];
2445
2446 be_vf_eth_addr_generate(adapter, mac);
2447
2448 for (vf = 0; vf < num_vfs; vf++) {
2449 status = be_cmd_pmac_add(adapter, mac,
2450 adapter->vf_cfg[vf].vf_if_handle,
Ajit Khapardef8617e02011-02-11 13:36:37 +00002451 &adapter->vf_cfg[vf].vf_pmac_id,
2452 vf + 1);
Ajit Khaparde6d87f5c2010-08-25 00:32:33 +00002453 if (status)
2454 dev_err(&adapter->pdev->dev,
2455 "Mac address add failed for VF %d\n", vf);
2456 else
2457 memcpy(adapter->vf_cfg[vf].vf_mac_addr, mac, ETH_ALEN);
2458
2459 mac[5] += 1;
2460 }
2461 return status;
2462}
2463
Sathya Perlaf9449ab2011-10-24 02:45:01 +00002464static void be_vf_clear(struct be_adapter *adapter)
Ajit Khaparde6d87f5c2010-08-25 00:32:33 +00002465{
2466 u32 vf;
2467
2468 for (vf = 0; vf < num_vfs; vf++) {
2469 if (adapter->vf_cfg[vf].vf_pmac_id != BE_INVALID_PMAC_ID)
2470 be_cmd_pmac_del(adapter,
2471 adapter->vf_cfg[vf].vf_if_handle,
Ajit Khapardef8617e02011-02-11 13:36:37 +00002472 adapter->vf_cfg[vf].vf_pmac_id, vf + 1);
Ajit Khaparde6d87f5c2010-08-25 00:32:33 +00002473 }
Sathya Perlaf9449ab2011-10-24 02:45:01 +00002474
2475 for (vf = 0; vf < num_vfs; vf++)
2476 if (adapter->vf_cfg[vf].vf_if_handle)
2477 be_cmd_if_destroy(adapter,
2478 adapter->vf_cfg[vf].vf_if_handle, vf + 1);
Ajit Khaparde6d87f5c2010-08-25 00:32:33 +00002479}
2480
Sathya Perlaa54769f2011-10-24 02:45:00 +00002481static int be_clear(struct be_adapter *adapter)
2482{
Sathya Perlaa54769f2011-10-24 02:45:00 +00002483 if (be_physfn(adapter) && adapter->sriov_enabled)
Sathya Perlaf9449ab2011-10-24 02:45:01 +00002484 be_vf_clear(adapter);
2485
2486 be_cmd_if_destroy(adapter, adapter->if_handle, 0);
Sathya Perlaa54769f2011-10-24 02:45:00 +00002487
2488 be_mcc_queues_destroy(adapter);
2489 be_rx_queues_destroy(adapter);
2490 be_tx_queues_destroy(adapter);
2491 adapter->eq_next_idx = 0;
2492
Sathya Perlaa54769f2011-10-24 02:45:00 +00002493 adapter->be3_native = false;
2494 adapter->promiscuous = false;
2495
2496 /* tell fw we're done with firing cmds */
2497 be_cmd_fw_clean(adapter);
2498 return 0;
2499}
2500
Sathya Perlaf9449ab2011-10-24 02:45:01 +00002501static int be_vf_setup(struct be_adapter *adapter)
2502{
2503 u32 cap_flags, en_flags, vf;
2504 u16 lnk_speed;
2505 int status;
2506
2507 cap_flags = en_flags = BE_IF_FLAGS_UNTAGGED | BE_IF_FLAGS_BROADCAST;
2508 for (vf = 0; vf < num_vfs; vf++) {
2509 status = be_cmd_if_create(adapter, cap_flags, en_flags, NULL,
2510 &adapter->vf_cfg[vf].vf_if_handle,
2511 NULL, vf+1);
2512 if (status)
2513 goto err;
2514 adapter->vf_cfg[vf].vf_pmac_id = BE_INVALID_PMAC_ID;
2515 }
2516
2517 if (!lancer_chip(adapter)) {
2518 status = be_vf_eth_addr_config(adapter);
2519 if (status)
2520 goto err;
2521 }
2522
2523 for (vf = 0; vf < num_vfs; vf++) {
2524 status = be_cmd_link_status_query(adapter, NULL, &lnk_speed,
2525 vf + 1);
2526 if (status)
2527 goto err;
2528 adapter->vf_cfg[vf].vf_tx_rate = lnk_speed * 10;
2529 }
2530 return 0;
2531err:
2532 return status;
2533}
2534
Sathya Perla5fb379e2009-06-18 00:02:59 +00002535static int be_setup(struct be_adapter *adapter)
2536{
Sathya Perla5fb379e2009-06-18 00:02:59 +00002537 struct net_device *netdev = adapter->netdev;
Sathya Perlaf9449ab2011-10-24 02:45:01 +00002538 u32 cap_flags, en_flags;
Sathya Perlaa54769f2011-10-24 02:45:00 +00002539 u32 tx_fc, rx_fc;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002540 int status;
Sarveshwar Bandiba343c72010-03-31 02:56:12 +00002541 u8 mac[ETH_ALEN];
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002542
Sathya Perlaf9449ab2011-10-24 02:45:01 +00002543 /* Allow all priorities by default. A GRP5 evt may modify this */
2544 adapter->vlan_prio_bmap = 0xff;
2545 adapter->link_speed = -1;
2546
Sathya Perla2dc1deb2011-07-19 19:52:33 +00002547 be_cmd_req_native_mode(adapter);
2548
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002549 status = be_tx_queues_create(adapter);
2550 if (status != 0)
Sathya Perlaa54769f2011-10-24 02:45:00 +00002551 goto err;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002552
2553 status = be_rx_queues_create(adapter);
2554 if (status != 0)
Sathya Perlaa54769f2011-10-24 02:45:00 +00002555 goto err;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002556
Sathya Perla5fb379e2009-06-18 00:02:59 +00002557 status = be_mcc_queues_create(adapter);
2558 if (status != 0)
Sathya Perlaa54769f2011-10-24 02:45:00 +00002559 goto err;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002560
Sathya Perlaf9449ab2011-10-24 02:45:01 +00002561 memset(mac, 0, ETH_ALEN);
2562 status = be_cmd_mac_addr_query(adapter, mac, MAC_ADDRESS_TYPE_NETWORK,
2563 true /*permanent */, 0);
2564 if (status)
2565 return status;
2566 memcpy(adapter->netdev->dev_addr, mac, ETH_ALEN);
2567 memcpy(adapter->netdev->perm_addr, mac, ETH_ALEN);
2568
2569 en_flags = BE_IF_FLAGS_UNTAGGED | BE_IF_FLAGS_BROADCAST |
2570 BE_IF_FLAGS_MULTICAST | BE_IF_FLAGS_PASS_L3L4_ERRORS;
2571 cap_flags = en_flags | BE_IF_FLAGS_MCAST_PROMISCUOUS |
2572 BE_IF_FLAGS_PROMISCUOUS;
2573 if (adapter->function_caps & BE_FUNCTION_CAPS_RSS) {
2574 cap_flags |= BE_IF_FLAGS_RSS;
2575 en_flags |= BE_IF_FLAGS_RSS;
2576 }
2577 status = be_cmd_if_create(adapter, cap_flags, en_flags,
2578 netdev->dev_addr, &adapter->if_handle,
2579 &adapter->pmac_id, 0);
2580 if (status != 0)
2581 goto err;
2582
2583 /* For BEx, the VF's permanent mac queried from card is incorrect.
2584 * Query the mac configued by the PF using if_handle
2585 */
2586 if (!be_physfn(adapter) && !lancer_chip(adapter)) {
2587 status = be_cmd_mac_addr_query(adapter, mac,
2588 MAC_ADDRESS_TYPE_NETWORK, false, adapter->if_handle);
2589 if (!status) {
2590 memcpy(adapter->netdev->dev_addr, mac, ETH_ALEN);
2591 memcpy(adapter->netdev->perm_addr, mac, ETH_ALEN);
2592 }
2593 }
Ajit Khaparde0dffc832009-11-29 17:57:46 +00002594
Sathya Perla04b71172011-09-27 13:30:27 -04002595 be_cmd_get_fw_ver(adapter, adapter->fw_ver, NULL);
Somnath Kotur5a56eb12011-09-30 07:24:28 +00002596
Sathya Perlaa54769f2011-10-24 02:45:00 +00002597 status = be_vid_config(adapter, false, 0);
2598 if (status)
2599 goto err;
2600
2601 be_set_rx_mode(adapter->netdev);
2602
2603 status = be_cmd_get_flow_control(adapter, &tx_fc, &rx_fc);
2604 if (status)
2605 goto err;
2606 if (rx_fc != adapter->rx_fc || tx_fc != adapter->tx_fc) {
2607 status = be_cmd_set_flow_control(adapter, adapter->tx_fc,
2608 adapter->rx_fc);
2609 if (status)
2610 goto err;
2611 }
2612
Somnath Kotur5a56eb12011-09-30 07:24:28 +00002613 pcie_set_readrq(adapter->pdev, 4096);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002614
Sathya Perlaf9449ab2011-10-24 02:45:01 +00002615 if (be_physfn(adapter) && adapter->sriov_enabled) {
2616 status = be_vf_setup(adapter);
2617 if (status)
2618 goto err;
2619 }
2620
2621 return 0;
Sathya Perlaa54769f2011-10-24 02:45:00 +00002622err:
2623 be_clear(adapter);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002624 return status;
2625}
2626
Ajit Khaparde84517482009-09-04 03:12:16 +00002627#define FW_FILE_HDR_SIGN "ServerEngines Corp. "
Sarveshwar Bandifa9a6fe2009-11-20 14:23:47 -08002628static bool be_flash_redboot(struct be_adapter *adapter,
Ajit Khaparde3f0d4562010-02-09 01:30:35 +00002629 const u8 *p, u32 img_start, int image_size,
2630 int hdr_size)
Sarveshwar Bandifa9a6fe2009-11-20 14:23:47 -08002631{
2632 u32 crc_offset;
2633 u8 flashed_crc[4];
2634 int status;
Ajit Khaparde3f0d4562010-02-09 01:30:35 +00002635
2636 crc_offset = hdr_size + img_start + image_size - 4;
2637
Sarveshwar Bandifa9a6fe2009-11-20 14:23:47 -08002638 p += crc_offset;
Ajit Khaparde3f0d4562010-02-09 01:30:35 +00002639
2640 status = be_cmd_get_flash_crc(adapter, flashed_crc,
Ajit Khapardef510fc62010-03-31 01:47:45 +00002641 (image_size - 4));
Sarveshwar Bandifa9a6fe2009-11-20 14:23:47 -08002642 if (status) {
2643 dev_err(&adapter->pdev->dev,
2644 "could not get crc from flash, not flashing redboot\n");
2645 return false;
2646 }
2647
2648 /*update redboot only if crc does not match*/
2649 if (!memcmp(flashed_crc, p, 4))
2650 return false;
2651 else
2652 return true;
Sarveshwar Bandifa9a6fe2009-11-20 14:23:47 -08002653}
2654
Sathya Perla306f1342011-08-02 19:57:45 +00002655static bool phy_flashing_required(struct be_adapter *adapter)
2656{
2657 int status = 0;
2658 struct be_phy_info phy_info;
2659
2660 status = be_cmd_get_phy_info(adapter, &phy_info);
2661 if (status)
2662 return false;
2663 if ((phy_info.phy_type == TN_8022) &&
2664 (phy_info.interface_type == PHY_TYPE_BASET_10GB)) {
2665 return true;
2666 }
2667 return false;
2668}
2669
Ajit Khaparde3f0d4562010-02-09 01:30:35 +00002670static int be_flash_data(struct be_adapter *adapter,
Ajit Khaparde84517482009-09-04 03:12:16 +00002671 const struct firmware *fw,
Ajit Khaparde3f0d4562010-02-09 01:30:35 +00002672 struct be_dma_mem *flash_cmd, int num_of_images)
2673
Ajit Khaparde84517482009-09-04 03:12:16 +00002674{
Ajit Khaparde3f0d4562010-02-09 01:30:35 +00002675 int status = 0, i, filehdr_size = 0;
2676 u32 total_bytes = 0, flash_op;
Ajit Khaparde84517482009-09-04 03:12:16 +00002677 int num_bytes;
2678 const u8 *p = fw->data;
2679 struct be_cmd_write_flashrom *req = flash_cmd->va;
Joe Perches215faf92010-12-21 02:16:10 -08002680 const struct flash_comp *pflashcomp;
Sarveshwar Bandi9fe96932010-03-02 22:37:28 +00002681 int num_comp;
Ajit Khaparde84517482009-09-04 03:12:16 +00002682
Sathya Perla306f1342011-08-02 19:57:45 +00002683 static const struct flash_comp gen3_flash_types[10] = {
Ajit Khaparde3f0d4562010-02-09 01:30:35 +00002684 { FLASH_iSCSI_PRIMARY_IMAGE_START_g3, IMG_TYPE_ISCSI_ACTIVE,
2685 FLASH_IMAGE_MAX_SIZE_g3},
2686 { FLASH_REDBOOT_START_g3, IMG_TYPE_REDBOOT,
2687 FLASH_REDBOOT_IMAGE_MAX_SIZE_g3},
2688 { FLASH_iSCSI_BIOS_START_g3, IMG_TYPE_BIOS,
2689 FLASH_BIOS_IMAGE_MAX_SIZE_g3},
2690 { FLASH_PXE_BIOS_START_g3, IMG_TYPE_PXE_BIOS,
2691 FLASH_BIOS_IMAGE_MAX_SIZE_g3},
2692 { FLASH_FCoE_BIOS_START_g3, IMG_TYPE_FCOE_BIOS,
2693 FLASH_BIOS_IMAGE_MAX_SIZE_g3},
2694 { FLASH_iSCSI_BACKUP_IMAGE_START_g3, IMG_TYPE_ISCSI_BACKUP,
2695 FLASH_IMAGE_MAX_SIZE_g3},
2696 { FLASH_FCoE_PRIMARY_IMAGE_START_g3, IMG_TYPE_FCOE_FW_ACTIVE,
2697 FLASH_IMAGE_MAX_SIZE_g3},
2698 { FLASH_FCoE_BACKUP_IMAGE_START_g3, IMG_TYPE_FCOE_FW_BACKUP,
Sarveshwar Bandi9fe96932010-03-02 22:37:28 +00002699 FLASH_IMAGE_MAX_SIZE_g3},
2700 { FLASH_NCSI_START_g3, IMG_TYPE_NCSI_FW,
Sathya Perla306f1342011-08-02 19:57:45 +00002701 FLASH_NCSI_IMAGE_MAX_SIZE_g3},
2702 { FLASH_PHY_FW_START_g3, IMG_TYPE_PHY_FW,
2703 FLASH_PHY_FW_IMAGE_MAX_SIZE_g3}
Ajit Khaparde3f0d4562010-02-09 01:30:35 +00002704 };
Joe Perches215faf92010-12-21 02:16:10 -08002705 static const struct flash_comp gen2_flash_types[8] = {
Ajit Khaparde3f0d4562010-02-09 01:30:35 +00002706 { FLASH_iSCSI_PRIMARY_IMAGE_START_g2, IMG_TYPE_ISCSI_ACTIVE,
2707 FLASH_IMAGE_MAX_SIZE_g2},
2708 { FLASH_REDBOOT_START_g2, IMG_TYPE_REDBOOT,
2709 FLASH_REDBOOT_IMAGE_MAX_SIZE_g2},
2710 { FLASH_iSCSI_BIOS_START_g2, IMG_TYPE_BIOS,
2711 FLASH_BIOS_IMAGE_MAX_SIZE_g2},
2712 { FLASH_PXE_BIOS_START_g2, IMG_TYPE_PXE_BIOS,
2713 FLASH_BIOS_IMAGE_MAX_SIZE_g2},
2714 { FLASH_FCoE_BIOS_START_g2, IMG_TYPE_FCOE_BIOS,
2715 FLASH_BIOS_IMAGE_MAX_SIZE_g2},
2716 { FLASH_iSCSI_BACKUP_IMAGE_START_g2, IMG_TYPE_ISCSI_BACKUP,
2717 FLASH_IMAGE_MAX_SIZE_g2},
2718 { FLASH_FCoE_PRIMARY_IMAGE_START_g2, IMG_TYPE_FCOE_FW_ACTIVE,
2719 FLASH_IMAGE_MAX_SIZE_g2},
2720 { FLASH_FCoE_BACKUP_IMAGE_START_g2, IMG_TYPE_FCOE_FW_BACKUP,
2721 FLASH_IMAGE_MAX_SIZE_g2}
2722 };
2723
2724 if (adapter->generation == BE_GEN3) {
2725 pflashcomp = gen3_flash_types;
2726 filehdr_size = sizeof(struct flash_file_hdr_g3);
Joe Perches215faf92010-12-21 02:16:10 -08002727 num_comp = ARRAY_SIZE(gen3_flash_types);
Ajit Khaparde3f0d4562010-02-09 01:30:35 +00002728 } else {
2729 pflashcomp = gen2_flash_types;
2730 filehdr_size = sizeof(struct flash_file_hdr_g2);
Joe Perches215faf92010-12-21 02:16:10 -08002731 num_comp = ARRAY_SIZE(gen2_flash_types);
Ajit Khaparde84517482009-09-04 03:12:16 +00002732 }
Sarveshwar Bandi9fe96932010-03-02 22:37:28 +00002733 for (i = 0; i < num_comp; i++) {
2734 if ((pflashcomp[i].optype == IMG_TYPE_NCSI_FW) &&
2735 memcmp(adapter->fw_ver, "3.102.148.0", 11) < 0)
2736 continue;
Sathya Perla306f1342011-08-02 19:57:45 +00002737 if (pflashcomp[i].optype == IMG_TYPE_PHY_FW) {
2738 if (!phy_flashing_required(adapter))
2739 continue;
2740 }
Ajit Khaparde3f0d4562010-02-09 01:30:35 +00002741 if ((pflashcomp[i].optype == IMG_TYPE_REDBOOT) &&
2742 (!be_flash_redboot(adapter, fw->data,
Ajit Khapardefae21a42011-02-11 13:37:42 +00002743 pflashcomp[i].offset, pflashcomp[i].size, filehdr_size +
2744 (num_of_images * sizeof(struct image_hdr)))))
Ajit Khaparde3f0d4562010-02-09 01:30:35 +00002745 continue;
2746 p = fw->data;
2747 p += filehdr_size + pflashcomp[i].offset
2748 + (num_of_images * sizeof(struct image_hdr));
Sathya Perla306f1342011-08-02 19:57:45 +00002749 if (p + pflashcomp[i].size > fw->data + fw->size)
2750 return -1;
2751 total_bytes = pflashcomp[i].size;
Ajit Khaparde3f0d4562010-02-09 01:30:35 +00002752 while (total_bytes) {
2753 if (total_bytes > 32*1024)
2754 num_bytes = 32*1024;
2755 else
2756 num_bytes = total_bytes;
2757 total_bytes -= num_bytes;
Sathya Perla306f1342011-08-02 19:57:45 +00002758 if (!total_bytes) {
2759 if (pflashcomp[i].optype == IMG_TYPE_PHY_FW)
2760 flash_op = FLASHROM_OPER_PHY_FLASH;
2761 else
2762 flash_op = FLASHROM_OPER_FLASH;
2763 } else {
2764 if (pflashcomp[i].optype == IMG_TYPE_PHY_FW)
2765 flash_op = FLASHROM_OPER_PHY_SAVE;
2766 else
2767 flash_op = FLASHROM_OPER_SAVE;
2768 }
Ajit Khaparde3f0d4562010-02-09 01:30:35 +00002769 memcpy(req->params.data_buf, p, num_bytes);
2770 p += num_bytes;
2771 status = be_cmd_write_flashrom(adapter, flash_cmd,
2772 pflashcomp[i].optype, flash_op, num_bytes);
2773 if (status) {
Sathya Perla306f1342011-08-02 19:57:45 +00002774 if ((status == ILLEGAL_IOCTL_REQ) &&
2775 (pflashcomp[i].optype ==
2776 IMG_TYPE_PHY_FW))
2777 break;
Ajit Khaparde3f0d4562010-02-09 01:30:35 +00002778 dev_err(&adapter->pdev->dev,
2779 "cmd to write to flash rom failed.\n");
2780 return -1;
2781 }
Ajit Khaparde84517482009-09-04 03:12:16 +00002782 }
Ajit Khaparde84517482009-09-04 03:12:16 +00002783 }
Ajit Khaparde84517482009-09-04 03:12:16 +00002784 return 0;
2785}
2786
Ajit Khaparde3f0d4562010-02-09 01:30:35 +00002787static int get_ufigen_type(struct flash_file_hdr_g2 *fhdr)
2788{
2789 if (fhdr == NULL)
2790 return 0;
2791 if (fhdr->build[0] == '3')
2792 return BE_GEN3;
2793 else if (fhdr->build[0] == '2')
2794 return BE_GEN2;
2795 else
2796 return 0;
2797}
2798
Shripad Nunjundarao485bf562011-05-16 07:36:59 +00002799static int lancer_fw_download(struct be_adapter *adapter,
2800 const struct firmware *fw)
Ajit Khaparde84517482009-09-04 03:12:16 +00002801{
Shripad Nunjundarao485bf562011-05-16 07:36:59 +00002802#define LANCER_FW_DOWNLOAD_CHUNK (32 * 1024)
2803#define LANCER_FW_DOWNLOAD_LOCATION "/prg"
2804 struct be_dma_mem flash_cmd;
Shripad Nunjundarao485bf562011-05-16 07:36:59 +00002805 const u8 *data_ptr = NULL;
2806 u8 *dest_image_ptr = NULL;
2807 size_t image_size = 0;
2808 u32 chunk_size = 0;
2809 u32 data_written = 0;
2810 u32 offset = 0;
2811 int status = 0;
2812 u8 add_status = 0;
2813
2814 if (!IS_ALIGNED(fw->size, sizeof(u32))) {
2815 dev_err(&adapter->pdev->dev,
2816 "FW Image not properly aligned. "
2817 "Length must be 4 byte aligned.\n");
2818 status = -EINVAL;
2819 goto lancer_fw_exit;
2820 }
2821
2822 flash_cmd.size = sizeof(struct lancer_cmd_req_write_object)
2823 + LANCER_FW_DOWNLOAD_CHUNK;
2824 flash_cmd.va = dma_alloc_coherent(&adapter->pdev->dev, flash_cmd.size,
2825 &flash_cmd.dma, GFP_KERNEL);
2826 if (!flash_cmd.va) {
2827 status = -ENOMEM;
2828 dev_err(&adapter->pdev->dev,
2829 "Memory allocation failure while flashing\n");
2830 goto lancer_fw_exit;
2831 }
2832
Shripad Nunjundarao485bf562011-05-16 07:36:59 +00002833 dest_image_ptr = flash_cmd.va +
2834 sizeof(struct lancer_cmd_req_write_object);
2835 image_size = fw->size;
2836 data_ptr = fw->data;
2837
2838 while (image_size) {
2839 chunk_size = min_t(u32, image_size, LANCER_FW_DOWNLOAD_CHUNK);
2840
2841 /* Copy the image chunk content. */
2842 memcpy(dest_image_ptr, data_ptr, chunk_size);
2843
2844 status = lancer_cmd_write_object(adapter, &flash_cmd,
2845 chunk_size, offset, LANCER_FW_DOWNLOAD_LOCATION,
2846 &data_written, &add_status);
2847
2848 if (status)
2849 break;
2850
2851 offset += data_written;
2852 data_ptr += data_written;
2853 image_size -= data_written;
2854 }
2855
2856 if (!status) {
2857 /* Commit the FW written */
2858 status = lancer_cmd_write_object(adapter, &flash_cmd,
2859 0, offset, LANCER_FW_DOWNLOAD_LOCATION,
2860 &data_written, &add_status);
2861 }
2862
2863 dma_free_coherent(&adapter->pdev->dev, flash_cmd.size, flash_cmd.va,
2864 flash_cmd.dma);
2865 if (status) {
2866 dev_err(&adapter->pdev->dev,
2867 "Firmware load error. "
2868 "Status code: 0x%x Additional Status: 0x%x\n",
2869 status, add_status);
2870 goto lancer_fw_exit;
2871 }
2872
2873 dev_info(&adapter->pdev->dev, "Firmware flashed successfully\n");
2874lancer_fw_exit:
2875 return status;
2876}
2877
2878static int be_fw_download(struct be_adapter *adapter, const struct firmware* fw)
2879{
Ajit Khaparde3f0d4562010-02-09 01:30:35 +00002880 struct flash_file_hdr_g2 *fhdr;
2881 struct flash_file_hdr_g3 *fhdr3;
2882 struct image_hdr *img_hdr_ptr = NULL;
Ajit Khaparde84517482009-09-04 03:12:16 +00002883 struct be_dma_mem flash_cmd;
Ajit Khaparde84517482009-09-04 03:12:16 +00002884 const u8 *p;
Shripad Nunjundarao485bf562011-05-16 07:36:59 +00002885 int status = 0, i = 0, num_imgs = 0;
Ajit Khaparde84517482009-09-04 03:12:16 +00002886
2887 p = fw->data;
Ajit Khaparde3f0d4562010-02-09 01:30:35 +00002888 fhdr = (struct flash_file_hdr_g2 *) p;
Ajit Khaparde84517482009-09-04 03:12:16 +00002889
Ajit Khaparde84517482009-09-04 03:12:16 +00002890 flash_cmd.size = sizeof(struct be_cmd_write_flashrom) + 32*1024;
Ivan Vecera2b7bceb2011-02-02 08:05:12 +00002891 flash_cmd.va = dma_alloc_coherent(&adapter->pdev->dev, flash_cmd.size,
2892 &flash_cmd.dma, GFP_KERNEL);
Ajit Khaparde84517482009-09-04 03:12:16 +00002893 if (!flash_cmd.va) {
2894 status = -ENOMEM;
2895 dev_err(&adapter->pdev->dev,
2896 "Memory allocation failure while flashing\n");
Shripad Nunjundarao485bf562011-05-16 07:36:59 +00002897 goto be_fw_exit;
Ajit Khaparde84517482009-09-04 03:12:16 +00002898 }
2899
Ajit Khaparde3f0d4562010-02-09 01:30:35 +00002900 if ((adapter->generation == BE_GEN3) &&
2901 (get_ufigen_type(fhdr) == BE_GEN3)) {
2902 fhdr3 = (struct flash_file_hdr_g3 *) fw->data;
Ajit Khaparde8b93b712010-03-31 01:57:10 +00002903 num_imgs = le32_to_cpu(fhdr3->num_imgs);
2904 for (i = 0; i < num_imgs; i++) {
Ajit Khaparde3f0d4562010-02-09 01:30:35 +00002905 img_hdr_ptr = (struct image_hdr *) (fw->data +
2906 (sizeof(struct flash_file_hdr_g3) +
Ajit Khaparde8b93b712010-03-31 01:57:10 +00002907 i * sizeof(struct image_hdr)));
2908 if (le32_to_cpu(img_hdr_ptr->imageid) == 1)
2909 status = be_flash_data(adapter, fw, &flash_cmd,
2910 num_imgs);
Ajit Khaparde3f0d4562010-02-09 01:30:35 +00002911 }
2912 } else if ((adapter->generation == BE_GEN2) &&
2913 (get_ufigen_type(fhdr) == BE_GEN2)) {
2914 status = be_flash_data(adapter, fw, &flash_cmd, 0);
2915 } else {
2916 dev_err(&adapter->pdev->dev,
2917 "UFI and Interface are not compatible for flashing\n");
2918 status = -1;
Ajit Khaparde84517482009-09-04 03:12:16 +00002919 }
2920
Ivan Vecera2b7bceb2011-02-02 08:05:12 +00002921 dma_free_coherent(&adapter->pdev->dev, flash_cmd.size, flash_cmd.va,
2922 flash_cmd.dma);
Ajit Khaparde84517482009-09-04 03:12:16 +00002923 if (status) {
2924 dev_err(&adapter->pdev->dev, "Firmware load error\n");
Shripad Nunjundarao485bf562011-05-16 07:36:59 +00002925 goto be_fw_exit;
Ajit Khaparde84517482009-09-04 03:12:16 +00002926 }
2927
André Goddard Rosaaf901ca2009-11-14 13:09:05 -02002928 dev_info(&adapter->pdev->dev, "Firmware flashed successfully\n");
Ajit Khaparde84517482009-09-04 03:12:16 +00002929
Shripad Nunjundarao485bf562011-05-16 07:36:59 +00002930be_fw_exit:
2931 return status;
2932}
2933
2934int be_load_fw(struct be_adapter *adapter, u8 *fw_file)
2935{
2936 const struct firmware *fw;
2937 int status;
2938
2939 if (!netif_running(adapter->netdev)) {
2940 dev_err(&adapter->pdev->dev,
2941 "Firmware load not allowed (interface is down)\n");
2942 return -1;
2943 }
2944
2945 status = request_firmware(&fw, fw_file, &adapter->pdev->dev);
2946 if (status)
2947 goto fw_exit;
2948
2949 dev_info(&adapter->pdev->dev, "Flashing firmware file %s\n", fw_file);
2950
2951 if (lancer_chip(adapter))
2952 status = lancer_fw_download(adapter, fw);
2953 else
2954 status = be_fw_download(adapter, fw);
2955
Ajit Khaparde84517482009-09-04 03:12:16 +00002956fw_exit:
2957 release_firmware(fw);
2958 return status;
2959}
2960
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002961static struct net_device_ops be_netdev_ops = {
2962 .ndo_open = be_open,
2963 .ndo_stop = be_close,
2964 .ndo_start_xmit = be_xmit,
Sathya Perlaa54769f2011-10-24 02:45:00 +00002965 .ndo_set_rx_mode = be_set_rx_mode,
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002966 .ndo_set_mac_address = be_mac_addr_set,
2967 .ndo_change_mtu = be_change_mtu,
Sathya Perlaab1594e2011-07-25 19:10:15 +00002968 .ndo_get_stats64 = be_get_stats64,
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002969 .ndo_validate_addr = eth_validate_addr,
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002970 .ndo_vlan_rx_add_vid = be_vlan_add_vid,
2971 .ndo_vlan_rx_kill_vid = be_vlan_rem_vid,
Ajit Khaparde64600ea2010-07-23 01:50:34 +00002972 .ndo_set_vf_mac = be_set_vf_mac,
Ajit Khaparde1da87b72010-07-23 01:51:22 +00002973 .ndo_set_vf_vlan = be_set_vf_vlan,
Ajit Khapardee1d18732010-07-23 01:52:13 +00002974 .ndo_set_vf_tx_rate = be_set_vf_tx_rate,
Ajit Khaparde64600ea2010-07-23 01:50:34 +00002975 .ndo_get_vf_config = be_get_vf_config
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002976};
2977
2978static void be_netdev_init(struct net_device *netdev)
2979{
2980 struct be_adapter *adapter = netdev_priv(netdev);
Sathya Perla3abcded2010-10-03 22:12:27 -07002981 struct be_rx_obj *rxo;
2982 int i;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002983
Michał Mirosław6332c8d2011-04-07 02:43:48 +00002984 netdev->hw_features |= NETIF_F_SG | NETIF_F_TSO | NETIF_F_TSO6 |
Michał Mirosław8b8ddc62011-04-08 02:38:47 +00002985 NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM | NETIF_F_RXCSUM |
2986 NETIF_F_HW_VLAN_TX;
2987 if (be_multi_rxq(adapter))
2988 netdev->hw_features |= NETIF_F_RXHASH;
Michał Mirosław6332c8d2011-04-07 02:43:48 +00002989
2990 netdev->features |= netdev->hw_features |
Michał Mirosław8b8ddc62011-04-08 02:38:47 +00002991 NETIF_F_HW_VLAN_RX | NETIF_F_HW_VLAN_FILTER;
Ajit Khaparde4b972912011-04-06 18:07:43 +00002992
Padmanabh Ratnakareb8a50d2011-06-11 15:58:46 -07002993 netdev->vlan_features |= NETIF_F_SG | NETIF_F_TSO | NETIF_F_TSO6 |
Michał Mirosław79032642010-11-30 06:38:00 +00002994 NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM;
Ajit Khaparde51c59872009-11-29 17:54:54 +00002995
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002996 netdev->flags |= IFF_MULTICAST;
2997
Ajit Khapardec190e3c2009-09-04 03:12:29 +00002998 netif_set_gso_max_size(netdev, 65535);
2999
Sathya Perla6b7c5b92009-03-11 23:32:03 -07003000 BE_SET_NETDEV_OPS(netdev, &be_netdev_ops);
3001
3002 SET_ETHTOOL_OPS(netdev, &be_ethtool_ops);
3003
Sathya Perla3abcded2010-10-03 22:12:27 -07003004 for_all_rx_queues(adapter, rxo, i)
3005 netif_napi_add(netdev, &rxo->rx_eq.napi, be_poll_rx,
3006 BE_NAPI_WEIGHT);
3007
Sathya Perla5fb379e2009-06-18 00:02:59 +00003008 netif_napi_add(netdev, &adapter->tx_eq.napi, be_poll_tx_mcc,
Sathya Perla6b7c5b92009-03-11 23:32:03 -07003009 BE_NAPI_WEIGHT);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07003010}
3011
3012static void be_unmap_pci_bars(struct be_adapter *adapter)
3013{
Sathya Perla8788fdc2009-07-27 22:52:03 +00003014 if (adapter->csr)
3015 iounmap(adapter->csr);
3016 if (adapter->db)
3017 iounmap(adapter->db);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07003018}
3019
3020static int be_map_pci_bars(struct be_adapter *adapter)
3021{
3022 u8 __iomem *addr;
Sathya Perladb3ea782011-08-22 19:41:52 +00003023 int db_reg;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07003024
Sathya Perlafe6d2a32010-11-21 23:25:50 +00003025 if (lancer_chip(adapter)) {
3026 addr = ioremap_nocache(pci_resource_start(adapter->pdev, 0),
3027 pci_resource_len(adapter->pdev, 0));
3028 if (addr == NULL)
3029 return -ENOMEM;
3030 adapter->db = addr;
3031 return 0;
3032 }
3033
Sarveshwar Bandiba343c72010-03-31 02:56:12 +00003034 if (be_physfn(adapter)) {
3035 addr = ioremap_nocache(pci_resource_start(adapter->pdev, 2),
3036 pci_resource_len(adapter->pdev, 2));
3037 if (addr == NULL)
3038 return -ENOMEM;
3039 adapter->csr = addr;
3040 }
Sathya Perla6b7c5b92009-03-11 23:32:03 -07003041
Sarveshwar Bandiba343c72010-03-31 02:56:12 +00003042 if (adapter->generation == BE_GEN2) {
Sarveshwar Bandiba343c72010-03-31 02:56:12 +00003043 db_reg = 4;
3044 } else {
Sarveshwar Bandiba343c72010-03-31 02:56:12 +00003045 if (be_physfn(adapter))
3046 db_reg = 4;
3047 else
3048 db_reg = 0;
3049 }
3050 addr = ioremap_nocache(pci_resource_start(adapter->pdev, db_reg),
3051 pci_resource_len(adapter->pdev, db_reg));
Sathya Perla6b7c5b92009-03-11 23:32:03 -07003052 if (addr == NULL)
3053 goto pci_map_err;
Sathya Perla8788fdc2009-07-27 22:52:03 +00003054 adapter->db = addr;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07003055
Sathya Perla6b7c5b92009-03-11 23:32:03 -07003056 return 0;
3057pci_map_err:
3058 be_unmap_pci_bars(adapter);
3059 return -ENOMEM;
3060}
3061
3062
3063static void be_ctrl_cleanup(struct be_adapter *adapter)
3064{
Sathya Perla8788fdc2009-07-27 22:52:03 +00003065 struct be_dma_mem *mem = &adapter->mbox_mem_alloced;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07003066
3067 be_unmap_pci_bars(adapter);
3068
3069 if (mem->va)
Ivan Vecera2b7bceb2011-02-02 08:05:12 +00003070 dma_free_coherent(&adapter->pdev->dev, mem->size, mem->va,
3071 mem->dma);
Sathya Perlae7b909a2009-11-22 22:01:10 +00003072
Sathya Perla5b8821b2011-08-02 19:57:44 +00003073 mem = &adapter->rx_filter;
Sathya Perlae7b909a2009-11-22 22:01:10 +00003074 if (mem->va)
Ivan Vecera2b7bceb2011-02-02 08:05:12 +00003075 dma_free_coherent(&adapter->pdev->dev, mem->size, mem->va,
3076 mem->dma);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07003077}
3078
Sathya Perla6b7c5b92009-03-11 23:32:03 -07003079static int be_ctrl_init(struct be_adapter *adapter)
3080{
Sathya Perla8788fdc2009-07-27 22:52:03 +00003081 struct be_dma_mem *mbox_mem_alloc = &adapter->mbox_mem_alloced;
3082 struct be_dma_mem *mbox_mem_align = &adapter->mbox_mem;
Sathya Perla5b8821b2011-08-02 19:57:44 +00003083 struct be_dma_mem *rx_filter = &adapter->rx_filter;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07003084 int status;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07003085
3086 status = be_map_pci_bars(adapter);
3087 if (status)
Sathya Perlae7b909a2009-11-22 22:01:10 +00003088 goto done;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07003089
3090 mbox_mem_alloc->size = sizeof(struct be_mcc_mailbox) + 16;
Ivan Vecera2b7bceb2011-02-02 08:05:12 +00003091 mbox_mem_alloc->va = dma_alloc_coherent(&adapter->pdev->dev,
3092 mbox_mem_alloc->size,
3093 &mbox_mem_alloc->dma,
3094 GFP_KERNEL);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07003095 if (!mbox_mem_alloc->va) {
Sathya Perlae7b909a2009-11-22 22:01:10 +00003096 status = -ENOMEM;
3097 goto unmap_pci_bars;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07003098 }
3099 mbox_mem_align->size = sizeof(struct be_mcc_mailbox);
3100 mbox_mem_align->va = PTR_ALIGN(mbox_mem_alloc->va, 16);
3101 mbox_mem_align->dma = PTR_ALIGN(mbox_mem_alloc->dma, 16);
3102 memset(mbox_mem_align->va, 0, sizeof(struct be_mcc_mailbox));
Sathya Perlae7b909a2009-11-22 22:01:10 +00003103
Sathya Perla5b8821b2011-08-02 19:57:44 +00003104 rx_filter->size = sizeof(struct be_cmd_req_rx_filter);
3105 rx_filter->va = dma_alloc_coherent(&adapter->pdev->dev, rx_filter->size,
3106 &rx_filter->dma, GFP_KERNEL);
3107 if (rx_filter->va == NULL) {
Sathya Perlae7b909a2009-11-22 22:01:10 +00003108 status = -ENOMEM;
3109 goto free_mbox;
3110 }
Sathya Perla5b8821b2011-08-02 19:57:44 +00003111 memset(rx_filter->va, 0, rx_filter->size);
Sathya Perlae7b909a2009-11-22 22:01:10 +00003112
Ivan Vecera29849612010-12-14 05:43:19 +00003113 mutex_init(&adapter->mbox_lock);
Sathya Perla8788fdc2009-07-27 22:52:03 +00003114 spin_lock_init(&adapter->mcc_lock);
3115 spin_lock_init(&adapter->mcc_cq_lock);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07003116
Sarveshwar Bandidd131e72010-05-25 16:16:32 -07003117 init_completion(&adapter->flash_compl);
Sathya Perlacf588472010-02-14 21:22:01 +00003118 pci_save_state(adapter->pdev);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07003119 return 0;
Sathya Perlae7b909a2009-11-22 22:01:10 +00003120
3121free_mbox:
Ivan Vecera2b7bceb2011-02-02 08:05:12 +00003122 dma_free_coherent(&adapter->pdev->dev, mbox_mem_alloc->size,
3123 mbox_mem_alloc->va, mbox_mem_alloc->dma);
Sathya Perlae7b909a2009-11-22 22:01:10 +00003124
3125unmap_pci_bars:
3126 be_unmap_pci_bars(adapter);
3127
3128done:
3129 return status;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07003130}
3131
3132static void be_stats_cleanup(struct be_adapter *adapter)
3133{
Sathya Perla3abcded2010-10-03 22:12:27 -07003134 struct be_dma_mem *cmd = &adapter->stats_cmd;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07003135
3136 if (cmd->va)
Ivan Vecera2b7bceb2011-02-02 08:05:12 +00003137 dma_free_coherent(&adapter->pdev->dev, cmd->size,
3138 cmd->va, cmd->dma);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07003139}
3140
3141static int be_stats_init(struct be_adapter *adapter)
3142{
Sathya Perla3abcded2010-10-03 22:12:27 -07003143 struct be_dma_mem *cmd = &adapter->stats_cmd;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07003144
Selvin Xavier005d5692011-05-16 07:36:35 +00003145 if (adapter->generation == BE_GEN2) {
Ajit Khaparde89a88ab2011-05-16 07:36:18 +00003146 cmd->size = sizeof(struct be_cmd_req_get_stats_v0);
Selvin Xavier005d5692011-05-16 07:36:35 +00003147 } else {
3148 if (lancer_chip(adapter))
3149 cmd->size = sizeof(struct lancer_cmd_req_pport_stats);
3150 else
3151 cmd->size = sizeof(struct be_cmd_req_get_stats_v1);
3152 }
Ivan Vecera2b7bceb2011-02-02 08:05:12 +00003153 cmd->va = dma_alloc_coherent(&adapter->pdev->dev, cmd->size, &cmd->dma,
3154 GFP_KERNEL);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07003155 if (cmd->va == NULL)
3156 return -1;
David S. Millerd291b9a2010-01-28 21:36:21 -08003157 memset(cmd->va, 0, cmd->size);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07003158 return 0;
3159}
3160
3161static void __devexit be_remove(struct pci_dev *pdev)
3162{
3163 struct be_adapter *adapter = pci_get_drvdata(pdev);
Sathya Perla8d56ff12009-11-22 22:02:26 +00003164
Sathya Perla6b7c5b92009-03-11 23:32:03 -07003165 if (!adapter)
3166 return;
3167
Somnath Koturf203af72010-10-25 23:01:03 +00003168 cancel_delayed_work_sync(&adapter->work);
3169
Sathya Perla6b7c5b92009-03-11 23:32:03 -07003170 unregister_netdev(adapter->netdev);
3171
Sathya Perla5fb379e2009-06-18 00:02:59 +00003172 be_clear(adapter);
3173
Sathya Perla6b7c5b92009-03-11 23:32:03 -07003174 be_stats_cleanup(adapter);
3175
3176 be_ctrl_cleanup(adapter);
3177
Sarveshwar Bandiba343c72010-03-31 02:56:12 +00003178 be_sriov_disable(adapter);
3179
Sathya Perla8d56ff12009-11-22 22:02:26 +00003180 be_msix_disable(adapter);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07003181
3182 pci_set_drvdata(pdev, NULL);
3183 pci_release_regions(pdev);
3184 pci_disable_device(pdev);
3185
3186 free_netdev(adapter->netdev);
3187}
3188
Sathya Perla2243e2e2009-11-22 22:02:03 +00003189static int be_get_config(struct be_adapter *adapter)
Sathya Perla6b7c5b92009-03-11 23:32:03 -07003190{
Sathya Perla6b7c5b92009-03-11 23:32:03 -07003191 int status;
Sathya Perla43a04fdc2009-10-14 20:21:17 +00003192
Sathya Perla3abcded2010-10-03 22:12:27 -07003193 status = be_cmd_query_fw_cfg(adapter, &adapter->port_num,
3194 &adapter->function_mode, &adapter->function_caps);
Sathya Perla2243e2e2009-11-22 22:02:03 +00003195 if (status)
3196 return status;
3197
Sathya Perla752961a2011-10-24 02:45:03 +00003198 if (adapter->function_mode & FLEX10_MODE)
Ajit Khaparde82903e42010-02-09 01:34:57 +00003199 adapter->max_vlans = BE_NUM_VLANS_SUPPORTED/4;
3200 else
3201 adapter->max_vlans = BE_NUM_VLANS_SUPPORTED;
3202
Ajit Khaparde9e1453c2011-02-20 11:42:22 +00003203 status = be_cmd_get_cntl_attributes(adapter);
3204 if (status)
3205 return status;
3206
Sathya Perla2243e2e2009-11-22 22:02:03 +00003207 return 0;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07003208}
3209
Sathya Perlafe6d2a32010-11-21 23:25:50 +00003210static int be_dev_family_check(struct be_adapter *adapter)
3211{
3212 struct pci_dev *pdev = adapter->pdev;
3213 u32 sli_intf = 0, if_type;
3214
3215 switch (pdev->device) {
3216 case BE_DEVICE_ID1:
3217 case OC_DEVICE_ID1:
3218 adapter->generation = BE_GEN2;
3219 break;
3220 case BE_DEVICE_ID2:
3221 case OC_DEVICE_ID2:
3222 adapter->generation = BE_GEN3;
3223 break;
3224 case OC_DEVICE_ID3:
Mammatha Edhala12f4d0a2011-05-18 03:26:22 +00003225 case OC_DEVICE_ID4:
Sathya Perlafe6d2a32010-11-21 23:25:50 +00003226 pci_read_config_dword(pdev, SLI_INTF_REG_OFFSET, &sli_intf);
3227 if_type = (sli_intf & SLI_INTF_IF_TYPE_MASK) >>
3228 SLI_INTF_IF_TYPE_SHIFT;
3229
3230 if (((sli_intf & SLI_INTF_VALID_MASK) != SLI_INTF_VALID) ||
3231 if_type != 0x02) {
3232 dev_err(&pdev->dev, "SLI_INTF reg val is not valid\n");
3233 return -EINVAL;
3234 }
Sathya Perlafe6d2a32010-11-21 23:25:50 +00003235 adapter->sli_family = ((sli_intf & SLI_INTF_FAMILY_MASK) >>
3236 SLI_INTF_FAMILY_SHIFT);
3237 adapter->generation = BE_GEN3;
3238 break;
3239 default:
3240 adapter->generation = 0;
3241 }
3242 return 0;
3243}
3244
Padmanabh Ratnakar37eed1c2011-03-07 03:08:36 +00003245static int lancer_wait_ready(struct be_adapter *adapter)
3246{
3247#define SLIPORT_READY_TIMEOUT 500
3248 u32 sliport_status;
3249 int status = 0, i;
3250
3251 for (i = 0; i < SLIPORT_READY_TIMEOUT; i++) {
3252 sliport_status = ioread32(adapter->db + SLIPORT_STATUS_OFFSET);
3253 if (sliport_status & SLIPORT_STATUS_RDY_MASK)
3254 break;
3255
3256 msleep(20);
3257 }
3258
3259 if (i == SLIPORT_READY_TIMEOUT)
3260 status = -1;
3261
3262 return status;
3263}
3264
3265static int lancer_test_and_set_rdy_state(struct be_adapter *adapter)
3266{
3267 int status;
3268 u32 sliport_status, err, reset_needed;
3269 status = lancer_wait_ready(adapter);
3270 if (!status) {
3271 sliport_status = ioread32(adapter->db + SLIPORT_STATUS_OFFSET);
3272 err = sliport_status & SLIPORT_STATUS_ERR_MASK;
3273 reset_needed = sliport_status & SLIPORT_STATUS_RN_MASK;
3274 if (err && reset_needed) {
3275 iowrite32(SLI_PORT_CONTROL_IP_MASK,
3276 adapter->db + SLIPORT_CONTROL_OFFSET);
3277
3278 /* check adapter has corrected the error */
3279 status = lancer_wait_ready(adapter);
3280 sliport_status = ioread32(adapter->db +
3281 SLIPORT_STATUS_OFFSET);
3282 sliport_status &= (SLIPORT_STATUS_ERR_MASK |
3283 SLIPORT_STATUS_RN_MASK);
3284 if (status || sliport_status)
3285 status = -1;
3286 } else if (err || reset_needed) {
3287 status = -1;
3288 }
3289 }
3290 return status;
3291}
3292
Sathya Perla6b7c5b92009-03-11 23:32:03 -07003293static int __devinit be_probe(struct pci_dev *pdev,
3294 const struct pci_device_id *pdev_id)
3295{
3296 int status = 0;
3297 struct be_adapter *adapter;
3298 struct net_device *netdev;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07003299
3300 status = pci_enable_device(pdev);
3301 if (status)
3302 goto do_none;
3303
3304 status = pci_request_regions(pdev, DRV_NAME);
3305 if (status)
3306 goto disable_dev;
3307 pci_set_master(pdev);
3308
Sathya Perla3c8def92011-06-12 20:01:58 +00003309 netdev = alloc_etherdev_mq(sizeof(struct be_adapter), MAX_TX_QS);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07003310 if (netdev == NULL) {
3311 status = -ENOMEM;
3312 goto rel_reg;
3313 }
3314 adapter = netdev_priv(netdev);
3315 adapter->pdev = pdev;
3316 pci_set_drvdata(pdev, adapter);
Sathya Perlafe6d2a32010-11-21 23:25:50 +00003317
3318 status = be_dev_family_check(adapter);
Sathya Perla63657b92010-12-01 01:02:28 +00003319 if (status)
Sathya Perlafe6d2a32010-11-21 23:25:50 +00003320 goto free_netdev;
3321
Sathya Perla6b7c5b92009-03-11 23:32:03 -07003322 adapter->netdev = netdev;
Sathya Perla2243e2e2009-11-22 22:02:03 +00003323 SET_NETDEV_DEV(netdev, &pdev->dev);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07003324
Ivan Vecera2b7bceb2011-02-02 08:05:12 +00003325 status = dma_set_mask(&pdev->dev, DMA_BIT_MASK(64));
Sathya Perla6b7c5b92009-03-11 23:32:03 -07003326 if (!status) {
3327 netdev->features |= NETIF_F_HIGHDMA;
3328 } else {
Ivan Vecera2b7bceb2011-02-02 08:05:12 +00003329 status = dma_set_mask(&pdev->dev, DMA_BIT_MASK(32));
Sathya Perla6b7c5b92009-03-11 23:32:03 -07003330 if (status) {
3331 dev_err(&pdev->dev, "Could not set PCI DMA Mask\n");
3332 goto free_netdev;
3333 }
3334 }
3335
Sathya Perlaf9449ab2011-10-24 02:45:01 +00003336 status = be_sriov_enable(adapter);
3337 if (status)
3338 goto free_netdev;
Sarveshwar Bandiba343c72010-03-31 02:56:12 +00003339
Sathya Perla6b7c5b92009-03-11 23:32:03 -07003340 status = be_ctrl_init(adapter);
3341 if (status)
Sathya Perlaf9449ab2011-10-24 02:45:01 +00003342 goto disable_sriov;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07003343
Padmanabh Ratnakar37eed1c2011-03-07 03:08:36 +00003344 if (lancer_chip(adapter)) {
3345 status = lancer_test_and_set_rdy_state(adapter);
3346 if (status) {
3347 dev_err(&pdev->dev, "Adapter in non recoverable error\n");
Ajit Khaparde48f5a192011-04-06 18:08:30 +00003348 goto ctrl_clean;
Padmanabh Ratnakar37eed1c2011-03-07 03:08:36 +00003349 }
3350 }
3351
Sathya Perla2243e2e2009-11-22 22:02:03 +00003352 /* sync up with fw's ready state */
Sarveshwar Bandiba343c72010-03-31 02:56:12 +00003353 if (be_physfn(adapter)) {
3354 status = be_cmd_POST(adapter);
3355 if (status)
3356 goto ctrl_clean;
Sarveshwar Bandiba343c72010-03-31 02:56:12 +00003357 }
Sathya Perla2243e2e2009-11-22 22:02:03 +00003358
3359 /* tell fw we're ready to fire cmds */
3360 status = be_cmd_fw_init(adapter);
3361 if (status)
3362 goto ctrl_clean;
3363
Ajit Khapardea4b4dfa2011-02-11 13:36:57 +00003364 status = be_cmd_reset_function(adapter);
3365 if (status)
3366 goto ctrl_clean;
Sarveshwar Bandi556ae192010-05-24 18:38:25 -07003367
Sathya Perla6b7c5b92009-03-11 23:32:03 -07003368 status = be_stats_init(adapter);
3369 if (status)
3370 goto ctrl_clean;
3371
Sathya Perla2243e2e2009-11-22 22:02:03 +00003372 status = be_get_config(adapter);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07003373 if (status)
3374 goto stats_clean;
3375
Sathya Perlab9ab82c2011-06-29 23:33:37 +00003376 /* The INTR bit may be set in the card when probed by a kdump kernel
3377 * after a crash.
3378 */
3379 if (!lancer_chip(adapter))
3380 be_intr_set(adapter, false);
3381
Sathya Perla3abcded2010-10-03 22:12:27 -07003382 be_msix_enable(adapter);
3383
Sathya Perla6b7c5b92009-03-11 23:32:03 -07003384 INIT_DELAYED_WORK(&adapter->work, be_worker);
Sathya Perlaa54769f2011-10-24 02:45:00 +00003385 adapter->rx_fc = adapter->tx_fc = true;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07003386
Sathya Perla5fb379e2009-06-18 00:02:59 +00003387 status = be_setup(adapter);
3388 if (status)
Sathya Perla3abcded2010-10-03 22:12:27 -07003389 goto msix_disable;
Sathya Perla2243e2e2009-11-22 22:02:03 +00003390
Sathya Perla3abcded2010-10-03 22:12:27 -07003391 be_netdev_init(netdev);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07003392 status = register_netdev(netdev);
3393 if (status != 0)
Sathya Perla5fb379e2009-06-18 00:02:59 +00003394 goto unsetup;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07003395
Ajit Khapardec4ca2372009-05-18 15:38:55 -07003396 dev_info(&pdev->dev, "%s port %d\n", nic_name(pdev), adapter->port_num);
Somnath Kotur34b1ef02011-06-01 00:33:22 +00003397
Somnath Koturf203af72010-10-25 23:01:03 +00003398 schedule_delayed_work(&adapter->work, msecs_to_jiffies(100));
Sathya Perla6b7c5b92009-03-11 23:32:03 -07003399 return 0;
3400
Sathya Perla5fb379e2009-06-18 00:02:59 +00003401unsetup:
3402 be_clear(adapter);
Sathya Perla3abcded2010-10-03 22:12:27 -07003403msix_disable:
3404 be_msix_disable(adapter);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07003405stats_clean:
3406 be_stats_cleanup(adapter);
3407ctrl_clean:
3408 be_ctrl_cleanup(adapter);
Sathya Perlaf9449ab2011-10-24 02:45:01 +00003409disable_sriov:
Sarveshwar Bandiba343c72010-03-31 02:56:12 +00003410 be_sriov_disable(adapter);
Sathya Perlaf9449ab2011-10-24 02:45:01 +00003411free_netdev:
Sathya Perlafe6d2a32010-11-21 23:25:50 +00003412 free_netdev(netdev);
Sathya Perla8d56ff12009-11-22 22:02:26 +00003413 pci_set_drvdata(pdev, NULL);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07003414rel_reg:
3415 pci_release_regions(pdev);
3416disable_dev:
3417 pci_disable_device(pdev);
3418do_none:
Ajit Khapardec4ca2372009-05-18 15:38:55 -07003419 dev_err(&pdev->dev, "%s initialization failed\n", nic_name(pdev));
Sathya Perla6b7c5b92009-03-11 23:32:03 -07003420 return status;
3421}
3422
3423static int be_suspend(struct pci_dev *pdev, pm_message_t state)
3424{
3425 struct be_adapter *adapter = pci_get_drvdata(pdev);
3426 struct net_device *netdev = adapter->netdev;
3427
Ajit Khapardea4ca0552011-02-11 13:38:03 +00003428 cancel_delayed_work_sync(&adapter->work);
Ajit Khaparde71d8d1b2009-12-03 06:16:59 +00003429 if (adapter->wol)
3430 be_setup_wol(adapter, true);
3431
Sathya Perla6b7c5b92009-03-11 23:32:03 -07003432 netif_device_detach(netdev);
3433 if (netif_running(netdev)) {
3434 rtnl_lock();
3435 be_close(netdev);
3436 rtnl_unlock();
3437 }
Sarveshwar Bandi9b0365f2009-08-12 21:01:29 +00003438 be_clear(adapter);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07003439
Ajit Khapardea4ca0552011-02-11 13:38:03 +00003440 be_msix_disable(adapter);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07003441 pci_save_state(pdev);
3442 pci_disable_device(pdev);
3443 pci_set_power_state(pdev, pci_choose_state(pdev, state));
3444 return 0;
3445}
3446
3447static int be_resume(struct pci_dev *pdev)
3448{
3449 int status = 0;
3450 struct be_adapter *adapter = pci_get_drvdata(pdev);
3451 struct net_device *netdev = adapter->netdev;
3452
3453 netif_device_detach(netdev);
3454
3455 status = pci_enable_device(pdev);
3456 if (status)
3457 return status;
3458
3459 pci_set_power_state(pdev, 0);
3460 pci_restore_state(pdev);
3461
Ajit Khapardea4ca0552011-02-11 13:38:03 +00003462 be_msix_enable(adapter);
Sathya Perla2243e2e2009-11-22 22:02:03 +00003463 /* tell fw we're ready to fire cmds */
3464 status = be_cmd_fw_init(adapter);
3465 if (status)
3466 return status;
3467
Sarveshwar Bandi9b0365f2009-08-12 21:01:29 +00003468 be_setup(adapter);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07003469 if (netif_running(netdev)) {
3470 rtnl_lock();
Sathya Perla6b7c5b92009-03-11 23:32:03 -07003471 be_open(netdev);
3472 rtnl_unlock();
3473 }
3474 netif_device_attach(netdev);
Ajit Khaparde71d8d1b2009-12-03 06:16:59 +00003475
3476 if (adapter->wol)
3477 be_setup_wol(adapter, false);
Ajit Khapardea4ca0552011-02-11 13:38:03 +00003478
3479 schedule_delayed_work(&adapter->work, msecs_to_jiffies(100));
Sathya Perla6b7c5b92009-03-11 23:32:03 -07003480 return 0;
3481}
3482
Sathya Perla82456b02010-02-17 01:35:37 +00003483/*
3484 * An FLR will stop BE from DMAing any data.
3485 */
3486static void be_shutdown(struct pci_dev *pdev)
3487{
3488 struct be_adapter *adapter = pci_get_drvdata(pdev);
Sathya Perla82456b02010-02-17 01:35:37 +00003489
Ajit Khaparde2d5d4152011-04-06 05:53:13 +00003490 if (!adapter)
3491 return;
Sathya Perla82456b02010-02-17 01:35:37 +00003492
Sathya Perla0f4a6822011-03-21 20:49:28 +00003493 cancel_delayed_work_sync(&adapter->work);
Ajit Khapardea4ca0552011-02-11 13:38:03 +00003494
Ajit Khaparde2d5d4152011-04-06 05:53:13 +00003495 netif_device_detach(adapter->netdev);
Sathya Perla82456b02010-02-17 01:35:37 +00003496
Sathya Perla82456b02010-02-17 01:35:37 +00003497 if (adapter->wol)
3498 be_setup_wol(adapter, true);
3499
Ajit Khaparde57841862011-04-06 18:08:43 +00003500 be_cmd_reset_function(adapter);
3501
Sathya Perla82456b02010-02-17 01:35:37 +00003502 pci_disable_device(pdev);
Sathya Perla82456b02010-02-17 01:35:37 +00003503}
3504
Sathya Perlacf588472010-02-14 21:22:01 +00003505static pci_ers_result_t be_eeh_err_detected(struct pci_dev *pdev,
3506 pci_channel_state_t state)
3507{
3508 struct be_adapter *adapter = pci_get_drvdata(pdev);
3509 struct net_device *netdev = adapter->netdev;
3510
3511 dev_err(&adapter->pdev->dev, "EEH error detected\n");
3512
3513 adapter->eeh_err = true;
3514
3515 netif_device_detach(netdev);
3516
3517 if (netif_running(netdev)) {
3518 rtnl_lock();
3519 be_close(netdev);
3520 rtnl_unlock();
3521 }
3522 be_clear(adapter);
3523
3524 if (state == pci_channel_io_perm_failure)
3525 return PCI_ERS_RESULT_DISCONNECT;
3526
3527 pci_disable_device(pdev);
3528
3529 return PCI_ERS_RESULT_NEED_RESET;
3530}
3531
3532static pci_ers_result_t be_eeh_reset(struct pci_dev *pdev)
3533{
3534 struct be_adapter *adapter = pci_get_drvdata(pdev);
3535 int status;
3536
3537 dev_info(&adapter->pdev->dev, "EEH reset\n");
3538 adapter->eeh_err = false;
3539
3540 status = pci_enable_device(pdev);
3541 if (status)
3542 return PCI_ERS_RESULT_DISCONNECT;
3543
3544 pci_set_master(pdev);
3545 pci_set_power_state(pdev, 0);
3546 pci_restore_state(pdev);
3547
3548 /* Check if card is ok and fw is ready */
3549 status = be_cmd_POST(adapter);
3550 if (status)
3551 return PCI_ERS_RESULT_DISCONNECT;
3552
3553 return PCI_ERS_RESULT_RECOVERED;
3554}
3555
3556static void be_eeh_resume(struct pci_dev *pdev)
3557{
3558 int status = 0;
3559 struct be_adapter *adapter = pci_get_drvdata(pdev);
3560 struct net_device *netdev = adapter->netdev;
3561
3562 dev_info(&adapter->pdev->dev, "EEH resume\n");
3563
3564 pci_save_state(pdev);
3565
3566 /* tell fw we're ready to fire cmds */
3567 status = be_cmd_fw_init(adapter);
3568 if (status)
3569 goto err;
3570
3571 status = be_setup(adapter);
3572 if (status)
3573 goto err;
3574
3575 if (netif_running(netdev)) {
3576 status = be_open(netdev);
3577 if (status)
3578 goto err;
3579 }
3580 netif_device_attach(netdev);
3581 return;
3582err:
3583 dev_err(&adapter->pdev->dev, "EEH resume failed\n");
Sathya Perlacf588472010-02-14 21:22:01 +00003584}
3585
3586static struct pci_error_handlers be_eeh_handlers = {
3587 .error_detected = be_eeh_err_detected,
3588 .slot_reset = be_eeh_reset,
3589 .resume = be_eeh_resume,
3590};
3591
Sathya Perla6b7c5b92009-03-11 23:32:03 -07003592static struct pci_driver be_driver = {
3593 .name = DRV_NAME,
3594 .id_table = be_dev_ids,
3595 .probe = be_probe,
3596 .remove = be_remove,
3597 .suspend = be_suspend,
Sathya Perlacf588472010-02-14 21:22:01 +00003598 .resume = be_resume,
Sathya Perla82456b02010-02-17 01:35:37 +00003599 .shutdown = be_shutdown,
Sathya Perlacf588472010-02-14 21:22:01 +00003600 .err_handler = &be_eeh_handlers
Sathya Perla6b7c5b92009-03-11 23:32:03 -07003601};
3602
3603static int __init be_init_module(void)
3604{
Joe Perches8e95a202009-12-03 07:58:21 +00003605 if (rx_frag_size != 8192 && rx_frag_size != 4096 &&
3606 rx_frag_size != 2048) {
Sathya Perla6b7c5b92009-03-11 23:32:03 -07003607 printk(KERN_WARNING DRV_NAME
3608 " : Module param rx_frag_size must be 2048/4096/8192."
3609 " Using 2048\n");
3610 rx_frag_size = 2048;
3611 }
Sathya Perla6b7c5b92009-03-11 23:32:03 -07003612
Sathya Perla6b7c5b92009-03-11 23:32:03 -07003613 return pci_register_driver(&be_driver);
3614}
3615module_init(be_init_module);
3616
3617static void __exit be_exit_module(void)
3618{
3619 pci_unregister_driver(&be_driver);
3620}
3621module_exit(be_exit_module);