blob: 2143e06f1ae92169a8fd814ea372252f86909aa1 [file] [log] [blame]
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001/*
Ajit Khaparded2145cd2011-03-16 08:20:46 +00002 * Copyright (C) 2005 - 2011 Emulex
Sathya Perla6b7c5b92009-03-11 23:32:03 -07003 * All rights reserved.
4 *
5 * This program is free software; you can redistribute it and/or
6 * modify it under the terms of the GNU General Public License version 2
7 * as published by the Free Software Foundation. The full GNU General
8 * Public License is included in this distribution in the file called COPYING.
9 *
10 * Contact Information:
Ajit Khaparded2145cd2011-03-16 08:20:46 +000011 * linux-drivers@emulex.com
Sathya Perla6b7c5b92009-03-11 23:32:03 -070012 *
Ajit Khaparded2145cd2011-03-16 08:20:46 +000013 * Emulex
14 * 3333 Susan Street
15 * Costa Mesa, CA 92626
Sathya Perla6b7c5b92009-03-11 23:32:03 -070016 */
17
Paul Gortmaker70c71602011-05-22 16:47:17 -040018#include <linux/prefetch.h>
Paul Gortmaker9d9779e2011-07-03 15:21:01 -040019#include <linux/module.h>
Sathya Perla6b7c5b92009-03-11 23:32:03 -070020#include "be.h"
Sathya Perla8788fdc2009-07-27 22:52:03 +000021#include "be_cmds.h"
Stephen Hemminger65f71b82009-03-27 00:25:24 -070022#include <asm/div64.h>
Sathya Perlad6b6d982012-09-05 01:56:48 +000023#include <linux/aer.h>
Sathya Perla6b7c5b92009-03-11 23:32:03 -070024
25MODULE_VERSION(DRV_VER);
26MODULE_DEVICE_TABLE(pci, be_dev_ids);
27MODULE_DESCRIPTION(DRV_DESC " " DRV_VER);
28MODULE_AUTHOR("ServerEngines Corporation");
29MODULE_LICENSE("GPL");
30
Sarveshwar Bandiba343c72010-03-31 02:56:12 +000031static unsigned int num_vfs;
Sarveshwar Bandiba343c72010-03-31 02:56:12 +000032module_param(num_vfs, uint, S_IRUGO);
Sarveshwar Bandiba343c72010-03-31 02:56:12 +000033MODULE_PARM_DESC(num_vfs, "Number of PCI VFs to initialize");
Sathya Perla6b7c5b92009-03-11 23:32:03 -070034
Sathya Perla11ac75e2011-12-13 00:58:50 +000035static ushort rx_frag_size = 2048;
36module_param(rx_frag_size, ushort, S_IRUGO);
37MODULE_PARM_DESC(rx_frag_size, "Size of a fragment that holds rcvd data.");
38
/* PCI device IDs claimed by this driver (BE2/BE3 and Lancer-based boards,
 * under both the legacy ServerEngines and the Emulex vendor IDs).
 */
static DEFINE_PCI_DEVICE_TABLE(be_dev_ids) = {
	{ PCI_DEVICE(BE_VENDOR_ID, BE_DEVICE_ID1) },
	{ PCI_DEVICE(BE_VENDOR_ID, BE_DEVICE_ID2) },
	{ PCI_DEVICE(BE_VENDOR_ID, OC_DEVICE_ID1) },
	{ PCI_DEVICE(BE_VENDOR_ID, OC_DEVICE_ID2) },
	{ PCI_DEVICE(EMULEX_VENDOR_ID, OC_DEVICE_ID3)},
	{ PCI_DEVICE(EMULEX_VENDOR_ID, OC_DEVICE_ID4)},
	{ PCI_DEVICE(EMULEX_VENDOR_ID, OC_DEVICE_ID5)},
	{ 0 }
};
MODULE_DEVICE_TABLE(pci, be_dev_ids);
/* UE Status Low CSR: one description string per bit, used when decoding
 * an Unrecoverable Error reported by the adapter. Index == bit position.
 * (Trailing spaces in some entries are preserved as-is for log formatting.)
 */
static const char * const ue_status_low_desc[] = {
	"CEV",
	"CTX",
	"DBUF",
	"ERX",
	"Host",
	"MPU",
	"NDMA",
	"PTC ",
	"RDMA ",
	"RXF ",
	"RXIPS ",
	"RXULP0 ",
	"RXULP1 ",
	"RXULP2 ",
	"TIM ",
	"TPOST ",
	"TPRE ",
	"TXIPS ",
	"TXULP0 ",
	"TXULP1 ",
	"UC ",
	"WDMA ",
	"TXULP2 ",
	"HOST1 ",
	"P0_OB_LINK ",
	"P1_OB_LINK ",
	"HOST_GPIO ",
	"MBOX ",
	"AXGMAC0",
	"AXGMAC1",
	"JTAG",
	"MPU_INTPEND"
};
/* UE Status High CSR: bit-position-indexed descriptions for the upper
 * Unrecoverable Error status word; "Unknown" entries are reserved bits.
 */
static const char * const ue_status_hi_desc[] = {
	"LPCMEMHOST",
	"MGMT_MAC",
	"PCS0ONLINE",
	"MPU_IRAM",
	"PCS1ONLINE",
	"PCTL0",
	"PCTL1",
	"PMEM",
	"RR",
	"TXPB",
	"RXPP",
	"XAUI",
	"TXP",
	"ARM",
	"IPC",
	"HOST2",
	"HOST3",
	"HOST4",
	"HOST5",
	"HOST6",
	"HOST7",
	"HOST8",
	"HOST9",
	"NETC",
	"Unknown",
	"Unknown",
	"Unknown",
	"Unknown",
	"Unknown",
	"Unknown",
	"Unknown",
	"Unknown"
};
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700120
Sathya Perla752961a2011-10-24 02:45:03 +0000121/* Is BE in a multi-channel mode */
122static inline bool be_is_mc(struct be_adapter *adapter) {
123 return (adapter->function_mode & FLEX10_MODE ||
124 adapter->function_mode & VNIC_MODE ||
125 adapter->function_mode & UMC_ENABLED);
126}
127
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700128static void be_queue_free(struct be_adapter *adapter, struct be_queue_info *q)
129{
130 struct be_dma_mem *mem = &q->dma_mem;
Sathya Perla1cfafab2012-02-23 18:50:15 +0000131 if (mem->va) {
Ivan Vecera2b7bceb2011-02-02 08:05:12 +0000132 dma_free_coherent(&adapter->pdev->dev, mem->size, mem->va,
133 mem->dma);
Sathya Perla1cfafab2012-02-23 18:50:15 +0000134 mem->va = NULL;
135 }
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700136}
137
138static int be_queue_alloc(struct be_adapter *adapter, struct be_queue_info *q,
139 u16 len, u16 entry_size)
140{
141 struct be_dma_mem *mem = &q->dma_mem;
142
143 memset(q, 0, sizeof(*q));
144 q->len = len;
145 q->entry_size = entry_size;
146 mem->size = len * entry_size;
Ivan Vecera2b7bceb2011-02-02 08:05:12 +0000147 mem->va = dma_alloc_coherent(&adapter->pdev->dev, mem->size, &mem->dma,
148 GFP_KERNEL);
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700149 if (!mem->va)
Sathya Perla10ef9ab2012-02-09 18:05:27 +0000150 return -ENOMEM;
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700151 memset(mem->va, 0, mem->size);
152 return 0;
153}
154
Sathya Perla8788fdc2009-07-27 22:52:03 +0000155static void be_intr_set(struct be_adapter *adapter, bool enable)
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700156{
Sathya Perladb3ea782011-08-22 19:41:52 +0000157 u32 reg, enabled;
Sathya Perla5f0b8492009-07-27 22:52:56 +0000158
Padmanabh Ratnakarf67ef7b2012-07-12 03:57:09 +0000159 if (adapter->eeh_error)
Sathya Perlacf588472010-02-14 21:22:01 +0000160 return;
161
Sathya Perladb3ea782011-08-22 19:41:52 +0000162 pci_read_config_dword(adapter->pdev, PCICFG_MEMBAR_CTRL_INT_CTRL_OFFSET,
163 &reg);
164 enabled = reg & MEMBAR_CTRL_INT_CTRL_HOSTINTR_MASK;
165
Sathya Perla5f0b8492009-07-27 22:52:56 +0000166 if (!enabled && enable)
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700167 reg |= MEMBAR_CTRL_INT_CTRL_HOSTINTR_MASK;
Sathya Perla5f0b8492009-07-27 22:52:56 +0000168 else if (enabled && !enable)
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700169 reg &= ~MEMBAR_CTRL_INT_CTRL_HOSTINTR_MASK;
Sathya Perla5f0b8492009-07-27 22:52:56 +0000170 else
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700171 return;
Sathya Perla5f0b8492009-07-27 22:52:56 +0000172
Sathya Perladb3ea782011-08-22 19:41:52 +0000173 pci_write_config_dword(adapter->pdev,
174 PCICFG_MEMBAR_CTRL_INT_CTRL_OFFSET, reg);
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700175}
176
Sathya Perla8788fdc2009-07-27 22:52:03 +0000177static void be_rxq_notify(struct be_adapter *adapter, u16 qid, u16 posted)
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700178{
179 u32 val = 0;
180 val |= qid & DB_RQ_RING_ID_MASK;
181 val |= posted << DB_RQ_NUM_POSTED_SHIFT;
Sathya Perlaf3eb62d2010-06-29 00:11:17 +0000182
183 wmb();
Sathya Perla8788fdc2009-07-27 22:52:03 +0000184 iowrite32(val, adapter->db + DB_RQ_OFFSET);
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700185}
186
Sathya Perla8788fdc2009-07-27 22:52:03 +0000187static void be_txq_notify(struct be_adapter *adapter, u16 qid, u16 posted)
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700188{
189 u32 val = 0;
190 val |= qid & DB_TXULP_RING_ID_MASK;
191 val |= (posted & DB_TXULP_NUM_POSTED_MASK) << DB_TXULP_NUM_POSTED_SHIFT;
Sathya Perlaf3eb62d2010-06-29 00:11:17 +0000192
193 wmb();
Sathya Perla8788fdc2009-07-27 22:52:03 +0000194 iowrite32(val, adapter->db + DB_TXULP1_OFFSET);
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700195}
196
Sathya Perla8788fdc2009-07-27 22:52:03 +0000197static void be_eq_notify(struct be_adapter *adapter, u16 qid,
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700198 bool arm, bool clear_int, u16 num_popped)
199{
200 u32 val = 0;
201 val |= qid & DB_EQ_RING_ID_MASK;
Sathya Perlafe6d2a32010-11-21 23:25:50 +0000202 val |= ((qid & DB_EQ_RING_ID_EXT_MASK) <<
203 DB_EQ_RING_ID_EXT_MASK_SHIFT);
Sathya Perlacf588472010-02-14 21:22:01 +0000204
Padmanabh Ratnakarf67ef7b2012-07-12 03:57:09 +0000205 if (adapter->eeh_error)
Sathya Perlacf588472010-02-14 21:22:01 +0000206 return;
207
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700208 if (arm)
209 val |= 1 << DB_EQ_REARM_SHIFT;
210 if (clear_int)
211 val |= 1 << DB_EQ_CLR_SHIFT;
212 val |= 1 << DB_EQ_EVNT_SHIFT;
213 val |= num_popped << DB_EQ_NUM_POPPED_SHIFT;
Sathya Perla8788fdc2009-07-27 22:52:03 +0000214 iowrite32(val, adapter->db + DB_EQ_OFFSET);
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700215}
216
Sathya Perla8788fdc2009-07-27 22:52:03 +0000217void be_cq_notify(struct be_adapter *adapter, u16 qid, bool arm, u16 num_popped)
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700218{
219 u32 val = 0;
220 val |= qid & DB_CQ_RING_ID_MASK;
Sathya Perlafe6d2a32010-11-21 23:25:50 +0000221 val |= ((qid & DB_CQ_RING_ID_EXT_MASK) <<
222 DB_CQ_RING_ID_EXT_MASK_SHIFT);
Sathya Perlacf588472010-02-14 21:22:01 +0000223
Padmanabh Ratnakarf67ef7b2012-07-12 03:57:09 +0000224 if (adapter->eeh_error)
Sathya Perlacf588472010-02-14 21:22:01 +0000225 return;
226
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700227 if (arm)
228 val |= 1 << DB_CQ_REARM_SHIFT;
229 val |= num_popped << DB_CQ_NUM_POPPED_SHIFT;
Sathya Perla8788fdc2009-07-27 22:52:03 +0000230 iowrite32(val, adapter->db + DB_CQ_OFFSET);
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700231}
232
/* net_device_ops->ndo_set_mac_address handler.
 * Programs a new unicast MAC into the adapter and updates netdev->dev_addr.
 * Returns 0 on success, -EADDRNOTAVAIL for an invalid address, or the
 * firmware command status on failure.
 */
static int be_mac_addr_set(struct net_device *netdev, void *p)
{
	struct be_adapter *adapter = netdev_priv(netdev);
	struct sockaddr *addr = p;
	int status = 0;
	u8 current_mac[ETH_ALEN];
	u32 pmac_id = adapter->pmac_id[0];
	bool active_mac = true;

	if (!is_valid_ether_addr(addr->sa_data))
		return -EADDRNOTAVAIL;

	/* For BE VF, MAC address is already activated by PF.
	 * Hence only operation left is updating netdev->devaddr.
	 * Update it if user is passing the same MAC which was used
	 * during configuring VF MAC from PF(Hypervisor).
	 */
	if (!lancer_chip(adapter) && !be_physfn(adapter)) {
		status = be_cmd_mac_addr_query(adapter, current_mac,
					       false, adapter->if_handle, 0);
		if (!status && !memcmp(current_mac, addr->sa_data, ETH_ALEN))
			goto done;
		else
			goto err;
	}

	/* no-op if the requested MAC is already the current one */
	if (!memcmp(addr->sa_data, netdev->dev_addr, ETH_ALEN))
		goto done;

	/* For Lancer check if any MAC is active.
	 * If active, get its mac id.
	 */
	if (lancer_chip(adapter) && !be_physfn(adapter))
		be_cmd_get_mac_from_list(adapter, current_mac, &active_mac,
					 &pmac_id, 0);

	/* add the new MAC first, then remove the old one below, so the
	 * interface is never left without a programmed address */
	status = be_cmd_pmac_add(adapter, (u8 *)addr->sa_data,
				 adapter->if_handle,
				 &adapter->pmac_id[0], 0);

	if (status)
		goto err;

	if (active_mac)
		be_cmd_pmac_del(adapter, adapter->if_handle,
				pmac_id, 0);
done:
	memcpy(netdev->dev_addr, addr->sa_data, netdev->addr_len);
	return 0;
err:
	dev_err(&adapter->pdev->dev, "MAC %pM set Failed\n", addr->sa_data);
	return status;
}
286
/* Copy the v0 (BE2) firmware stats snapshot into the driver's
 * chip-agnostic drv_stats structure. The per-port rxf stats are taken
 * from the slot matching this function's port number.
 */
static void populate_be2_stats(struct be_adapter *adapter)
{
	struct be_hw_stats_v0 *hw_stats = hw_stats_from_cmd(adapter);
	struct be_pmem_stats *pmem_sts = &hw_stats->pmem;
	struct be_rxf_stats_v0 *rxf_stats = &hw_stats->rxf;
	struct be_port_rxf_stats_v0 *port_stats =
					&rxf_stats->port[adapter->port_num];
	struct be_drv_stats *drvs = &adapter->drv_stats;

	/* stats arrive little-endian from FW; convert in place first */
	be_dws_le_to_cpu(hw_stats, sizeof(*hw_stats));
	drvs->rx_pause_frames = port_stats->rx_pause_frames;
	drvs->rx_crc_errors = port_stats->rx_crc_errors;
	drvs->rx_control_frames = port_stats->rx_control_frames;
	drvs->rx_in_range_errors = port_stats->rx_in_range_errors;
	drvs->rx_frame_too_long = port_stats->rx_frame_too_long;
	drvs->rx_dropped_runt = port_stats->rx_dropped_runt;
	drvs->rx_ip_checksum_errs = port_stats->rx_ip_checksum_errs;
	drvs->rx_tcp_checksum_errs = port_stats->rx_tcp_checksum_errs;
	drvs->rx_udp_checksum_errs = port_stats->rx_udp_checksum_errs;
	drvs->rxpp_fifo_overflow_drop = port_stats->rx_fifo_overflow;
	drvs->rx_dropped_tcp_length = port_stats->rx_dropped_tcp_length;
	drvs->rx_dropped_too_small = port_stats->rx_dropped_too_small;
	drvs->rx_dropped_too_short = port_stats->rx_dropped_too_short;
	drvs->rx_out_range_errors = port_stats->rx_out_range_errors;
	drvs->rx_input_fifo_overflow_drop = port_stats->rx_input_fifo_overflow;
	drvs->rx_dropped_header_too_small =
		port_stats->rx_dropped_header_too_small;
	/* v0 reports address and vlan mismatch drops separately; fold both
	 * into the single drv_stats counter */
	drvs->rx_address_mismatch_drops =
					port_stats->rx_address_mismatch_drops +
					port_stats->rx_vlan_mismatch_drops;
	drvs->rx_alignment_symbol_errors =
		port_stats->rx_alignment_symbol_errors;

	drvs->tx_pauseframes = port_stats->tx_pauseframes;
	drvs->tx_controlframes = port_stats->tx_controlframes;

	/* v0 keeps jabber events per physical port, not per function */
	if (adapter->port_num)
		drvs->jabber_events = rxf_stats->port1_jabber_events;
	else
		drvs->jabber_events = rxf_stats->port0_jabber_events;
	drvs->rx_drops_no_pbuf = rxf_stats->rx_drops_no_pbuf;
	drvs->rx_drops_no_erx_descr = rxf_stats->rx_drops_no_erx_descr;
	drvs->forwarded_packets = rxf_stats->forwarded_packets;
	drvs->rx_drops_mtu = rxf_stats->rx_drops_mtu;
	drvs->rx_drops_no_tpre_descr = rxf_stats->rx_drops_no_tpre_descr;
	drvs->rx_drops_too_many_frags = rxf_stats->rx_drops_too_many_frags;
	adapter->drv_stats.eth_red_drops = pmem_sts->eth_red_drops;
}
335
/* Copy the v1 (BE3) firmware stats snapshot into the driver's
 * chip-agnostic drv_stats structure. Unlike v0, the v1 layout keeps
 * jabber events and fifo-overflow drops per port slot.
 */
static void populate_be3_stats(struct be_adapter *adapter)
{
	struct be_hw_stats_v1 *hw_stats = hw_stats_from_cmd(adapter);
	struct be_pmem_stats *pmem_sts = &hw_stats->pmem;
	struct be_rxf_stats_v1 *rxf_stats = &hw_stats->rxf;
	struct be_port_rxf_stats_v1 *port_stats =
					&rxf_stats->port[adapter->port_num];
	struct be_drv_stats *drvs = &adapter->drv_stats;

	/* stats arrive little-endian from FW; convert in place first */
	be_dws_le_to_cpu(hw_stats, sizeof(*hw_stats));
	drvs->pmem_fifo_overflow_drop = port_stats->pmem_fifo_overflow_drop;
	drvs->rx_priority_pause_frames = port_stats->rx_priority_pause_frames;
	drvs->rx_pause_frames = port_stats->rx_pause_frames;
	drvs->rx_crc_errors = port_stats->rx_crc_errors;
	drvs->rx_control_frames = port_stats->rx_control_frames;
	drvs->rx_in_range_errors = port_stats->rx_in_range_errors;
	drvs->rx_frame_too_long = port_stats->rx_frame_too_long;
	drvs->rx_dropped_runt = port_stats->rx_dropped_runt;
	drvs->rx_ip_checksum_errs = port_stats->rx_ip_checksum_errs;
	drvs->rx_tcp_checksum_errs = port_stats->rx_tcp_checksum_errs;
	drvs->rx_udp_checksum_errs = port_stats->rx_udp_checksum_errs;
	drvs->rx_dropped_tcp_length = port_stats->rx_dropped_tcp_length;
	drvs->rx_dropped_too_small = port_stats->rx_dropped_too_small;
	drvs->rx_dropped_too_short = port_stats->rx_dropped_too_short;
	drvs->rx_out_range_errors = port_stats->rx_out_range_errors;
	drvs->rx_dropped_header_too_small =
		port_stats->rx_dropped_header_too_small;
	drvs->rx_input_fifo_overflow_drop =
		port_stats->rx_input_fifo_overflow_drop;
	drvs->rx_address_mismatch_drops = port_stats->rx_address_mismatch_drops;
	drvs->rx_alignment_symbol_errors =
		port_stats->rx_alignment_symbol_errors;
	drvs->rxpp_fifo_overflow_drop = port_stats->rxpp_fifo_overflow_drop;
	drvs->tx_pauseframes = port_stats->tx_pauseframes;
	drvs->tx_controlframes = port_stats->tx_controlframes;
	drvs->jabber_events = port_stats->jabber_events;
	drvs->rx_drops_no_pbuf = rxf_stats->rx_drops_no_pbuf;
	drvs->rx_drops_no_erx_descr = rxf_stats->rx_drops_no_erx_descr;
	drvs->forwarded_packets = rxf_stats->forwarded_packets;
	drvs->rx_drops_mtu = rxf_stats->rx_drops_mtu;
	drvs->rx_drops_no_tpre_descr = rxf_stats->rx_drops_no_tpre_descr;
	drvs->rx_drops_too_many_frags = rxf_stats->rx_drops_too_many_frags;
	adapter->drv_stats.eth_red_drops = pmem_sts->eth_red_drops;
}
380
/* Copy the Lancer per-physical-port (pport) stats snapshot into the
 * driver's chip-agnostic drv_stats structure. Lancer reports 64-bit
 * counters; only the low 32 bits (_lo fields) are folded into drv_stats.
 */
static void populate_lancer_stats(struct be_adapter *adapter)
{

	struct be_drv_stats *drvs = &adapter->drv_stats;
	struct lancer_pport_stats *pport_stats =
					pport_stats_from_cmd(adapter);

	/* stats arrive little-endian from FW; convert in place first */
	be_dws_le_to_cpu(pport_stats, sizeof(*pport_stats));
	drvs->rx_pause_frames = pport_stats->rx_pause_frames_lo;
	drvs->rx_crc_errors = pport_stats->rx_crc_errors_lo;
	drvs->rx_control_frames = pport_stats->rx_control_frames_lo;
	drvs->rx_in_range_errors = pport_stats->rx_in_range_errors;
	drvs->rx_frame_too_long = pport_stats->rx_frames_too_long_lo;
	drvs->rx_dropped_runt = pport_stats->rx_dropped_runt;
	drvs->rx_ip_checksum_errs = pport_stats->rx_ip_checksum_errors;
	drvs->rx_tcp_checksum_errs = pport_stats->rx_tcp_checksum_errors;
	drvs->rx_udp_checksum_errs = pport_stats->rx_udp_checksum_errors;
	drvs->rx_dropped_tcp_length =
				pport_stats->rx_dropped_invalid_tcp_length;
	drvs->rx_dropped_too_small = pport_stats->rx_dropped_too_small;
	drvs->rx_dropped_too_short = pport_stats->rx_dropped_too_short;
	drvs->rx_out_range_errors = pport_stats->rx_out_of_range_errors;
	drvs->rx_dropped_header_too_small =
				pport_stats->rx_dropped_header_too_small;
	drvs->rx_input_fifo_overflow_drop = pport_stats->rx_fifo_overflow;
	/* fold address and vlan mismatch drops into one counter, matching
	 * the BE2 accounting */
	drvs->rx_address_mismatch_drops =
					pport_stats->rx_address_mismatch_drops +
					pport_stats->rx_vlan_mismatch_drops;
	drvs->rx_alignment_symbol_errors = pport_stats->rx_symbol_errors_lo;
	drvs->rxpp_fifo_overflow_drop = pport_stats->rx_fifo_overflow;
	drvs->tx_pauseframes = pport_stats->tx_pause_frames_lo;
	drvs->tx_controlframes = pport_stats->tx_control_frames_lo;
	drvs->jabber_events = pport_stats->rx_jabbers;
	drvs->forwarded_packets = pport_stats->num_forwards_lo;
	drvs->rx_drops_mtu = pport_stats->rx_drops_mtu_lo;
	drvs->rx_drops_too_many_frags =
				pport_stats->rx_drops_too_many_frags_lo;
}
Ajit Khaparde89a88ab2011-05-16 07:36:18 +0000419
/* Fold a 16-bit hardware counter (which wraps at 65535) into a 32-bit
 * software accumulator: the low half of *acc tracks the last HW value,
 * and each detected wrap adds 65536 to the running total. The final
 * store uses ACCESS_ONCE so readers see a single consistent write.
 * NOTE(review): lo()/hi() are #defined here without a matching #undef,
 * so they stay visible for the rest of the file — confirm no later code
 * uses these names.
 */
static void accumulate_16bit_val(u32 *acc, u16 val)
{
#define lo(x)			(x & 0xFFFF)
#define hi(x)			(x & 0xFFFF0000)
	bool wrapped = val < lo(*acc);
	u32 newacc = hi(*acc) + val;

	if (wrapped)
		newacc += 65536;
	ACCESS_ONCE(*acc) = newacc;
}
431
/* Parse the latest firmware stats response into drv_stats, dispatching
 * on chip generation (BE2 -> v0, BE3 -> v1, Lancer -> pport stats).
 * For non-Lancer chips it also accumulates the per-RX-queue
 * rx_drops_no_fragments counter, which the HW keeps as a wrapping
 * 16-bit value.
 */
void be_parse_stats(struct be_adapter *adapter)
{
	struct be_erx_stats_v1 *erx = be_erx_stats_from_cmd(adapter);
	struct be_rx_obj *rxo;
	int i;

	if (adapter->generation == BE_GEN3) {
		if (lancer_chip(adapter))
			populate_lancer_stats(adapter);
		else
			populate_be3_stats(adapter);
	} else {
		populate_be2_stats(adapter);
	}

	/* Lancer has no erx per-queue drop counters to accumulate */
	if (lancer_chip(adapter))
		goto done;

	/* as erx_v1 is longer than v0, ok to use v1 defn for v0 access */
	for_all_rx_queues(adapter, rxo, i) {
		/* below erx HW counter can actually wrap around after
		 * 65535. Driver accumulates a 32-bit value
		 */
		accumulate_16bit_val(&rx_stats(rxo)->rx_drops_no_frags,
				(u16)erx->rx_drops_no_fragments[rxo->q.id]);
	}
done:
	return;
}
461
/* net_device_ops->ndo_get_stats64 handler.
 * Aggregates per-RX/TX-queue packet and byte counters (read under
 * u64_stats seqcount retry loops for a consistent 64-bit snapshot) and
 * maps the driver's firmware-derived error counters onto the standard
 * rtnl_link_stats64 fields. Returns 'stats' for the caller's convenience.
 */
static struct rtnl_link_stats64 *be_get_stats64(struct net_device *netdev,
					struct rtnl_link_stats64 *stats)
{
	struct be_adapter *adapter = netdev_priv(netdev);
	struct be_drv_stats *drvs = &adapter->drv_stats;
	struct be_rx_obj *rxo;
	struct be_tx_obj *txo;
	u64 pkts, bytes;
	unsigned int start;
	int i;

	for_all_rx_queues(adapter, rxo, i) {
		const struct be_rx_stats *rx_stats = rx_stats(rxo);
		/* retry until pkts/bytes are read without a concurrent
		 * writer update */
		do {
			start = u64_stats_fetch_begin_bh(&rx_stats->sync);
			pkts = rx_stats(rxo)->rx_pkts;
			bytes = rx_stats(rxo)->rx_bytes;
		} while (u64_stats_fetch_retry_bh(&rx_stats->sync, start));
		stats->rx_packets += pkts;
		stats->rx_bytes += bytes;
		stats->multicast += rx_stats(rxo)->rx_mcast_pkts;
		stats->rx_dropped += rx_stats(rxo)->rx_drops_no_skbs +
					rx_stats(rxo)->rx_drops_no_frags;
	}

	for_all_tx_queues(adapter, txo, i) {
		const struct be_tx_stats *tx_stats = tx_stats(txo);
		do {
			start = u64_stats_fetch_begin_bh(&tx_stats->sync);
			pkts = tx_stats(txo)->tx_pkts;
			bytes = tx_stats(txo)->tx_bytes;
		} while (u64_stats_fetch_retry_bh(&tx_stats->sync, start));
		stats->tx_packets += pkts;
		stats->tx_bytes += bytes;
	}

	/* bad pkts received */
	stats->rx_errors = drvs->rx_crc_errors +
		drvs->rx_alignment_symbol_errors +
		drvs->rx_in_range_errors +
		drvs->rx_out_range_errors +
		drvs->rx_frame_too_long +
		drvs->rx_dropped_too_small +
		drvs->rx_dropped_too_short +
		drvs->rx_dropped_header_too_small +
		drvs->rx_dropped_tcp_length +
		drvs->rx_dropped_runt;

	/* detailed rx errors */
	stats->rx_length_errors = drvs->rx_in_range_errors +
		drvs->rx_out_range_errors +
		drvs->rx_frame_too_long;

	stats->rx_crc_errors = drvs->rx_crc_errors;

	/* frame alignment errors */
	stats->rx_frame_errors = drvs->rx_alignment_symbol_errors;

	/* receiver fifo overrun */
	/* drops_no_pbuf is no per i/f, it's per BE card */
	stats->rx_fifo_errors = drvs->rxpp_fifo_overflow_drop +
				drvs->rx_input_fifo_overflow_drop +
				drvs->rx_drops_no_pbuf;
	return stats;
}
527
Ajit Khapardeb236916a2011-12-30 12:15:40 +0000528void be_link_status_update(struct be_adapter *adapter, u8 link_status)
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700529{
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700530 struct net_device *netdev = adapter->netdev;
531
Ajit Khapardeb236916a2011-12-30 12:15:40 +0000532 if (!(adapter->flags & BE_FLAGS_LINK_STATUS_INIT)) {
Sathya Perlaea172a02011-08-02 19:57:42 +0000533 netif_carrier_off(netdev);
Ajit Khapardeb236916a2011-12-30 12:15:40 +0000534 adapter->flags |= BE_FLAGS_LINK_STATUS_INIT;
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700535 }
Ajit Khapardeb236916a2011-12-30 12:15:40 +0000536
537 if ((link_status & LINK_STATUS_MASK) == LINK_UP)
538 netif_carrier_on(netdev);
539 else
540 netif_carrier_off(netdev);
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700541}
542
Sathya Perla3c8def92011-06-12 20:01:58 +0000543static void be_tx_stats_update(struct be_tx_obj *txo,
Ajit Khaparde91992e42010-02-19 13:57:12 +0000544 u32 wrb_cnt, u32 copied, u32 gso_segs, bool stopped)
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700545{
Sathya Perla3c8def92011-06-12 20:01:58 +0000546 struct be_tx_stats *stats = tx_stats(txo);
547
Sathya Perlaab1594e2011-07-25 19:10:15 +0000548 u64_stats_update_begin(&stats->sync);
Sathya Perlaac124ff2011-07-25 19:10:14 +0000549 stats->tx_reqs++;
550 stats->tx_wrbs += wrb_cnt;
551 stats->tx_bytes += copied;
552 stats->tx_pkts += (gso_segs ? gso_segs : 1);
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700553 if (stopped)
Sathya Perlaac124ff2011-07-25 19:10:14 +0000554 stats->tx_stops++;
Sathya Perlaab1594e2011-07-25 19:10:15 +0000555 u64_stats_update_end(&stats->sync);
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700556}
557
558/* Determine number of WRB entries needed to xmit data in an skb */
Sathya Perlafe6d2a32010-11-21 23:25:50 +0000559static u32 wrb_cnt_for_skb(struct be_adapter *adapter, struct sk_buff *skb,
560 bool *dummy)
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700561{
David S. Millerebc8d2a2009-06-09 01:01:31 -0700562 int cnt = (skb->len > skb->data_len);
563
564 cnt += skb_shinfo(skb)->nr_frags;
565
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700566 /* to account for hdr wrb */
567 cnt++;
Sathya Perlafe6d2a32010-11-21 23:25:50 +0000568 if (lancer_chip(adapter) || !(cnt & 1)) {
569 *dummy = false;
570 } else {
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700571 /* add a dummy to make it an even num */
572 cnt++;
573 *dummy = true;
Sathya Perlafe6d2a32010-11-21 23:25:50 +0000574 }
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700575 BUG_ON(cnt > BE_MAX_TX_FRAG_COUNT);
576 return cnt;
577}
578
579static inline void wrb_fill(struct be_eth_wrb *wrb, u64 addr, int len)
580{
581 wrb->frag_pa_hi = upper_32_bits(addr);
582 wrb->frag_pa_lo = addr & 0xFFFFFFFF;
583 wrb->frag_len = len & ETH_WRB_FRAG_LEN_MASK;
Somnath Kotur89b1f492012-06-24 19:40:55 +0000584 wrb->rsvd0 = 0;
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700585}
586
Ajit Khaparde1ded1322011-12-09 13:53:17 +0000587static inline u16 be_get_tx_vlan_tag(struct be_adapter *adapter,
588 struct sk_buff *skb)
589{
590 u8 vlan_prio;
591 u16 vlan_tag;
592
593 vlan_tag = vlan_tx_tag_get(skb);
594 vlan_prio = (vlan_tag & VLAN_PRIO_MASK) >> VLAN_PRIO_SHIFT;
595 /* If vlan priority provided by OS is NOT in available bmap */
596 if (!(adapter->vlan_prio_bmap & (1 << vlan_prio)))
597 vlan_tag = (vlan_tag & ~VLAN_PRIO_MASK) |
598 adapter->recommended_prio;
599
600 return vlan_tag;
601}
602
/* True when the skb carries a VLAN tag or the adapter has a port-vlan
 * (pvid) configured — i.e. VLAN handling applies to this packet.
 */
static int be_vlan_tag_chk(struct be_adapter *adapter, struct sk_buff *skb)
{
	return vlan_tx_tag_present(skb) || adapter->pvid;
}
607
/* Build the TX header WRB for an skb: programs CRC, LSO/checksum-offload,
 * VLAN insertion, completion/event flags, the total WRB count and the
 * payload length via the AMAP bit-field accessors.
 */
static void wrb_fill_hdr(struct be_adapter *adapter, struct be_eth_hdr_wrb *hdr,
		struct sk_buff *skb, u32 wrb_cnt, u32 len)
{
	u16 vlan_tag;

	memset(hdr, 0, sizeof(*hdr));

	AMAP_SET_BITS(struct amap_eth_hdr_wrb, crc, hdr, 1);

	if (skb_is_gso(skb)) {
		AMAP_SET_BITS(struct amap_eth_hdr_wrb, lso, hdr, 1);
		AMAP_SET_BITS(struct amap_eth_hdr_wrb, lso_mss,
			hdr, skb_shinfo(skb)->gso_size);
		/* lso6 flag only for non-Lancer chips doing IPv6 GSO */
		if (skb_is_gso_v6(skb) && !lancer_chip(adapter))
			AMAP_SET_BITS(struct amap_eth_hdr_wrb, lso6, hdr, 1);
		/* Lancer A0 needs checksum flags set explicitly even for
		 * GSO packets */
		if (lancer_chip(adapter) && adapter->sli_family ==
			LANCER_A0_SLI_FAMILY) {
			AMAP_SET_BITS(struct amap_eth_hdr_wrb, ipcs, hdr, 1);
			if (is_tcp_pkt(skb))
				AMAP_SET_BITS(struct amap_eth_hdr_wrb,
								tcpcs, hdr, 1);
			else if (is_udp_pkt(skb))
				AMAP_SET_BITS(struct amap_eth_hdr_wrb,
								udpcs, hdr, 1);
		}
	} else if (skb->ip_summed == CHECKSUM_PARTIAL) {
		if (is_tcp_pkt(skb))
			AMAP_SET_BITS(struct amap_eth_hdr_wrb, tcpcs, hdr, 1);
		else if (is_udp_pkt(skb))
			AMAP_SET_BITS(struct amap_eth_hdr_wrb, udpcs, hdr, 1);
	}

	if (vlan_tx_tag_present(skb)) {
		AMAP_SET_BITS(struct amap_eth_hdr_wrb, vlan, hdr, 1);
		vlan_tag = be_get_tx_vlan_tag(adapter, skb);
		AMAP_SET_BITS(struct amap_eth_hdr_wrb, vlan_tag, hdr, vlan_tag);
	}

	AMAP_SET_BITS(struct amap_eth_hdr_wrb, event, hdr, 1);
	AMAP_SET_BITS(struct amap_eth_hdr_wrb, complete, hdr, 1);
	AMAP_SET_BITS(struct amap_eth_hdr_wrb, num_wrb, hdr, wrb_cnt);
	AMAP_SET_BITS(struct amap_eth_hdr_wrb, len, hdr, len);
}
651
Ivan Vecera2b7bceb2011-02-02 08:05:12 +0000652static void unmap_tx_frag(struct device *dev, struct be_eth_wrb *wrb,
Sathya Perla7101e112010-03-22 20:41:12 +0000653 bool unmap_single)
654{
655 dma_addr_t dma;
656
657 be_dws_le_to_cpu(wrb, sizeof(*wrb));
658
659 dma = (u64)wrb->frag_pa_hi << 32 | (u64)wrb->frag_pa_lo;
FUJITA Tomonorib681ee72010-04-04 21:40:18 +0000660 if (wrb->frag_len) {
Sathya Perla7101e112010-03-22 20:41:12 +0000661 if (unmap_single)
Ivan Vecera2b7bceb2011-02-02 08:05:12 +0000662 dma_unmap_single(dev, dma, wrb->frag_len,
663 DMA_TO_DEVICE);
Sathya Perla7101e112010-03-22 20:41:12 +0000664 else
Ivan Vecera2b7bceb2011-02-02 08:05:12 +0000665 dma_unmap_page(dev, dma, wrb->frag_len, DMA_TO_DEVICE);
Sathya Perla7101e112010-03-22 20:41:12 +0000666 }
667}
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700668
/* Populate the TX queue with WRBs for the given skb.
 * Reserves the header WRB first, DMA-maps the skb's linear head and each
 * page fragment (one WRB per mapped area), optionally appends a dummy WRB,
 * and finally fills in the header WRB once the total length is known.
 *
 * Returns the number of data bytes mapped, or 0 on a DMA mapping failure,
 * in which case every mapping made so far is undone and the queue head is
 * restored to its original position.
 */
static int make_tx_wrbs(struct be_adapter *adapter, struct be_queue_info *txq,
		struct sk_buff *skb, u32 wrb_cnt, bool dummy_wrb)
{
	dma_addr_t busaddr;
	int i, copied = 0;
	struct device *dev = &adapter->pdev->dev;
	struct sk_buff *first_skb = skb;
	struct be_eth_wrb *wrb;
	struct be_eth_hdr_wrb *hdr;
	bool map_single = false;
	u16 map_head;

	/* Reserve the first slot for the header WRB; it is written last */
	hdr = queue_head_node(txq);
	queue_head_inc(txq);
	map_head = txq->head;	/* rollback point for dma_err */

	if (skb->len > skb->data_len) {
		/* Linear (head) portion of the skb */
		int len = skb_headlen(skb);
		busaddr = dma_map_single(dev, skb->data, len, DMA_TO_DEVICE);
		if (dma_mapping_error(dev, busaddr))
			goto dma_err;
		map_single = true;
		wrb = queue_head_node(txq);
		wrb_fill(wrb, busaddr, len);
		be_dws_cpu_to_le(wrb, sizeof(*wrb));
		queue_head_inc(txq);
		copied += len;
	}

	/* One WRB per paged fragment */
	for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
		const struct skb_frag_struct *frag =
			&skb_shinfo(skb)->frags[i];
		busaddr = skb_frag_dma_map(dev, frag, 0,
					skb_frag_size(frag), DMA_TO_DEVICE);
		if (dma_mapping_error(dev, busaddr))
			goto dma_err;
		wrb = queue_head_node(txq);
		wrb_fill(wrb, busaddr, skb_frag_size(frag));
		be_dws_cpu_to_le(wrb, sizeof(*wrb));
		queue_head_inc(txq);
		copied += skb_frag_size(frag);
	}

	if (dummy_wrb) {
		/* Zero-length WRB requested by the caller (HW workaround) */
		wrb = queue_head_node(txq);
		wrb_fill(wrb, 0, 0);
		be_dws_cpu_to_le(wrb, sizeof(*wrb));
		queue_head_inc(txq);
	}

	wrb_fill_hdr(adapter, hdr, first_skb, wrb_cnt, copied);
	be_dws_cpu_to_le(hdr, sizeof(*hdr));

	return copied;
dma_err:
	/* Unwind: restore the queue head and unmap every WRB written so
	 * far. Only the first mapping (if any) was a single mapping;
	 * unmap_tx_frag() also converts each WRB back to CPU order so
	 * frag_len can be read for the countdown.
	 */
	txq->head = map_head;
	while (copied) {
		wrb = queue_head_node(txq);
		unmap_tx_frag(dev, wrb, map_single);
		map_single = false;
		copied -= wrb->frag_len;
		queue_head_inc(txq);
	}
	return 0;
}
734
Somnath Kotur93040ae2012-06-26 22:32:10 +0000735static struct sk_buff *be_insert_vlan_in_pkt(struct be_adapter *adapter,
736 struct sk_buff *skb)
737{
738 u16 vlan_tag = 0;
739
740 skb = skb_share_check(skb, GFP_ATOMIC);
741 if (unlikely(!skb))
742 return skb;
743
744 if (vlan_tx_tag_present(skb)) {
745 vlan_tag = be_get_tx_vlan_tag(adapter, skb);
746 __vlan_put_tag(skb, vlan_tag);
747 skb->vlan_tci = 0;
748 }
749
750 return skb;
751}
752
/* ndo_start_xmit() handler: map the skb into TX WRBs, ring the doorbell
 * and account stats. Applies two HW-bug workarounds (padding-bytes/tot_len
 * trim and manual VLAN insertion) before mapping. Always returns
 * NETDEV_TX_OK; on failure the skb is dropped/freed here.
 */
static netdev_tx_t be_xmit(struct sk_buff *skb,
			struct net_device *netdev)
{
	struct be_adapter *adapter = netdev_priv(netdev);
	struct be_tx_obj *txo = &adapter->tx_obj[skb_get_queue_mapping(skb)];
	struct be_queue_info *txq = &txo->q;
	struct iphdr *ip = NULL;
	u32 wrb_cnt = 0, copied = 0;
	u32 start = txq->head, eth_hdr_len;
	bool dummy_wrb, stopped = false;

	eth_hdr_len = ntohs(skb->protocol) == ETH_P_8021Q ?
		VLAN_ETH_HLEN : ETH_HLEN;

	/* HW has a bug which considers padding bytes as legal
	 * and modifies the IPv4 hdr's 'tot_len' field.
	 * Trim the frame to the length claimed by the IP header.
	 */
	if (skb->len <= 60 && be_vlan_tag_chk(adapter, skb) &&
			is_ipv4_pkt(skb)) {
		ip = (struct iphdr *)ip_hdr(skb);
		pskb_trim(skb, eth_hdr_len + ntohs(ip->tot_len));
	}

	/* HW has a bug wherein it will calculate CSUM for VLAN
	 * pkts even though it is disabled.
	 * Manually insert VLAN in pkt.
	 */
	if (skb->ip_summed != CHECKSUM_PARTIAL &&
			be_vlan_tag_chk(adapter, skb)) {
		skb = be_insert_vlan_in_pkt(adapter, skb);
		if (unlikely(!skb))
			goto tx_drop;
	}

	wrb_cnt = wrb_cnt_for_skb(adapter, skb, &dummy_wrb);

	copied = make_tx_wrbs(adapter, txq, skb, wrb_cnt, dummy_wrb);
	if (copied) {
		/* Sample gso_segs before the doorbell; the skb may be
		 * completed (and freed) as soon as HW is notified.
		 */
		int gso_segs = skb_shinfo(skb)->gso_segs;

		/* record the sent skb in the sent_skb table */
		BUG_ON(txo->sent_skb_list[start]);
		txo->sent_skb_list[start] = skb;

		/* Ensure txq has space for the next skb; Else stop the queue
		 * *BEFORE* ringing the tx doorbell, so that we serialze the
		 * tx compls of the current transmit which'll wake up the queue
		 */
		atomic_add(wrb_cnt, &txq->used);
		if ((BE_MAX_TX_FRAG_COUNT + atomic_read(&txq->used)) >=
								txq->len) {
			netif_stop_subqueue(netdev, skb_get_queue_mapping(skb));
			stopped = true;
		}

		be_txq_notify(adapter, txq->id, wrb_cnt);

		be_tx_stats_update(txo, wrb_cnt, copied, gso_segs, stopped);
	} else {
		/* Mapping failed: rewind the queue and drop the skb */
		txq->head = start;
		dev_kfree_skb_any(skb);
	}
tx_drop:
	return NETDEV_TX_OK;
}
818
819static int be_change_mtu(struct net_device *netdev, int new_mtu)
820{
821 struct be_adapter *adapter = netdev_priv(netdev);
822 if (new_mtu < BE_MIN_MTU ||
Ajit Khaparde34a89b82010-02-09 01:32:43 +0000823 new_mtu > (BE_MAX_JUMBO_FRAME_SIZE -
824 (ETH_HLEN + ETH_FCS_LEN))) {
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700825 dev_info(&adapter->pdev->dev,
826 "MTU must be between %d and %d bytes\n",
Ajit Khaparde34a89b82010-02-09 01:32:43 +0000827 BE_MIN_MTU,
828 (BE_MAX_JUMBO_FRAME_SIZE - (ETH_HLEN + ETH_FCS_LEN)));
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700829 return -EINVAL;
830 }
831 dev_info(&adapter->pdev->dev, "MTU changed from %d to %d bytes\n",
832 netdev->mtu, new_mtu);
833 netdev->mtu = new_mtu;
834 return 0;
835}
836
837/*
Ajit Khaparde82903e42010-02-09 01:34:57 +0000838 * A max of 64 (BE_NUM_VLANS_SUPPORTED) vlans can be configured in BE.
839 * If the user configures more, place BE in vlan promiscuous mode.
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700840 */
Sathya Perla10329df2012-06-05 19:37:18 +0000841static int be_vid_config(struct be_adapter *adapter)
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700842{
Sathya Perla10329df2012-06-05 19:37:18 +0000843 u16 vids[BE_NUM_VLANS_SUPPORTED];
844 u16 num = 0, i;
Ajit Khaparde82903e42010-02-09 01:34:57 +0000845 int status = 0;
Ajit Khaparde1da87b72010-07-23 01:51:22 +0000846
Sathya Perlac0e64ef2011-08-02 19:57:43 +0000847 /* No need to further configure vids if in promiscuous mode */
848 if (adapter->promiscuous)
849 return 0;
850
Padmanabh Ratnakar0fc16eb2012-04-25 01:46:06 +0000851 if (adapter->vlans_added > adapter->max_vlans)
852 goto set_vlan_promisc;
853
854 /* Construct VLAN Table to give to HW */
855 for (i = 0; i < VLAN_N_VID; i++)
856 if (adapter->vlan_tag[i])
Sathya Perla10329df2012-06-05 19:37:18 +0000857 vids[num++] = cpu_to_le16(i);
Padmanabh Ratnakar0fc16eb2012-04-25 01:46:06 +0000858
859 status = be_cmd_vlan_config(adapter, adapter->if_handle,
Sathya Perla10329df2012-06-05 19:37:18 +0000860 vids, num, 1, 0);
Padmanabh Ratnakar0fc16eb2012-04-25 01:46:06 +0000861
862 /* Set to VLAN promisc mode as setting VLAN filter failed */
863 if (status) {
864 dev_info(&adapter->pdev->dev, "Exhausted VLAN HW filters.\n");
865 dev_info(&adapter->pdev->dev, "Disabling HW VLAN filtering.\n");
866 goto set_vlan_promisc;
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700867 }
Ajit Khaparde1da87b72010-07-23 01:51:22 +0000868
Sathya Perlab31c50a2009-09-17 10:30:13 -0700869 return status;
Padmanabh Ratnakar0fc16eb2012-04-25 01:46:06 +0000870
871set_vlan_promisc:
872 status = be_cmd_vlan_config(adapter, adapter->if_handle,
873 NULL, 0, 1, 1);
874 return status;
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700875}
876
Jiri Pirko8e586132011-12-08 19:52:37 -0500877static int be_vlan_add_vid(struct net_device *netdev, u16 vid)
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700878{
879 struct be_adapter *adapter = netdev_priv(netdev);
Ajit Khaparde80817cb2011-12-30 12:15:12 +0000880 int status = 0;
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700881
Padmanabh Ratnakara85e9982012-10-20 06:02:40 +0000882 if (!lancer_chip(adapter) && !be_physfn(adapter)) {
Ajit Khaparde80817cb2011-12-30 12:15:12 +0000883 status = -EINVAL;
884 goto ret;
885 }
Sarveshwar Bandiba343c72010-03-31 02:56:12 +0000886
Padmanabh Ratnakara85e9982012-10-20 06:02:40 +0000887 /* Packets with VID 0 are always received by Lancer by default */
888 if (lancer_chip(adapter) && vid == 0)
889 goto ret;
890
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700891 adapter->vlan_tag[vid] = 1;
Ajit Khaparde82903e42010-02-09 01:34:57 +0000892 if (adapter->vlans_added <= (adapter->max_vlans + 1))
Sathya Perla10329df2012-06-05 19:37:18 +0000893 status = be_vid_config(adapter);
Jiri Pirko8e586132011-12-08 19:52:37 -0500894
Ajit Khaparde80817cb2011-12-30 12:15:12 +0000895 if (!status)
896 adapter->vlans_added++;
897 else
898 adapter->vlan_tag[vid] = 0;
899ret:
900 return status;
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700901}
902
Jiri Pirko8e586132011-12-08 19:52:37 -0500903static int be_vlan_rem_vid(struct net_device *netdev, u16 vid)
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700904{
905 struct be_adapter *adapter = netdev_priv(netdev);
Ajit Khaparde80817cb2011-12-30 12:15:12 +0000906 int status = 0;
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700907
Padmanabh Ratnakara85e9982012-10-20 06:02:40 +0000908 if (!lancer_chip(adapter) && !be_physfn(adapter)) {
Ajit Khaparde80817cb2011-12-30 12:15:12 +0000909 status = -EINVAL;
910 goto ret;
911 }
Sarveshwar Bandiba343c72010-03-31 02:56:12 +0000912
Padmanabh Ratnakara85e9982012-10-20 06:02:40 +0000913 /* Packets with VID 0 are always received by Lancer by default */
914 if (lancer_chip(adapter) && vid == 0)
915 goto ret;
916
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700917 adapter->vlan_tag[vid] = 0;
Ajit Khaparde82903e42010-02-09 01:34:57 +0000918 if (adapter->vlans_added <= adapter->max_vlans)
Sathya Perla10329df2012-06-05 19:37:18 +0000919 status = be_vid_config(adapter);
Jiri Pirko8e586132011-12-08 19:52:37 -0500920
Ajit Khaparde80817cb2011-12-30 12:15:12 +0000921 if (!status)
922 adapter->vlans_added--;
923 else
924 adapter->vlan_tag[vid] = 1;
925ret:
926 return status;
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700927}
928
/* ndo_set_rx_mode() handler: sync promiscuous/multicast/unicast filtering
 * state with HW. Falls back progressively: IFF_PROMISC -> all-multicast
 * promisc (too many mcast addrs) -> per-address unicast/multicast filters,
 * with promisc/allmulti as last resort when HW filter slots run out.
 */
static void be_set_rx_mode(struct net_device *netdev)
{
	struct be_adapter *adapter = netdev_priv(netdev);
	int status;

	if (netdev->flags & IFF_PROMISC) {
		be_cmd_rx_filter(adapter, IFF_PROMISC, ON);
		adapter->promiscuous = true;
		goto done;
	}

	/* BE was previously in promiscuous mode; disable it */
	if (adapter->promiscuous) {
		adapter->promiscuous = false;
		be_cmd_rx_filter(adapter, IFF_PROMISC, OFF);

		/* Re-program VLAN filters that promisc mode bypassed */
		if (adapter->vlans_added)
			be_vid_config(adapter);
	}

	/* Enable multicast promisc if num configured exceeds what we support */
	if (netdev->flags & IFF_ALLMULTI ||
	    netdev_mc_count(netdev) > adapter->max_mcast_mac) {
		be_cmd_rx_filter(adapter, IFF_ALLMULTI, ON);
		goto done;
	}

	if (netdev_uc_count(netdev) != adapter->uc_macs) {
		struct netdev_hw_addr *ha;
		int i = 1; /* First slot is claimed by the Primary MAC */

		/* Remove all previously programmed secondary uc MACs */
		for (; adapter->uc_macs > 0; adapter->uc_macs--, i++) {
			be_cmd_pmac_del(adapter, adapter->if_handle,
					adapter->pmac_id[i], 0);
		}

		/* More uc addrs than pmac slots: go fully promiscuous */
		if (netdev_uc_count(netdev) > adapter->max_pmac_cnt) {
			be_cmd_rx_filter(adapter, IFF_PROMISC, ON);
			adapter->promiscuous = true;
			goto done;
		}

		netdev_for_each_uc_addr(ha, adapter->netdev) {
			adapter->uc_macs++; /* First slot is for Primary MAC */
			be_cmd_pmac_add(adapter, (u8 *)ha->addr,
					adapter->if_handle,
					&adapter->pmac_id[adapter->uc_macs], 0);
		}
	}

	status = be_cmd_rx_filter(adapter, IFF_MULTICAST, ON);

	/* Set to MCAST promisc mode if setting MULTICAST address fails */
	if (status) {
		dev_info(&adapter->pdev->dev, "Exhausted multicast HW filters.\n");
		dev_info(&adapter->pdev->dev, "Disabling HW multicast filtering.\n");
		be_cmd_rx_filter(adapter, IFF_ALLMULTI, ON);
	}
done:
	return;
}
990
/* ndo_set_vf_mac() handler: program @mac as the MAC address of VF @vf.
 * Lancer uses the MAC-list FW command (deleting the currently active MAC
 * first, if any); BEx chips delete and re-add the VF's pmac entry.
 * Returns 0 on success, -EPERM if SR-IOV is disabled, -EINVAL for a bad
 * MAC or VF index, or the FW command's error status.
 */
static int be_set_vf_mac(struct net_device *netdev, int vf, u8 *mac)
{
	struct be_adapter *adapter = netdev_priv(netdev);
	struct be_vf_cfg *vf_cfg = &adapter->vf_cfg[vf];
	int status;
	bool active_mac = false;
	u32 pmac_id;
	u8 old_mac[ETH_ALEN];

	if (!sriov_enabled(adapter))
		return -EPERM;

	if (!is_valid_ether_addr(mac) || vf >= adapter->num_vfs)
		return -EINVAL;

	if (lancer_chip(adapter)) {
		/* Look up the VF's currently active MAC and remove it
		 * before programming the new one.
		 */
		status = be_cmd_get_mac_from_list(adapter, old_mac, &active_mac,
						&pmac_id, vf + 1);
		if (!status && active_mac)
			be_cmd_pmac_del(adapter, vf_cfg->if_handle,
					pmac_id, vf + 1);

		status = be_cmd_set_mac_list(adapter, mac, 1, vf + 1);
	} else {
		/* NOTE(review): pmac_del's status is overwritten by
		 * pmac_add below; a delete failure is not reported.
		 */
		status = be_cmd_pmac_del(adapter, vf_cfg->if_handle,
					vf_cfg->pmac_id, vf + 1);

		status = be_cmd_pmac_add(adapter, mac, vf_cfg->if_handle,
					&vf_cfg->pmac_id, vf + 1);
	}

	if (status)
		dev_err(&adapter->pdev->dev, "MAC %pM set on VF %d Failed\n",
				mac, vf);
	else
		/* Cache the new MAC for be_get_vf_config() */
		memcpy(vf_cfg->mac_addr, mac, ETH_ALEN);

	return status;
}
1030
Ajit Khaparde64600ea2010-07-23 01:50:34 +00001031static int be_get_vf_config(struct net_device *netdev, int vf,
1032 struct ifla_vf_info *vi)
1033{
1034 struct be_adapter *adapter = netdev_priv(netdev);
Sathya Perla11ac75e2011-12-13 00:58:50 +00001035 struct be_vf_cfg *vf_cfg = &adapter->vf_cfg[vf];
Ajit Khaparde64600ea2010-07-23 01:50:34 +00001036
Sathya Perla11ac75e2011-12-13 00:58:50 +00001037 if (!sriov_enabled(adapter))
Ajit Khaparde64600ea2010-07-23 01:50:34 +00001038 return -EPERM;
1039
Sathya Perla11ac75e2011-12-13 00:58:50 +00001040 if (vf >= adapter->num_vfs)
Ajit Khaparde64600ea2010-07-23 01:50:34 +00001041 return -EINVAL;
1042
1043 vi->vf = vf;
Sathya Perla11ac75e2011-12-13 00:58:50 +00001044 vi->tx_rate = vf_cfg->tx_rate;
1045 vi->vlan = vf_cfg->vlan_tag;
Ajit Khaparde64600ea2010-07-23 01:50:34 +00001046 vi->qos = 0;
Sathya Perla11ac75e2011-12-13 00:58:50 +00001047 memcpy(&vi->mac, vf_cfg->mac_addr, ETH_ALEN);
Ajit Khaparde64600ea2010-07-23 01:50:34 +00001048
1049 return 0;
1050}
1051
Ajit Khaparde1da87b72010-07-23 01:51:22 +00001052static int be_set_vf_vlan(struct net_device *netdev,
1053 int vf, u16 vlan, u8 qos)
1054{
1055 struct be_adapter *adapter = netdev_priv(netdev);
1056 int status = 0;
1057
Sathya Perla11ac75e2011-12-13 00:58:50 +00001058 if (!sriov_enabled(adapter))
Ajit Khaparde1da87b72010-07-23 01:51:22 +00001059 return -EPERM;
1060
Sathya Perla11ac75e2011-12-13 00:58:50 +00001061 if (vf >= adapter->num_vfs || vlan > 4095)
Ajit Khaparde1da87b72010-07-23 01:51:22 +00001062 return -EINVAL;
1063
1064 if (vlan) {
Ajit Khapardef1f3ee12012-03-18 06:23:41 +00001065 if (adapter->vf_cfg[vf].vlan_tag != vlan) {
1066 /* If this is new value, program it. Else skip. */
1067 adapter->vf_cfg[vf].vlan_tag = vlan;
1068
1069 status = be_cmd_set_hsw_config(adapter, vlan,
1070 vf + 1, adapter->vf_cfg[vf].if_handle);
1071 }
Ajit Khaparde1da87b72010-07-23 01:51:22 +00001072 } else {
Ajit Khapardef1f3ee12012-03-18 06:23:41 +00001073 /* Reset Transparent Vlan Tagging. */
Sathya Perla11ac75e2011-12-13 00:58:50 +00001074 adapter->vf_cfg[vf].vlan_tag = 0;
Ajit Khapardef1f3ee12012-03-18 06:23:41 +00001075 vlan = adapter->vf_cfg[vf].def_vid;
1076 status = be_cmd_set_hsw_config(adapter, vlan, vf + 1,
1077 adapter->vf_cfg[vf].if_handle);
Ajit Khaparde1da87b72010-07-23 01:51:22 +00001078 }
1079
Ajit Khaparde1da87b72010-07-23 01:51:22 +00001080
1081 if (status)
1082 dev_info(&adapter->pdev->dev,
1083 "VLAN %d config on VF %d failed\n", vlan, vf);
1084 return status;
1085}
1086
Ajit Khapardee1d18732010-07-23 01:52:13 +00001087static int be_set_vf_tx_rate(struct net_device *netdev,
1088 int vf, int rate)
1089{
1090 struct be_adapter *adapter = netdev_priv(netdev);
1091 int status = 0;
1092
Sathya Perla11ac75e2011-12-13 00:58:50 +00001093 if (!sriov_enabled(adapter))
Ajit Khapardee1d18732010-07-23 01:52:13 +00001094 return -EPERM;
1095
Ajit Khaparde94f434c2011-12-30 12:15:30 +00001096 if (vf >= adapter->num_vfs)
Ajit Khapardee1d18732010-07-23 01:52:13 +00001097 return -EINVAL;
1098
Ajit Khaparde94f434c2011-12-30 12:15:30 +00001099 if (rate < 100 || rate > 10000) {
1100 dev_err(&adapter->pdev->dev,
1101 "tx rate must be between 100 and 10000 Mbps\n");
1102 return -EINVAL;
1103 }
Ajit Khapardee1d18732010-07-23 01:52:13 +00001104
Padmanabh Ratnakard5c18472012-10-20 06:01:53 +00001105 if (lancer_chip(adapter))
1106 status = be_cmd_set_profile_config(adapter, rate / 10, vf + 1);
1107 else
1108 status = be_cmd_set_qos(adapter, rate / 10, vf + 1);
Ajit Khapardee1d18732010-07-23 01:52:13 +00001109
1110 if (status)
Ajit Khaparde94f434c2011-12-30 12:15:30 +00001111 dev_err(&adapter->pdev->dev,
Ajit Khapardee1d18732010-07-23 01:52:13 +00001112 "tx rate %d on VF %d failed\n", rate, vf);
Ajit Khaparde94f434c2011-12-30 12:15:30 +00001113 else
1114 adapter->vf_cfg[vf].tx_rate = rate;
Ajit Khapardee1d18732010-07-23 01:52:13 +00001115 return status;
1116}
1117
/* Count this adapter's VFs by walking the PCI device list.
 * Returns the number of VFs currently assigned to a guest when
 * @vf_state == ASSIGNED, otherwise the total number of VFs found;
 * returns 0 when the device has no SR-IOV capability.
 */
static int be_find_vfs(struct be_adapter *adapter, int vf_state)
{
	struct pci_dev *dev, *pdev = adapter->pdev;
	int vfs = 0, assigned_vfs = 0, pos;
	u16 offset, stride;

	pos = pci_find_ext_capability(pdev, PCI_EXT_CAP_ID_SRIOV);
	if (!pos)
		return 0;
	/* NOTE(review): offset/stride are read but not used below */
	pci_read_config_word(pdev, pos + PCI_SRIOV_VF_OFFSET, &offset);
	pci_read_config_word(pdev, pos + PCI_SRIOV_VF_STRIDE, &stride);

	/* pci_get_device() drops the previous device's reference and
	 * takes one on the next, so the loop is refcount-safe.
	 */
	dev = pci_get_device(pdev->vendor, PCI_ANY_ID, NULL);
	while (dev) {
		/* Count only virtfns whose physfn is this adapter */
		if (dev->is_virtfn && pci_physfn(dev) == pdev) {
			vfs++;
			if (dev->dev_flags & PCI_DEV_FLAGS_ASSIGNED)
				assigned_vfs++;
		}
		dev = pci_get_device(pdev->vendor, PCI_ANY_ID, dev);
	}
	return (vf_state == ASSIGNED) ? assigned_vfs : vfs;
}
1141
/* Adaptive interrupt coalescing: recompute and program the event-queue
 * delay (eqd) for @eqo based on the RX packet rate sampled roughly once
 * per second. With AIC disabled, the statically configured eqd is applied.
 * The FW command is issued only when the value actually changes.
 */
static void be_eqd_update(struct be_adapter *adapter, struct be_eq_obj *eqo)
{
	struct be_rx_stats *stats = rx_stats(&adapter->rx_obj[eqo->idx]);
	ulong now = jiffies;
	ulong delta = now - stats->rx_jiffies;
	u64 pkts;
	unsigned int start, eqd;

	if (!eqo->enable_aic) {
		/* AIC off: use the fixed, user/driver-configured delay */
		eqd = eqo->eqd;
		goto modify_eqd;
	}

	/* EQs beyond the RX-queue count have no RX stats to sample */
	if (eqo->idx >= adapter->num_rx_qs)
		return;

	stats = rx_stats(&adapter->rx_obj[eqo->idx]);

	/* Wrapped around */
	if (time_before(now, stats->rx_jiffies)) {
		stats->rx_jiffies = now;
		return;
	}

	/* Update once a second */
	if (delta < HZ)
		return;

	/* Consistent snapshot of the 64-bit packet counter */
	do {
		start = u64_stats_fetch_begin_bh(&stats->sync);
		pkts = stats->rx_pkts;
	} while (u64_stats_fetch_retry_bh(&stats->sync, start));

	stats->rx_pps = (unsigned long)(pkts - stats->rx_pkts_prev) / (delta / HZ);
	stats->rx_pkts_prev = pkts;
	stats->rx_jiffies = now;
	/* Scale pkts/sec into an eqd value, clamped to [min_eqd, max_eqd];
	 * very low rates get eqd 0 (no delay) for latency.
	 */
	eqd = (stats->rx_pps / 110000) << 3;
	eqd = min(eqd, eqo->max_eqd);
	eqd = max(eqd, eqo->min_eqd);
	if (eqd < 10)
		eqd = 0;

modify_eqd:
	if (eqd != eqo->cur_eqd) {
		be_cmd_modify_eqd(adapter, eqo->q.id, eqd);
		eqo->cur_eqd = eqd;
	}
}
1190
Sathya Perla3abcded2010-10-03 22:12:27 -07001191static void be_rx_stats_update(struct be_rx_obj *rxo,
Sathya Perla2e588f82011-03-11 02:49:26 +00001192 struct be_rx_compl_info *rxcp)
Sathya Perla4097f662009-03-24 16:40:13 -07001193{
Sathya Perlaac124ff2011-07-25 19:10:14 +00001194 struct be_rx_stats *stats = rx_stats(rxo);
Sathya Perla4097f662009-03-24 16:40:13 -07001195
Sathya Perlaab1594e2011-07-25 19:10:15 +00001196 u64_stats_update_begin(&stats->sync);
Sathya Perla3abcded2010-10-03 22:12:27 -07001197 stats->rx_compl++;
Sathya Perla2e588f82011-03-11 02:49:26 +00001198 stats->rx_bytes += rxcp->pkt_size;
Sathya Perla3abcded2010-10-03 22:12:27 -07001199 stats->rx_pkts++;
Sathya Perla2e588f82011-03-11 02:49:26 +00001200 if (rxcp->pkt_type == BE_MULTICAST_PACKET)
Sathya Perla3abcded2010-10-03 22:12:27 -07001201 stats->rx_mcast_pkts++;
Sathya Perla2e588f82011-03-11 02:49:26 +00001202 if (rxcp->err)
Sathya Perlaac124ff2011-07-25 19:10:14 +00001203 stats->rx_compl_err++;
Sathya Perlaab1594e2011-07-25 19:10:15 +00001204 u64_stats_update_end(&stats->sync);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001205}
1206
Sathya Perla2e588f82011-03-11 02:49:26 +00001207static inline bool csum_passed(struct be_rx_compl_info *rxcp)
Ajit Khaparde728a9972009-04-13 15:41:22 -07001208{
Padmanabh Ratnakar19fad862011-03-07 03:08:16 +00001209 /* L4 checksum is not reliable for non TCP/UDP packets.
1210 * Also ignore ipcksm for ipv6 pkts */
Sathya Perla2e588f82011-03-11 02:49:26 +00001211 return (rxcp->tcpf || rxcp->udpf) && rxcp->l4_csum &&
1212 (rxcp->ip_csum || rxcp->ipv6);
Ajit Khaparde728a9972009-04-13 15:41:22 -07001213}
1214
Sathya Perla10ef9ab2012-02-09 18:05:27 +00001215static struct be_rx_page_info *get_rx_page_info(struct be_rx_obj *rxo,
1216 u16 frag_idx)
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001217{
Sathya Perla10ef9ab2012-02-09 18:05:27 +00001218 struct be_adapter *adapter = rxo->adapter;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001219 struct be_rx_page_info *rx_page_info;
Sathya Perla3abcded2010-10-03 22:12:27 -07001220 struct be_queue_info *rxq = &rxo->q;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001221
Sathya Perla3abcded2010-10-03 22:12:27 -07001222 rx_page_info = &rxo->page_info_tbl[frag_idx];
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001223 BUG_ON(!rx_page_info->page);
1224
Ajit Khaparde205859a2010-02-09 01:34:21 +00001225 if (rx_page_info->last_page_user) {
Ivan Vecera2b7bceb2011-02-02 08:05:12 +00001226 dma_unmap_page(&adapter->pdev->dev,
1227 dma_unmap_addr(rx_page_info, bus),
1228 adapter->big_page_size, DMA_FROM_DEVICE);
Ajit Khaparde205859a2010-02-09 01:34:21 +00001229 rx_page_info->last_page_user = false;
1230 }
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001231
1232 atomic_dec(&rxq->used);
1233 return rx_page_info;
1234}
1235
1236/* Throwaway the data in the Rx completion */
Sathya Perla10ef9ab2012-02-09 18:05:27 +00001237static void be_rx_compl_discard(struct be_rx_obj *rxo,
1238 struct be_rx_compl_info *rxcp)
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001239{
Sathya Perla3abcded2010-10-03 22:12:27 -07001240 struct be_queue_info *rxq = &rxo->q;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001241 struct be_rx_page_info *page_info;
Sathya Perla2e588f82011-03-11 02:49:26 +00001242 u16 i, num_rcvd = rxcp->num_rcvd;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001243
Padmanabh Ratnakare80d9da2011-03-07 03:07:58 +00001244 for (i = 0; i < num_rcvd; i++) {
Sathya Perla10ef9ab2012-02-09 18:05:27 +00001245 page_info = get_rx_page_info(rxo, rxcp->rxq_idx);
Padmanabh Ratnakare80d9da2011-03-07 03:07:58 +00001246 put_page(page_info->page);
1247 memset(page_info, 0, sizeof(*page_info));
Sathya Perla2e588f82011-03-11 02:49:26 +00001248 index_inc(&rxcp->rxq_idx, rxq->len);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001249 }
1250}
1251
/*
 * skb_fill_rx_data forms a complete skb for an ether frame
 * indicated by rxcp.
 *
 * The first fragment is either copied entirely into the skb head (tiny
 * packets, <= BE_HDR_LEN) or split: the Ethernet header is copied and the
 * remainder attached as a page fragment. Subsequent RX fragments are
 * appended as page frags; frags that share a physical page (page_offset
 * != 0) are coalesced into the previous frag slot.
 */
static void skb_fill_rx_data(struct be_rx_obj *rxo, struct sk_buff *skb,
			struct be_rx_compl_info *rxcp)
{
	struct be_queue_info *rxq = &rxo->q;
	struct be_rx_page_info *page_info;
	u16 i, j;
	u16 hdr_len, curr_frag_len, remaining;
	u8 *start;

	page_info = get_rx_page_info(rxo, rxcp->rxq_idx);
	start = page_address(page_info->page) + page_info->page_offset;
	prefetch(start);

	/* Copy data in the first descriptor of this completion */
	curr_frag_len = min(rxcp->pkt_size, rx_frag_size);

	skb->len = curr_frag_len;
	if (curr_frag_len <= BE_HDR_LEN) { /* tiny packet */
		memcpy(skb->data, start, curr_frag_len);
		/* Complete packet has now been moved to data */
		put_page(page_info->page);
		skb->data_len = 0;
		skb->tail += curr_frag_len;
	} else {
		/* Copy only the Ethernet header; hang the rest of the
		 * first fragment off the skb as page frag 0.
		 */
		hdr_len = ETH_HLEN;
		memcpy(skb->data, start, hdr_len);
		skb_shinfo(skb)->nr_frags = 1;
		skb_frag_set_page(skb, 0, page_info->page);
		skb_shinfo(skb)->frags[0].page_offset =
					page_info->page_offset + hdr_len;
		skb_frag_size_set(&skb_shinfo(skb)->frags[0], curr_frag_len - hdr_len);
		skb->data_len = curr_frag_len - hdr_len;
		skb->truesize += rx_frag_size;
		skb->tail += hdr_len;
	}
	page_info->page = NULL;	/* ownership moved to the skb (or dropped) */

	if (rxcp->pkt_size <= rx_frag_size) {
		BUG_ON(rxcp->num_rcvd != 1);
		return;
	}

	/* More frags present for this completion */
	index_inc(&rxcp->rxq_idx, rxq->len);
	remaining = rxcp->pkt_size - curr_frag_len;
	for (i = 1, j = 0; i < rxcp->num_rcvd; i++) {
		page_info = get_rx_page_info(rxo, rxcp->rxq_idx);
		curr_frag_len = min(remaining, rx_frag_size);

		/* Coalesce all frags from the same physical page in one slot */
		if (page_info->page_offset == 0) {
			/* Fresh page */
			j++;
			skb_frag_set_page(skb, j, page_info->page);
			skb_shinfo(skb)->frags[j].page_offset =
							page_info->page_offset;
			skb_frag_size_set(&skb_shinfo(skb)->frags[j], 0);
			skb_shinfo(skb)->nr_frags++;
		} else {
			/* Same page as previous frag: drop the extra page
			 * reference and just grow frag j below.
			 */
			put_page(page_info->page);
		}

		skb_frag_size_add(&skb_shinfo(skb)->frags[j], curr_frag_len);
		skb->len += curr_frag_len;
		skb->data_len += curr_frag_len;
		skb->truesize += rx_frag_size;
		remaining -= curr_frag_len;
		index_inc(&rxcp->rxq_idx, rxq->len);
		page_info->page = NULL;
	}
	BUG_ON(j > MAX_SKB_FRAGS);
}
1328
/* Process the RX completion indicated by rxcp when GRO is disabled.
 * Allocates a fresh skb, attaches the received fragments to it and hands
 * it to the network stack via netif_receive_skb().
 */
static void be_rx_compl_process(struct be_rx_obj *rxo,
			struct be_rx_compl_info *rxcp)
{
	struct be_adapter *adapter = rxo->adapter;
	struct net_device *netdev = adapter->netdev;
	struct sk_buff *skb;

	skb = netdev_alloc_skb_ip_align(netdev, BE_RX_SKB_ALLOC_SIZE);
	if (unlikely(!skb)) {
		/* No skb available: count the drop and release the posted
		 * rx buffers belonging to this completion */
		rx_stats(rxo)->rx_drops_no_skbs++;
		be_rx_compl_discard(rxo, rxcp);
		return;
	}

	skb_fill_rx_data(rxo, skb, rxcp);

	/* Trust the HW checksum result only when RXCSUM is enabled */
	if (likely((netdev->features & NETIF_F_RXCSUM) && csum_passed(rxcp)))
		skb->ip_summed = CHECKSUM_UNNECESSARY;
	else
		skb_checksum_none_assert(skb);

	skb->protocol = eth_type_trans(skb, netdev);
	skb_record_rx_queue(skb, rxo - &adapter->rx_obj[0]);
	if (netdev->features & NETIF_F_RXHASH)
		skb->rxhash = rxcp->rss_hash;


	if (rxcp->vlanf)
		__vlan_hwaccel_put_tag(skb, rxcp->vlan_tag);

	netif_receive_skb(skb);
}
1362
/* Process the RX completion indicated by rxcp when GRO is enabled.
 * Builds a frag-only skb (no data copy) from napi_get_frags() and feeds
 * it to the GRO engine via napi_gro_frags().
 * NOTE(review): not declared static although all visible callers are in
 * this file — confirm there are no external users before adding static.
 */
void be_rx_compl_process_gro(struct be_rx_obj *rxo, struct napi_struct *napi,
			struct be_rx_compl_info *rxcp)
{
	struct be_adapter *adapter = rxo->adapter;
	struct be_rx_page_info *page_info;
	struct sk_buff *skb = NULL;
	struct be_queue_info *rxq = &rxo->q;
	u16 remaining, curr_frag_len;
	u16 i, j;

	skb = napi_get_frags(napi);
	if (!skb) {
		be_rx_compl_discard(rxo, rxcp);
		return;
	}

	remaining = rxcp->pkt_size;
	/* j starts at -1 (wraps on u16); the first iteration always takes
	 * the "fresh slot" branch and bumps it to 0 */
	for (i = 0, j = -1; i < rxcp->num_rcvd; i++) {
		page_info = get_rx_page_info(rxo, rxcp->rxq_idx);

		curr_frag_len = min(remaining, rx_frag_size);

		/* Coalesce all frags from the same physical page in one slot */
		if (i == 0 || page_info->page_offset == 0) {
			/* First frag or Fresh page */
			j++;
			skb_frag_set_page(skb, j, page_info->page);
			skb_shinfo(skb)->frags[j].page_offset =
							page_info->page_offset;
			skb_frag_size_set(&skb_shinfo(skb)->frags[j], 0);
		} else {
			/* Same page as the previous frag: the slot already
			 * holds a ref, so drop this buffer's extra one */
			put_page(page_info->page);
		}
		skb_frag_size_add(&skb_shinfo(skb)->frags[j], curr_frag_len);
		skb->truesize += rx_frag_size;
		remaining -= curr_frag_len;
		index_inc(&rxcp->rxq_idx, rxq->len);
		memset(page_info, 0, sizeof(*page_info));
	}
	BUG_ON(j > MAX_SKB_FRAGS);

	skb_shinfo(skb)->nr_frags = j + 1;
	skb->len = rxcp->pkt_size;
	skb->data_len = rxcp->pkt_size;
	/* NOTE(review): csum marked verified unconditionally on the GRO
	 * path — assumes callers only take this path for csum-passed pkts */
	skb->ip_summed = CHECKSUM_UNNECESSARY;
	skb_record_rx_queue(skb, rxo - &adapter->rx_obj[0]);
	if (adapter->netdev->features & NETIF_F_RXHASH)
		skb->rxhash = rxcp->rss_hash;

	if (rxcp->vlanf)
		__vlan_hwaccel_put_tag(skb, rxcp->vlan_tag);

	napi_gro_frags(napi);
}
1418
Sathya Perla10ef9ab2012-02-09 18:05:27 +00001419static void be_parse_rx_compl_v1(struct be_eth_rx_compl *compl,
1420 struct be_rx_compl_info *rxcp)
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001421{
Sathya Perla2e588f82011-03-11 02:49:26 +00001422 rxcp->pkt_size =
1423 AMAP_GET_BITS(struct amap_eth_rx_compl_v1, pktsize, compl);
1424 rxcp->vlanf = AMAP_GET_BITS(struct amap_eth_rx_compl_v1, vtp, compl);
1425 rxcp->err = AMAP_GET_BITS(struct amap_eth_rx_compl_v1, err, compl);
1426 rxcp->tcpf = AMAP_GET_BITS(struct amap_eth_rx_compl_v1, tcpf, compl);
Padmanabh Ratnakar9ecb42f2011-03-15 14:57:09 -07001427 rxcp->udpf = AMAP_GET_BITS(struct amap_eth_rx_compl_v1, udpf, compl);
Sathya Perla2e588f82011-03-11 02:49:26 +00001428 rxcp->ip_csum =
1429 AMAP_GET_BITS(struct amap_eth_rx_compl_v1, ipcksm, compl);
1430 rxcp->l4_csum =
1431 AMAP_GET_BITS(struct amap_eth_rx_compl_v1, l4_cksm, compl);
1432 rxcp->ipv6 =
1433 AMAP_GET_BITS(struct amap_eth_rx_compl_v1, ip_version, compl);
1434 rxcp->rxq_idx =
1435 AMAP_GET_BITS(struct amap_eth_rx_compl_v1, fragndx, compl);
1436 rxcp->num_rcvd =
1437 AMAP_GET_BITS(struct amap_eth_rx_compl_v1, numfrags, compl);
1438 rxcp->pkt_type =
1439 AMAP_GET_BITS(struct amap_eth_rx_compl_v1, cast_enc, compl);
Ajit Khaparde4b972912011-04-06 18:07:43 +00001440 rxcp->rss_hash =
Sarveshwar Bandic2979772012-07-25 21:29:50 +00001441 AMAP_GET_BITS(struct amap_eth_rx_compl_v1, rsshash, compl);
Sathya Perla15d72182011-03-21 20:49:26 +00001442 if (rxcp->vlanf) {
1443 rxcp->vtm = AMAP_GET_BITS(struct amap_eth_rx_compl_v1, vtm,
David S. Miller3c709f82011-05-11 14:26:15 -04001444 compl);
1445 rxcp->vlan_tag = AMAP_GET_BITS(struct amap_eth_rx_compl_v1, vlan_tag,
1446 compl);
Sathya Perla15d72182011-03-21 20:49:26 +00001447 }
Sathya Perla12004ae2011-08-02 19:57:46 +00001448 rxcp->port = AMAP_GET_BITS(struct amap_eth_rx_compl_v1, port, compl);
Sathya Perla2e588f82011-03-11 02:49:26 +00001449}
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001450
Sathya Perla10ef9ab2012-02-09 18:05:27 +00001451static void be_parse_rx_compl_v0(struct be_eth_rx_compl *compl,
1452 struct be_rx_compl_info *rxcp)
Sathya Perla2e588f82011-03-11 02:49:26 +00001453{
1454 rxcp->pkt_size =
1455 AMAP_GET_BITS(struct amap_eth_rx_compl_v0, pktsize, compl);
1456 rxcp->vlanf = AMAP_GET_BITS(struct amap_eth_rx_compl_v0, vtp, compl);
1457 rxcp->err = AMAP_GET_BITS(struct amap_eth_rx_compl_v0, err, compl);
1458 rxcp->tcpf = AMAP_GET_BITS(struct amap_eth_rx_compl_v0, tcpf, compl);
Padmanabh Ratnakar9ecb42f2011-03-15 14:57:09 -07001459 rxcp->udpf = AMAP_GET_BITS(struct amap_eth_rx_compl_v0, udpf, compl);
Sathya Perla2e588f82011-03-11 02:49:26 +00001460 rxcp->ip_csum =
1461 AMAP_GET_BITS(struct amap_eth_rx_compl_v0, ipcksm, compl);
1462 rxcp->l4_csum =
1463 AMAP_GET_BITS(struct amap_eth_rx_compl_v0, l4_cksm, compl);
1464 rxcp->ipv6 =
1465 AMAP_GET_BITS(struct amap_eth_rx_compl_v0, ip_version, compl);
1466 rxcp->rxq_idx =
1467 AMAP_GET_BITS(struct amap_eth_rx_compl_v0, fragndx, compl);
1468 rxcp->num_rcvd =
1469 AMAP_GET_BITS(struct amap_eth_rx_compl_v0, numfrags, compl);
1470 rxcp->pkt_type =
1471 AMAP_GET_BITS(struct amap_eth_rx_compl_v0, cast_enc, compl);
Ajit Khaparde4b972912011-04-06 18:07:43 +00001472 rxcp->rss_hash =
Sarveshwar Bandic2979772012-07-25 21:29:50 +00001473 AMAP_GET_BITS(struct amap_eth_rx_compl_v0, rsshash, compl);
Sathya Perla15d72182011-03-21 20:49:26 +00001474 if (rxcp->vlanf) {
1475 rxcp->vtm = AMAP_GET_BITS(struct amap_eth_rx_compl_v0, vtm,
David S. Miller3c709f82011-05-11 14:26:15 -04001476 compl);
1477 rxcp->vlan_tag = AMAP_GET_BITS(struct amap_eth_rx_compl_v0, vlan_tag,
1478 compl);
Sathya Perla15d72182011-03-21 20:49:26 +00001479 }
Sathya Perla12004ae2011-08-02 19:57:46 +00001480 rxcp->port = AMAP_GET_BITS(struct amap_eth_rx_compl_v0, port, compl);
Sathya Perla2e588f82011-03-11 02:49:26 +00001481}
1482
/* Pop the next valid RX completion off rxo->cq, parse it into the
 * driver-format rxo->rxcp and return it; NULL when the CQ is empty.
 * The hw entry's valid bit is cleared before the tail advances, so each
 * entry is consumed exactly once.
 */
static struct be_rx_compl_info *be_rx_compl_get(struct be_rx_obj *rxo)
{
	struct be_eth_rx_compl *compl = queue_tail_node(&rxo->cq);
	struct be_rx_compl_info *rxcp = &rxo->rxcp;
	struct be_adapter *adapter = rxo->adapter;

	/* For checking the valid bit it is Ok to use either definition as the
	 * valid bit is at the same position in both v0 and v1 Rx compl */
	if (compl->dw[offsetof(struct amap_eth_rx_compl_v1, valid) / 32] == 0)
		return NULL;

	/* Read the rest of the entry only after the valid bit is seen set */
	rmb();
	be_dws_le_to_cpu(compl, sizeof(*compl));

	if (adapter->be3_native)
		be_parse_rx_compl_v1(compl, rxcp);
	else
		be_parse_rx_compl_v0(compl, rxcp);

	if (rxcp->vlanf) {
		/* vlanf could be wrongly set in some cards.
		 * ignore if vtm is not set */
		if ((adapter->function_mode & FLEX10_MODE) && !rxcp->vtm)
			rxcp->vlanf = 0;

		/* Lancer reports the tag in cpu order already */
		if (!lancer_chip(adapter))
			rxcp->vlan_tag = swab16(rxcp->vlan_tag);

		/* Suppress the tag when it is the port-vlan (pvid) and the
		 * vlan is not configured on this interface */
		if (adapter->pvid == (rxcp->vlan_tag & VLAN_VID_MASK) &&
		    !adapter->vlan_tag[rxcp->vlan_tag])
			rxcp->vlanf = 0;
	}

	/* As the compl has been parsed, reset it; we wont touch it again */
	compl->dw[offsetof(struct amap_eth_rx_compl_v1, valid) / 32] = 0;

	queue_tail_inc(&rxo->cq);
	return rxcp;
}
1522
Eric Dumazet1829b082011-03-01 05:48:12 +00001523static inline struct page *be_alloc_pages(u32 size, gfp_t gfp)
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001524{
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001525 u32 order = get_order(size);
Eric Dumazet1829b082011-03-01 05:48:12 +00001526
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001527 if (order > 0)
Eric Dumazet1829b082011-03-01 05:48:12 +00001528 gfp |= __GFP_COMP;
1529 return alloc_pages(gfp, order);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001530}
1531
/*
 * Allocate a page, split it to fragments of size rx_frag_size and post as
 * receive buffers to BE
 */
static void be_post_rx_frags(struct be_rx_obj *rxo, gfp_t gfp)
{
	struct be_adapter *adapter = rxo->adapter;
	struct be_rx_page_info *page_info = NULL, *prev_page_info = NULL;
	struct be_queue_info *rxq = &rxo->q;
	struct page *pagep = NULL;
	struct be_eth_rx_d *rxd;
	u64 page_dmaaddr = 0, frag_dmaaddr;
	u32 posted, page_offset = 0;

	/* Post until the ring slot already holds a page (ring full) or
	 * MAX_RX_POST buffers have been posted */
	page_info = &rxo->page_info_tbl[rxq->head];
	for (posted = 0; posted < MAX_RX_POST && !page_info->page; posted++) {
		if (!pagep) {
			/* Need a fresh "big" page; map it once and carve
			 * rx_frag_size fragments out of it below */
			pagep = be_alloc_pages(adapter->big_page_size, gfp);
			if (unlikely(!pagep)) {
				rx_stats(rxo)->rx_post_fail++;
				break;
			}
			/* NOTE(review): dma_map_page() result is not checked
			 * with dma_mapping_error() — confirm acceptable */
			page_dmaaddr = dma_map_page(&adapter->pdev->dev, pagep,
						0, adapter->big_page_size,
						DMA_FROM_DEVICE);
			page_info->page_offset = 0;
		} else {
			/* Another frag from the same page: take an extra ref */
			get_page(pagep);
			page_info->page_offset = page_offset + rx_frag_size;
		}
		page_offset = page_info->page_offset;
		page_info->page = pagep;
		dma_unmap_addr_set(page_info, bus, page_dmaaddr);
		frag_dmaaddr = page_dmaaddr + page_info->page_offset;

		/* Fill the rx descriptor with this fragment's DMA address */
		rxd = queue_head_node(rxq);
		rxd->fragpa_lo = cpu_to_le32(frag_dmaaddr & 0xFFFFFFFF);
		rxd->fragpa_hi = cpu_to_le32(upper_32_bits(frag_dmaaddr));

		/* Any space left in the current big page for another frag? */
		if ((page_offset + rx_frag_size + rx_frag_size) >
					adapter->big_page_size) {
			pagep = NULL;
			page_info->last_page_user = true;
		}

		prev_page_info = page_info;
		queue_head_inc(rxq);
		page_info = &rxo->page_info_tbl[rxq->head];
	}
	/* Loop exited mid-page: the last posted frag is that page's last user */
	if (pagep)
		prev_page_info->last_page_user = true;

	if (posted) {
		atomic_add(posted, &rxq->used);
		be_rxq_notify(adapter, rxq->id, posted);
	} else if (atomic_read(&rxq->used) == 0) {
		/* Let be_worker replenish when memory is available */
		rxo->rx_post_starved = true;
	}
}
1593
/* Pop the next valid TX completion off tx_cq, or NULL when none pending.
 * The valid bit is cleared before the tail advances so each entry is
 * consumed exactly once.
 */
static struct be_eth_tx_compl *be_tx_compl_get(struct be_queue_info *tx_cq)
{
	struct be_eth_tx_compl *txcp = queue_tail_node(tx_cq);

	if (txcp->dw[offsetof(struct amap_eth_tx_compl, valid) / 32] == 0)
		return NULL;

	/* Read the rest of the entry only after the valid bit is seen set */
	rmb();
	be_dws_le_to_cpu(txcp, sizeof(*txcp));

	txcp->dw[offsetof(struct amap_eth_tx_compl, valid) / 32] = 0;

	queue_tail_inc(tx_cq);
	return txcp;
}
1609
/* Unmap and free the skb whose wrbs end at last_index.
 * Returns the number of wrbs consumed (including the header wrb) so the
 * caller can credit them back to the txq's 'used' count.
 */
static u16 be_tx_compl_process(struct be_adapter *adapter,
		struct be_tx_obj *txo, u16 last_index)
{
	struct be_queue_info *txq = &txo->q;
	struct be_eth_wrb *wrb;
	struct sk_buff **sent_skbs = txo->sent_skb_list;
	struct sk_buff *sent_skb;
	u16 cur_index, num_wrbs = 1; /* account for hdr wrb */
	bool unmap_skb_hdr = true;

	sent_skb = sent_skbs[txq->tail];
	BUG_ON(!sent_skb);
	sent_skbs[txq->tail] = NULL;

	/* skip header wrb */
	queue_tail_inc(txq);

	do {
		cur_index = txq->tail;
		wrb = queue_tail_node(txq);
		/* only the first data wrb may also cover the skb header */
		unmap_tx_frag(&adapter->pdev->dev, wrb,
			      (unmap_skb_hdr && skb_headlen(sent_skb)));
		unmap_skb_hdr = false;

		num_wrbs++;
		queue_tail_inc(txq);
	} while (cur_index != last_index);

	kfree_skb(sent_skb);
	return num_wrbs;
}
1641
/* Return the number of events in the event queue.
 * Consumes entries by zeroing their evt word and advancing the tail;
 * stops at the first empty (evt == 0) entry.
 */
static inline int events_get(struct be_eq_obj *eqo)
{
	struct be_eq_entry *eqe;
	int num = 0;

	do {
		eqe = queue_tail_node(&eqo->q);
		if (eqe->evt == 0)
			break;

		/* order the evt read before consuming/clearing the entry */
		rmb();
		eqe->evt = 0;
		num++;
		queue_tail_inc(&eqo->q);
	} while (true);

	return num;
}
1661
Sathya Perla10ef9ab2012-02-09 18:05:27 +00001662static int event_handle(struct be_eq_obj *eqo)
Sathya Perla859b1e42009-08-10 03:43:51 +00001663{
Sathya Perla10ef9ab2012-02-09 18:05:27 +00001664 bool rearm = false;
1665 int num = events_get(eqo);
Sathya Perla859b1e42009-08-10 03:43:51 +00001666
Sathya Perla10ef9ab2012-02-09 18:05:27 +00001667 /* Deal with any spurious interrupts that come without events */
1668 if (!num)
1669 rearm = true;
Sathya Perla859b1e42009-08-10 03:43:51 +00001670
Padmanabh Ratnakaraf311fe2012-04-25 01:46:39 +00001671 if (num || msix_enabled(eqo->adapter))
1672 be_eq_notify(eqo->adapter, eqo->q.id, rearm, true, num);
1673
Sathya Perla859b1e42009-08-10 03:43:51 +00001674 if (num)
Sathya Perla10ef9ab2012-02-09 18:05:27 +00001675 napi_schedule(&eqo->napi);
1676
1677 return num;
Sathya Perla859b1e42009-08-10 03:43:51 +00001678}
1679
Sathya Perla10ef9ab2012-02-09 18:05:27 +00001680/* Leaves the EQ is disarmed state */
1681static void be_eq_clean(struct be_eq_obj *eqo)
1682{
1683 int num = events_get(eqo);
1684
1685 be_eq_notify(eqo->adapter, eqo->q.id, false, true, num);
1686}
1687
/* Drain the RX completion queue and release any posted-but-unused rx
 * buffers; used on queue teardown. Leaves the rxq empty (head == tail == 0).
 */
static void be_rx_cq_clean(struct be_rx_obj *rxo)
{
	struct be_rx_page_info *page_info;
	struct be_queue_info *rxq = &rxo->q;
	struct be_queue_info *rx_cq = &rxo->cq;
	struct be_rx_compl_info *rxcp;
	u16 tail;

	/* First cleanup pending rx completions */
	while ((rxcp = be_rx_compl_get(rxo)) != NULL) {
		be_rx_compl_discard(rxo, rxcp);
		be_cq_notify(rxo->adapter, rx_cq->id, false, 1);
	}

	/* Then free posted rx buffer that were not used */
	tail = (rxq->head + rxq->len - atomic_read(&rxq->used)) % rxq->len;
	for (; atomic_read(&rxq->used) > 0; index_inc(&tail, rxq->len)) {
		page_info = get_rx_page_info(rxo, tail);
		put_page(page_info->page);
		memset(page_info, 0, sizeof(*page_info));
	}
	BUG_ON(atomic_read(&rxq->used));
	rxq->tail = rxq->head = 0;
}
1712
/* Wait (bounded, ~200ms) for outstanding TX completions on all txqs,
 * process them, then forcibly unmap/free any posted skbs whose
 * completions never arrived. Used on interface teardown.
 */
static void be_tx_compl_clean(struct be_adapter *adapter)
{
	struct be_tx_obj *txo;
	struct be_queue_info *txq;
	struct be_eth_tx_compl *txcp;
	u16 end_idx, cmpl = 0, timeo = 0, num_wrbs = 0;
	struct sk_buff *sent_skb;
	bool dummy_wrb;
	int i, pending_txqs;

	/* Wait for a max of 200ms for all the tx-completions to arrive. */
	do {
		pending_txqs = adapter->num_tx_qs;

		for_all_tx_queues(adapter, txo, i) {
			txq = &txo->q;
			while ((txcp = be_tx_compl_get(&txo->cq))) {
				end_idx =
					AMAP_GET_BITS(struct amap_eth_tx_compl,
						      wrb_index, txcp);
				num_wrbs += be_tx_compl_process(adapter, txo,
								end_idx);
				cmpl++;
			}
			/* Ack the completions and credit the wrbs back */
			if (cmpl) {
				be_cq_notify(adapter, txo->cq.id, false, cmpl);
				atomic_sub(num_wrbs, &txq->used);
				cmpl = 0;
				num_wrbs = 0;
			}
			if (atomic_read(&txq->used) == 0)
				pending_txqs--;
		}

		if (pending_txqs == 0 || ++timeo > 200)
			break;

		mdelay(1);
	} while (true);

	for_all_tx_queues(adapter, txo, i) {
		txq = &txo->q;
		if (atomic_read(&txq->used))
			dev_err(&adapter->pdev->dev, "%d pending tx-compls\n",
				atomic_read(&txq->used));

		/* free posted tx for which compls will never arrive */
		while (atomic_read(&txq->used)) {
			sent_skb = txo->sent_skb_list[txq->tail];
			end_idx = txq->tail;
			/* compute this skb's last wrb index so the normal
			 * completion-processing path can be reused */
			num_wrbs = wrb_cnt_for_skb(adapter, sent_skb,
						   &dummy_wrb);
			index_adv(&end_idx, num_wrbs - 1, txq->len);
			num_wrbs = be_tx_compl_process(adapter, txo, end_idx);
			atomic_sub(num_wrbs, &txq->used);
		}
	}
}
1771
Sathya Perla10ef9ab2012-02-09 18:05:27 +00001772static void be_evt_queues_destroy(struct be_adapter *adapter)
1773{
1774 struct be_eq_obj *eqo;
1775 int i;
1776
1777 for_all_evt_queues(adapter, eqo, i) {
Padmanabh Ratnakar19d59aa2012-07-12 03:57:21 +00001778 if (eqo->q.created) {
1779 be_eq_clean(eqo);
Sathya Perla10ef9ab2012-02-09 18:05:27 +00001780 be_cmd_q_destroy(adapter, &eqo->q, QTYPE_EQ);
Padmanabh Ratnakar19d59aa2012-07-12 03:57:21 +00001781 }
Sathya Perla10ef9ab2012-02-09 18:05:27 +00001782 be_queue_free(adapter, &eqo->q);
1783 }
1784}
1785
/* Allocate and create one event queue per irq vector.
 * Returns 0 on success or the first alloc/cmd error.
 * NOTE(review): on failure, already-created queues are left as-is —
 * confirm the caller unwinds via be_evt_queues_destroy().
 */
static int be_evt_queues_create(struct be_adapter *adapter)
{
	struct be_queue_info *eq;
	struct be_eq_obj *eqo;
	int i, rc;

	adapter->num_evt_qs = num_irqs(adapter);

	for_all_evt_queues(adapter, eqo, i) {
		eqo->adapter = adapter;
		eqo->tx_budget = BE_TX_BUDGET;
		eqo->idx = i;
		eqo->max_eqd = BE_MAX_EQD;
		eqo->enable_aic = true;

		eq = &eqo->q;
		rc = be_queue_alloc(adapter, eq, EVNT_Q_LEN,
					sizeof(struct be_eq_entry));
		if (rc)
			return rc;

		rc = be_cmd_eq_create(adapter, eq, eqo->cur_eqd);
		if (rc)
			return rc;
	}
	return 0;
}
1813
Sathya Perla5fb379e2009-06-18 00:02:59 +00001814static void be_mcc_queues_destroy(struct be_adapter *adapter)
1815{
1816 struct be_queue_info *q;
Sathya Perla5fb379e2009-06-18 00:02:59 +00001817
Sathya Perla8788fdc2009-07-27 22:52:03 +00001818 q = &adapter->mcc_obj.q;
Sathya Perla5fb379e2009-06-18 00:02:59 +00001819 if (q->created)
Sathya Perla8788fdc2009-07-27 22:52:03 +00001820 be_cmd_q_destroy(adapter, q, QTYPE_MCCQ);
Sathya Perla5fb379e2009-06-18 00:02:59 +00001821 be_queue_free(adapter, q);
1822
Sathya Perla8788fdc2009-07-27 22:52:03 +00001823 q = &adapter->mcc_obj.cq;
Sathya Perla5fb379e2009-06-18 00:02:59 +00001824 if (q->created)
Sathya Perla8788fdc2009-07-27 22:52:03 +00001825 be_cmd_q_destroy(adapter, q, QTYPE_CQ);
Sathya Perla5fb379e2009-06-18 00:02:59 +00001826 be_queue_free(adapter, q);
1827}
1828
/* Create the MCC queue and its completion queue. Must be called after the
 * event queues exist, as the MCC CQ is bound to the default EQ (mcc_eqo).
 * Returns 0 on success, -1 on any failure; partially created queues are
 * unwound via the goto ladder below.
 */
static int be_mcc_queues_create(struct be_adapter *adapter)
{
	struct be_queue_info *q, *cq;

	cq = &adapter->mcc_obj.cq;
	if (be_queue_alloc(adapter, cq, MCC_CQ_LEN,
			sizeof(struct be_mcc_compl)))
		goto err;

	/* Use the default EQ for MCC completions */
	if (be_cmd_cq_create(adapter, cq, &mcc_eqo(adapter)->q, true, 0))
		goto mcc_cq_free;

	q = &adapter->mcc_obj.q;
	if (be_queue_alloc(adapter, q, MCC_Q_LEN, sizeof(struct be_mcc_wrb)))
		goto mcc_cq_destroy;

	if (be_cmd_mccq_create(adapter, q, cq))
		goto mcc_q_free;

	return 0;

mcc_q_free:
	be_queue_free(adapter, q);
mcc_cq_destroy:
	be_cmd_q_destroy(adapter, cq, QTYPE_CQ);
mcc_cq_free:
	be_queue_free(adapter, cq);
err:
	return -1;
}
1861
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001862static void be_tx_queues_destroy(struct be_adapter *adapter)
1863{
1864 struct be_queue_info *q;
Sathya Perla3c8def92011-06-12 20:01:58 +00001865 struct be_tx_obj *txo;
1866 u8 i;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001867
Sathya Perla3c8def92011-06-12 20:01:58 +00001868 for_all_tx_queues(adapter, txo, i) {
1869 q = &txo->q;
1870 if (q->created)
1871 be_cmd_q_destroy(adapter, q, QTYPE_TXQ);
1872 be_queue_free(adapter, q);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001873
Sathya Perla3c8def92011-06-12 20:01:58 +00001874 q = &txo->cq;
1875 if (q->created)
1876 be_cmd_q_destroy(adapter, q, QTYPE_CQ);
1877 be_queue_free(adapter, q);
1878 }
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001879}
1880
Sathya Perladafc0fe2011-10-24 02:45:02 +00001881static int be_num_txqs_want(struct be_adapter *adapter)
1882{
Padmanabh Ratnakarabb93952012-10-20 06:01:41 +00001883 if ((!lancer_chip(adapter) && sriov_want(adapter)) ||
1884 be_is_mc(adapter) ||
1885 (!lancer_chip(adapter) && !be_physfn(adapter)) ||
Sathya Perla39f1d942012-05-08 19:41:24 +00001886 adapter->generation == BE_GEN2)
Sathya Perladafc0fe2011-10-24 02:45:02 +00001887 return 1;
1888 else
Padmanabh Ratnakarabb93952012-10-20 06:01:41 +00001889 return adapter->max_tx_queues;
Sathya Perladafc0fe2011-10-24 02:45:02 +00001890}
1891
/* Decide how many TX queues to use, publish that count to the netdev and
 * create one TX completion queue per txq. When there are fewer EQs than
 * TXQs, multiple TX CQs share an EQ (round-robin by index).
 * Returns 0 on success or the first alloc/cmd error.
 */
static int be_tx_cqs_create(struct be_adapter *adapter)
{
	struct be_queue_info *cq, *eq;
	int status;
	struct be_tx_obj *txo;
	u8 i;

	adapter->num_tx_qs = be_num_txqs_want(adapter);
	if (adapter->num_tx_qs != MAX_TX_QS) {
		/* rtnl protects the netdev's real tx queue count update */
		rtnl_lock();
		netif_set_real_num_tx_queues(adapter->netdev,
			adapter->num_tx_qs);
		rtnl_unlock();
	}

	for_all_tx_queues(adapter, txo, i) {
		cq = &txo->cq;
		status = be_queue_alloc(adapter, cq, TX_CQ_LEN,
					sizeof(struct be_eth_tx_compl));
		if (status)
			return status;

		/* If num_evt_qs is less than num_tx_qs, then more than
		 * one txq share an eq
		 */
		eq = &adapter->eq_obj[i % adapter->num_evt_qs].q;
		status = be_cmd_cq_create(adapter, cq, eq, false, 3);
		if (status)
			return status;
	}
	return 0;
}
1924
/* Allocate and create the actual TX (WRB) queues, one per be_tx_obj.
 * Must run after be_tx_cqs_create() since each TXQ is bound to its CQ.
 * Returns 0 on success or the first failing status; already-created
 * queues are left for the caller's cleanup path.
 */
static int be_tx_qs_create(struct be_adapter *adapter)
{
	struct be_tx_obj *txo;
	int i, status;

	for_all_tx_queues(adapter, txo, i) {
		status = be_queue_alloc(adapter, &txo->q, TX_Q_LEN,
					sizeof(struct be_eth_wrb));
		if (status)
			return status;

		status = be_cmd_txq_create(adapter, &txo->q, &txo->cq);
		if (status)
			return status;
	}

	dev_info(&adapter->pdev->dev, "created %d TX queue(s)\n",
		adapter->num_tx_qs);
	return 0;
}
1945
1946static void be_rx_cqs_destroy(struct be_adapter *adapter)
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001947{
1948 struct be_queue_info *q;
Sathya Perla3abcded2010-10-03 22:12:27 -07001949 struct be_rx_obj *rxo;
1950 int i;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001951
Sathya Perla3abcded2010-10-03 22:12:27 -07001952 for_all_rx_queues(adapter, rxo, i) {
Sathya Perla3abcded2010-10-03 22:12:27 -07001953 q = &rxo->cq;
1954 if (q->created)
1955 be_cmd_q_destroy(adapter, q, QTYPE_CQ);
1956 be_queue_free(adapter, q);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001957 }
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001958}
1959
/* Allocate and create the RX completion queues.
 * num_rx_qs is one RSS ring per irq plus the default RXQ; with a single
 * irq no RSS rings are created at all. Each RX CQ is attached to an EQ
 * round-robin so CQs may share EQs when num_evt_qs < num_rx_qs.
 * Returns 0 on success or the first failing status.
 */
static int be_rx_cqs_create(struct be_adapter *adapter)
{
	struct be_queue_info *eq, *cq;
	struct be_rx_obj *rxo;
	int rc, i;

	/* We'll create as many RSS rings as there are irqs.
	 * But when there's only one irq there's no use creating RSS rings
	 */
	adapter->num_rx_qs = (num_irqs(adapter) > 1) ?
				num_irqs(adapter) + 1 : 1;
	if (adapter->num_rx_qs != MAX_RX_QS) {
		/* rtnl lock protects the real_num_rx_queues update */
		rtnl_lock();
		netif_set_real_num_rx_queues(adapter->netdev,
					adapter->num_rx_qs);
		rtnl_unlock();
	}

	/* big_page_size: contiguous span used when posting rx fragments */
	adapter->big_page_size = (1 << get_order(rx_frag_size)) * PAGE_SIZE;
	for_all_rx_queues(adapter, rxo, i) {
		rxo->adapter = adapter;
		cq = &rxo->cq;
		rc = be_queue_alloc(adapter, cq, RX_CQ_LEN,
				sizeof(struct be_eth_rx_compl));
		if (rc)
			return rc;

		eq = &adapter->eq_obj[i % adapter->num_evt_qs].q;
		rc = be_cmd_cq_create(adapter, cq, eq, false, 3);
		if (rc)
			return rc;
	}

	dev_info(&adapter->pdev->dev,
		"created %d RSS queue(s) and 1 default RX queue\n",
		adapter->num_rx_qs - 1);
	return 0;
}
1998
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001999static irqreturn_t be_intx(int irq, void *dev)
2000{
2001 struct be_adapter *adapter = dev;
Sathya Perla10ef9ab2012-02-09 18:05:27 +00002002 int num_evts;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002003
Sathya Perla10ef9ab2012-02-09 18:05:27 +00002004 /* With INTx only one EQ is used */
2005 num_evts = event_handle(&adapter->eq_obj[0]);
2006 if (num_evts)
2007 return IRQ_HANDLED;
2008 else
2009 return IRQ_NONE;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002010}
2011
Sathya Perla10ef9ab2012-02-09 18:05:27 +00002012static irqreturn_t be_msix(int irq, void *dev)
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002013{
Sathya Perla10ef9ab2012-02-09 18:05:27 +00002014 struct be_eq_obj *eqo = dev;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002015
Sathya Perla10ef9ab2012-02-09 18:05:27 +00002016 event_handle(eqo);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002017 return IRQ_HANDLED;
2018}
2019
Sathya Perla2e588f82011-03-11 02:49:26 +00002020static inline bool do_gro(struct be_rx_compl_info *rxcp)
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002021{
Sathya Perla2e588f82011-03-11 02:49:26 +00002022 return (rxcp->tcpf && !rxcp->err) ? true : false;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002023}
2024
/* NAPI RX work: drain up to @budget completions from rxo's CQ.
 * Flush completions (num_rcvd == 0), partial-DMA completions (Lancer B0)
 * and wrong-port frames (BE promiscuous-mode filtering quirk) are
 * discarded; everything else goes through GRO or the regular RX path.
 * Consumed CQ entries are acked and the RX ring is replenished when it
 * drops below the refill watermark. Returns the number of completions
 * processed (<= budget).
 */
static int be_process_rx(struct be_rx_obj *rxo, struct napi_struct *napi,
			int budget)
{
	struct be_adapter *adapter = rxo->adapter;
	struct be_queue_info *rx_cq = &rxo->cq;
	struct be_rx_compl_info *rxcp;
	u32 work_done;

	for (work_done = 0; work_done < budget; work_done++) {
		rxcp = be_rx_compl_get(rxo);
		if (!rxcp)
			break;

		/* Is it a flush compl that has no data */
		if (unlikely(rxcp->num_rcvd == 0))
			goto loop_continue;

		/* Discard compl with partial DMA Lancer B0 */
		if (unlikely(!rxcp->pkt_size)) {
			be_rx_compl_discard(rxo, rxcp);
			goto loop_continue;
		}

		/* On BE drop pkts that arrive due to imperfect filtering in
		 * promiscuous mode on some skews
		 */
		if (unlikely(rxcp->port != adapter->port_num &&
				!lancer_chip(adapter))) {
			be_rx_compl_discard(rxo, rxcp);
			goto loop_continue;
		}

		if (do_gro(rxcp))
			be_rx_compl_process_gro(rxo, napi, rxcp);
		else
			be_rx_compl_process(rxo, rxcp);
loop_continue:
		/* stats are updated even for discarded completions */
		be_rx_stats_update(rxo, rxcp);
	}

	if (work_done) {
		be_cq_notify(adapter, rx_cq->id, true, work_done);

		/* Replenish the RX ring if it is running low on frags */
		if (atomic_read(&rxo->q.used) < RX_FRAGS_REFILL_WM)
			be_post_rx_frags(rxo, GFP_ATOMIC);
	}

	return work_done;
}
2074
/* NAPI TX work: reap up to @budget TX completions from txo's CQ,
 * freeing the corresponding WRBs/skbs. Wakes subqueue @idx if it was
 * flow-stopped and the TXQ is now at least half empty, and folds the
 * completion count into the per-queue u64 stats.
 * Returns true when the CQ was fully drained (work_done < budget).
 */
static bool be_process_tx(struct be_adapter *adapter, struct be_tx_obj *txo,
			int budget, int idx)
{
	struct be_eth_tx_compl *txcp;
	int num_wrbs = 0, work_done;

	for (work_done = 0; work_done < budget; work_done++) {
		txcp = be_tx_compl_get(&txo->cq);
		if (!txcp)
			break;
		num_wrbs += be_tx_compl_process(adapter, txo,
				AMAP_GET_BITS(struct amap_eth_tx_compl,
					wrb_index, txcp));
	}

	if (work_done) {
		be_cq_notify(adapter, txo->cq.id, true, work_done);
		atomic_sub(num_wrbs, &txo->q.used);

		/* As Tx wrbs have been freed up, wake up netdev queue
		 * if it was stopped due to lack of tx wrbs. */
		if (__netif_subqueue_stopped(adapter->netdev, idx) &&
			atomic_read(&txo->q.used) < txo->q.len / 2) {
			netif_wake_subqueue(adapter->netdev, idx);
		}

		u64_stats_update_begin(&tx_stats(txo)->sync_compl);
		tx_stats(txo)->tx_compl += work_done;
		u64_stats_update_end(&tx_stats(txo)->sync_compl);
	}
	return (work_done < budget); /* Done */
}
Sathya Perla3c8def92011-06-12 20:01:58 +00002107
/* NAPI poll handler for one EQ: services every TXQ and RXQ mapped to
 * this EQ (queues are striped across EQs by num_evt_qs), plus MCC
 * completions on the MCC EQ. If all work fit in @budget the EQ is
 * re-armed via napi_complete(); otherwise polling continues and the
 * accumulated events are only counted/cleared. Returns work consumed.
 */
int be_poll(struct napi_struct *napi, int budget)
{
	struct be_eq_obj *eqo = container_of(napi, struct be_eq_obj, napi);
	struct be_adapter *adapter = eqo->adapter;
	int max_work = 0, work, i;
	bool tx_done;

	/* Process all TXQs serviced by this EQ */
	for (i = eqo->idx; i < adapter->num_tx_qs; i += adapter->num_evt_qs) {
		tx_done = be_process_tx(adapter, &adapter->tx_obj[i],
					eqo->tx_budget, i);
		/* An undrained TX CQ forces another poll round */
		if (!tx_done)
			max_work = budget;
	}

	/* This loop will iterate twice for EQ0 in which
	 * completions of the last RXQ (default one) are also processed
	 * For other EQs the loop iterates only once
	 */
	for (i = eqo->idx; i < adapter->num_rx_qs; i += adapter->num_evt_qs) {
		work = be_process_rx(&adapter->rx_obj[i], napi, budget);
		max_work = max(work, max_work);
	}

	if (is_mcc_eqo(eqo))
		be_process_mcc(adapter);

	if (max_work < budget) {
		napi_complete(napi);
		/* re-arm the EQ so new events raise an interrupt */
		be_eq_notify(adapter, eqo->q.id, true, false, 0);
	} else {
		/* As we'll continue in polling mode, count and clear events */
		be_eq_notify(adapter, eqo->q.id, false, false, events_get(eqo));
	}
	return max_work;
}
2144
Padmanabh Ratnakarf67ef7b2012-07-12 03:57:09 +00002145void be_detect_error(struct be_adapter *adapter)
Ajit Khaparde7c185272010-07-29 06:16:33 +00002146{
Padmanabh Ratnakare1cfb672011-11-03 01:50:08 +00002147 u32 ue_lo = 0, ue_hi = 0, ue_lo_mask = 0, ue_hi_mask = 0;
2148 u32 sliport_status = 0, sliport_err1 = 0, sliport_err2 = 0;
Ajit Khaparde7c185272010-07-29 06:16:33 +00002149 u32 i;
2150
Padmanabh Ratnakarf67ef7b2012-07-12 03:57:09 +00002151 if (be_crit_error(adapter))
Sathya Perla72f02482011-11-10 19:17:58 +00002152 return;
2153
Padmanabh Ratnakare1cfb672011-11-03 01:50:08 +00002154 if (lancer_chip(adapter)) {
2155 sliport_status = ioread32(adapter->db + SLIPORT_STATUS_OFFSET);
2156 if (sliport_status & SLIPORT_STATUS_ERR_MASK) {
2157 sliport_err1 = ioread32(adapter->db +
2158 SLIPORT_ERROR1_OFFSET);
2159 sliport_err2 = ioread32(adapter->db +
2160 SLIPORT_ERROR2_OFFSET);
2161 }
2162 } else {
2163 pci_read_config_dword(adapter->pdev,
2164 PCICFG_UE_STATUS_LOW, &ue_lo);
2165 pci_read_config_dword(adapter->pdev,
2166 PCICFG_UE_STATUS_HIGH, &ue_hi);
2167 pci_read_config_dword(adapter->pdev,
2168 PCICFG_UE_STATUS_LOW_MASK, &ue_lo_mask);
2169 pci_read_config_dword(adapter->pdev,
2170 PCICFG_UE_STATUS_HI_MASK, &ue_hi_mask);
Ajit Khaparde7c185272010-07-29 06:16:33 +00002171
Padmanabh Ratnakarf67ef7b2012-07-12 03:57:09 +00002172 ue_lo = (ue_lo & ~ue_lo_mask);
2173 ue_hi = (ue_hi & ~ue_hi_mask);
Padmanabh Ratnakare1cfb672011-11-03 01:50:08 +00002174 }
Ajit Khaparde7c185272010-07-29 06:16:33 +00002175
Ajit Khaparde1451ae62012-10-08 18:18:21 +00002176 /* On certain platforms BE hardware can indicate spurious UEs.
2177 * Allow the h/w to stop working completely in case of a real UE.
2178 * Hence not setting the hw_error for UE detection.
2179 */
2180 if (sliport_status & SLIPORT_STATUS_ERR_MASK) {
Padmanabh Ratnakarf67ef7b2012-07-12 03:57:09 +00002181 adapter->hw_error = true;
Sathya Perla434b3642011-11-10 19:17:59 +00002182 dev_err(&adapter->pdev->dev,
Padmanabh Ratnakarf67ef7b2012-07-12 03:57:09 +00002183 "Error detected in the card\n");
2184 }
2185
2186 if (sliport_status & SLIPORT_STATUS_ERR_MASK) {
2187 dev_err(&adapter->pdev->dev,
2188 "ERR: sliport status 0x%x\n", sliport_status);
2189 dev_err(&adapter->pdev->dev,
2190 "ERR: sliport error1 0x%x\n", sliport_err1);
2191 dev_err(&adapter->pdev->dev,
2192 "ERR: sliport error2 0x%x\n", sliport_err2);
Ajit Khaparded053de92010-09-03 06:23:30 +00002193 }
2194
Padmanabh Ratnakare1cfb672011-11-03 01:50:08 +00002195 if (ue_lo) {
2196 for (i = 0; ue_lo; ue_lo >>= 1, i++) {
2197 if (ue_lo & 1)
Ajit Khaparde7c185272010-07-29 06:16:33 +00002198 dev_err(&adapter->pdev->dev,
2199 "UE: %s bit set\n", ue_status_low_desc[i]);
2200 }
2201 }
Padmanabh Ratnakarf67ef7b2012-07-12 03:57:09 +00002202
Padmanabh Ratnakare1cfb672011-11-03 01:50:08 +00002203 if (ue_hi) {
2204 for (i = 0; ue_hi; ue_hi >>= 1, i++) {
2205 if (ue_hi & 1)
Ajit Khaparde7c185272010-07-29 06:16:33 +00002206 dev_err(&adapter->pdev->dev,
2207 "UE: %s bit set\n", ue_status_hi_desc[i]);
2208 }
2209 }
2210
2211}
2212
Sathya Perla8d56ff12009-11-22 22:02:26 +00002213static void be_msix_disable(struct be_adapter *adapter)
2214{
Sathya Perlaac6a0c42011-03-21 20:49:25 +00002215 if (msix_enabled(adapter)) {
Sathya Perla8d56ff12009-11-22 22:02:26 +00002216 pci_disable_msix(adapter->pdev);
Sathya Perlaac6a0c42011-03-21 20:49:25 +00002217 adapter->num_msix_vec = 0;
Sathya Perla3abcded2010-10-03 22:12:27 -07002218 }
2219}
2220
Sathya Perla10ef9ab2012-02-09 18:05:27 +00002221static uint be_num_rss_want(struct be_adapter *adapter)
2222{
Yuval Mintz30e80b52012-07-01 03:19:00 +00002223 u32 num = 0;
Padmanabh Ratnakarabb93952012-10-20 06:01:41 +00002224
Sathya Perla10ef9ab2012-02-09 18:05:27 +00002225 if ((adapter->function_caps & BE_FUNCTION_CAPS_RSS) &&
Padmanabh Ratnakarabb93952012-10-20 06:01:41 +00002226 (lancer_chip(adapter) ||
2227 (!sriov_want(adapter) && be_physfn(adapter)))) {
2228 num = adapter->max_rss_queues;
Yuval Mintz30e80b52012-07-01 03:19:00 +00002229 num = min_t(u32, num, (u32)netif_get_num_default_rss_queues());
2230 }
2231 return num;
Sathya Perla10ef9ab2012-02-09 18:05:27 +00002232}
2233
/* Try to enable MSI-X. The vector budget is the desired RSS ring count
 * (plus RoCE vectors when RoCE is supported), at least
 * BE_MIN_MSIX_VECTORS. pci_enable_msix() may return a positive count
 * of available vectors; in that case a second attempt is made with the
 * reduced count. On success the vectors are split between NIC and RoCE
 * (NIC keeps num_vec - num_roce_vec). On failure MSI-X stays disabled
 * and the driver falls back to INTx at irq-register time.
 */
static void be_msix_enable(struct be_adapter *adapter)
{
#define BE_MIN_MSIX_VECTORS 1
	int i, status, num_vec, num_roce_vec = 0;
	struct device *dev = &adapter->pdev->dev;

	/* If RSS queues are not used, need a vec for default RX Q */
	num_vec = min(be_num_rss_want(adapter), num_online_cpus());
	if (be_roce_supported(adapter)) {
		num_roce_vec = min_t(u32, MAX_ROCE_MSIX_VECTORS,
					(num_online_cpus() + 1));
		num_roce_vec = min(num_roce_vec, MAX_ROCE_EQS);
		num_vec += num_roce_vec;
		num_vec = min(num_vec, MAX_MSIX_VECTORS);
	}
	num_vec = max(num_vec, BE_MIN_MSIX_VECTORS);

	for (i = 0; i < num_vec; i++)
		adapter->msix_entries[i].entry = i;

	status = pci_enable_msix(adapter->pdev, adapter->msix_entries, num_vec);
	if (status == 0) {
		goto done;
	} else if (status >= BE_MIN_MSIX_VECTORS) {
		/* positive status = number of vectors actually available;
		 * retry with that many
		 */
		num_vec = status;
		if (pci_enable_msix(adapter->pdev, adapter->msix_entries,
				num_vec) == 0)
			goto done;
	}

	dev_warn(dev, "MSIx enable failed\n");
	return;
done:
	/* Split the granted vectors between NIC and RoCE */
	if (be_roce_supported(adapter)) {
		if (num_vec > num_roce_vec) {
			adapter->num_msix_vec = num_vec - num_roce_vec;
			adapter->num_msix_roce_vec =
				num_vec - adapter->num_msix_vec;
		} else {
			adapter->num_msix_vec = num_vec;
			adapter->num_msix_roce_vec = 0;
		}
	} else
		adapter->num_msix_vec = num_vec;
	dev_info(dev, "enabled %d MSI-x vector(s)\n", adapter->num_msix_vec);
	return;
}
2281
Sathya Perlafe6d2a32010-11-21 23:25:50 +00002282static inline int be_msix_vec_get(struct be_adapter *adapter,
Sathya Perla10ef9ab2012-02-09 18:05:27 +00002283 struct be_eq_obj *eqo)
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002284{
Sathya Perla10ef9ab2012-02-09 18:05:27 +00002285 return adapter->msix_entries[eqo->idx].vector;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002286}
2287
/* Request one MSI-X irq per event queue, naming each "<netdev>-q<i>".
 * On any failure the irqs already requested are freed in reverse order
 * and MSI-X is disabled so the caller can fall back to INTx.
 * Returns 0 on success or the failing request_irq() status.
 */
static int be_msix_register(struct be_adapter *adapter)
{
	struct net_device *netdev = adapter->netdev;
	struct be_eq_obj *eqo;
	int status, i, vec;

	for_all_evt_queues(adapter, eqo, i) {
		sprintf(eqo->desc, "%s-q%d", netdev->name, i);
		vec = be_msix_vec_get(adapter, eqo);
		status = request_irq(vec, be_msix, 0, eqo->desc, eqo);
		if (status)
			goto err_msix;
	}

	return 0;
err_msix:
	/* unwind: free the irqs requested so far, newest first */
	for (i--, eqo = &adapter->eq_obj[i]; i >= 0; i--, eqo--)
		free_irq(be_msix_vec_get(adapter, eqo), eqo);
	dev_warn(&adapter->pdev->dev, "MSIX Request IRQ failed - err %d\n",
		status);
	be_msix_disable(adapter);
	return status;
}
2311
/* Register interrupt handler(s): prefer MSI-X; on MSI-X failure fall
 * back to a shared INTx line — except for VFs, where INTx is not
 * supported and the MSI-X error is returned as-is.
 * Sets adapter->isr_registered on success. Returns 0 or an errno.
 */
static int be_irq_register(struct be_adapter *adapter)
{
	struct net_device *netdev = adapter->netdev;
	int status;

	if (msix_enabled(adapter)) {
		status = be_msix_register(adapter);
		if (status == 0)
			goto done;
		/* INTx is not supported for VF */
		if (!be_physfn(adapter))
			return status;
	}

	/* INTx */
	netdev->irq = adapter->pdev->irq;
	status = request_irq(netdev->irq, be_intx, IRQF_SHARED, netdev->name,
			adapter);
	if (status) {
		dev_err(&adapter->pdev->dev,
			"INTx request IRQ failed - err %d\n", status);
		return status;
	}
done:
	adapter->isr_registered = true;
	return 0;
}
2339
2340static void be_irq_unregister(struct be_adapter *adapter)
2341{
2342 struct net_device *netdev = adapter->netdev;
Sathya Perla10ef9ab2012-02-09 18:05:27 +00002343 struct be_eq_obj *eqo;
Sathya Perla3abcded2010-10-03 22:12:27 -07002344 int i;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002345
2346 if (!adapter->isr_registered)
2347 return;
2348
2349 /* INTx */
Sathya Perlaac6a0c42011-03-21 20:49:25 +00002350 if (!msix_enabled(adapter)) {
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002351 free_irq(netdev->irq, adapter);
2352 goto done;
2353 }
2354
2355 /* MSIx */
Sathya Perla10ef9ab2012-02-09 18:05:27 +00002356 for_all_evt_queues(adapter, eqo, i)
2357 free_irq(be_msix_vec_get(adapter, eqo), eqo);
Sathya Perla3abcded2010-10-03 22:12:27 -07002358
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002359done:
2360 adapter->isr_registered = false;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002361}
2362
/* Destroy all RX queues. After the FW destroy command invalidates a
 * queue, a 1ms grace delay lets in-flight DMA finish and the flush
 * completion arrive before the CQ is drained and memory is freed.
 */
static void be_rx_qs_destroy(struct be_adapter *adapter)
{
	struct be_queue_info *q;
	struct be_rx_obj *rxo;
	int i;

	for_all_rx_queues(adapter, rxo, i) {
		q = &rxo->q;
		if (q->created) {
			be_cmd_rxq_destroy(adapter, q);
			/* After the rxq is invalidated, wait for a grace time
			 * of 1ms for all dma to end and the flush compl to
			 * arrive
			 */
			mdelay(1);
			be_rx_cq_clean(rxo);
		}
		be_queue_free(adapter, q);
	}
}
2383
/* ndo_stop: quiesce the interface in dependency order — notify RoCE,
 * stop async MCC, mask the global interrupt (BE chips only; on Lancer
 * be_intr_set is presumably a no-op — confirm), disable NAPI and
 * synchronize each EQ's irq, unregister handlers, drain pending TX
 * completions so all skbs are freed, and finally destroy the RX queues.
 * Always returns 0.
 */
static int be_close(struct net_device *netdev)
{
	struct be_adapter *adapter = netdev_priv(netdev);
	struct be_eq_obj *eqo;
	int i;

	be_roce_dev_close(adapter);

	be_async_mcc_disable(adapter);

	if (!lancer_chip(adapter))
		be_intr_set(adapter, false);

	for_all_evt_queues(adapter, eqo, i) {
		napi_disable(&eqo->napi);
		/* ensure no handler is still running for this EQ */
		if (msix_enabled(adapter))
			synchronize_irq(be_msix_vec_get(adapter, eqo));
		else
			synchronize_irq(netdev->irq);
		be_eq_clean(eqo);
	}

	be_irq_unregister(adapter);

	/* Wait for all pending tx completions to arrive so that
	 * all tx skbs are freed.
	 */
	be_tx_compl_clean(adapter);

	be_rx_qs_destroy(adapter);
	return 0;
}
2416
/* Allocate and create the RX (fragment) queues and program the RSS
 * indirection table. The default (non-RSS) RXQ must be created first —
 * a FW requirement — followed by the RSS rings. The 128-entry RSS table
 * is filled round-robin with the RSS ring ids. Finally every ring is
 * posted with an initial batch of RX fragments.
 * Returns 0 on success or the first failing status.
 */
static int be_rx_qs_create(struct be_adapter *adapter)
{
	struct be_rx_obj *rxo;
	int rc, i, j;
	u8 rsstable[128];

	for_all_rx_queues(adapter, rxo, i) {
		rc = be_queue_alloc(adapter, &rxo->q, RX_Q_LEN,
				sizeof(struct be_eth_rx_d));
		if (rc)
			return rc;
	}

	/* The FW would like the default RXQ to be created first */
	rxo = default_rxo(adapter);
	rc = be_cmd_rxq_create(adapter, &rxo->q, rxo->cq.id, rx_frag_size,
			adapter->if_handle, false, &rxo->rss_id);
	if (rc)
		return rc;

	for_all_rss_queues(adapter, rxo, i) {
		rc = be_cmd_rxq_create(adapter, &rxo->q, rxo->cq.id,
				rx_frag_size, adapter->if_handle,
				true, &rxo->rss_id);
		if (rc)
			return rc;
	}

	if (be_multi_rxq(adapter)) {
		/* stripe the RSS ring ids across the 128-slot table */
		for (j = 0; j < 128; j += adapter->num_rx_qs - 1) {
			for_all_rss_queues(adapter, rxo, i) {
				if ((j + i) >= 128)
					break;
				rsstable[j + i] = rxo->rss_id;
			}
		}
		rc = be_cmd_rss_config(adapter, rsstable, 128);
		if (rc)
			return rc;
	}

	/* First time posting */
	for_all_rx_queues(adapter, rxo, i)
		be_post_rx_frags(rxo, GFP_KERNEL);
	return 0;
}
2463
/* ndo_open: bring the interface up — create RX queues, register irqs,
 * unmask the global interrupt (BE chips), arm all RX/TX CQs, enable
 * async MCC, enable NAPI and arm every EQ, refresh link state and
 * notify the RoCE driver. On RX queue creation failure the partially
 * opened device is torn down via be_close() and -EIO is returned.
 */
static int be_open(struct net_device *netdev)
{
	struct be_adapter *adapter = netdev_priv(netdev);
	struct be_eq_obj *eqo;
	struct be_rx_obj *rxo;
	struct be_tx_obj *txo;
	u8 link_status;
	int status, i;

	status = be_rx_qs_create(adapter);
	if (status)
		goto err;

	be_irq_register(adapter);

	if (!lancer_chip(adapter))
		be_intr_set(adapter, true);

	for_all_rx_queues(adapter, rxo, i)
		be_cq_notify(adapter, rxo->cq.id, true, 0);

	for_all_tx_queues(adapter, txo, i)
		be_cq_notify(adapter, txo->cq.id, true, 0);

	be_async_mcc_enable(adapter);

	for_all_evt_queues(adapter, eqo, i) {
		napi_enable(&eqo->napi);
		be_eq_notify(adapter, eqo->q.id, true, false, 0);
	}

	/* query link and report it to the stack; errors are ignored
	 * here since link events will follow asynchronously
	 */
	status = be_cmd_link_status_query(adapter, NULL, &link_status, 0);
	if (!status)
		be_link_status_update(adapter, link_status);

	be_roce_dev_open(adapter);
	return 0;
err:
	be_close(adapter->netdev);
	return -EIO;
}
2505
Ajit Khaparde71d8d1b2009-12-03 06:16:59 +00002506static int be_setup_wol(struct be_adapter *adapter, bool enable)
2507{
2508 struct be_dma_mem cmd;
2509 int status = 0;
2510 u8 mac[ETH_ALEN];
2511
2512 memset(mac, 0, ETH_ALEN);
2513
2514 cmd.size = sizeof(struct be_cmd_req_acpi_wol_magic_config);
Ivan Vecera2b7bceb2011-02-02 08:05:12 +00002515 cmd.va = dma_alloc_coherent(&adapter->pdev->dev, cmd.size, &cmd.dma,
2516 GFP_KERNEL);
Ajit Khaparde71d8d1b2009-12-03 06:16:59 +00002517 if (cmd.va == NULL)
2518 return -1;
2519 memset(cmd.va, 0, cmd.size);
2520
2521 if (enable) {
2522 status = pci_write_config_dword(adapter->pdev,
2523 PCICFG_PM_CONTROL_OFFSET, PCICFG_PM_CONTROL_MASK);
2524 if (status) {
2525 dev_err(&adapter->pdev->dev,
Frans Pop2381a552010-03-24 07:57:36 +00002526 "Could not enable Wake-on-lan\n");
Ivan Vecera2b7bceb2011-02-02 08:05:12 +00002527 dma_free_coherent(&adapter->pdev->dev, cmd.size, cmd.va,
2528 cmd.dma);
Ajit Khaparde71d8d1b2009-12-03 06:16:59 +00002529 return status;
2530 }
2531 status = be_cmd_enable_magic_wol(adapter,
2532 adapter->netdev->dev_addr, &cmd);
2533 pci_enable_wake(adapter->pdev, PCI_D3hot, 1);
2534 pci_enable_wake(adapter->pdev, PCI_D3cold, 1);
2535 } else {
2536 status = be_cmd_enable_magic_wol(adapter, mac, &cmd);
2537 pci_enable_wake(adapter->pdev, PCI_D3hot, 0);
2538 pci_enable_wake(adapter->pdev, PCI_D3cold, 0);
2539 }
2540
Ivan Vecera2b7bceb2011-02-02 08:05:12 +00002541 dma_free_coherent(&adapter->pdev->dev, cmd.size, cmd.va, cmd.dma);
Ajit Khaparde71d8d1b2009-12-03 06:16:59 +00002542 return status;
2543}
2544
/*
 * Generate a seed MAC address from the PF MAC Address using jhash.
 * MAC Address for VFs are assigned incrementally starting from the seed.
 * These addresses are programmed in the ASIC by the PF and the VF driver
 * queries for the MAC address during its probe.
 *
 * Returns the status of the last per-VF programming attempt; a failure
 * for one VF is logged but does not stop the remaining VFs from being
 * configured.
 */
static inline int be_vf_eth_addr_config(struct be_adapter *adapter)
{
	u32 vf;
	int status = 0;
	u8 mac[ETH_ALEN];
	struct be_vf_cfg *vf_cfg;

	be_vf_eth_addr_generate(adapter, mac);

	for_all_vfs(adapter, vf_cfg, vf) {
		/* Lancer programs via a MAC list; BE adds a pmac entry */
		if (lancer_chip(adapter)) {
			status = be_cmd_set_mac_list(adapter, mac, 1, vf + 1);
		} else {
			status = be_cmd_pmac_add(adapter, mac,
					vf_cfg->if_handle,
					&vf_cfg->pmac_id, vf + 1);
		}

		if (status)
			dev_err(&adapter->pdev->dev,
			"Mac address assignment failed for VF %d\n", vf);
		else
			memcpy(vf_cfg->mac_addr, mac, ETH_ALEN);

		/* next VF gets the next sequential address */
		mac[5] += 1;
	}
	return status;
}
2579
/* Tear down SR-IOV state: remove each VF's MAC programming and destroy
 * its interface, then disable SR-IOV. If any VF is still assigned to a
 * VM, the per-VF/FW teardown and pci_disable_sriov() are skipped (only
 * a warning is logged) but the host-side vf_cfg memory is still freed.
 */
static void be_vf_clear(struct be_adapter *adapter)
{
	struct be_vf_cfg *vf_cfg;
	u32 vf;

	if (be_find_vfs(adapter, ASSIGNED)) {
		dev_warn(&adapter->pdev->dev, "VFs are assigned to VMs\n");
		goto done;
	}

	for_all_vfs(adapter, vf_cfg, vf) {
		if (lancer_chip(adapter))
			be_cmd_set_mac_list(adapter, NULL, 0, vf + 1);
		else
			be_cmd_pmac_del(adapter, vf_cfg->if_handle,
					vf_cfg->pmac_id, vf + 1);

		be_cmd_if_destroy(adapter, vf_cfg->if_handle, vf + 1);
	}
	pci_disable_sriov(adapter->pdev);
done:
	kfree(adapter->vf_cfg);
	adapter->num_vfs = 0;
}
2604
/* Full teardown of adapter resources in reverse order of setup:
 * stop the worker, clear SR-IOV, remove extra unicast MACs (pmac_id[0]
 * is the primary MAC, hence deletion starts at index 1), destroy the
 * interface, then the MCC/RX-CQ/TX/EQ queues, free the pmac_id table
 * and finally disable MSI-X. Always returns 0.
 */
static int be_clear(struct be_adapter *adapter)
{
	int i = 1;

	if (adapter->flags & BE_FLAGS_WORKER_SCHEDULED) {
		cancel_delayed_work_sync(&adapter->work);
		adapter->flags &= ~BE_FLAGS_WORKER_SCHEDULED;
	}

	if (sriov_enabled(adapter))
		be_vf_clear(adapter);

	for (; adapter->uc_macs > 0; adapter->uc_macs--, i++)
		be_cmd_pmac_del(adapter, adapter->if_handle,
			adapter->pmac_id[i], 0);

	be_cmd_if_destroy(adapter, adapter->if_handle, 0);

	be_mcc_queues_destroy(adapter);
	be_rx_cqs_destroy(adapter);
	be_tx_queues_destroy(adapter);
	be_evt_queues_destroy(adapter);

	kfree(adapter->pmac_id);
	adapter->pmac_id = NULL;

	be_msix_disable(adapter);
	return 0;
}
2634
Padmanabh Ratnakarabb93952012-10-20 06:01:41 +00002635static void be_get_vf_if_cap_flags(struct be_adapter *adapter,
2636 u32 *cap_flags, u8 domain)
2637{
2638 bool profile_present = false;
2639 int status;
2640
2641 if (lancer_chip(adapter)) {
2642 status = be_cmd_get_profile_config(adapter, cap_flags, domain);
2643 if (!status)
2644 profile_present = true;
2645 }
2646
2647 if (!profile_present)
2648 *cap_flags = BE_IF_FLAGS_UNTAGGED | BE_IF_FLAGS_BROADCAST |
2649 BE_IF_FLAGS_MULTICAST;
2650}
2651
Sathya Perla39f1d942012-05-08 19:41:24 +00002652static int be_vf_setup_init(struct be_adapter *adapter)
Sathya Perla30128032011-11-10 19:17:57 +00002653{
Sathya Perla11ac75e2011-12-13 00:58:50 +00002654 struct be_vf_cfg *vf_cfg;
Sathya Perla30128032011-11-10 19:17:57 +00002655 int vf;
2656
Sathya Perla39f1d942012-05-08 19:41:24 +00002657 adapter->vf_cfg = kcalloc(adapter->num_vfs, sizeof(*vf_cfg),
2658 GFP_KERNEL);
2659 if (!adapter->vf_cfg)
2660 return -ENOMEM;
2661
Sathya Perla11ac75e2011-12-13 00:58:50 +00002662 for_all_vfs(adapter, vf_cfg, vf) {
2663 vf_cfg->if_handle = -1;
2664 vf_cfg->pmac_id = -1;
Sathya Perla30128032011-11-10 19:17:57 +00002665 }
Sathya Perla39f1d942012-05-08 19:41:24 +00002666 return 0;
Sathya Perla30128032011-11-10 19:17:57 +00002667}
2668
Sathya Perlaf9449ab2011-10-24 02:45:01 +00002669static int be_vf_setup(struct be_adapter *adapter)
2670{
Sathya Perla11ac75e2011-12-13 00:58:50 +00002671 struct be_vf_cfg *vf_cfg;
Sathya Perla39f1d942012-05-08 19:41:24 +00002672 struct device *dev = &adapter->pdev->dev;
Sathya Perlaf9449ab2011-10-24 02:45:01 +00002673 u32 cap_flags, en_flags, vf;
Ajit Khapardef1f3ee12012-03-18 06:23:41 +00002674 u16 def_vlan, lnk_speed;
Sathya Perla39f1d942012-05-08 19:41:24 +00002675 int status, enabled_vfs;
Sathya Perlaf9449ab2011-10-24 02:45:01 +00002676
Sathya Perla39f1d942012-05-08 19:41:24 +00002677 enabled_vfs = be_find_vfs(adapter, ENABLED);
2678 if (enabled_vfs) {
2679 dev_warn(dev, "%d VFs are already enabled\n", enabled_vfs);
2680 dev_warn(dev, "Ignoring num_vfs=%d setting\n", num_vfs);
2681 return 0;
2682 }
2683
2684 if (num_vfs > adapter->dev_num_vfs) {
2685 dev_warn(dev, "Device supports %d VFs and not %d\n",
2686 adapter->dev_num_vfs, num_vfs);
2687 num_vfs = adapter->dev_num_vfs;
2688 }
2689
2690 status = pci_enable_sriov(adapter->pdev, num_vfs);
2691 if (!status) {
2692 adapter->num_vfs = num_vfs;
2693 } else {
2694 /* Platform doesn't support SRIOV though device supports it */
2695 dev_warn(dev, "SRIOV enable failed\n");
2696 return 0;
2697 }
2698
2699 status = be_vf_setup_init(adapter);
2700 if (status)
2701 goto err;
Sathya Perla30128032011-11-10 19:17:57 +00002702
Sathya Perla11ac75e2011-12-13 00:58:50 +00002703 for_all_vfs(adapter, vf_cfg, vf) {
Padmanabh Ratnakarabb93952012-10-20 06:01:41 +00002704 be_get_vf_if_cap_flags(adapter, &cap_flags, vf + 1);
2705
2706 en_flags = cap_flags & (BE_IF_FLAGS_UNTAGGED |
2707 BE_IF_FLAGS_BROADCAST |
2708 BE_IF_FLAGS_MULTICAST);
2709
Padmanabh Ratnakar1578e772012-06-07 04:37:08 +00002710 status = be_cmd_if_create(adapter, cap_flags, en_flags,
2711 &vf_cfg->if_handle, vf + 1);
Sathya Perlaf9449ab2011-10-24 02:45:01 +00002712 if (status)
2713 goto err;
Sathya Perlaf9449ab2011-10-24 02:45:01 +00002714 }
2715
Sathya Perla39f1d942012-05-08 19:41:24 +00002716 if (!enabled_vfs) {
2717 status = be_vf_eth_addr_config(adapter);
2718 if (status)
2719 goto err;
2720 }
Sathya Perlaf9449ab2011-10-24 02:45:01 +00002721
Sathya Perla11ac75e2011-12-13 00:58:50 +00002722 for_all_vfs(adapter, vf_cfg, vf) {
Vasundhara Volam8a046d32012-08-28 20:37:42 +00002723 lnk_speed = 1000;
2724 status = be_cmd_set_qos(adapter, lnk_speed, vf + 1);
Sathya Perlaf9449ab2011-10-24 02:45:01 +00002725 if (status)
2726 goto err;
Sathya Perla11ac75e2011-12-13 00:58:50 +00002727 vf_cfg->tx_rate = lnk_speed * 10;
Ajit Khapardef1f3ee12012-03-18 06:23:41 +00002728
2729 status = be_cmd_get_hsw_config(adapter, &def_vlan,
2730 vf + 1, vf_cfg->if_handle);
2731 if (status)
2732 goto err;
2733 vf_cfg->def_vid = def_vlan;
Padmanabh Ratnakardcf7ebb2012-10-20 06:03:49 +00002734
2735 be_cmd_enable_vf(adapter, vf + 1);
Sathya Perlaf9449ab2011-10-24 02:45:01 +00002736 }
2737 return 0;
2738err:
2739 return status;
2740}
2741
Sathya Perla30128032011-11-10 19:17:57 +00002742static void be_setup_init(struct be_adapter *adapter)
2743{
2744 adapter->vlan_prio_bmap = 0xff;
Ajit Khaparde42f11cf2012-04-21 18:53:22 +00002745 adapter->phy.link_speed = -1;
Sathya Perla30128032011-11-10 19:17:57 +00002746 adapter->if_handle = -1;
2747 adapter->be3_native = false;
2748 adapter->promiscuous = false;
2749 adapter->eq_next_idx = 0;
Padmanabh Ratnakarf25b1192012-10-20 06:02:52 +00002750
2751 if (be_physfn(adapter))
2752 adapter->cmd_privileges = MAX_PRIVILEGES;
2753 else
2754 adapter->cmd_privileges = MIN_PRIVILEGES;
Sathya Perla30128032011-11-10 19:17:57 +00002755}
2756
Padmanabh Ratnakar1578e772012-06-07 04:37:08 +00002757static int be_get_mac_addr(struct be_adapter *adapter, u8 *mac, u32 if_handle,
2758 bool *active_mac, u32 *pmac_id)
Padmanabh Ratnakar590c3912011-11-25 05:47:26 +00002759{
Padmanabh Ratnakar1578e772012-06-07 04:37:08 +00002760 int status = 0;
Padmanabh Ratnakare5e1ee82012-02-03 09:50:17 +00002761
Padmanabh Ratnakar1578e772012-06-07 04:37:08 +00002762 if (!is_zero_ether_addr(adapter->netdev->perm_addr)) {
2763 memcpy(mac, adapter->netdev->dev_addr, ETH_ALEN);
2764 if (!lancer_chip(adapter) && !be_physfn(adapter))
2765 *active_mac = true;
2766 else
2767 *active_mac = false;
Padmanabh Ratnakare5e1ee82012-02-03 09:50:17 +00002768
Padmanabh Ratnakar1578e772012-06-07 04:37:08 +00002769 return status;
Padmanabh Ratnakare5e1ee82012-02-03 09:50:17 +00002770 }
Padmanabh Ratnakar1578e772012-06-07 04:37:08 +00002771
2772 if (lancer_chip(adapter)) {
2773 status = be_cmd_get_mac_from_list(adapter, mac,
2774 active_mac, pmac_id, 0);
2775 if (*active_mac) {
Sathya Perla5ee49792012-09-28 04:39:41 +00002776 status = be_cmd_mac_addr_query(adapter, mac, false,
2777 if_handle, *pmac_id);
Padmanabh Ratnakar1578e772012-06-07 04:37:08 +00002778 }
2779 } else if (be_physfn(adapter)) {
2780 /* For BE3, for PF get permanent MAC */
Sathya Perla5ee49792012-09-28 04:39:41 +00002781 status = be_cmd_mac_addr_query(adapter, mac, true, 0, 0);
Padmanabh Ratnakar1578e772012-06-07 04:37:08 +00002782 *active_mac = false;
2783 } else {
2784 /* For BE3, for VF get soft MAC assigned by PF*/
Sathya Perla5ee49792012-09-28 04:39:41 +00002785 status = be_cmd_mac_addr_query(adapter, mac, false,
Padmanabh Ratnakar1578e772012-06-07 04:37:08 +00002786 if_handle, 0);
2787 *active_mac = true;
2788 }
Padmanabh Ratnakar590c3912011-11-25 05:47:26 +00002789 return status;
2790}
2791
Padmanabh Ratnakarabb93952012-10-20 06:01:41 +00002792static void be_get_resources(struct be_adapter *adapter)
2793{
2794 int status;
2795 bool profile_present = false;
2796
2797 if (lancer_chip(adapter)) {
2798 status = be_cmd_get_func_config(adapter);
2799
2800 if (!status)
2801 profile_present = true;
2802 }
2803
2804 if (profile_present) {
2805 /* Sanity fixes for Lancer */
2806 adapter->max_pmac_cnt = min_t(u16, adapter->max_pmac_cnt,
2807 BE_UC_PMAC_COUNT);
2808 adapter->max_vlans = min_t(u16, adapter->max_vlans,
2809 BE_NUM_VLANS_SUPPORTED);
2810 adapter->max_mcast_mac = min_t(u16, adapter->max_mcast_mac,
2811 BE_MAX_MC);
2812 adapter->max_tx_queues = min_t(u16, adapter->max_tx_queues,
2813 MAX_TX_QS);
2814 adapter->max_rss_queues = min_t(u16, adapter->max_rss_queues,
2815 BE3_MAX_RSS_QS);
2816 adapter->max_event_queues = min_t(u16,
2817 adapter->max_event_queues,
2818 BE3_MAX_RSS_QS);
2819
2820 if (adapter->max_rss_queues &&
2821 adapter->max_rss_queues == adapter->max_rx_queues)
2822 adapter->max_rss_queues -= 1;
2823
2824 if (adapter->max_event_queues < adapter->max_rss_queues)
2825 adapter->max_rss_queues = adapter->max_event_queues;
2826
2827 } else {
2828 if (be_physfn(adapter))
2829 adapter->max_pmac_cnt = BE_UC_PMAC_COUNT;
2830 else
2831 adapter->max_pmac_cnt = BE_VF_UC_PMAC_COUNT;
2832
2833 if (adapter->function_mode & FLEX10_MODE)
2834 adapter->max_vlans = BE_NUM_VLANS_SUPPORTED/8;
2835 else
2836 adapter->max_vlans = BE_NUM_VLANS_SUPPORTED;
2837
2838 adapter->max_mcast_mac = BE_MAX_MC;
2839 adapter->max_tx_queues = MAX_TX_QS;
2840 adapter->max_rss_queues = (adapter->be3_native) ?
2841 BE3_MAX_RSS_QS : BE2_MAX_RSS_QS;
2842 adapter->max_event_queues = BE3_MAX_RSS_QS;
2843
2844 adapter->if_cap_flags = BE_IF_FLAGS_UNTAGGED |
2845 BE_IF_FLAGS_BROADCAST |
2846 BE_IF_FLAGS_MULTICAST |
2847 BE_IF_FLAGS_PASS_L3L4_ERRORS |
2848 BE_IF_FLAGS_MCAST_PROMISCUOUS |
2849 BE_IF_FLAGS_VLAN_PROMISCUOUS |
2850 BE_IF_FLAGS_PROMISCUOUS;
2851
2852 if (adapter->function_caps & BE_FUNCTION_CAPS_RSS)
2853 adapter->if_cap_flags |= BE_IF_FLAGS_RSS;
2854 }
2855}
2856
Sathya Perla39f1d942012-05-08 19:41:24 +00002857/* Routine to query per function resource limits */
2858static int be_get_config(struct be_adapter *adapter)
2859{
Padmanabh Ratnakarabb93952012-10-20 06:01:41 +00002860 int pos, status;
Sathya Perla39f1d942012-05-08 19:41:24 +00002861 u16 dev_num_vfs;
2862
Padmanabh Ratnakarabb93952012-10-20 06:01:41 +00002863 status = be_cmd_query_fw_cfg(adapter, &adapter->port_num,
2864 &adapter->function_mode,
2865 &adapter->function_caps);
2866 if (status)
2867 goto err;
2868
2869 be_get_resources(adapter);
2870
2871 /* primary mac needs 1 pmac entry */
2872 adapter->pmac_id = kcalloc(adapter->max_pmac_cnt + 1,
2873 sizeof(u32), GFP_KERNEL);
2874 if (!adapter->pmac_id) {
2875 status = -ENOMEM;
2876 goto err;
2877 }
2878
Sathya Perla39f1d942012-05-08 19:41:24 +00002879 pos = pci_find_ext_capability(adapter->pdev, PCI_EXT_CAP_ID_SRIOV);
2880 if (pos) {
2881 pci_read_config_word(adapter->pdev, pos + PCI_SRIOV_TOTAL_VF,
2882 &dev_num_vfs);
Vasundhara Volam7c5a5242012-08-28 20:37:41 +00002883 if (!lancer_chip(adapter))
2884 dev_num_vfs = min_t(u16, dev_num_vfs, MAX_VFS);
Sathya Perla39f1d942012-05-08 19:41:24 +00002885 adapter->dev_num_vfs = dev_num_vfs;
2886 }
Padmanabh Ratnakarabb93952012-10-20 06:01:41 +00002887err:
2888 return status;
Sathya Perla39f1d942012-05-08 19:41:24 +00002889}
2890
Sathya Perla5fb379e2009-06-18 00:02:59 +00002891static int be_setup(struct be_adapter *adapter)
2892{
Sathya Perla39f1d942012-05-08 19:41:24 +00002893 struct device *dev = &adapter->pdev->dev;
Padmanabh Ratnakarabb93952012-10-20 06:01:41 +00002894 u32 en_flags;
Sathya Perlaa54769f2011-10-24 02:45:00 +00002895 u32 tx_fc, rx_fc;
Sathya Perla10ef9ab2012-02-09 18:05:27 +00002896 int status;
Sarveshwar Bandiba343c72010-03-31 02:56:12 +00002897 u8 mac[ETH_ALEN];
Padmanabh Ratnakar1578e772012-06-07 04:37:08 +00002898 bool active_mac;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002899
Sathya Perla30128032011-11-10 19:17:57 +00002900 be_setup_init(adapter);
Sathya Perlaf9449ab2011-10-24 02:45:01 +00002901
Padmanabh Ratnakarabb93952012-10-20 06:01:41 +00002902 if (!lancer_chip(adapter))
2903 be_cmd_req_native_mode(adapter);
Sathya Perla39f1d942012-05-08 19:41:24 +00002904
Padmanabh Ratnakarabb93952012-10-20 06:01:41 +00002905 status = be_get_config(adapter);
2906 if (status)
2907 goto err;
Sathya Perla2dc1deb2011-07-19 19:52:33 +00002908
Sathya Perla10ef9ab2012-02-09 18:05:27 +00002909 be_msix_enable(adapter);
2910
2911 status = be_evt_queues_create(adapter);
2912 if (status)
Sathya Perlaa54769f2011-10-24 02:45:00 +00002913 goto err;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002914
Sathya Perla10ef9ab2012-02-09 18:05:27 +00002915 status = be_tx_cqs_create(adapter);
2916 if (status)
2917 goto err;
2918
2919 status = be_rx_cqs_create(adapter);
2920 if (status)
Sathya Perlaa54769f2011-10-24 02:45:00 +00002921 goto err;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002922
Sathya Perla5fb379e2009-06-18 00:02:59 +00002923 status = be_mcc_queues_create(adapter);
Sathya Perla10ef9ab2012-02-09 18:05:27 +00002924 if (status)
Sathya Perlaa54769f2011-10-24 02:45:00 +00002925 goto err;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002926
Padmanabh Ratnakarf25b1192012-10-20 06:02:52 +00002927 be_cmd_get_fn_privileges(adapter, &adapter->cmd_privileges, 0);
2928 /* In UMC mode FW does not return right privileges.
2929 * Override with correct privilege equivalent to PF.
2930 */
2931 if (be_is_mc(adapter))
2932 adapter->cmd_privileges = MAX_PRIVILEGES;
2933
Sathya Perlaf9449ab2011-10-24 02:45:01 +00002934 en_flags = BE_IF_FLAGS_UNTAGGED | BE_IF_FLAGS_BROADCAST |
2935 BE_IF_FLAGS_MULTICAST | BE_IF_FLAGS_PASS_L3L4_ERRORS;
Padmanabh Ratnakar5d5adb92011-11-16 02:03:32 +00002936
Padmanabh Ratnakarabb93952012-10-20 06:01:41 +00002937 if (adapter->function_caps & BE_FUNCTION_CAPS_RSS)
Sathya Perlaf9449ab2011-10-24 02:45:01 +00002938 en_flags |= BE_IF_FLAGS_RSS;
Padmanabh Ratnakar1578e772012-06-07 04:37:08 +00002939
Padmanabh Ratnakarabb93952012-10-20 06:01:41 +00002940 en_flags = en_flags & adapter->if_cap_flags;
Padmanabh Ratnakar0b13fb42012-07-18 02:51:58 +00002941
Padmanabh Ratnakarabb93952012-10-20 06:01:41 +00002942 status = be_cmd_if_create(adapter, adapter->if_cap_flags, en_flags,
Padmanabh Ratnakar1578e772012-06-07 04:37:08 +00002943 &adapter->if_handle, 0);
Sathya Perlaf9449ab2011-10-24 02:45:01 +00002944 if (status != 0)
2945 goto err;
2946
Padmanabh Ratnakar1578e772012-06-07 04:37:08 +00002947 memset(mac, 0, ETH_ALEN);
2948 active_mac = false;
2949 status = be_get_mac_addr(adapter, mac, adapter->if_handle,
2950 &active_mac, &adapter->pmac_id[0]);
2951 if (status != 0)
2952 goto err;
2953
2954 if (!active_mac) {
2955 status = be_cmd_pmac_add(adapter, mac, adapter->if_handle,
2956 &adapter->pmac_id[0], 0);
2957 if (status != 0)
2958 goto err;
2959 }
2960
2961 if (is_zero_ether_addr(adapter->netdev->dev_addr)) {
2962 memcpy(adapter->netdev->dev_addr, mac, ETH_ALEN);
2963 memcpy(adapter->netdev->perm_addr, mac, ETH_ALEN);
Sathya Perlaf9449ab2011-10-24 02:45:01 +00002964 }
Ajit Khaparde0dffc832009-11-29 17:57:46 +00002965
Sathya Perla10ef9ab2012-02-09 18:05:27 +00002966 status = be_tx_qs_create(adapter);
2967 if (status)
2968 goto err;
2969
Sathya Perla04b71172011-09-27 13:30:27 -04002970 be_cmd_get_fw_ver(adapter, adapter->fw_ver, NULL);
Somnath Kotur5a56eb12011-09-30 07:24:28 +00002971
Sathya Perla1d1e9a42012-06-05 19:37:17 +00002972 if (adapter->vlans_added)
Sathya Perla10329df2012-06-05 19:37:18 +00002973 be_vid_config(adapter);
Sathya Perlaa54769f2011-10-24 02:45:00 +00002974
2975 be_set_rx_mode(adapter->netdev);
2976
Ajit Khapardeddc3f5c2012-04-26 15:42:31 +00002977 be_cmd_get_flow_control(adapter, &tx_fc, &rx_fc);
Padmanabh Ratnakar590c3912011-11-25 05:47:26 +00002978
Ajit Khapardeddc3f5c2012-04-26 15:42:31 +00002979 if (rx_fc != adapter->rx_fc || tx_fc != adapter->tx_fc)
2980 be_cmd_set_flow_control(adapter, adapter->tx_fc,
Sathya Perlaa54769f2011-10-24 02:45:00 +00002981 adapter->rx_fc);
Sathya Perlaa54769f2011-10-24 02:45:00 +00002982
Sathya Perla39f1d942012-05-08 19:41:24 +00002983 if (be_physfn(adapter) && num_vfs) {
2984 if (adapter->dev_num_vfs)
2985 be_vf_setup(adapter);
2986 else
2987 dev_warn(dev, "device doesn't support SRIOV\n");
Sathya Perlaf9449ab2011-10-24 02:45:01 +00002988 }
2989
Padmanabh Ratnakarf25b1192012-10-20 06:02:52 +00002990 status = be_cmd_get_phy_info(adapter);
2991 if (!status && be_pause_supported(adapter))
Ajit Khaparde42f11cf2012-04-21 18:53:22 +00002992 adapter->phy.fc_autoneg = 1;
2993
Sathya Perla191eb752012-02-23 18:50:13 +00002994 schedule_delayed_work(&adapter->work, msecs_to_jiffies(1000));
2995 adapter->flags |= BE_FLAGS_WORKER_SCHEDULED;
Sathya Perlaf9449ab2011-10-24 02:45:01 +00002996 return 0;
Sathya Perlaa54769f2011-10-24 02:45:00 +00002997err:
2998 be_clear(adapter);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002999 return status;
3000}
3001
Ivan Vecera66268732011-12-08 01:31:21 +00003002#ifdef CONFIG_NET_POLL_CONTROLLER
3003static void be_netpoll(struct net_device *netdev)
3004{
3005 struct be_adapter *adapter = netdev_priv(netdev);
Sathya Perla10ef9ab2012-02-09 18:05:27 +00003006 struct be_eq_obj *eqo;
Ivan Vecera66268732011-12-08 01:31:21 +00003007 int i;
3008
Sathya Perla10ef9ab2012-02-09 18:05:27 +00003009 for_all_evt_queues(adapter, eqo, i)
3010 event_handle(eqo);
3011
3012 return;
Ivan Vecera66268732011-12-08 01:31:21 +00003013}
3014#endif
3015
Ajit Khaparde84517482009-09-04 03:12:16 +00003016#define FW_FILE_HDR_SIGN "ServerEngines Corp. "
Padmanabh Ratnakarc165541e2012-04-25 01:47:15 +00003017char flash_cookie[2][16] = {"*** SE FLAS", "H DIRECTORY *** "};
3018
Sarveshwar Bandifa9a6fe2009-11-20 14:23:47 -08003019static bool be_flash_redboot(struct be_adapter *adapter,
Ajit Khaparde3f0d4562010-02-09 01:30:35 +00003020 const u8 *p, u32 img_start, int image_size,
3021 int hdr_size)
Sarveshwar Bandifa9a6fe2009-11-20 14:23:47 -08003022{
3023 u32 crc_offset;
3024 u8 flashed_crc[4];
3025 int status;
Ajit Khaparde3f0d4562010-02-09 01:30:35 +00003026
3027 crc_offset = hdr_size + img_start + image_size - 4;
3028
Sarveshwar Bandifa9a6fe2009-11-20 14:23:47 -08003029 p += crc_offset;
Ajit Khaparde3f0d4562010-02-09 01:30:35 +00003030
3031 status = be_cmd_get_flash_crc(adapter, flashed_crc,
Ajit Khapardef510fc62010-03-31 01:47:45 +00003032 (image_size - 4));
Sarveshwar Bandifa9a6fe2009-11-20 14:23:47 -08003033 if (status) {
3034 dev_err(&adapter->pdev->dev,
3035 "could not get crc from flash, not flashing redboot\n");
3036 return false;
3037 }
3038
3039 /*update redboot only if crc does not match*/
3040 if (!memcmp(flashed_crc, p, 4))
3041 return false;
3042 else
3043 return true;
Sarveshwar Bandifa9a6fe2009-11-20 14:23:47 -08003044}
3045
Sathya Perla306f1342011-08-02 19:57:45 +00003046static bool phy_flashing_required(struct be_adapter *adapter)
3047{
Ajit Khaparde42f11cf2012-04-21 18:53:22 +00003048 return (adapter->phy.phy_type == TN_8022 &&
3049 adapter->phy.interface_type == PHY_TYPE_BASET_10GB);
Sathya Perla306f1342011-08-02 19:57:45 +00003050}
3051
Padmanabh Ratnakarc165541e2012-04-25 01:47:15 +00003052static bool is_comp_in_ufi(struct be_adapter *adapter,
3053 struct flash_section_info *fsec, int type)
3054{
3055 int i = 0, img_type = 0;
3056 struct flash_section_info_g2 *fsec_g2 = NULL;
3057
3058 if (adapter->generation != BE_GEN3)
3059 fsec_g2 = (struct flash_section_info_g2 *)fsec;
3060
3061 for (i = 0; i < MAX_FLASH_COMP; i++) {
3062 if (fsec_g2)
3063 img_type = le32_to_cpu(fsec_g2->fsec_entry[i].type);
3064 else
3065 img_type = le32_to_cpu(fsec->fsec_entry[i].type);
3066
3067 if (img_type == type)
3068 return true;
3069 }
3070 return false;
3071
3072}
3073
3074struct flash_section_info *get_fsec_info(struct be_adapter *adapter,
3075 int header_size,
3076 const struct firmware *fw)
3077{
3078 struct flash_section_info *fsec = NULL;
3079 const u8 *p = fw->data;
3080
3081 p += header_size;
3082 while (p < (fw->data + fw->size)) {
3083 fsec = (struct flash_section_info *)p;
3084 if (!memcmp(flash_cookie, fsec->cookie, sizeof(flash_cookie)))
3085 return fsec;
3086 p += 32;
3087 }
3088 return NULL;
3089}
3090
Padmanabh Ratnakar773a2d72012-10-20 06:04:16 +00003091static int be_flash(struct be_adapter *adapter, const u8 *img,
3092 struct be_dma_mem *flash_cmd, int optype, int img_size)
3093{
3094 u32 total_bytes = 0, flash_op, num_bytes = 0;
3095 int status = 0;
3096 struct be_cmd_write_flashrom *req = flash_cmd->va;
3097
3098 total_bytes = img_size;
3099 while (total_bytes) {
3100 num_bytes = min_t(u32, 32*1024, total_bytes);
3101
3102 total_bytes -= num_bytes;
3103
3104 if (!total_bytes) {
3105 if (optype == OPTYPE_PHY_FW)
3106 flash_op = FLASHROM_OPER_PHY_FLASH;
3107 else
3108 flash_op = FLASHROM_OPER_FLASH;
3109 } else {
3110 if (optype == OPTYPE_PHY_FW)
3111 flash_op = FLASHROM_OPER_PHY_SAVE;
3112 else
3113 flash_op = FLASHROM_OPER_SAVE;
3114 }
3115
3116 memcpy(req->params.data_buf, img, num_bytes);
3117 img += num_bytes;
3118 status = be_cmd_write_flashrom(adapter, flash_cmd, optype,
3119 flash_op, num_bytes);
3120 if (status) {
3121 if (status == ILLEGAL_IOCTL_REQ &&
3122 optype == OPTYPE_PHY_FW)
3123 break;
3124 dev_err(&adapter->pdev->dev,
3125 "cmd to write to flash rom failed.\n");
3126 return status;
3127 }
3128 }
3129 return 0;
3130}
3131
Ajit Khaparde3f0d4562010-02-09 01:30:35 +00003132static int be_flash_data(struct be_adapter *adapter,
Padmanabh Ratnakarc165541e2012-04-25 01:47:15 +00003133 const struct firmware *fw,
3134 struct be_dma_mem *flash_cmd,
3135 int num_of_images)
Ajit Khaparde3f0d4562010-02-09 01:30:35 +00003136
Ajit Khaparde84517482009-09-04 03:12:16 +00003137{
Ajit Khaparde3f0d4562010-02-09 01:30:35 +00003138 int status = 0, i, filehdr_size = 0;
Padmanabh Ratnakarc165541e2012-04-25 01:47:15 +00003139 int img_hdrs_size = (num_of_images * sizeof(struct image_hdr));
Ajit Khaparde84517482009-09-04 03:12:16 +00003140 const u8 *p = fw->data;
Joe Perches215faf92010-12-21 02:16:10 -08003141 const struct flash_comp *pflashcomp;
Padmanabh Ratnakar773a2d72012-10-20 06:04:16 +00003142 int num_comp, redboot;
Padmanabh Ratnakarc165541e2012-04-25 01:47:15 +00003143 struct flash_section_info *fsec = NULL;
Ajit Khaparde84517482009-09-04 03:12:16 +00003144
Padmanabh Ratnakarc165541e2012-04-25 01:47:15 +00003145 struct flash_comp gen3_flash_types[] = {
3146 { FLASH_iSCSI_PRIMARY_IMAGE_START_g3, OPTYPE_ISCSI_ACTIVE,
3147 FLASH_IMAGE_MAX_SIZE_g3, IMAGE_FIRMWARE_iSCSI},
3148 { FLASH_REDBOOT_START_g3, OPTYPE_REDBOOT,
3149 FLASH_REDBOOT_IMAGE_MAX_SIZE_g3, IMAGE_BOOT_CODE},
3150 { FLASH_iSCSI_BIOS_START_g3, OPTYPE_BIOS,
3151 FLASH_BIOS_IMAGE_MAX_SIZE_g3, IMAGE_OPTION_ROM_ISCSI},
3152 { FLASH_PXE_BIOS_START_g3, OPTYPE_PXE_BIOS,
3153 FLASH_BIOS_IMAGE_MAX_SIZE_g3, IMAGE_OPTION_ROM_PXE},
3154 { FLASH_FCoE_BIOS_START_g3, OPTYPE_FCOE_BIOS,
3155 FLASH_BIOS_IMAGE_MAX_SIZE_g3, IMAGE_OPTION_ROM_FCoE},
3156 { FLASH_iSCSI_BACKUP_IMAGE_START_g3, OPTYPE_ISCSI_BACKUP,
3157 FLASH_IMAGE_MAX_SIZE_g3, IMAGE_FIRMWARE_BACKUP_iSCSI},
3158 { FLASH_FCoE_PRIMARY_IMAGE_START_g3, OPTYPE_FCOE_FW_ACTIVE,
3159 FLASH_IMAGE_MAX_SIZE_g3, IMAGE_FIRMWARE_FCoE},
3160 { FLASH_FCoE_BACKUP_IMAGE_START_g3, OPTYPE_FCOE_FW_BACKUP,
3161 FLASH_IMAGE_MAX_SIZE_g3, IMAGE_FIRMWARE_BACKUP_FCoE},
3162 { FLASH_NCSI_START_g3, OPTYPE_NCSI_FW,
3163 FLASH_NCSI_IMAGE_MAX_SIZE_g3, IMAGE_NCSI},
3164 { FLASH_PHY_FW_START_g3, OPTYPE_PHY_FW,
3165 FLASH_PHY_FW_IMAGE_MAX_SIZE_g3, IMAGE_FIRMWARE_PHY}
Ajit Khaparde3f0d4562010-02-09 01:30:35 +00003166 };
Padmanabh Ratnakarc165541e2012-04-25 01:47:15 +00003167
3168 struct flash_comp gen2_flash_types[] = {
3169 { FLASH_iSCSI_PRIMARY_IMAGE_START_g2, OPTYPE_ISCSI_ACTIVE,
3170 FLASH_IMAGE_MAX_SIZE_g2, IMAGE_FIRMWARE_iSCSI},
3171 { FLASH_REDBOOT_START_g2, OPTYPE_REDBOOT,
3172 FLASH_REDBOOT_IMAGE_MAX_SIZE_g2, IMAGE_BOOT_CODE},
3173 { FLASH_iSCSI_BIOS_START_g2, OPTYPE_BIOS,
3174 FLASH_BIOS_IMAGE_MAX_SIZE_g2, IMAGE_OPTION_ROM_ISCSI},
3175 { FLASH_PXE_BIOS_START_g2, OPTYPE_PXE_BIOS,
3176 FLASH_BIOS_IMAGE_MAX_SIZE_g2, IMAGE_OPTION_ROM_PXE},
3177 { FLASH_FCoE_BIOS_START_g2, OPTYPE_FCOE_BIOS,
3178 FLASH_BIOS_IMAGE_MAX_SIZE_g2, IMAGE_OPTION_ROM_FCoE},
3179 { FLASH_iSCSI_BACKUP_IMAGE_START_g2, OPTYPE_ISCSI_BACKUP,
3180 FLASH_IMAGE_MAX_SIZE_g2, IMAGE_FIRMWARE_BACKUP_iSCSI},
3181 { FLASH_FCoE_PRIMARY_IMAGE_START_g2, OPTYPE_FCOE_FW_ACTIVE,
3182 FLASH_IMAGE_MAX_SIZE_g2, IMAGE_FIRMWARE_FCoE},
3183 { FLASH_FCoE_BACKUP_IMAGE_START_g2, OPTYPE_FCOE_FW_BACKUP,
3184 FLASH_IMAGE_MAX_SIZE_g2, IMAGE_FIRMWARE_BACKUP_FCoE}
Ajit Khaparde3f0d4562010-02-09 01:30:35 +00003185 };
3186
3187 if (adapter->generation == BE_GEN3) {
3188 pflashcomp = gen3_flash_types;
3189 filehdr_size = sizeof(struct flash_file_hdr_g3);
Joe Perches215faf92010-12-21 02:16:10 -08003190 num_comp = ARRAY_SIZE(gen3_flash_types);
Ajit Khaparde3f0d4562010-02-09 01:30:35 +00003191 } else {
3192 pflashcomp = gen2_flash_types;
3193 filehdr_size = sizeof(struct flash_file_hdr_g2);
Joe Perches215faf92010-12-21 02:16:10 -08003194 num_comp = ARRAY_SIZE(gen2_flash_types);
Ajit Khaparde84517482009-09-04 03:12:16 +00003195 }
Padmanabh Ratnakarc165541e2012-04-25 01:47:15 +00003196 /* Get flash section info*/
3197 fsec = get_fsec_info(adapter, filehdr_size + img_hdrs_size, fw);
3198 if (!fsec) {
3199 dev_err(&adapter->pdev->dev,
3200 "Invalid Cookie. UFI corrupted ?\n");
3201 return -1;
3202 }
Sarveshwar Bandi9fe96932010-03-02 22:37:28 +00003203 for (i = 0; i < num_comp; i++) {
Padmanabh Ratnakarc165541e2012-04-25 01:47:15 +00003204 if (!is_comp_in_ufi(adapter, fsec, pflashcomp[i].img_type))
Sarveshwar Bandi9fe96932010-03-02 22:37:28 +00003205 continue;
Padmanabh Ratnakarc165541e2012-04-25 01:47:15 +00003206
3207 if ((pflashcomp[i].optype == OPTYPE_NCSI_FW) &&
3208 memcmp(adapter->fw_ver, "3.102.148.0", 11) < 0)
3209 continue;
3210
Padmanabh Ratnakar773a2d72012-10-20 06:04:16 +00003211 if (pflashcomp[i].optype == OPTYPE_PHY_FW &&
3212 !phy_flashing_required(adapter))
3213 continue;
3214
3215 if (pflashcomp[i].optype == OPTYPE_REDBOOT) {
3216 redboot = be_flash_redboot(adapter, fw->data,
3217 pflashcomp[i].offset, pflashcomp[i].size,
3218 filehdr_size + img_hdrs_size);
3219 if (!redboot)
Sathya Perla306f1342011-08-02 19:57:45 +00003220 continue;
3221 }
Padmanabh Ratnakarc165541e2012-04-25 01:47:15 +00003222
Ajit Khaparde3f0d4562010-02-09 01:30:35 +00003223 p = fw->data;
Padmanabh Ratnakarc165541e2012-04-25 01:47:15 +00003224 p += filehdr_size + pflashcomp[i].offset + img_hdrs_size;
Sathya Perla306f1342011-08-02 19:57:45 +00003225 if (p + pflashcomp[i].size > fw->data + fw->size)
3226 return -1;
Padmanabh Ratnakar773a2d72012-10-20 06:04:16 +00003227
3228 status = be_flash(adapter, p, flash_cmd, pflashcomp[i].optype,
3229 pflashcomp[i].size);
3230 if (status) {
3231 dev_err(&adapter->pdev->dev,
3232 "Flashing section type %d failed.\n",
3233 pflashcomp[i].img_type);
3234 return status;
Ajit Khaparde84517482009-09-04 03:12:16 +00003235 }
Ajit Khaparde84517482009-09-04 03:12:16 +00003236 }
Ajit Khaparde84517482009-09-04 03:12:16 +00003237 return 0;
3238}
3239
Padmanabh Ratnakar773a2d72012-10-20 06:04:16 +00003240static int be_flash_skyhawk(struct be_adapter *adapter,
3241 const struct firmware *fw,
3242 struct be_dma_mem *flash_cmd, int num_of_images)
Ajit Khaparde3f0d4562010-02-09 01:30:35 +00003243{
Padmanabh Ratnakar773a2d72012-10-20 06:04:16 +00003244 int status = 0, i, filehdr_size = 0;
3245 int img_offset, img_size, img_optype, redboot;
3246 int img_hdrs_size = num_of_images * sizeof(struct image_hdr);
3247 const u8 *p = fw->data;
3248 struct flash_section_info *fsec = NULL;
3249
3250 filehdr_size = sizeof(struct flash_file_hdr_g3);
3251 fsec = get_fsec_info(adapter, filehdr_size + img_hdrs_size, fw);
3252 if (!fsec) {
3253 dev_err(&adapter->pdev->dev,
3254 "Invalid Cookie. UFI corrupted ?\n");
3255 return -1;
3256 }
3257
3258 for (i = 0; i < le32_to_cpu(fsec->fsec_hdr.num_images); i++) {
3259 img_offset = le32_to_cpu(fsec->fsec_entry[i].offset);
3260 img_size = le32_to_cpu(fsec->fsec_entry[i].pad_size);
3261
3262 switch (le32_to_cpu(fsec->fsec_entry[i].type)) {
3263 case IMAGE_FIRMWARE_iSCSI:
3264 img_optype = OPTYPE_ISCSI_ACTIVE;
3265 break;
3266 case IMAGE_BOOT_CODE:
3267 img_optype = OPTYPE_REDBOOT;
3268 break;
3269 case IMAGE_OPTION_ROM_ISCSI:
3270 img_optype = OPTYPE_BIOS;
3271 break;
3272 case IMAGE_OPTION_ROM_PXE:
3273 img_optype = OPTYPE_PXE_BIOS;
3274 break;
3275 case IMAGE_OPTION_ROM_FCoE:
3276 img_optype = OPTYPE_FCOE_BIOS;
3277 break;
3278 case IMAGE_FIRMWARE_BACKUP_iSCSI:
3279 img_optype = OPTYPE_ISCSI_BACKUP;
3280 break;
3281 case IMAGE_NCSI:
3282 img_optype = OPTYPE_NCSI_FW;
3283 break;
3284 default:
3285 continue;
3286 }
3287
3288 if (img_optype == OPTYPE_REDBOOT) {
3289 redboot = be_flash_redboot(adapter, fw->data,
3290 img_offset, img_size,
3291 filehdr_size + img_hdrs_size);
3292 if (!redboot)
3293 continue;
3294 }
3295
3296 p = fw->data;
3297 p += filehdr_size + img_offset + img_hdrs_size;
3298 if (p + img_size > fw->data + fw->size)
3299 return -1;
3300
3301 status = be_flash(adapter, p, flash_cmd, img_optype, img_size);
3302 if (status) {
3303 dev_err(&adapter->pdev->dev,
3304 "Flashing section type %d failed.\n",
3305 fsec->fsec_entry[i].type);
3306 return status;
3307 }
3308 }
3309 return 0;
Ajit Khaparde3f0d4562010-02-09 01:30:35 +00003310}
3311
Padmanabh Ratnakarf67ef7b2012-07-12 03:57:09 +00003312static int lancer_wait_idle(struct be_adapter *adapter)
3313{
3314#define SLIPORT_IDLE_TIMEOUT 30
3315 u32 reg_val;
3316 int status = 0, i;
3317
3318 for (i = 0; i < SLIPORT_IDLE_TIMEOUT; i++) {
3319 reg_val = ioread32(adapter->db + PHYSDEV_CONTROL_OFFSET);
3320 if ((reg_val & PHYSDEV_CONTROL_INP_MASK) == 0)
3321 break;
3322
3323 ssleep(1);
3324 }
3325
3326 if (i == SLIPORT_IDLE_TIMEOUT)
3327 status = -1;
3328
3329 return status;
3330}
3331
3332static int lancer_fw_reset(struct be_adapter *adapter)
3333{
3334 int status = 0;
3335
3336 status = lancer_wait_idle(adapter);
3337 if (status)
3338 return status;
3339
3340 iowrite32(PHYSDEV_CONTROL_FW_RESET_MASK, adapter->db +
3341 PHYSDEV_CONTROL_OFFSET);
3342
3343 return status;
3344}
3345
Shripad Nunjundarao485bf562011-05-16 07:36:59 +00003346static int lancer_fw_download(struct be_adapter *adapter,
3347 const struct firmware *fw)
Ajit Khaparde84517482009-09-04 03:12:16 +00003348{
Shripad Nunjundarao485bf562011-05-16 07:36:59 +00003349#define LANCER_FW_DOWNLOAD_CHUNK (32 * 1024)
3350#define LANCER_FW_DOWNLOAD_LOCATION "/prg"
3351 struct be_dma_mem flash_cmd;
Shripad Nunjundarao485bf562011-05-16 07:36:59 +00003352 const u8 *data_ptr = NULL;
3353 u8 *dest_image_ptr = NULL;
3354 size_t image_size = 0;
3355 u32 chunk_size = 0;
3356 u32 data_written = 0;
3357 u32 offset = 0;
3358 int status = 0;
3359 u8 add_status = 0;
Padmanabh Ratnakarf67ef7b2012-07-12 03:57:09 +00003360 u8 change_status;
Shripad Nunjundarao485bf562011-05-16 07:36:59 +00003361
3362 if (!IS_ALIGNED(fw->size, sizeof(u32))) {
3363 dev_err(&adapter->pdev->dev,
3364 "FW Image not properly aligned. "
3365 "Length must be 4 byte aligned.\n");
3366 status = -EINVAL;
3367 goto lancer_fw_exit;
3368 }
3369
3370 flash_cmd.size = sizeof(struct lancer_cmd_req_write_object)
3371 + LANCER_FW_DOWNLOAD_CHUNK;
3372 flash_cmd.va = dma_alloc_coherent(&adapter->pdev->dev, flash_cmd.size,
3373 &flash_cmd.dma, GFP_KERNEL);
3374 if (!flash_cmd.va) {
3375 status = -ENOMEM;
3376 dev_err(&adapter->pdev->dev,
3377 "Memory allocation failure while flashing\n");
3378 goto lancer_fw_exit;
3379 }
3380
Shripad Nunjundarao485bf562011-05-16 07:36:59 +00003381 dest_image_ptr = flash_cmd.va +
3382 sizeof(struct lancer_cmd_req_write_object);
3383 image_size = fw->size;
3384 data_ptr = fw->data;
3385
3386 while (image_size) {
3387 chunk_size = min_t(u32, image_size, LANCER_FW_DOWNLOAD_CHUNK);
3388
3389 /* Copy the image chunk content. */
3390 memcpy(dest_image_ptr, data_ptr, chunk_size);
3391
3392 status = lancer_cmd_write_object(adapter, &flash_cmd,
Padmanabh Ratnakarf67ef7b2012-07-12 03:57:09 +00003393 chunk_size, offset,
3394 LANCER_FW_DOWNLOAD_LOCATION,
3395 &data_written, &change_status,
3396 &add_status);
Shripad Nunjundarao485bf562011-05-16 07:36:59 +00003397 if (status)
3398 break;
3399
3400 offset += data_written;
3401 data_ptr += data_written;
3402 image_size -= data_written;
3403 }
3404
3405 if (!status) {
3406 /* Commit the FW written */
3407 status = lancer_cmd_write_object(adapter, &flash_cmd,
Padmanabh Ratnakarf67ef7b2012-07-12 03:57:09 +00003408 0, offset,
3409 LANCER_FW_DOWNLOAD_LOCATION,
3410 &data_written, &change_status,
3411 &add_status);
Shripad Nunjundarao485bf562011-05-16 07:36:59 +00003412 }
3413
3414 dma_free_coherent(&adapter->pdev->dev, flash_cmd.size, flash_cmd.va,
3415 flash_cmd.dma);
3416 if (status) {
3417 dev_err(&adapter->pdev->dev,
3418 "Firmware load error. "
3419 "Status code: 0x%x Additional Status: 0x%x\n",
3420 status, add_status);
3421 goto lancer_fw_exit;
3422 }
3423
Padmanabh Ratnakarf67ef7b2012-07-12 03:57:09 +00003424 if (change_status == LANCER_FW_RESET_NEEDED) {
3425 status = lancer_fw_reset(adapter);
3426 if (status) {
3427 dev_err(&adapter->pdev->dev,
3428 "Adapter busy for FW reset.\n"
3429 "New FW will not be active.\n");
3430 goto lancer_fw_exit;
3431 }
3432 } else if (change_status != LANCER_NO_RESET_NEEDED) {
3433 dev_err(&adapter->pdev->dev,
3434 "System reboot required for new FW"
3435 " to be active\n");
3436 }
3437
Shripad Nunjundarao485bf562011-05-16 07:36:59 +00003438 dev_info(&adapter->pdev->dev, "Firmware flashed successfully\n");
3439lancer_fw_exit:
3440 return status;
3441}
3442
Padmanabh Ratnakar773a2d72012-10-20 06:04:16 +00003443static int be_get_ufi_gen(struct be_adapter *adapter,
3444 struct flash_file_hdr_g2 *fhdr)
3445{
3446 if (fhdr == NULL)
3447 goto be_get_ufi_exit;
3448
3449 if (adapter->generation == BE_GEN3) {
3450 if (skyhawk_chip(adapter) && fhdr->build[0] == '4')
3451 return SH_HW;
3452 else if (!skyhawk_chip(adapter) && fhdr->build[0] == '3')
3453 return BE_GEN3;
3454 } else if (adapter->generation == BE_GEN2 && fhdr->build[0] == '2') {
3455 return BE_GEN2;
3456 }
3457
3458be_get_ufi_exit:
3459 dev_err(&adapter->pdev->dev,
3460 "UFI and Interface are not compatible for flashing\n");
3461 return -1;
3462}
3463
Shripad Nunjundarao485bf562011-05-16 07:36:59 +00003464static int be_fw_download(struct be_adapter *adapter, const struct firmware* fw)
3465{
Ajit Khaparde3f0d4562010-02-09 01:30:35 +00003466 struct flash_file_hdr_g2 *fhdr;
3467 struct flash_file_hdr_g3 *fhdr3;
3468 struct image_hdr *img_hdr_ptr = NULL;
Ajit Khaparde84517482009-09-04 03:12:16 +00003469 struct be_dma_mem flash_cmd;
Ajit Khaparde84517482009-09-04 03:12:16 +00003470 const u8 *p;
Padmanabh Ratnakar773a2d72012-10-20 06:04:16 +00003471 int status = 0, i = 0, num_imgs = 0, ufi_type = 0;
Ajit Khaparde84517482009-09-04 03:12:16 +00003472
Ajit Khaparde84517482009-09-04 03:12:16 +00003473 flash_cmd.size = sizeof(struct be_cmd_write_flashrom) + 32*1024;
Ivan Vecera2b7bceb2011-02-02 08:05:12 +00003474 flash_cmd.va = dma_alloc_coherent(&adapter->pdev->dev, flash_cmd.size,
3475 &flash_cmd.dma, GFP_KERNEL);
Ajit Khaparde84517482009-09-04 03:12:16 +00003476 if (!flash_cmd.va) {
3477 status = -ENOMEM;
3478 dev_err(&adapter->pdev->dev,
3479 "Memory allocation failure while flashing\n");
Shripad Nunjundarao485bf562011-05-16 07:36:59 +00003480 goto be_fw_exit;
Ajit Khaparde84517482009-09-04 03:12:16 +00003481 }
3482
Padmanabh Ratnakar773a2d72012-10-20 06:04:16 +00003483 p = fw->data;
3484 fhdr = (struct flash_file_hdr_g2 *)p;
3485
3486 ufi_type = be_get_ufi_gen(adapter, fhdr);
3487
3488 fhdr3 = (struct flash_file_hdr_g3 *)fw->data;
3489 num_imgs = le32_to_cpu(fhdr3->num_imgs);
3490 for (i = 0; i < num_imgs; i++) {
3491 img_hdr_ptr = (struct image_hdr *)(fw->data +
3492 (sizeof(struct flash_file_hdr_g3) +
3493 i * sizeof(struct image_hdr)));
3494 if (le32_to_cpu(img_hdr_ptr->imageid) == 1) {
3495 if (ufi_type == SH_HW)
3496 status = be_flash_skyhawk(adapter, fw,
3497 &flash_cmd, num_imgs);
3498 else if (ufi_type == BE_GEN3)
3499 status = be_flash_data(adapter, fw,
3500 &flash_cmd, num_imgs);
Ajit Khaparde3f0d4562010-02-09 01:30:35 +00003501 }
Ajit Khaparde84517482009-09-04 03:12:16 +00003502 }
3503
Padmanabh Ratnakar773a2d72012-10-20 06:04:16 +00003504 if (ufi_type == BE_GEN2)
3505 status = be_flash_data(adapter, fw, &flash_cmd, 0);
3506 else if (ufi_type == -1)
3507 status = -1;
3508
Ivan Vecera2b7bceb2011-02-02 08:05:12 +00003509 dma_free_coherent(&adapter->pdev->dev, flash_cmd.size, flash_cmd.va,
3510 flash_cmd.dma);
Ajit Khaparde84517482009-09-04 03:12:16 +00003511 if (status) {
3512 dev_err(&adapter->pdev->dev, "Firmware load error\n");
Shripad Nunjundarao485bf562011-05-16 07:36:59 +00003513 goto be_fw_exit;
Ajit Khaparde84517482009-09-04 03:12:16 +00003514 }
3515
André Goddard Rosaaf901ca2009-11-14 13:09:05 -02003516 dev_info(&adapter->pdev->dev, "Firmware flashed successfully\n");
Ajit Khaparde84517482009-09-04 03:12:16 +00003517
Shripad Nunjundarao485bf562011-05-16 07:36:59 +00003518be_fw_exit:
3519 return status;
3520}
3521
3522int be_load_fw(struct be_adapter *adapter, u8 *fw_file)
3523{
3524 const struct firmware *fw;
3525 int status;
3526
3527 if (!netif_running(adapter->netdev)) {
3528 dev_err(&adapter->pdev->dev,
3529 "Firmware load not allowed (interface is down)\n");
3530 return -1;
3531 }
3532
3533 status = request_firmware(&fw, fw_file, &adapter->pdev->dev);
3534 if (status)
3535 goto fw_exit;
3536
3537 dev_info(&adapter->pdev->dev, "Flashing firmware file %s\n", fw_file);
3538
3539 if (lancer_chip(adapter))
3540 status = lancer_fw_download(adapter, fw);
3541 else
3542 status = be_fw_download(adapter, fw);
3543
Ajit Khaparde84517482009-09-04 03:12:16 +00003544fw_exit:
3545 release_firmware(fw);
3546 return status;
3547}
3548
/* net_device callback table shared by all be2net interfaces (PF and VF).
 * SR-IOV ndo_set_vf_* hooks are effective only on the physical function.
 */
static const struct net_device_ops be_netdev_ops = {
	.ndo_open		= be_open,
	.ndo_stop		= be_close,
	.ndo_start_xmit		= be_xmit,
	.ndo_set_rx_mode	= be_set_rx_mode,
	.ndo_set_mac_address	= be_mac_addr_set,
	.ndo_change_mtu		= be_change_mtu,
	.ndo_get_stats64	= be_get_stats64,
	.ndo_validate_addr	= eth_validate_addr,
	.ndo_vlan_rx_add_vid	= be_vlan_add_vid,
	.ndo_vlan_rx_kill_vid	= be_vlan_rem_vid,
	.ndo_set_vf_mac		= be_set_vf_mac,
	.ndo_set_vf_vlan	= be_set_vf_vlan,
	.ndo_set_vf_tx_rate	= be_set_vf_tx_rate,
	.ndo_get_vf_config	= be_get_vf_config,
#ifdef CONFIG_NET_POLL_CONTROLLER
	.ndo_poll_controller	= be_netpoll,
#endif
};
3568
/* One-time net_device setup: advertise offload features, install the
 * netdev_ops/ethtool_ops tables and register one NAPI context per
 * event queue.  Called from be_probe() before register_netdev().
 */
static void be_netdev_init(struct net_device *netdev)
{
	struct be_adapter *adapter = netdev_priv(netdev);
	struct be_eq_obj *eqo;
	int i;

	/* User-toggleable offloads: SG, TSO, checksum, VLAN tx insertion */
	netdev->hw_features |= NETIF_F_SG | NETIF_F_TSO | NETIF_F_TSO6 |
		NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM | NETIF_F_RXCSUM |
		NETIF_F_HW_VLAN_TX;
	/* RSS hashing is only meaningful with multiple RX queues */
	if (be_multi_rxq(adapter))
		netdev->hw_features |= NETIF_F_RXHASH;

	/* Everything above enabled by default, plus VLAN strip/filter
	 * which are always on (hence not in hw_features).
	 */
	netdev->features |= netdev->hw_features |
		NETIF_F_HW_VLAN_RX | NETIF_F_HW_VLAN_FILTER;

	netdev->vlan_features |= NETIF_F_SG | NETIF_F_TSO | NETIF_F_TSO6 |
		NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM;

	/* HW can filter unicast addresses; avoid forcing promiscuous mode */
	netdev->priv_flags |= IFF_UNICAST_FLT;

	netdev->flags |= IFF_MULTICAST;

	netif_set_gso_max_size(netdev, 65535 - ETH_HLEN);

	netdev->netdev_ops = &be_netdev_ops;

	SET_ETHTOOL_OPS(netdev, &be_ethtool_ops);

	/* One NAPI instance per event queue */
	for_all_evt_queues(adapter, eqo, i)
		netif_napi_add(netdev, &eqo->napi, be_poll, BE_NAPI_WEIGHT);
}
3600
/* Undo be_map_pci_bars(): release whichever BAR mappings were created.
 * Each mapping is optional (depends on chip type / function role), so
 * every pointer is checked before unmapping.
 */
static void be_unmap_pci_bars(struct be_adapter *adapter)
{
	if (adapter->csr)
		iounmap(adapter->csr);
	if (adapter->db)
		iounmap(adapter->db);
	/* RoCE doorbell BAR was mapped with pci_iomap(), not ioremap */
	if (adapter->roce_db.base)
		pci_iounmap(adapter->pdev, adapter->roce_db.base);
}
3610
3611static int lancer_roce_map_pci_bars(struct be_adapter *adapter)
3612{
3613 struct pci_dev *pdev = adapter->pdev;
3614 u8 __iomem *addr;
3615
3616 addr = pci_iomap(pdev, 2, 0);
3617 if (addr == NULL)
3618 return -ENOMEM;
3619
3620 adapter->roce_db.base = addr;
3621 adapter->roce_db.io_addr = pci_resource_start(pdev, 2);
3622 adapter->roce_db.size = 8192;
3623 adapter->roce_db.total_size = pci_resource_len(pdev, 2);
3624 return 0;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07003625}
3626
/* Map the PCI BARs needed by this function.  Which BARs exist and what
 * they contain depends on the chip family and on whether we are the
 * physical function:
 *   - Lancer: BAR 0 is the doorbell area (type 2/3 functions); SLI-3
 *     functions additionally map the RoCE doorbell BAR.
 *   - BE2/BE3/Skyhawk: PF maps the CSR BAR (2); the doorbell BAR index
 *     varies by generation and PF/VF role.
 * Returns 0 on success, -ENOMEM on any mapping failure (with all
 * partial mappings undone).
 */
static int be_map_pci_bars(struct be_adapter *adapter)
{
	u8 __iomem *addr;
	int db_reg;

	if (lancer_chip(adapter)) {
		if (be_type_2_3(adapter)) {
			addr = ioremap_nocache(
					pci_resource_start(adapter->pdev, 0),
					pci_resource_len(adapter->pdev, 0));
			if (addr == NULL)
				return -ENOMEM;
			adapter->db = addr;
		}
		if (adapter->if_type == SLI_INTF_TYPE_3) {
			if (lancer_roce_map_pci_bars(adapter))
				goto pci_map_err;
		}
		return 0;
	}

	/* Only the PF has access to the CSR BAR */
	if (be_physfn(adapter)) {
		addr = ioremap_nocache(pci_resource_start(adapter->pdev, 2),
				pci_resource_len(adapter->pdev, 2));
		if (addr == NULL)
			return -ENOMEM;
		adapter->csr = addr;
	}

	/* Doorbell BAR: 4 on GEN2 and on GEN3 PFs, 0 on GEN3 VFs */
	if (adapter->generation == BE_GEN2) {
		db_reg = 4;
	} else {
		if (be_physfn(adapter))
			db_reg = 4;
		else
			db_reg = 0;
	}
	addr = ioremap_nocache(pci_resource_start(adapter->pdev, db_reg),
			       pci_resource_len(adapter->pdev, db_reg));
	if (addr == NULL)
		goto pci_map_err;
	adapter->db = addr;
	/* On Skyhawk the RoCE doorbells share the NIC doorbell BAR */
	if (adapter->sli_family == SKYHAWK_SLI_FAMILY) {
		adapter->roce_db.size = 4096;
		adapter->roce_db.io_addr =
				pci_resource_start(adapter->pdev, db_reg);
		adapter->roce_db.total_size =
				pci_resource_len(adapter->pdev, db_reg);
	}
	return 0;
pci_map_err:
	be_unmap_pci_bars(adapter);
	return -ENOMEM;
}
3681
Sathya Perla6b7c5b92009-03-11 23:32:03 -07003682static void be_ctrl_cleanup(struct be_adapter *adapter)
3683{
Sathya Perla8788fdc2009-07-27 22:52:03 +00003684 struct be_dma_mem *mem = &adapter->mbox_mem_alloced;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07003685
3686 be_unmap_pci_bars(adapter);
3687
3688 if (mem->va)
Ivan Vecera2b7bceb2011-02-02 08:05:12 +00003689 dma_free_coherent(&adapter->pdev->dev, mem->size, mem->va,
3690 mem->dma);
Sathya Perlae7b909a2009-11-22 22:01:10 +00003691
Sathya Perla5b8821b2011-08-02 19:57:44 +00003692 mem = &adapter->rx_filter;
Sathya Perlae7b909a2009-11-22 22:01:10 +00003693 if (mem->va)
Ivan Vecera2b7bceb2011-02-02 08:05:12 +00003694 dma_free_coherent(&adapter->pdev->dev, mem->size, mem->va,
3695 mem->dma);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07003696}
3697
/* Initialize the control path to the adapter: map the PCI BARs,
 * allocate the FW mailbox (16-byte aligned, as the HW requires) and the
 * rx-filter command buffer, and set up the locks that serialize mailbox
 * and MCC access.  On failure the already-acquired resources are
 * released via the goto-cleanup chain.  Returns 0 or a negative errno.
 */
static int be_ctrl_init(struct be_adapter *adapter)
{
	struct be_dma_mem *mbox_mem_alloc = &adapter->mbox_mem_alloced;
	struct be_dma_mem *mbox_mem_align = &adapter->mbox_mem;
	struct be_dma_mem *rx_filter = &adapter->rx_filter;
	int status;

	status = be_map_pci_bars(adapter);
	if (status)
		goto done;

	/* Over-allocate by 16 bytes so the used region can be aligned */
	mbox_mem_alloc->size = sizeof(struct be_mcc_mailbox) + 16;
	mbox_mem_alloc->va = dma_alloc_coherent(&adapter->pdev->dev,
						mbox_mem_alloc->size,
						&mbox_mem_alloc->dma,
						GFP_KERNEL);
	if (!mbox_mem_alloc->va) {
		status = -ENOMEM;
		goto unmap_pci_bars;
	}
	/* mbox_mem is the 16-byte-aligned view inside mbox_mem_alloced */
	mbox_mem_align->size = sizeof(struct be_mcc_mailbox);
	mbox_mem_align->va = PTR_ALIGN(mbox_mem_alloc->va, 16);
	mbox_mem_align->dma = PTR_ALIGN(mbox_mem_alloc->dma, 16);
	memset(mbox_mem_align->va, 0, sizeof(struct be_mcc_mailbox));

	rx_filter->size = sizeof(struct be_cmd_req_rx_filter);
	rx_filter->va = dma_alloc_coherent(&adapter->pdev->dev, rx_filter->size,
					&rx_filter->dma, GFP_KERNEL);
	if (rx_filter->va == NULL) {
		status = -ENOMEM;
		goto free_mbox;
	}
	memset(rx_filter->va, 0, rx_filter->size);
	mutex_init(&adapter->mbox_lock);
	spin_lock_init(&adapter->mcc_lock);
	spin_lock_init(&adapter->mcc_cq_lock);

	init_completion(&adapter->flash_compl);
	/* Save config space for restoration after EEH/slot reset */
	pci_save_state(adapter->pdev);
	return 0;

free_mbox:
	dma_free_coherent(&adapter->pdev->dev, mbox_mem_alloc->size,
			  mbox_mem_alloc->va, mbox_mem_alloc->dma);

unmap_pci_bars:
	be_unmap_pci_bars(adapter);

done:
	return status;
}
3749
3750static void be_stats_cleanup(struct be_adapter *adapter)
3751{
Sathya Perla3abcded2010-10-03 22:12:27 -07003752 struct be_dma_mem *cmd = &adapter->stats_cmd;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07003753
3754 if (cmd->va)
Ivan Vecera2b7bceb2011-02-02 08:05:12 +00003755 dma_free_coherent(&adapter->pdev->dev, cmd->size,
3756 cmd->va, cmd->dma);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07003757}
3758
3759static int be_stats_init(struct be_adapter *adapter)
3760{
Sathya Perla3abcded2010-10-03 22:12:27 -07003761 struct be_dma_mem *cmd = &adapter->stats_cmd;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07003762
Selvin Xavier005d5692011-05-16 07:36:35 +00003763 if (adapter->generation == BE_GEN2) {
Ajit Khaparde89a88ab2011-05-16 07:36:18 +00003764 cmd->size = sizeof(struct be_cmd_req_get_stats_v0);
Selvin Xavier005d5692011-05-16 07:36:35 +00003765 } else {
3766 if (lancer_chip(adapter))
3767 cmd->size = sizeof(struct lancer_cmd_req_pport_stats);
3768 else
3769 cmd->size = sizeof(struct be_cmd_req_get_stats_v1);
3770 }
Ivan Vecera2b7bceb2011-02-02 08:05:12 +00003771 cmd->va = dma_alloc_coherent(&adapter->pdev->dev, cmd->size, &cmd->dma,
3772 GFP_KERNEL);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07003773 if (cmd->va == NULL)
3774 return -1;
David S. Millerd291b9a2010-01-28 21:36:21 -08003775 memset(cmd->va, 0, cmd->size);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07003776 return 0;
3777}
3778
/* PCI remove callback: tear down the adapter in the reverse order of
 * be_probe().  The ordering matters — workers are cancelled before the
 * netdev is unregistered, and the FW is told we are done before the
 * control path is dismantled.
 */
static void __devexit be_remove(struct pci_dev *pdev)
{
	struct be_adapter *adapter = pci_get_drvdata(pdev);

	if (!adapter)
		return;

	be_roce_dev_remove(adapter);

	/* Stop the recovery worker before pulling resources from under it */
	cancel_delayed_work_sync(&adapter->func_recovery_work);

	unregister_netdev(adapter->netdev);

	be_clear(adapter);

	/* tell fw we're done with firing cmds */
	be_cmd_fw_clean(adapter);

	be_stats_cleanup(adapter);

	be_ctrl_cleanup(adapter);

	pci_disable_pcie_error_reporting(pdev);

	pci_set_drvdata(pdev, NULL);
	pci_release_regions(pdev);
	pci_disable_device(pdev);

	free_netdev(adapter->netdev);
}
3809
Ajit Khaparde4762f6c2012-03-18 06:23:11 +00003810bool be_is_wol_supported(struct be_adapter *adapter)
3811{
3812 return ((adapter->wol_cap & BE_WOL_CAP) &&
3813 !be_is_wol_excluded(adapter)) ? true : false;
3814}
3815
/* Query the FW's UART trace level via the extended-FAT capabilities
 * command.  Returns the dbg level of the MODE_UART entry of module 0,
 * or 0 on any failure (and always 0 on Lancer, which does not support
 * this command).
 */
u32 be_get_fw_log_level(struct be_adapter *adapter)
{
	struct be_dma_mem extfat_cmd;
	struct be_fat_conf_params *cfgs;
	int status;
	u32 level = 0;
	int j;

	if (lancer_chip(adapter))
		return 0;

	memset(&extfat_cmd, 0, sizeof(struct be_dma_mem));
	extfat_cmd.size = sizeof(struct be_cmd_resp_get_ext_fat_caps);
	extfat_cmd.va = pci_alloc_consistent(adapter->pdev, extfat_cmd.size,
					     &extfat_cmd.dma);

	if (!extfat_cmd.va) {
		dev_err(&adapter->pdev->dev, "%s: Memory allocation failure\n",
			__func__);
		goto err;
	}

	status = be_cmd_get_ext_fat_capabilites(adapter, &extfat_cmd);
	if (!status) {
		/* FAT config params follow the generic response header */
		cfgs = (struct be_fat_conf_params *)(extfat_cmd.va +
						sizeof(struct be_cmd_resp_hdr));
		/* Pick the UART trace level (last matching entry wins) */
		for (j = 0; j < le32_to_cpu(cfgs->module[0].num_modes); j++) {
			if (cfgs->module[0].trace_lvl[j].mode == MODE_UART)
				level = cfgs->module[0].trace_lvl[j].dbg_lvl;
		}
	}
	pci_free_consistent(adapter->pdev, extfat_cmd.size, extfat_cmd.va,
			    extfat_cmd.dma);
err:
	return level;
}
Padmanabh Ratnakarabb93952012-10-20 06:01:41 +00003852
/* Fetch one-time configuration from the FW during probe: controller
 * attributes, WoL capability, die-temperature polling frequency and the
 * FW log level (which seeds msg_enable).  Returns 0 or a negative
 * status from the attribute query.
 */
static int be_get_initial_config(struct be_adapter *adapter)
{
	int status;
	u32 level;

	status = be_cmd_get_cntl_attributes(adapter);
	if (status)
		return status;

	status = be_cmd_get_acpi_wol_cap(adapter);
	if (status) {
		/* in case of a failure to get wol capabilities
		 * check the exclusion list to determine WOL capability */
		if (!be_is_wol_excluded(adapter))
			adapter->wol_cap |= BE_WOL_CAP;
	}

	if (be_is_wol_supported(adapter))
		adapter->wol = true;

	/* Must be a power of 2 or else MODULO will BUG_ON */
	adapter->be_get_temp_freq = 64;

	/* Enable HW-level messages only if FW logging is at default level */
	level = be_get_fw_log_level(adapter);
	adapter->msg_enable = level <= FW_LOG_LEVEL_DEFAULT ? NETIF_MSG_HW : 0;

	return 0;
}
3881
Sathya Perla39f1d942012-05-08 19:41:24 +00003882static int be_dev_type_check(struct be_adapter *adapter)
Sathya Perlafe6d2a32010-11-21 23:25:50 +00003883{
3884 struct pci_dev *pdev = adapter->pdev;
3885 u32 sli_intf = 0, if_type;
3886
3887 switch (pdev->device) {
3888 case BE_DEVICE_ID1:
3889 case OC_DEVICE_ID1:
3890 adapter->generation = BE_GEN2;
3891 break;
3892 case BE_DEVICE_ID2:
3893 case OC_DEVICE_ID2:
3894 adapter->generation = BE_GEN3;
3895 break;
3896 case OC_DEVICE_ID3:
Mammatha Edhala12f4d0a2011-05-18 03:26:22 +00003897 case OC_DEVICE_ID4:
Sathya Perlafe6d2a32010-11-21 23:25:50 +00003898 pci_read_config_dword(pdev, SLI_INTF_REG_OFFSET, &sli_intf);
Parav Pandit045508a2012-03-26 14:27:13 +00003899 adapter->if_type = (sli_intf & SLI_INTF_IF_TYPE_MASK) >>
3900 SLI_INTF_IF_TYPE_SHIFT;
Sathya Perlafe6d2a32010-11-21 23:25:50 +00003901 if_type = (sli_intf & SLI_INTF_IF_TYPE_MASK) >>
3902 SLI_INTF_IF_TYPE_SHIFT;
Sathya Perlafe6d2a32010-11-21 23:25:50 +00003903 if (((sli_intf & SLI_INTF_VALID_MASK) != SLI_INTF_VALID) ||
Parav Pandit045508a2012-03-26 14:27:13 +00003904 !be_type_2_3(adapter)) {
3905 dev_err(&pdev->dev, "SLI_INTF reg val is not valid\n");
3906 return -EINVAL;
3907 }
3908 adapter->sli_family = ((sli_intf & SLI_INTF_FAMILY_MASK) >>
3909 SLI_INTF_FAMILY_SHIFT);
3910 adapter->generation = BE_GEN3;
3911 break;
3912 case OC_DEVICE_ID5:
3913 pci_read_config_dword(pdev, SLI_INTF_REG_OFFSET, &sli_intf);
3914 if ((sli_intf & SLI_INTF_VALID_MASK) != SLI_INTF_VALID) {
Sathya Perlafe6d2a32010-11-21 23:25:50 +00003915 dev_err(&pdev->dev, "SLI_INTF reg val is not valid\n");
3916 return -EINVAL;
3917 }
Sathya Perlafe6d2a32010-11-21 23:25:50 +00003918 adapter->sli_family = ((sli_intf & SLI_INTF_FAMILY_MASK) >>
3919 SLI_INTF_FAMILY_SHIFT);
3920 adapter->generation = BE_GEN3;
3921 break;
3922 default:
3923 adapter->generation = 0;
3924 }
Sathya Perla39f1d942012-05-08 19:41:24 +00003925
3926 pci_read_config_dword(adapter->pdev, SLI_INTF_REG_OFFSET, &sli_intf);
3927 adapter->virtfn = (sli_intf & SLI_INTF_FT_MASK) ? 1 : 0;
Sathya Perlafe6d2a32010-11-21 23:25:50 +00003928 return 0;
3929}
3930
/* Recover a Lancer function after a SLIPORT error: wait for the FW to
 * report ready, tear the function down (closing the netdev if it was
 * up), clear the error flags, and rebuild/reopen.  The exact sequence
 * (ready-check -> close -> clear -> setup -> open) is required by the
 * FW.  Returns 0 on success or the failing step's status.
 */
static int lancer_recover_func(struct be_adapter *adapter)
{
	int status;

	status = lancer_test_and_set_rdy_state(adapter);
	if (status)
		goto err;

	if (netif_running(adapter->netdev))
		be_close(adapter->netdev);

	be_clear(adapter);

	/* Errors are being recovered from; reset the sticky flags */
	adapter->hw_error = false;
	adapter->fw_timeout = false;

	status = be_setup(adapter);
	if (status)
		goto err;

	if (netif_running(adapter->netdev)) {
		status = be_open(adapter->netdev);
		if (status)
			goto err;
	}

	dev_err(&adapter->pdev->dev,
		"Adapter SLIPORT recovery succeeded\n");
	return 0;
err:
	/* Don't log a failure when EEH is handling the error instead */
	if (adapter->eeh_error)
		dev_err(&adapter->pdev->dev,
			"Adapter SLIPORT recovery failed\n");

	return status;
}
3967
/* Periodic (1s) worker that polls for HW errors and, on Lancer, drives
 * SLIPORT recovery: detach the netdev, run lancer_recover_func(), and
 * re-attach on success.  EEH-handled errors are left to the EEH core.
 */
static void be_func_recovery_task(struct work_struct *work)
{
	struct be_adapter *adapter =
		container_of(work, struct be_adapter, func_recovery_work.work);
	int status;

	be_detect_error(adapter);

	if (adapter->hw_error && lancer_chip(adapter)) {

		/* EEH recovery owns the device; skip our own recovery */
		if (adapter->eeh_error)
			goto out;

		rtnl_lock();
		netif_device_detach(adapter->netdev);
		rtnl_unlock();

		status = lancer_recover_func(adapter);

		if (!status)
			netif_device_attach(adapter->netdev);
	}

out:
	schedule_delayed_work(&adapter->func_recovery_work,
			      msecs_to_jiffies(1000));
}
3995
/* Periodic (1s) housekeeping worker: reaps MCC completions while the
 * interface is down, otherwise issues stats / die-temperature queries,
 * replenishes starved RX rings and updates adaptive EQ delays.
 * Reschedules itself unconditionally.
 */
static void be_worker(struct work_struct *work)
{
	struct be_adapter *adapter =
		container_of(work, struct be_adapter, work.work);
	struct be_rx_obj *rxo;
	struct be_eq_obj *eqo;
	int i;

	/* when interrupts are not yet enabled, just reap any pending
	 * mcc completions */
	if (!netif_running(adapter->netdev)) {
		/* be_process_mcc() expects BH context */
		local_bh_disable();
		be_process_mcc(adapter);
		local_bh_enable();
		goto reschedule;
	}

	/* Don't issue a new stats cmd while the previous one is in flight */
	if (!adapter->stats_cmd_sent) {
		if (lancer_chip(adapter))
			lancer_cmd_get_pport_stats(adapter,
						&adapter->stats_cmd);
		else
			be_cmd_get_stats(adapter, &adapter->stats_cmd);
	}

	/* Query die temperature only every be_get_temp_freq ticks */
	if (MODULO(adapter->work_counter, adapter->be_get_temp_freq) == 0)
		be_cmd_get_die_temperature(adapter);

	/* Repost buffers on RX rings that ran dry under memory pressure */
	for_all_rx_queues(adapter, rxo, i) {
		if (rxo->rx_post_starved) {
			rxo->rx_post_starved = false;
			be_post_rx_frags(rxo, GFP_KERNEL);
		}
	}

	for_all_evt_queues(adapter, eqo, i)
		be_eqd_update(adapter, eqo);

reschedule:
	adapter->work_counter++;
	schedule_delayed_work(&adapter->work, msecs_to_jiffies(1000));
}
4038
Sathya Perla39f1d942012-05-08 19:41:24 +00004039static bool be_reset_required(struct be_adapter *adapter)
4040{
Sathya Perlad79c0a22012-06-05 19:37:22 +00004041 return be_find_vfs(adapter, ENABLED) > 0 ? false : true;
Sathya Perla39f1d942012-05-08 19:41:24 +00004042}
4043
Sathya Perlad3791422012-09-28 04:39:44 +00004044static char *mc_name(struct be_adapter *adapter)
4045{
4046 if (adapter->function_mode & FLEX10_MODE)
4047 return "FLEX10";
4048 else if (adapter->function_mode & VNIC_MODE)
4049 return "vNIC";
4050 else if (adapter->function_mode & UMC_ENABLED)
4051 return "UMC";
4052 else
4053 return "";
4054}
4055
/* "PF" or "VF" label for log messages, depending on the function role. */
static inline char *func_name(struct be_adapter *adapter)
{
	if (be_physfn(adapter))
		return "PF";

	return "VF";
}
4060
/* PCI probe: bring up one be2net function end to end — PCI enable, BAR
 * request, netdev allocation, DMA mask, control path, FW handshake,
 * stats/config queries, queue setup, netdev registration and the
 * periodic workers.  On any failure the goto chain unwinds exactly the
 * resources acquired so far.
 */
static int __devinit be_probe(struct pci_dev *pdev,
			const struct pci_device_id *pdev_id)
{
	int status = 0;
	struct be_adapter *adapter;
	struct net_device *netdev;
	char port_name;

	status = pci_enable_device(pdev);
	if (status)
		goto do_none;

	status = pci_request_regions(pdev, DRV_NAME);
	if (status)
		goto disable_dev;
	pci_set_master(pdev);

	netdev = alloc_etherdev_mqs(sizeof(*adapter), MAX_TX_QS, MAX_RX_QS);
	if (netdev == NULL) {
		status = -ENOMEM;
		goto rel_reg;
	}
	adapter = netdev_priv(netdev);
	adapter->pdev = pdev;
	pci_set_drvdata(pdev, adapter);

	status = be_dev_type_check(adapter);
	if (status)
		goto free_netdev;

	adapter->netdev = netdev;
	SET_NETDEV_DEV(netdev, &pdev->dev);

	/* Prefer 64-bit DMA; fall back to 32-bit if unavailable */
	status = dma_set_mask(&pdev->dev, DMA_BIT_MASK(64));
	if (!status) {
		netdev->features |= NETIF_F_HIGHDMA;
	} else {
		status = dma_set_mask(&pdev->dev, DMA_BIT_MASK(32));
		if (status) {
			dev_err(&pdev->dev, "Could not set PCI DMA Mask\n");
			goto free_netdev;
		}
	}

	/* AER is optional; log but continue if it cannot be enabled */
	status = pci_enable_pcie_error_reporting(pdev);
	if (status)
		dev_err(&pdev->dev, "Could not use PCIe error reporting\n");

	status = be_ctrl_init(adapter);
	if (status)
		goto free_netdev;

	/* sync up with fw's ready state */
	if (be_physfn(adapter)) {
		status = be_fw_wait_ready(adapter);
		if (status)
			goto ctrl_clean;
	}

	/* tell fw we're ready to fire cmds */
	status = be_cmd_fw_init(adapter);
	if (status)
		goto ctrl_clean;

	/* Skip the reset when VFs are already enabled (see
	 * be_reset_required()) */
	if (be_reset_required(adapter)) {
		status = be_cmd_reset_function(adapter);
		if (status)
			goto ctrl_clean;
	}

	/* The INTR bit may be set in the card when probed by a kdump kernel
	 * after a crash.
	 */
	if (!lancer_chip(adapter))
		be_intr_set(adapter, false);

	status = be_stats_init(adapter);
	if (status)
		goto ctrl_clean;

	status = be_get_initial_config(adapter);
	if (status)
		goto stats_clean;

	INIT_DELAYED_WORK(&adapter->work, be_worker);
	INIT_DELAYED_WORK(&adapter->func_recovery_work, be_func_recovery_task);
	adapter->rx_fc = adapter->tx_fc = true;

	status = be_setup(adapter);
	if (status)
		goto stats_clean;

	be_netdev_init(netdev);
	status = register_netdev(netdev);
	if (status != 0)
		goto unsetup;

	be_roce_dev_add(adapter);

	schedule_delayed_work(&adapter->func_recovery_work,
			      msecs_to_jiffies(1000));

	be_cmd_query_port_name(adapter, &port_name);

	dev_info(&pdev->dev, "%s: %s %s port %c\n", nic_name(pdev),
		 func_name(adapter), mc_name(adapter), port_name);

	return 0;

unsetup:
	be_clear(adapter);
stats_clean:
	be_stats_cleanup(adapter);
ctrl_clean:
	be_ctrl_cleanup(adapter);
free_netdev:
	free_netdev(netdev);
	pci_set_drvdata(pdev, NULL);
rel_reg:
	pci_release_regions(pdev);
disable_dev:
	pci_disable_device(pdev);
do_none:
	dev_err(&pdev->dev, "%s initialization failed\n", nic_name(pdev));
	return status;
}
4187
/* Legacy PM suspend hook: quiesce the NIC and put the PCI function to sleep.
 * Ordering matters: arm WoL, stop the recovery worker, detach and close the
 * netdev, tear down HW resources, and only then power the device down.
 */
static int be_suspend(struct pci_dev *pdev, pm_message_t state)
{
	struct be_adapter *adapter = pci_get_drvdata(pdev);
	struct net_device *netdev = adapter->netdev;

	/* Program wake-on-LAN filters before the function loses power */
	if (adapter->wol)
		be_setup_wol(adapter, true);

	/* Make sure the error-recovery worker cannot touch HW mid-suspend */
	cancel_delayed_work_sync(&adapter->func_recovery_work);

	netif_device_detach(netdev);
	if (netif_running(netdev)) {
		/* be_close() expects RTNL held */
		rtnl_lock();
		be_close(netdev);
		rtnl_unlock();
	}
	/* Release queues/IRQs/FW resources; recreated in be_resume() */
	be_clear(adapter);

	pci_save_state(pdev);
	pci_disable_device(pdev);
	pci_set_power_state(pdev, pci_choose_state(pdev, state));
	return 0;
}
4211
4212static int be_resume(struct pci_dev *pdev)
4213{
4214 int status = 0;
4215 struct be_adapter *adapter = pci_get_drvdata(pdev);
4216 struct net_device *netdev = adapter->netdev;
4217
4218 netif_device_detach(netdev);
4219
4220 status = pci_enable_device(pdev);
4221 if (status)
4222 return status;
4223
4224 pci_set_power_state(pdev, 0);
4225 pci_restore_state(pdev);
4226
Sathya Perla2243e2e2009-11-22 22:02:03 +00004227 /* tell fw we're ready to fire cmds */
4228 status = be_cmd_fw_init(adapter);
4229 if (status)
4230 return status;
4231
Sarveshwar Bandi9b0365f2009-08-12 21:01:29 +00004232 be_setup(adapter);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07004233 if (netif_running(netdev)) {
4234 rtnl_lock();
Sathya Perla6b7c5b92009-03-11 23:32:03 -07004235 be_open(netdev);
4236 rtnl_unlock();
4237 }
Padmanabh Ratnakarf67ef7b2012-07-12 03:57:09 +00004238
4239 schedule_delayed_work(&adapter->func_recovery_work,
4240 msecs_to_jiffies(1000));
Sathya Perla6b7c5b92009-03-11 23:32:03 -07004241 netif_device_attach(netdev);
Ajit Khaparde71d8d1b2009-12-03 06:16:59 +00004242
4243 if (adapter->wol)
4244 be_setup_wol(adapter, false);
Ajit Khapardea4ca0552011-02-11 13:38:03 +00004245
Sathya Perla6b7c5b92009-03-11 23:32:03 -07004246 return 0;
4247}
4248
/*
 * Shutdown hook: quiesce the device before reboot/poweroff.
 * An FLR (function-level reset) will stop BE from DMAing any data.
 */
static void be_shutdown(struct pci_dev *pdev)
{
	struct be_adapter *adapter = pci_get_drvdata(pdev);

	/* drvdata is NULL if probe failed before pci_set_drvdata() */
	if (!adapter)
		return;

	/* Stop both periodic workers before resetting the function */
	cancel_delayed_work_sync(&adapter->work);
	cancel_delayed_work_sync(&adapter->func_recovery_work);

	netif_device_detach(adapter->netdev);

	/* Quiesce all DMA via a function-level reset */
	be_cmd_reset_function(adapter);

	pci_disable_device(pdev);
}
4268
/* EEH (PCI error recovery) callback: a fatal PCI channel error was detected.
 * Quiesce the driver and tell the EEH core whether a slot reset can help.
 *
 * Returns PCI_ERS_RESULT_DISCONNECT on permanent failure, otherwise
 * PCI_ERS_RESULT_NEED_RESET to request be_eeh_reset().
 */
static pci_ers_result_t be_eeh_err_detected(struct pci_dev *pdev,
				pci_channel_state_t state)
{
	struct be_adapter *adapter = pci_get_drvdata(pdev);
	struct net_device *netdev =  adapter->netdev;

	dev_err(&adapter->pdev->dev, "EEH error detected\n");

	/* Flag the error first so other paths stop issuing FW cmds */
	adapter->eeh_error = true;

	cancel_delayed_work_sync(&adapter->func_recovery_work);

	rtnl_lock();
	netif_device_detach(netdev);
	rtnl_unlock();

	if (netif_running(netdev)) {
		/* be_close() expects RTNL held */
		rtnl_lock();
		be_close(netdev);
		rtnl_unlock();
	}
	be_clear(adapter);

	if (state == pci_channel_io_perm_failure)
		return PCI_ERS_RESULT_DISCONNECT;

	pci_disable_device(pdev);

	/* The error could cause the FW to trigger a flash debug dump.
	 * Resetting the card while flash dump is in progress
	 * can cause it not to recover; wait for it to finish.
	 * Wait only for first function as it is needed only once per
	 * adapter.
	 */
	if (pdev->devfn == 0)
		ssleep(30);

	return PCI_ERS_RESULT_NEED_RESET;
}
4308
/* EEH callback: the slot has been reset; re-enable the PCI function and
 * verify the card/FW are responsive before resuming normal operation.
 *
 * Returns PCI_ERS_RESULT_RECOVERED on success so be_eeh_resume() runs,
 * or PCI_ERS_RESULT_DISCONNECT if the device cannot be revived.
 */
static pci_ers_result_t be_eeh_reset(struct pci_dev *pdev)
{
	struct be_adapter *adapter = pci_get_drvdata(pdev);
	int status;

	dev_info(&adapter->pdev->dev, "EEH reset\n");
	/* Clear the sticky error flags set in be_eeh_err_detected() */
	be_clear_all_error(adapter);

	status = pci_enable_device(pdev);
	if (status)
		return PCI_ERS_RESULT_DISCONNECT;

	pci_set_master(pdev);
	pci_set_power_state(pdev, 0);
	pci_restore_state(pdev);

	/* Check if card is ok and fw is ready */
	status = be_fw_wait_ready(adapter);
	if (status)
		return PCI_ERS_RESULT_DISCONNECT;

	/* Clear logged AER uncorrectable status so recovery can complete */
	pci_cleanup_aer_uncorrect_error_status(pdev);
	return PCI_ERS_RESULT_RECOVERED;
}
4333
/* EEH callback: final stage of PCI error recovery after a successful slot
 * reset. Rebuilds FW/driver state and brings the interface back up; on any
 * failure only logs, since EEH has no further recovery step to request.
 */
static void be_eeh_resume(struct pci_dev *pdev)
{
	int status = 0;
	struct be_adapter *adapter = pci_get_drvdata(pdev);
	struct net_device *netdev =  adapter->netdev;

	dev_info(&adapter->pdev->dev, "EEH resume\n");

	/* Capture the freshly-restored config space for future resets */
	pci_save_state(pdev);

	/* tell fw we're ready to fire cmds */
	status = be_cmd_fw_init(adapter);
	if (status)
		goto err;

	/* Function-level reset gives the device a clean slate post-error */
	status = be_cmd_reset_function(adapter);
	if (status)
		goto err;

	status = be_setup(adapter);
	if (status)
		goto err;

	if (netif_running(netdev)) {
		status = be_open(netdev);
		if (status)
			goto err;
	}

	/* Restart the recovery worker cancelled in be_eeh_err_detected() */
	schedule_delayed_work(&adapter->func_recovery_work,
			      msecs_to_jiffies(1000));
	netif_device_attach(netdev);
	return;
err:
	dev_err(&adapter->pdev->dev, "EEH resume failed\n");
}
4370
/* PCI error-recovery (EEH/AER) callbacks wired into be_driver below */
static const struct pci_error_handlers be_eeh_handlers = {
	.error_detected = be_eeh_err_detected,
	.slot_reset = be_eeh_reset,
	.resume = be_eeh_resume,
};
4376
Sathya Perla6b7c5b92009-03-11 23:32:03 -07004377static struct pci_driver be_driver = {
4378 .name = DRV_NAME,
4379 .id_table = be_dev_ids,
4380 .probe = be_probe,
4381 .remove = be_remove,
4382 .suspend = be_suspend,
Sathya Perlacf588472010-02-14 21:22:01 +00004383 .resume = be_resume,
Sathya Perla82456b02010-02-17 01:35:37 +00004384 .shutdown = be_shutdown,
Sathya Perlacf588472010-02-14 21:22:01 +00004385 .err_handler = &be_eeh_handlers
Sathya Perla6b7c5b92009-03-11 23:32:03 -07004386};
4387
4388static int __init be_init_module(void)
4389{
Joe Perches8e95a202009-12-03 07:58:21 +00004390 if (rx_frag_size != 8192 && rx_frag_size != 4096 &&
4391 rx_frag_size != 2048) {
Sathya Perla6b7c5b92009-03-11 23:32:03 -07004392 printk(KERN_WARNING DRV_NAME
4393 " : Module param rx_frag_size must be 2048/4096/8192."
4394 " Using 2048\n");
4395 rx_frag_size = 2048;
4396 }
Sathya Perla6b7c5b92009-03-11 23:32:03 -07004397
Sathya Perla6b7c5b92009-03-11 23:32:03 -07004398 return pci_register_driver(&be_driver);
4399}
4400module_init(be_init_module);
4401
/* Module exit point: unregister the PCI driver; the PCI core then calls
 * be_remove() for every bound device.
 */
static void __exit be_exit_module(void)
{
	pci_unregister_driver(&be_driver);
}
module_exit(be_exit_module);