blob: b41333184916dac17ff16f52827d24b34999b6ee [file] [log] [blame]
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001/*
Vasundhara Volamc7bb15a2013-03-06 20:05:05 +00002 * Copyright (C) 2005 - 2013 Emulex
Sathya Perla6b7c5b92009-03-11 23:32:03 -07003 * All rights reserved.
4 *
5 * This program is free software; you can redistribute it and/or
6 * modify it under the terms of the GNU General Public License version 2
7 * as published by the Free Software Foundation. The full GNU General
8 * Public License is included in this distribution in the file called COPYING.
9 *
10 * Contact Information:
Ajit Khaparded2145cd2011-03-16 08:20:46 +000011 * linux-drivers@emulex.com
Sathya Perla6b7c5b92009-03-11 23:32:03 -070012 *
Ajit Khaparded2145cd2011-03-16 08:20:46 +000013 * Emulex
14 * 3333 Susan Street
15 * Costa Mesa, CA 92626
Sathya Perla6b7c5b92009-03-11 23:32:03 -070016 */
17
Paul Gortmaker70c71602011-05-22 16:47:17 -040018#include <linux/prefetch.h>
Paul Gortmaker9d9779e2011-07-03 15:21:01 -040019#include <linux/module.h>
Sathya Perla6b7c5b92009-03-11 23:32:03 -070020#include "be.h"
Sathya Perla8788fdc2009-07-27 22:52:03 +000021#include "be_cmds.h"
Stephen Hemminger65f71b82009-03-27 00:25:24 -070022#include <asm/div64.h>
Sathya Perlad6b6d982012-09-05 01:56:48 +000023#include <linux/aer.h>
Sathya Perla6b7c5b92009-03-11 23:32:03 -070024
25MODULE_VERSION(DRV_VER);
26MODULE_DEVICE_TABLE(pci, be_dev_ids);
27MODULE_DESCRIPTION(DRV_DESC " " DRV_VER);
Sarveshwar Bandi00d3d512013-01-28 04:17:01 +000028MODULE_AUTHOR("Emulex Corporation");
Sathya Perla6b7c5b92009-03-11 23:32:03 -070029MODULE_LICENSE("GPL");
30
Sarveshwar Bandiba343c72010-03-31 02:56:12 +000031static unsigned int num_vfs;
Sarveshwar Bandiba343c72010-03-31 02:56:12 +000032module_param(num_vfs, uint, S_IRUGO);
Sarveshwar Bandiba343c72010-03-31 02:56:12 +000033MODULE_PARM_DESC(num_vfs, "Number of PCI VFs to initialize");
Sathya Perla6b7c5b92009-03-11 23:32:03 -070034
Sathya Perla11ac75e2011-12-13 00:58:50 +000035static ushort rx_frag_size = 2048;
36module_param(rx_frag_size, ushort, S_IRUGO);
37MODULE_PARM_DESC(rx_frag_size, "Size of a fragment that holds rcvd data.");
38
Sathya Perla6b7c5b92009-03-11 23:32:03 -070039static DEFINE_PCI_DEVICE_TABLE(be_dev_ids) = {
Ajit Khapardec4ca2372009-05-18 15:38:55 -070040 { PCI_DEVICE(BE_VENDOR_ID, BE_DEVICE_ID1) },
Ajit Khaparde59fd5d82009-10-29 01:11:06 -070041 { PCI_DEVICE(BE_VENDOR_ID, BE_DEVICE_ID2) },
Ajit Khapardec4ca2372009-05-18 15:38:55 -070042 { PCI_DEVICE(BE_VENDOR_ID, OC_DEVICE_ID1) },
43 { PCI_DEVICE(BE_VENDOR_ID, OC_DEVICE_ID2) },
Sathya Perlafe6d2a32010-11-21 23:25:50 +000044 { PCI_DEVICE(EMULEX_VENDOR_ID, OC_DEVICE_ID3)},
Mammatha Edhala12f4d0a2011-05-18 03:26:22 +000045 { PCI_DEVICE(EMULEX_VENDOR_ID, OC_DEVICE_ID4)},
Ajit Khapardeecedb6a2011-12-15 06:31:38 +000046 { PCI_DEVICE(EMULEX_VENDOR_ID, OC_DEVICE_ID5)},
Padmanabh Ratnakar76b73532012-10-20 06:04:40 +000047 { PCI_DEVICE(EMULEX_VENDOR_ID, OC_DEVICE_ID6)},
Sathya Perla6b7c5b92009-03-11 23:32:03 -070048 { 0 }
49};
50MODULE_DEVICE_TABLE(pci, be_dev_ids);
/* UE Status Low CSR */
/* Human-readable name for each bit of the Unrecoverable Error (UE) status
 * "low" register; entry N describes bit N.  Presumably consumed by the
 * error-detection/logging path elsewhere in this file — confirm at caller.
 * NOTE(review): trailing spaces in some entries are intentional-looking
 * (emitted verbatim); preserved as-is.
 */
static const char * const ue_status_low_desc[] = {
	"CEV",
	"CTX",
	"DBUF",
	"ERX",
	"Host",
	"MPU",
	"NDMA",
	"PTC ",
	"RDMA ",
	"RXF ",
	"RXIPS ",
	"RXULP0 ",
	"RXULP1 ",
	"RXULP2 ",
	"TIM ",
	"TPOST ",
	"TPRE ",
	"TXIPS ",
	"TXULP0 ",
	"TXULP1 ",
	"UC ",
	"WDMA ",
	"TXULP2 ",
	"HOST1 ",
	"P0_OB_LINK ",
	"P1_OB_LINK ",
	"HOST_GPIO ",
	"MBOX ",
	"AXGMAC0",
	"AXGMAC1",
	"JTAG",
	"MPU_INTPEND"
};
/* UE Status High CSR */
/* Human-readable name for each bit of the Unrecoverable Error (UE) status
 * "high" register; entry N describes bit N.  Bits beyond "NETC" have no
 * documented meaning and are reported as "Unknown".
 */
static const char * const ue_status_hi_desc[] = {
	"LPCMEMHOST",
	"MGMT_MAC",
	"PCS0ONLINE",
	"MPU_IRAM",
	"PCS1ONLINE",
	"PCTL0",
	"PCTL1",
	"PMEM",
	"RR",
	"TXPB",
	"RXPP",
	"XAUI",
	"TXP",
	"ARM",
	"IPC",
	"HOST2",
	"HOST3",
	"HOST4",
	"HOST5",
	"HOST6",
	"HOST7",
	"HOST8",
	"HOST9",
	"NETC",
	"Unknown",
	"Unknown",
	"Unknown",
	"Unknown",
	"Unknown",
	"Unknown",
	"Unknown",
	"Unknown"
};
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700121
Sathya Perla752961a2011-10-24 02:45:03 +0000122/* Is BE in a multi-channel mode */
123static inline bool be_is_mc(struct be_adapter *adapter) {
124 return (adapter->function_mode & FLEX10_MODE ||
125 adapter->function_mode & VNIC_MODE ||
126 adapter->function_mode & UMC_ENABLED);
127}
128
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700129static void be_queue_free(struct be_adapter *adapter, struct be_queue_info *q)
130{
131 struct be_dma_mem *mem = &q->dma_mem;
Sathya Perla1cfafab2012-02-23 18:50:15 +0000132 if (mem->va) {
Ivan Vecera2b7bceb2011-02-02 08:05:12 +0000133 dma_free_coherent(&adapter->pdev->dev, mem->size, mem->va,
134 mem->dma);
Sathya Perla1cfafab2012-02-23 18:50:15 +0000135 mem->va = NULL;
136 }
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700137}
138
139static int be_queue_alloc(struct be_adapter *adapter, struct be_queue_info *q,
140 u16 len, u16 entry_size)
141{
142 struct be_dma_mem *mem = &q->dma_mem;
143
144 memset(q, 0, sizeof(*q));
145 q->len = len;
146 q->entry_size = entry_size;
147 mem->size = len * entry_size;
Ivan Vecera2b7bceb2011-02-02 08:05:12 +0000148 mem->va = dma_alloc_coherent(&adapter->pdev->dev, mem->size, &mem->dma,
Joe Perches1f9061d22013-03-15 07:23:58 +0000149 GFP_KERNEL | __GFP_ZERO);
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700150 if (!mem->va)
Sathya Perla10ef9ab2012-02-09 18:05:27 +0000151 return -ENOMEM;
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700152 return 0;
153}
154
Somnath Kotur68c45a22013-03-14 02:42:07 +0000155static void be_reg_intr_set(struct be_adapter *adapter, bool enable)
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700156{
Sathya Perladb3ea782011-08-22 19:41:52 +0000157 u32 reg, enabled;
Sathya Perla5f0b8492009-07-27 22:52:56 +0000158
Sathya Perladb3ea782011-08-22 19:41:52 +0000159 pci_read_config_dword(adapter->pdev, PCICFG_MEMBAR_CTRL_INT_CTRL_OFFSET,
160 &reg);
161 enabled = reg & MEMBAR_CTRL_INT_CTRL_HOSTINTR_MASK;
162
Sathya Perla5f0b8492009-07-27 22:52:56 +0000163 if (!enabled && enable)
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700164 reg |= MEMBAR_CTRL_INT_CTRL_HOSTINTR_MASK;
Sathya Perla5f0b8492009-07-27 22:52:56 +0000165 else if (enabled && !enable)
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700166 reg &= ~MEMBAR_CTRL_INT_CTRL_HOSTINTR_MASK;
Sathya Perla5f0b8492009-07-27 22:52:56 +0000167 else
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700168 return;
Sathya Perla5f0b8492009-07-27 22:52:56 +0000169
Sathya Perladb3ea782011-08-22 19:41:52 +0000170 pci_write_config_dword(adapter->pdev,
171 PCICFG_MEMBAR_CTRL_INT_CTRL_OFFSET, reg);
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700172}
173
Somnath Kotur68c45a22013-03-14 02:42:07 +0000174static void be_intr_set(struct be_adapter *adapter, bool enable)
175{
176 int status = 0;
177
178 /* On lancer interrupts can't be controlled via this register */
179 if (lancer_chip(adapter))
180 return;
181
182 if (adapter->eeh_error)
183 return;
184
185 status = be_cmd_intr_set(adapter, enable);
186 if (status)
187 be_reg_intr_set(adapter, enable);
188}
189
Sathya Perla8788fdc2009-07-27 22:52:03 +0000190static void be_rxq_notify(struct be_adapter *adapter, u16 qid, u16 posted)
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700191{
192 u32 val = 0;
193 val |= qid & DB_RQ_RING_ID_MASK;
194 val |= posted << DB_RQ_NUM_POSTED_SHIFT;
Sathya Perlaf3eb62d2010-06-29 00:11:17 +0000195
196 wmb();
Sathya Perla8788fdc2009-07-27 22:52:03 +0000197 iowrite32(val, adapter->db + DB_RQ_OFFSET);
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700198}
199
Sathya Perla8788fdc2009-07-27 22:52:03 +0000200static void be_txq_notify(struct be_adapter *adapter, u16 qid, u16 posted)
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700201{
202 u32 val = 0;
203 val |= qid & DB_TXULP_RING_ID_MASK;
204 val |= (posted & DB_TXULP_NUM_POSTED_MASK) << DB_TXULP_NUM_POSTED_SHIFT;
Sathya Perlaf3eb62d2010-06-29 00:11:17 +0000205
206 wmb();
Sathya Perla8788fdc2009-07-27 22:52:03 +0000207 iowrite32(val, adapter->db + DB_TXULP1_OFFSET);
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700208}
209
/* Ring the event-queue doorbell for EQ @qid.
 * @arm:	re-arm the EQ to raise further interrupts
 * @clear_int:	de-assert the interrupt line
 * @num_popped:	number of EQ entries consumed (returned to hardware)
 */
static void be_eq_notify(struct be_adapter *adapter, u16 qid,
		bool arm, bool clear_int, u16 num_popped)
{
	u32 val = 0;
	val |= qid & DB_EQ_RING_ID_MASK;
	/* high-order ring-id bits live in a separate field of the doorbell */
	val |= ((qid & DB_EQ_RING_ID_EXT_MASK) <<
			DB_EQ_RING_ID_EXT_MASK_SHIFT);

	/* don't touch the device once an EEH error has been detected */
	if (adapter->eeh_error)
		return;

	if (arm)
		val |= 1 << DB_EQ_REARM_SHIFT;
	if (clear_int)
		val |= 1 << DB_EQ_CLR_SHIFT;
	val |= 1 << DB_EQ_EVNT_SHIFT;
	val |= num_popped << DB_EQ_NUM_POPPED_SHIFT;
	iowrite32(val, adapter->db + DB_EQ_OFFSET);
}
229
/* Ring the completion-queue doorbell for CQ @qid.
 * @arm:	re-arm the CQ for further event generation
 * @num_popped:	number of CQ entries consumed (returned to hardware)
 */
void be_cq_notify(struct be_adapter *adapter, u16 qid, bool arm, u16 num_popped)
{
	u32 val = 0;
	val |= qid & DB_CQ_RING_ID_MASK;
	/* high-order ring-id bits live in a separate field of the doorbell */
	val |= ((qid & DB_CQ_RING_ID_EXT_MASK) <<
			DB_CQ_RING_ID_EXT_MASK_SHIFT);

	/* don't touch the device once an EEH error has been detected */
	if (adapter->eeh_error)
		return;

	if (arm)
		val |= 1 << DB_CQ_REARM_SHIFT;
	val |= num_popped << DB_CQ_NUM_POPPED_SHIFT;
	iowrite32(val, adapter->db + DB_CQ_OFFSET);
}
245
/* Set the interface MAC address (presumably the ndo_set_mac_address hook —
 * confirm against the netdev_ops table elsewhere in this file).
 * Adds the new MAC as a pmac entry in FW before deleting the old one, so
 * the interface is never left without an active MAC.
 * Returns 0 on success or a negative errno/FW status on failure.
 */
static int be_mac_addr_set(struct net_device *netdev, void *p)
{
	struct be_adapter *adapter = netdev_priv(netdev);
	struct sockaddr *addr = p;
	int status = 0;
	u8 current_mac[ETH_ALEN];
	u32 pmac_id = adapter->pmac_id[0];
	bool active_mac = true;

	if (!is_valid_ether_addr(addr->sa_data))
		return -EADDRNOTAVAIL;

	/* For BE VF, MAC address is already activated by PF.
	 * Hence only operation left is updating netdev->devaddr.
	 * Update it if user is passing the same MAC which was used
	 * during configuring VF MAC from PF(Hypervisor).
	 */
	if (!lancer_chip(adapter) && !be_physfn(adapter)) {
		status = be_cmd_mac_addr_query(adapter, current_mac,
					       false, adapter->if_handle, 0);
		if (!status && !memcmp(current_mac, addr->sa_data, ETH_ALEN))
			goto done;
		else
			goto err;
	}

	/* no-op if the requested MAC is already programmed */
	if (!memcmp(addr->sa_data, netdev->dev_addr, ETH_ALEN))
		goto done;

	/* For Lancer check if any MAC is active.
	 * If active, get its mac id.
	 */
	if (lancer_chip(adapter) && !be_physfn(adapter))
		be_cmd_get_mac_from_list(adapter, current_mac, &active_mac,
					 &pmac_id, 0);

	/* add the new MAC first ... */
	status = be_cmd_pmac_add(adapter, (u8 *)addr->sa_data,
				 adapter->if_handle,
				 &adapter->pmac_id[0], 0);

	if (status)
		goto err;

	/* ... then retire the previously active one */
	if (active_mac)
		be_cmd_pmac_del(adapter, adapter->if_handle,
				pmac_id, 0);
done:
	memcpy(netdev->dev_addr, addr->sa_data, netdev->addr_len);
	return 0;
err:
	dev_err(&adapter->pdev->dev, "MAC %pM set Failed\n", addr->sa_data);
	return status;
}
299
Sathya Perlaca34fe32012-11-06 17:48:56 +0000300/* BE2 supports only v0 cmd */
301static void *hw_stats_from_cmd(struct be_adapter *adapter)
302{
303 if (BE2_chip(adapter)) {
304 struct be_cmd_resp_get_stats_v0 *cmd = adapter->stats_cmd.va;
305
306 return &cmd->hw_stats;
307 } else {
308 struct be_cmd_resp_get_stats_v1 *cmd = adapter->stats_cmd.va;
309
310 return &cmd->hw_stats;
311 }
312}
313
314/* BE2 supports only v0 cmd */
315static void *be_erx_stats_from_cmd(struct be_adapter *adapter)
316{
317 if (BE2_chip(adapter)) {
318 struct be_hw_stats_v0 *hw_stats = hw_stats_from_cmd(adapter);
319
320 return &hw_stats->erx;
321 } else {
322 struct be_hw_stats_v1 *hw_stats = hw_stats_from_cmd(adapter);
323
324 return &hw_stats->erx;
325 }
326}
327
/* Copy the per-port and rxf counters from a v0-layout (BE2) GET_STATS
 * response into the driver's chip-independent drv_stats structure.
 * The response is byte-swapped in place first (FW reports little-endian).
 */
static void populate_be_v0_stats(struct be_adapter *adapter)
{
	struct be_hw_stats_v0 *hw_stats = hw_stats_from_cmd(adapter);
	struct be_pmem_stats *pmem_sts = &hw_stats->pmem;
	struct be_rxf_stats_v0 *rxf_stats = &hw_stats->rxf;
	struct be_port_rxf_stats_v0 *port_stats =
					&rxf_stats->port[adapter->port_num];
	struct be_drv_stats *drvs = &adapter->drv_stats;

	be_dws_le_to_cpu(hw_stats, sizeof(*hw_stats));
	drvs->rx_pause_frames = port_stats->rx_pause_frames;
	drvs->rx_crc_errors = port_stats->rx_crc_errors;
	drvs->rx_control_frames = port_stats->rx_control_frames;
	drvs->rx_in_range_errors = port_stats->rx_in_range_errors;
	drvs->rx_frame_too_long = port_stats->rx_frame_too_long;
	drvs->rx_dropped_runt = port_stats->rx_dropped_runt;
	drvs->rx_ip_checksum_errs = port_stats->rx_ip_checksum_errs;
	drvs->rx_tcp_checksum_errs = port_stats->rx_tcp_checksum_errs;
	drvs->rx_udp_checksum_errs = port_stats->rx_udp_checksum_errs;
	drvs->rxpp_fifo_overflow_drop = port_stats->rx_fifo_overflow;
	drvs->rx_dropped_tcp_length = port_stats->rx_dropped_tcp_length;
	drvs->rx_dropped_too_small = port_stats->rx_dropped_too_small;
	drvs->rx_dropped_too_short = port_stats->rx_dropped_too_short;
	drvs->rx_out_range_errors = port_stats->rx_out_range_errors;
	drvs->rx_input_fifo_overflow_drop = port_stats->rx_input_fifo_overflow;
	drvs->rx_dropped_header_too_small =
		port_stats->rx_dropped_header_too_small;
	/* v0 reports address and vlan mismatch drops separately; fold them */
	drvs->rx_address_mismatch_drops =
					port_stats->rx_address_mismatch_drops +
					port_stats->rx_vlan_mismatch_drops;
	drvs->rx_alignment_symbol_errors =
		port_stats->rx_alignment_symbol_errors;

	drvs->tx_pauseframes = port_stats->tx_pauseframes;
	drvs->tx_controlframes = port_stats->tx_controlframes;

	/* jabber events are tracked per physical port in the rxf block */
	if (adapter->port_num)
		drvs->jabber_events = rxf_stats->port1_jabber_events;
	else
		drvs->jabber_events = rxf_stats->port0_jabber_events;
	drvs->rx_drops_no_pbuf = rxf_stats->rx_drops_no_pbuf;
	drvs->rx_drops_no_erx_descr = rxf_stats->rx_drops_no_erx_descr;
	drvs->forwarded_packets = rxf_stats->forwarded_packets;
	drvs->rx_drops_mtu = rxf_stats->rx_drops_mtu;
	drvs->rx_drops_no_tpre_descr = rxf_stats->rx_drops_no_tpre_descr;
	drvs->rx_drops_too_many_frags = rxf_stats->rx_drops_too_many_frags;
	adapter->drv_stats.eth_red_drops = pmem_sts->eth_red_drops;
}
376
/* Copy the per-port and rxf counters from a v1-layout (BE3/Skyhawk)
 * GET_STATS response into the driver's chip-independent drv_stats
 * structure.  The response is byte-swapped in place first.
 */
static void populate_be_v1_stats(struct be_adapter *adapter)
{
	struct be_hw_stats_v1 *hw_stats = hw_stats_from_cmd(adapter);
	struct be_pmem_stats *pmem_sts = &hw_stats->pmem;
	struct be_rxf_stats_v1 *rxf_stats = &hw_stats->rxf;
	struct be_port_rxf_stats_v1 *port_stats =
					&rxf_stats->port[adapter->port_num];
	struct be_drv_stats *drvs = &adapter->drv_stats;

	be_dws_le_to_cpu(hw_stats, sizeof(*hw_stats));
	/* counters only available in the v1 layout */
	drvs->pmem_fifo_overflow_drop = port_stats->pmem_fifo_overflow_drop;
	drvs->rx_priority_pause_frames = port_stats->rx_priority_pause_frames;
	drvs->rx_pause_frames = port_stats->rx_pause_frames;
	drvs->rx_crc_errors = port_stats->rx_crc_errors;
	drvs->rx_control_frames = port_stats->rx_control_frames;
	drvs->rx_in_range_errors = port_stats->rx_in_range_errors;
	drvs->rx_frame_too_long = port_stats->rx_frame_too_long;
	drvs->rx_dropped_runt = port_stats->rx_dropped_runt;
	drvs->rx_ip_checksum_errs = port_stats->rx_ip_checksum_errs;
	drvs->rx_tcp_checksum_errs = port_stats->rx_tcp_checksum_errs;
	drvs->rx_udp_checksum_errs = port_stats->rx_udp_checksum_errs;
	drvs->rx_dropped_tcp_length = port_stats->rx_dropped_tcp_length;
	drvs->rx_dropped_too_small = port_stats->rx_dropped_too_small;
	drvs->rx_dropped_too_short = port_stats->rx_dropped_too_short;
	drvs->rx_out_range_errors = port_stats->rx_out_range_errors;
	drvs->rx_dropped_header_too_small =
		port_stats->rx_dropped_header_too_small;
	drvs->rx_input_fifo_overflow_drop =
		port_stats->rx_input_fifo_overflow_drop;
	/* unlike v0, v1 reports address-mismatch drops as a single counter */
	drvs->rx_address_mismatch_drops = port_stats->rx_address_mismatch_drops;
	drvs->rx_alignment_symbol_errors =
		port_stats->rx_alignment_symbol_errors;
	drvs->rxpp_fifo_overflow_drop = port_stats->rxpp_fifo_overflow_drop;
	drvs->tx_pauseframes = port_stats->tx_pauseframes;
	drvs->tx_controlframes = port_stats->tx_controlframes;
	drvs->jabber_events = port_stats->jabber_events;
	drvs->rx_drops_no_pbuf = rxf_stats->rx_drops_no_pbuf;
	drvs->rx_drops_no_erx_descr = rxf_stats->rx_drops_no_erx_descr;
	drvs->forwarded_packets = rxf_stats->forwarded_packets;
	drvs->rx_drops_mtu = rxf_stats->rx_drops_mtu;
	drvs->rx_drops_no_tpre_descr = rxf_stats->rx_drops_no_tpre_descr;
	drvs->rx_drops_too_many_frags = rxf_stats->rx_drops_too_many_frags;
	adapter->drv_stats.eth_red_drops = pmem_sts->eth_red_drops;
}
421
/* Copy Lancer per-physical-port (pport) counters into the driver's
 * chip-independent drv_stats structure.  Lancer splits 64-bit counters
 * into _lo/_hi words; only the low words are folded in here.
 * The response is byte-swapped in place first.
 */
static void populate_lancer_stats(struct be_adapter *adapter)
{
	struct be_drv_stats *drvs = &adapter->drv_stats;
	struct lancer_pport_stats *pport_stats =
					pport_stats_from_cmd(adapter);

	be_dws_le_to_cpu(pport_stats, sizeof(*pport_stats));
	drvs->rx_pause_frames = pport_stats->rx_pause_frames_lo;
	drvs->rx_crc_errors = pport_stats->rx_crc_errors_lo;
	drvs->rx_control_frames = pport_stats->rx_control_frames_lo;
	drvs->rx_in_range_errors = pport_stats->rx_in_range_errors;
	drvs->rx_frame_too_long = pport_stats->rx_frames_too_long_lo;
	drvs->rx_dropped_runt = pport_stats->rx_dropped_runt;
	drvs->rx_ip_checksum_errs = pport_stats->rx_ip_checksum_errors;
	drvs->rx_tcp_checksum_errs = pport_stats->rx_tcp_checksum_errors;
	drvs->rx_udp_checksum_errs = pport_stats->rx_udp_checksum_errors;
	drvs->rx_dropped_tcp_length =
				pport_stats->rx_dropped_invalid_tcp_length;
	drvs->rx_dropped_too_small = pport_stats->rx_dropped_too_small;
	drvs->rx_dropped_too_short = pport_stats->rx_dropped_too_short;
	drvs->rx_out_range_errors = pport_stats->rx_out_of_range_errors;
	drvs->rx_dropped_header_too_small =
				pport_stats->rx_dropped_header_too_small;
	drvs->rx_input_fifo_overflow_drop = pport_stats->rx_fifo_overflow;
	/* fold address and vlan mismatch drops into one driver counter */
	drvs->rx_address_mismatch_drops =
					pport_stats->rx_address_mismatch_drops +
					pport_stats->rx_vlan_mismatch_drops;
	drvs->rx_alignment_symbol_errors = pport_stats->rx_symbol_errors_lo;
	/* NOTE(review): rx_fifo_overflow feeds both fifo-overflow counters —
	 * looks intentional (Lancer has a single such counter); confirm.
	 */
	drvs->rxpp_fifo_overflow_drop = pport_stats->rx_fifo_overflow;
	drvs->tx_pauseframes = pport_stats->tx_pause_frames_lo;
	drvs->tx_controlframes = pport_stats->tx_control_frames_lo;
	drvs->jabber_events = pport_stats->rx_jabbers;
	drvs->forwarded_packets = pport_stats->num_forwards_lo;
	drvs->rx_drops_mtu = pport_stats->rx_drops_mtu_lo;
	drvs->rx_drops_too_many_frags =
				pport_stats->rx_drops_too_many_frags_lo;
}
Ajit Khaparde89a88ab2011-05-16 07:36:18 +0000460
Sathya Perla09c1c682011-08-22 19:41:53 +0000461static void accumulate_16bit_val(u32 *acc, u16 val)
462{
463#define lo(x) (x & 0xFFFF)
464#define hi(x) (x & 0xFFFF0000)
465 bool wrapped = val < lo(*acc);
466 u32 newacc = hi(*acc) + val;
467
468 if (wrapped)
469 newacc += 65536;
470 ACCESS_ONCE(*acc) = newacc;
471}
472
/* Parse the raw GET_STATS FW response into drv_stats, dispatching on chip
 * family (Lancer pport stats vs. BE2 v0 vs. BE3/Skyhawk v1 layouts), and
 * accumulate the per-RX-queue no-fragments drop counters.
 */
void be_parse_stats(struct be_adapter *adapter)
{
	struct be_erx_stats_v1 *erx = be_erx_stats_from_cmd(adapter);
	struct be_rx_obj *rxo;
	int i;

	if (lancer_chip(adapter)) {
		populate_lancer_stats(adapter);
	} else {
		if (BE2_chip(adapter))
			populate_be_v0_stats(adapter);
		else
			/* for BE3 and Skyhawk */
			populate_be_v1_stats(adapter);

		/* as erx_v1 is longer than v0, ok to use v1 for v0 access */
		for_all_rx_queues(adapter, rxo, i) {
			/* below erx HW counter can actually wrap around after
			 * 65535. Driver accumulates a 32-bit value
			 */
			accumulate_16bit_val(&rx_stats(rxo)->rx_drops_no_frags,
					     (u16)erx->rx_drops_no_fragments \
					     [rxo->q.id]);
		}
	}
}
499
/* ndo_get_stats64 handler: aggregate per-queue packet/byte counters (read
 * consistently via the u64_stats seqcount retry loops) and map the
 * FW-derived drv_stats error counters onto rtnl_link_stats64 fields.
 * Returns @stats for the caller's convenience.
 */
static struct rtnl_link_stats64 *be_get_stats64(struct net_device *netdev,
					struct rtnl_link_stats64 *stats)
{
	struct be_adapter *adapter = netdev_priv(netdev);
	struct be_drv_stats *drvs = &adapter->drv_stats;
	struct be_rx_obj *rxo;
	struct be_tx_obj *txo;
	u64 pkts, bytes;
	unsigned int start;
	int i;

	for_all_rx_queues(adapter, rxo, i) {
		const struct be_rx_stats *rx_stats = rx_stats(rxo);
		/* retry until a consistent 64-bit snapshot is read */
		do {
			start = u64_stats_fetch_begin_bh(&rx_stats->sync);
			pkts = rx_stats(rxo)->rx_pkts;
			bytes = rx_stats(rxo)->rx_bytes;
		} while (u64_stats_fetch_retry_bh(&rx_stats->sync, start));
		stats->rx_packets += pkts;
		stats->rx_bytes += bytes;
		stats->multicast += rx_stats(rxo)->rx_mcast_pkts;
		stats->rx_dropped += rx_stats(rxo)->rx_drops_no_skbs +
					rx_stats(rxo)->rx_drops_no_frags;
	}

	for_all_tx_queues(adapter, txo, i) {
		const struct be_tx_stats *tx_stats = tx_stats(txo);
		/* retry until a consistent 64-bit snapshot is read */
		do {
			start = u64_stats_fetch_begin_bh(&tx_stats->sync);
			pkts = tx_stats(txo)->tx_pkts;
			bytes = tx_stats(txo)->tx_bytes;
		} while (u64_stats_fetch_retry_bh(&tx_stats->sync, start));
		stats->tx_packets += pkts;
		stats->tx_bytes += bytes;
	}

	/* bad pkts received */
	stats->rx_errors = drvs->rx_crc_errors +
		drvs->rx_alignment_symbol_errors +
		drvs->rx_in_range_errors +
		drvs->rx_out_range_errors +
		drvs->rx_frame_too_long +
		drvs->rx_dropped_too_small +
		drvs->rx_dropped_too_short +
		drvs->rx_dropped_header_too_small +
		drvs->rx_dropped_tcp_length +
		drvs->rx_dropped_runt;

	/* detailed rx errors */
	stats->rx_length_errors = drvs->rx_in_range_errors +
		drvs->rx_out_range_errors +
		drvs->rx_frame_too_long;

	stats->rx_crc_errors = drvs->rx_crc_errors;

	/* frame alignment errors */
	stats->rx_frame_errors = drvs->rx_alignment_symbol_errors;

	/* receiver fifo overrun */
	/* drops_no_pbuf is no per i/f, it's per BE card */
	stats->rx_fifo_errors = drvs->rxpp_fifo_overflow_drop +
				drvs->rx_input_fifo_overflow_drop +
				drvs->rx_drops_no_pbuf;
	return stats;
}
565
/* Propagate a FW link-status event to the net stack via the netdev
 * carrier state.  On the very first event the carrier is forced off once
 * so subsequent on/off transitions start from a known state.
 */
void be_link_status_update(struct be_adapter *adapter, u8 link_status)
{
	struct net_device *netdev = adapter->netdev;

	/* one-time init: start from carrier-off before honoring the event */
	if (!(adapter->flags & BE_FLAGS_LINK_STATUS_INIT)) {
		netif_carrier_off(netdev);
		adapter->flags |= BE_FLAGS_LINK_STATUS_INIT;
	}

	if ((link_status & LINK_STATUS_MASK) == LINK_UP)
		netif_carrier_on(netdev);
	else
		netif_carrier_off(netdev);
}
580
/* Update TX software counters for one transmitted packet.
 * @wrb_cnt:	number of WRBs consumed by the packet
 * @copied:	number of payload bytes handed to hardware
 * @gso_segs:	segment count for GSO packets (0 for non-GSO; counted as 1)
 * @stopped:	true if the TX queue was stopped while posting this packet
 * Writers are serialized per-queue via the u64_stats update section.
 */
static void be_tx_stats_update(struct be_tx_obj *txo,
			u32 wrb_cnt, u32 copied, u32 gso_segs, bool stopped)
{
	struct be_tx_stats *stats = tx_stats(txo);

	u64_stats_update_begin(&stats->sync);
	stats->tx_reqs++;
	stats->tx_wrbs += wrb_cnt;
	stats->tx_bytes += copied;
	stats->tx_pkts += (gso_segs ? gso_segs : 1);
	if (stopped)
		stats->tx_stops++;
	u64_stats_update_end(&stats->sync);
}
595
596/* Determine number of WRB entries needed to xmit data in an skb */
Sathya Perlafe6d2a32010-11-21 23:25:50 +0000597static u32 wrb_cnt_for_skb(struct be_adapter *adapter, struct sk_buff *skb,
598 bool *dummy)
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700599{
David S. Millerebc8d2a2009-06-09 01:01:31 -0700600 int cnt = (skb->len > skb->data_len);
601
602 cnt += skb_shinfo(skb)->nr_frags;
603
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700604 /* to account for hdr wrb */
605 cnt++;
Sathya Perlafe6d2a32010-11-21 23:25:50 +0000606 if (lancer_chip(adapter) || !(cnt & 1)) {
607 *dummy = false;
608 } else {
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700609 /* add a dummy to make it an even num */
610 cnt++;
611 *dummy = true;
Sathya Perlafe6d2a32010-11-21 23:25:50 +0000612 }
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700613 BUG_ON(cnt > BE_MAX_TX_FRAG_COUNT);
614 return cnt;
615}
616
/* Fill one TX WRB with the DMA address and byte length of a fragment */
static inline void wrb_fill(struct be_eth_wrb *wrb, u64 addr, int len)
{
	wrb->frag_pa_hi = upper_32_bits(addr);
	wrb->frag_pa_lo = addr & 0xFFFFFFFF;
	wrb->frag_len = len & ETH_WRB_FRAG_LEN_MASK;
	wrb->rsvd0 = 0;
}
624
Ajit Khaparde1ded1322011-12-09 13:53:17 +0000625static inline u16 be_get_tx_vlan_tag(struct be_adapter *adapter,
626 struct sk_buff *skb)
627{
628 u8 vlan_prio;
629 u16 vlan_tag;
630
631 vlan_tag = vlan_tx_tag_get(skb);
632 vlan_prio = (vlan_tag & VLAN_PRIO_MASK) >> VLAN_PRIO_SHIFT;
633 /* If vlan priority provided by OS is NOT in available bmap */
634 if (!(adapter->vlan_prio_bmap & (1 << vlan_prio)))
635 vlan_tag = (vlan_tag & ~VLAN_PRIO_MASK) |
636 adapter->recommended_prio;
637
638 return vlan_tag;
639}
640
Somnath Kotur93040ae2012-06-26 22:32:10 +0000641static int be_vlan_tag_chk(struct be_adapter *adapter, struct sk_buff *skb)
642{
643 return vlan_tx_tag_present(skb) || adapter->pvid;
644}
645
/* Build the per-packet TX header WRB: LSO/checksum-offload flags, VLAN
 * insertion, total WRB count and payload length are encoded into @hdr
 * via the AMAP bit-field accessors.
 */
static void wrb_fill_hdr(struct be_adapter *adapter, struct be_eth_hdr_wrb *hdr,
		struct sk_buff *skb, u32 wrb_cnt, u32 len)
{
	u16 vlan_tag;

	memset(hdr, 0, sizeof(*hdr));

	AMAP_SET_BITS(struct amap_eth_hdr_wrb, crc, hdr, 1);

	if (skb_is_gso(skb)) {
		AMAP_SET_BITS(struct amap_eth_hdr_wrb, lso, hdr, 1);
		AMAP_SET_BITS(struct amap_eth_hdr_wrb, lso_mss,
			hdr, skb_shinfo(skb)->gso_size);
		/* Lancer does not take the IPv6 LSO flag — TODO confirm */
		if (skb_is_gso_v6(skb) && !lancer_chip(adapter))
			AMAP_SET_BITS(struct amap_eth_hdr_wrb, lso6, hdr, 1);
	} else if (skb->ip_summed == CHECKSUM_PARTIAL) {
		/* request L4 checksum offload for TCP or UDP packets */
		if (is_tcp_pkt(skb))
			AMAP_SET_BITS(struct amap_eth_hdr_wrb, tcpcs, hdr, 1);
		else if (is_udp_pkt(skb))
			AMAP_SET_BITS(struct amap_eth_hdr_wrb, udpcs, hdr, 1);
	}

	if (vlan_tx_tag_present(skb)) {
		AMAP_SET_BITS(struct amap_eth_hdr_wrb, vlan, hdr, 1);
		vlan_tag = be_get_tx_vlan_tag(adapter, skb);
		AMAP_SET_BITS(struct amap_eth_hdr_wrb, vlan_tag, hdr, vlan_tag);
	}

	AMAP_SET_BITS(struct amap_eth_hdr_wrb, event, hdr, 1);
	AMAP_SET_BITS(struct amap_eth_hdr_wrb, complete, hdr, 1);
	AMAP_SET_BITS(struct amap_eth_hdr_wrb, num_wrb, hdr, wrb_cnt);
	AMAP_SET_BITS(struct amap_eth_hdr_wrb, len, hdr, len);
}
679
Ivan Vecera2b7bceb2011-02-02 08:05:12 +0000680static void unmap_tx_frag(struct device *dev, struct be_eth_wrb *wrb,
Sathya Perla7101e112010-03-22 20:41:12 +0000681 bool unmap_single)
682{
683 dma_addr_t dma;
684
685 be_dws_le_to_cpu(wrb, sizeof(*wrb));
686
687 dma = (u64)wrb->frag_pa_hi << 32 | (u64)wrb->frag_pa_lo;
FUJITA Tomonorib681ee72010-04-04 21:40:18 +0000688 if (wrb->frag_len) {
Sathya Perla7101e112010-03-22 20:41:12 +0000689 if (unmap_single)
Ivan Vecera2b7bceb2011-02-02 08:05:12 +0000690 dma_unmap_single(dev, dma, wrb->frag_len,
691 DMA_TO_DEVICE);
Sathya Perla7101e112010-03-22 20:41:12 +0000692 else
Ivan Vecera2b7bceb2011-02-02 08:05:12 +0000693 dma_unmap_page(dev, dma, wrb->frag_len, DMA_TO_DEVICE);
Sathya Perla7101e112010-03-22 20:41:12 +0000694 }
695}
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700696
/* DMA-map the skb (linear head + page frags) and fill one TX WRB per
 * mapped segment, preceded by a header WRB (and followed by a dummy
 * WRB when the caller requests padding of the descriptor chain).
 *
 * Returns the number of payload bytes queued, or 0 on a DMA mapping
 * failure (in which case everything already mapped is unwound and the
 * queue head is restored).
 */
static int make_tx_wrbs(struct be_adapter *adapter, struct be_queue_info *txq,
		struct sk_buff *skb, u32 wrb_cnt, bool dummy_wrb)
{
	dma_addr_t busaddr;
	int i, copied = 0;
	struct device *dev = &adapter->pdev->dev;
	struct sk_buff *first_skb = skb;
	struct be_eth_wrb *wrb;
	struct be_eth_hdr_wrb *hdr;
	bool map_single = false;
	u16 map_head;

	hdr = queue_head_node(txq);
	queue_head_inc(txq);
	/* Remember where the first payload WRB goes, for error unwind */
	map_head = txq->head;

	if (skb->len > skb->data_len) {
		/* Linear (head) part of the skb */
		int len = skb_headlen(skb);
		busaddr = dma_map_single(dev, skb->data, len, DMA_TO_DEVICE);
		if (dma_mapping_error(dev, busaddr))
			goto dma_err;
		map_single = true;
		wrb = queue_head_node(txq);
		wrb_fill(wrb, busaddr, len);
		be_dws_cpu_to_le(wrb, sizeof(*wrb));
		queue_head_inc(txq);
		copied += len;
	}

	/* One WRB per page fragment */
	for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
		const struct skb_frag_struct *frag =
			&skb_shinfo(skb)->frags[i];
		busaddr = skb_frag_dma_map(dev, frag, 0,
					skb_frag_size(frag), DMA_TO_DEVICE);
		if (dma_mapping_error(dev, busaddr))
			goto dma_err;
		wrb = queue_head_node(txq);
		wrb_fill(wrb, busaddr, skb_frag_size(frag));
		be_dws_cpu_to_le(wrb, sizeof(*wrb));
		queue_head_inc(txq);
		copied += skb_frag_size(frag);
	}

	if (dummy_wrb) {
		/* Pad the chain with a zero-length WRB */
		wrb = queue_head_node(txq);
		wrb_fill(wrb, 0, 0);
		be_dws_cpu_to_le(wrb, sizeof(*wrb));
		queue_head_inc(txq);
	}

	wrb_fill_hdr(adapter, hdr, first_skb, wrb_cnt, copied);
	be_dws_cpu_to_le(hdr, sizeof(*hdr));

	return copied;
dma_err:
	/* Unwind: walk the WRBs filled so far (starting at map_head) and
	 * unmap each; only the first segment, if any, was a single map.
	 */
	txq->head = map_head;
	while (copied) {
		wrb = queue_head_node(txq);
		unmap_tx_frag(dev, wrb, map_single);
		map_single = false;
		copied -= wrb->frag_len;
		queue_head_inc(txq);
	}
	return 0;
}
762
Somnath Kotur93040ae2012-06-26 22:32:10 +0000763static struct sk_buff *be_insert_vlan_in_pkt(struct be_adapter *adapter,
764 struct sk_buff *skb)
765{
766 u16 vlan_tag = 0;
767
768 skb = skb_share_check(skb, GFP_ATOMIC);
769 if (unlikely(!skb))
770 return skb;
771
772 if (vlan_tx_tag_present(skb)) {
773 vlan_tag = be_get_tx_vlan_tag(adapter, skb);
774 __vlan_put_tag(skb, vlan_tag);
775 skb->vlan_tci = 0;
776 }
777
778 return skb;
779}
780
/* ndo_start_xmit: queue one skb on the TX queue chosen by the stack.
 * Applies two HW workarounds (trimming padding on short IPv4 VLAN pkts
 * and manual VLAN insertion for non-csum-offloaded pkts) before
 * building the WRB chain and ringing the doorbell.
 */
static netdev_tx_t be_xmit(struct sk_buff *skb,
			struct net_device *netdev)
{
	struct be_adapter *adapter = netdev_priv(netdev);
	struct be_tx_obj *txo = &adapter->tx_obj[skb_get_queue_mapping(skb)];
	struct be_queue_info *txq = &txo->q;
	struct iphdr *ip = NULL;
	u32 wrb_cnt = 0, copied = 0;
	u32 start = txq->head, eth_hdr_len;
	bool dummy_wrb, stopped = false;

	eth_hdr_len = ntohs(skb->protocol) == ETH_P_8021Q ?
			VLAN_ETH_HLEN : ETH_HLEN;

	/* HW has a bug which considers padding bytes as legal
	 * and modifies the IPv4 hdr's 'tot_len' field
	 */
	if (skb->len <= 60 && be_vlan_tag_chk(adapter, skb) &&
			is_ipv4_pkt(skb)) {
		ip = (struct iphdr *)ip_hdr(skb);
		/* drop the pad bytes: keep only hdrs + IP payload */
		pskb_trim(skb, eth_hdr_len + ntohs(ip->tot_len));
	}

	/* HW has a bug wherein it will calculate CSUM for VLAN
	 * pkts even though it is disabled.
	 * Manually insert VLAN in pkt.
	 */
	if (skb->ip_summed != CHECKSUM_PARTIAL &&
			be_vlan_tag_chk(adapter, skb)) {
		skb = be_insert_vlan_in_pkt(adapter, skb);
		if (unlikely(!skb))
			goto tx_drop;
	}

	wrb_cnt = wrb_cnt_for_skb(adapter, skb, &dummy_wrb);

	copied = make_tx_wrbs(adapter, txq, skb, wrb_cnt, dummy_wrb);
	if (copied) {
		int gso_segs = skb_shinfo(skb)->gso_segs;

		/* record the sent skb in the sent_skb table */
		BUG_ON(txo->sent_skb_list[start]);
		txo->sent_skb_list[start] = skb;

		/* Ensure txq has space for the next skb; Else stop the queue
		 * *BEFORE* ringing the tx doorbell, so that we serialze the
		 * tx compls of the current transmit which'll wake up the queue
		 */
		atomic_add(wrb_cnt, &txq->used);
		if ((BE_MAX_TX_FRAG_COUNT + atomic_read(&txq->used)) >=
								txq->len) {
			netif_stop_subqueue(netdev, skb_get_queue_mapping(skb));
			stopped = true;
		}

		be_txq_notify(adapter, txq->id, wrb_cnt);

		be_tx_stats_update(txo, wrb_cnt, copied, gso_segs, stopped);
	} else {
		/* WRB build (DMA mapping) failed: restore head and drop */
		txq->head = start;
		dev_kfree_skb_any(skb);
	}
tx_drop:
	return NETDEV_TX_OK;
}
846
847static int be_change_mtu(struct net_device *netdev, int new_mtu)
848{
849 struct be_adapter *adapter = netdev_priv(netdev);
850 if (new_mtu < BE_MIN_MTU ||
Ajit Khaparde34a89b82010-02-09 01:32:43 +0000851 new_mtu > (BE_MAX_JUMBO_FRAME_SIZE -
852 (ETH_HLEN + ETH_FCS_LEN))) {
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700853 dev_info(&adapter->pdev->dev,
854 "MTU must be between %d and %d bytes\n",
Ajit Khaparde34a89b82010-02-09 01:32:43 +0000855 BE_MIN_MTU,
856 (BE_MAX_JUMBO_FRAME_SIZE - (ETH_HLEN + ETH_FCS_LEN)));
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700857 return -EINVAL;
858 }
859 dev_info(&adapter->pdev->dev, "MTU changed from %d to %d bytes\n",
860 netdev->mtu, new_mtu);
861 netdev->mtu = new_mtu;
862 return 0;
863}
864
865/*
Ajit Khaparde82903e42010-02-09 01:34:57 +0000866 * A max of 64 (BE_NUM_VLANS_SUPPORTED) vlans can be configured in BE.
867 * If the user configures more, place BE in vlan promiscuous mode.
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700868 */
Sathya Perla10329df2012-06-05 19:37:18 +0000869static int be_vid_config(struct be_adapter *adapter)
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700870{
Sathya Perla10329df2012-06-05 19:37:18 +0000871 u16 vids[BE_NUM_VLANS_SUPPORTED];
872 u16 num = 0, i;
Ajit Khaparde82903e42010-02-09 01:34:57 +0000873 int status = 0;
Ajit Khaparde1da87b72010-07-23 01:51:22 +0000874
Sathya Perlac0e64ef2011-08-02 19:57:43 +0000875 /* No need to further configure vids if in promiscuous mode */
876 if (adapter->promiscuous)
877 return 0;
878
Padmanabh Ratnakar0fc16eb2012-04-25 01:46:06 +0000879 if (adapter->vlans_added > adapter->max_vlans)
880 goto set_vlan_promisc;
881
882 /* Construct VLAN Table to give to HW */
883 for (i = 0; i < VLAN_N_VID; i++)
884 if (adapter->vlan_tag[i])
Sathya Perla10329df2012-06-05 19:37:18 +0000885 vids[num++] = cpu_to_le16(i);
Padmanabh Ratnakar0fc16eb2012-04-25 01:46:06 +0000886
887 status = be_cmd_vlan_config(adapter, adapter->if_handle,
Sathya Perla10329df2012-06-05 19:37:18 +0000888 vids, num, 1, 0);
Padmanabh Ratnakar0fc16eb2012-04-25 01:46:06 +0000889
890 /* Set to VLAN promisc mode as setting VLAN filter failed */
891 if (status) {
892 dev_info(&adapter->pdev->dev, "Exhausted VLAN HW filters.\n");
893 dev_info(&adapter->pdev->dev, "Disabling HW VLAN filtering.\n");
894 goto set_vlan_promisc;
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700895 }
Ajit Khaparde1da87b72010-07-23 01:51:22 +0000896
Sathya Perlab31c50a2009-09-17 10:30:13 -0700897 return status;
Padmanabh Ratnakar0fc16eb2012-04-25 01:46:06 +0000898
899set_vlan_promisc:
900 status = be_cmd_vlan_config(adapter, adapter->if_handle,
901 NULL, 0, 1, 1);
902 return status;
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700903}
904
Patrick McHardy80d5c362013-04-19 02:04:28 +0000905static int be_vlan_add_vid(struct net_device *netdev, __be16 proto, u16 vid)
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700906{
907 struct be_adapter *adapter = netdev_priv(netdev);
Ajit Khaparde80817cb2011-12-30 12:15:12 +0000908 int status = 0;
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700909
Padmanabh Ratnakara85e9982012-10-20 06:02:40 +0000910 if (!lancer_chip(adapter) && !be_physfn(adapter)) {
Ajit Khaparde80817cb2011-12-30 12:15:12 +0000911 status = -EINVAL;
912 goto ret;
913 }
Sarveshwar Bandiba343c72010-03-31 02:56:12 +0000914
Padmanabh Ratnakara85e9982012-10-20 06:02:40 +0000915 /* Packets with VID 0 are always received by Lancer by default */
916 if (lancer_chip(adapter) && vid == 0)
917 goto ret;
918
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700919 adapter->vlan_tag[vid] = 1;
Ajit Khaparde82903e42010-02-09 01:34:57 +0000920 if (adapter->vlans_added <= (adapter->max_vlans + 1))
Sathya Perla10329df2012-06-05 19:37:18 +0000921 status = be_vid_config(adapter);
Jiri Pirko8e586132011-12-08 19:52:37 -0500922
Ajit Khaparde80817cb2011-12-30 12:15:12 +0000923 if (!status)
924 adapter->vlans_added++;
925 else
926 adapter->vlan_tag[vid] = 0;
927ret:
928 return status;
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700929}
930
Patrick McHardy80d5c362013-04-19 02:04:28 +0000931static int be_vlan_rem_vid(struct net_device *netdev, __be16 proto, u16 vid)
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700932{
933 struct be_adapter *adapter = netdev_priv(netdev);
Ajit Khaparde80817cb2011-12-30 12:15:12 +0000934 int status = 0;
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700935
Padmanabh Ratnakara85e9982012-10-20 06:02:40 +0000936 if (!lancer_chip(adapter) && !be_physfn(adapter)) {
Ajit Khaparde80817cb2011-12-30 12:15:12 +0000937 status = -EINVAL;
938 goto ret;
939 }
Sarveshwar Bandiba343c72010-03-31 02:56:12 +0000940
Padmanabh Ratnakara85e9982012-10-20 06:02:40 +0000941 /* Packets with VID 0 are always received by Lancer by default */
942 if (lancer_chip(adapter) && vid == 0)
943 goto ret;
944
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700945 adapter->vlan_tag[vid] = 0;
Ajit Khaparde82903e42010-02-09 01:34:57 +0000946 if (adapter->vlans_added <= adapter->max_vlans)
Sathya Perla10329df2012-06-05 19:37:18 +0000947 status = be_vid_config(adapter);
Jiri Pirko8e586132011-12-08 19:52:37 -0500948
Ajit Khaparde80817cb2011-12-30 12:15:12 +0000949 if (!status)
950 adapter->vlans_added--;
951 else
952 adapter->vlan_tag[vid] = 1;
953ret:
954 return status;
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700955}
956
/* ndo_set_rx_mode: sync the HW RX filters (promiscuous, multicast and
 * secondary unicast MAC lists) with the netdev's current state.
 */
static void be_set_rx_mode(struct net_device *netdev)
{
	struct be_adapter *adapter = netdev_priv(netdev);
	int status;

	if (netdev->flags & IFF_PROMISC) {
		be_cmd_rx_filter(adapter, IFF_PROMISC, ON);
		adapter->promiscuous = true;
		goto done;
	}

	/* BE was previously in promiscuous mode; disable it */
	if (adapter->promiscuous) {
		adapter->promiscuous = false;
		be_cmd_rx_filter(adapter, IFF_PROMISC, OFF);

		/* Re-program vlan filters that promisc mode made moot */
		if (adapter->vlans_added)
			be_vid_config(adapter);
	}

	/* Enable multicast promisc if num configured exceeds what we support */
	if (netdev->flags & IFF_ALLMULTI ||
		netdev_mc_count(netdev) > adapter->max_mcast_mac) {
		be_cmd_rx_filter(adapter, IFF_ALLMULTI, ON);
		goto done;
	}

	if (netdev_uc_count(netdev) != adapter->uc_macs) {
		struct netdev_hw_addr *ha;
		int i = 1; /* First slot is claimed by the Primary MAC */

		/* Delete all previously-added secondary unicast MACs... */
		for (; adapter->uc_macs > 0; adapter->uc_macs--, i++) {
			be_cmd_pmac_del(adapter, adapter->if_handle,
					adapter->pmac_id[i], 0);
		}

		/* Too many unicast MACs for HW filtering: go promiscuous */
		if (netdev_uc_count(netdev) > adapter->max_pmac_cnt) {
			be_cmd_rx_filter(adapter, IFF_PROMISC, ON);
			adapter->promiscuous = true;
			goto done;
		}

		/* ...then re-add the netdev's current list */
		netdev_for_each_uc_addr(ha, adapter->netdev) {
			adapter->uc_macs++; /* First slot is for Primary MAC */
			be_cmd_pmac_add(adapter, (u8 *)ha->addr,
					adapter->if_handle,
					&adapter->pmac_id[adapter->uc_macs], 0);
		}
	}

	status = be_cmd_rx_filter(adapter, IFF_MULTICAST, ON);

	/* Set to MCAST promisc mode if setting MULTICAST address fails */
	if (status) {
		dev_info(&adapter->pdev->dev, "Exhausted multicast HW filters.\n");
		dev_info(&adapter->pdev->dev, "Disabling HW multicast filtering.\n");
		be_cmd_rx_filter(adapter, IFF_ALLMULTI, ON);
	}
done:
	return;
}
1018
/* ndo_set_vf_mac: program a new MAC address for VF @vf.
 * On Lancer the MAC is set via the mac-list cmd (after deleting the
 * currently-active pmac, if any); on BEx via pmac del + add.
 */
static int be_set_vf_mac(struct net_device *netdev, int vf, u8 *mac)
{
	struct be_adapter *adapter = netdev_priv(netdev);
	struct be_vf_cfg *vf_cfg = &adapter->vf_cfg[vf];
	int status;
	bool active_mac = false;
	u32 pmac_id;
	u8 old_mac[ETH_ALEN];

	if (!sriov_enabled(adapter))
		return -EPERM;

	if (!is_valid_ether_addr(mac) || vf >= adapter->num_vfs)
		return -EINVAL;

	if (lancer_chip(adapter)) {
		status = be_cmd_get_mac_from_list(adapter, old_mac, &active_mac,
						&pmac_id, vf + 1);
		if (!status && active_mac)
			be_cmd_pmac_del(adapter, vf_cfg->if_handle,
					pmac_id, vf + 1);

		status = be_cmd_set_mac_list(adapter, mac, 1, vf + 1);
	} else {
		/* NOTE(review): the del status is overwritten by the add
		 * below; deleting the old pmac appears to be best-effort —
		 * confirm a del failure here is indeed harmless.
		 */
		status = be_cmd_pmac_del(adapter, vf_cfg->if_handle,
				vf_cfg->pmac_id, vf + 1);

		status = be_cmd_pmac_add(adapter, mac, vf_cfg->if_handle,
				&vf_cfg->pmac_id, vf + 1);
	}

	if (status)
		dev_err(&adapter->pdev->dev, "MAC %pM set on VF %d Failed\n",
				mac, vf);
	else
		memcpy(vf_cfg->mac_addr, mac, ETH_ALEN);

	return status;
}
1058
Ajit Khaparde64600ea2010-07-23 01:50:34 +00001059static int be_get_vf_config(struct net_device *netdev, int vf,
1060 struct ifla_vf_info *vi)
1061{
1062 struct be_adapter *adapter = netdev_priv(netdev);
Sathya Perla11ac75e2011-12-13 00:58:50 +00001063 struct be_vf_cfg *vf_cfg = &adapter->vf_cfg[vf];
Ajit Khaparde64600ea2010-07-23 01:50:34 +00001064
Sathya Perla11ac75e2011-12-13 00:58:50 +00001065 if (!sriov_enabled(adapter))
Ajit Khaparde64600ea2010-07-23 01:50:34 +00001066 return -EPERM;
1067
Sathya Perla11ac75e2011-12-13 00:58:50 +00001068 if (vf >= adapter->num_vfs)
Ajit Khaparde64600ea2010-07-23 01:50:34 +00001069 return -EINVAL;
1070
1071 vi->vf = vf;
Sathya Perla11ac75e2011-12-13 00:58:50 +00001072 vi->tx_rate = vf_cfg->tx_rate;
1073 vi->vlan = vf_cfg->vlan_tag;
Ajit Khaparde64600ea2010-07-23 01:50:34 +00001074 vi->qos = 0;
Sathya Perla11ac75e2011-12-13 00:58:50 +00001075 memcpy(&vi->mac, vf_cfg->mac_addr, ETH_ALEN);
Ajit Khaparde64600ea2010-07-23 01:50:34 +00001076
1077 return 0;
1078}
1079
Ajit Khaparde1da87b72010-07-23 01:51:22 +00001080static int be_set_vf_vlan(struct net_device *netdev,
1081 int vf, u16 vlan, u8 qos)
1082{
1083 struct be_adapter *adapter = netdev_priv(netdev);
1084 int status = 0;
1085
Sathya Perla11ac75e2011-12-13 00:58:50 +00001086 if (!sriov_enabled(adapter))
Ajit Khaparde1da87b72010-07-23 01:51:22 +00001087 return -EPERM;
1088
Sathya Perla11ac75e2011-12-13 00:58:50 +00001089 if (vf >= adapter->num_vfs || vlan > 4095)
Ajit Khaparde1da87b72010-07-23 01:51:22 +00001090 return -EINVAL;
1091
1092 if (vlan) {
Ajit Khapardef1f3ee12012-03-18 06:23:41 +00001093 if (adapter->vf_cfg[vf].vlan_tag != vlan) {
1094 /* If this is new value, program it. Else skip. */
1095 adapter->vf_cfg[vf].vlan_tag = vlan;
1096
1097 status = be_cmd_set_hsw_config(adapter, vlan,
1098 vf + 1, adapter->vf_cfg[vf].if_handle);
1099 }
Ajit Khaparde1da87b72010-07-23 01:51:22 +00001100 } else {
Ajit Khapardef1f3ee12012-03-18 06:23:41 +00001101 /* Reset Transparent Vlan Tagging. */
Sathya Perla11ac75e2011-12-13 00:58:50 +00001102 adapter->vf_cfg[vf].vlan_tag = 0;
Ajit Khapardef1f3ee12012-03-18 06:23:41 +00001103 vlan = adapter->vf_cfg[vf].def_vid;
1104 status = be_cmd_set_hsw_config(adapter, vlan, vf + 1,
1105 adapter->vf_cfg[vf].if_handle);
Ajit Khaparde1da87b72010-07-23 01:51:22 +00001106 }
1107
Ajit Khaparde1da87b72010-07-23 01:51:22 +00001108
1109 if (status)
1110 dev_info(&adapter->pdev->dev,
1111 "VLAN %d config on VF %d failed\n", vlan, vf);
1112 return status;
1113}
1114
Ajit Khapardee1d18732010-07-23 01:52:13 +00001115static int be_set_vf_tx_rate(struct net_device *netdev,
1116 int vf, int rate)
1117{
1118 struct be_adapter *adapter = netdev_priv(netdev);
1119 int status = 0;
1120
Sathya Perla11ac75e2011-12-13 00:58:50 +00001121 if (!sriov_enabled(adapter))
Ajit Khapardee1d18732010-07-23 01:52:13 +00001122 return -EPERM;
1123
Ajit Khaparde94f434c2011-12-30 12:15:30 +00001124 if (vf >= adapter->num_vfs)
Ajit Khapardee1d18732010-07-23 01:52:13 +00001125 return -EINVAL;
1126
Ajit Khaparde94f434c2011-12-30 12:15:30 +00001127 if (rate < 100 || rate > 10000) {
1128 dev_err(&adapter->pdev->dev,
1129 "tx rate must be between 100 and 10000 Mbps\n");
1130 return -EINVAL;
1131 }
Ajit Khapardee1d18732010-07-23 01:52:13 +00001132
Padmanabh Ratnakard5c18472012-10-20 06:01:53 +00001133 if (lancer_chip(adapter))
1134 status = be_cmd_set_profile_config(adapter, rate / 10, vf + 1);
1135 else
1136 status = be_cmd_set_qos(adapter, rate / 10, vf + 1);
Ajit Khapardee1d18732010-07-23 01:52:13 +00001137
1138 if (status)
Ajit Khaparde94f434c2011-12-30 12:15:30 +00001139 dev_err(&adapter->pdev->dev,
Ajit Khapardee1d18732010-07-23 01:52:13 +00001140 "tx rate %d on VF %d failed\n", rate, vf);
Ajit Khaparde94f434c2011-12-30 12:15:30 +00001141 else
1142 adapter->vf_cfg[vf].tx_rate = rate;
Ajit Khapardee1d18732010-07-23 01:52:13 +00001143 return status;
1144}
1145
Sathya Perla39f1d942012-05-08 19:41:24 +00001146static int be_find_vfs(struct be_adapter *adapter, int vf_state)
1147{
1148 struct pci_dev *dev, *pdev = adapter->pdev;
Ivan Vecera2f6a0262012-10-01 01:56:55 +00001149 int vfs = 0, assigned_vfs = 0, pos;
Sathya Perla39f1d942012-05-08 19:41:24 +00001150 u16 offset, stride;
1151
1152 pos = pci_find_ext_capability(pdev, PCI_EXT_CAP_ID_SRIOV);
Sathya Perlad79c0a22012-06-05 19:37:22 +00001153 if (!pos)
1154 return 0;
Sathya Perla39f1d942012-05-08 19:41:24 +00001155 pci_read_config_word(pdev, pos + PCI_SRIOV_VF_OFFSET, &offset);
1156 pci_read_config_word(pdev, pos + PCI_SRIOV_VF_STRIDE, &stride);
1157
1158 dev = pci_get_device(pdev->vendor, PCI_ANY_ID, NULL);
1159 while (dev) {
Ivan Vecera2f6a0262012-10-01 01:56:55 +00001160 if (dev->is_virtfn && pci_physfn(dev) == pdev) {
Sathya Perla39f1d942012-05-08 19:41:24 +00001161 vfs++;
1162 if (dev->dev_flags & PCI_DEV_FLAGS_ASSIGNED)
1163 assigned_vfs++;
1164 }
1165 dev = pci_get_device(pdev->vendor, PCI_ANY_ID, dev);
1166 }
1167 return (vf_state == ASSIGNED) ? assigned_vfs : vfs;
1168}
1169
/* Adaptive interrupt coalescing: recompute the event-queue delay for
 * @eqo from the RX pkt rate (re-sampled at most once per second) and
 * program it to HW when it changes.
 */
static void be_eqd_update(struct be_adapter *adapter, struct be_eq_obj *eqo)
{
	struct be_rx_stats *stats = rx_stats(&adapter->rx_obj[eqo->idx]);
	ulong now = jiffies;
	ulong delta = now - stats->rx_jiffies;
	u64 pkts;
	unsigned int start, eqd;

	if (!eqo->enable_aic) {
		/* AIC off: use the statically configured delay */
		eqd = eqo->eqd;
		goto modify_eqd;
	}

	/* EQs beyond the RX queues have no rx stats to adapt from */
	if (eqo->idx >= adapter->num_rx_qs)
		return;

	stats = rx_stats(&adapter->rx_obj[eqo->idx]);

	/* Wrapped around */
	if (time_before(now, stats->rx_jiffies)) {
		stats->rx_jiffies = now;
		return;
	}

	/* Update once a second */
	if (delta < HZ)
		return;

	/* Take a consistent snapshot of the 64-bit pkt counter */
	do {
		start = u64_stats_fetch_begin_bh(&stats->sync);
		pkts = stats->rx_pkts;
	} while (u64_stats_fetch_retry_bh(&stats->sync, start));

	stats->rx_pps = (unsigned long)(pkts - stats->rx_pkts_prev) / (delta / HZ);
	stats->rx_pkts_prev = pkts;
	stats->rx_jiffies = now;
	/* Map pkt rate to a delay and clamp to [min_eqd, max_eqd] */
	eqd = (stats->rx_pps / 110000) << 3;
	eqd = min(eqd, eqo->max_eqd);
	eqd = max(eqd, eqo->min_eqd);
	if (eqd < 10)
		eqd = 0;

modify_eqd:
	if (eqd != eqo->cur_eqd) {
		be_cmd_modify_eqd(adapter, eqo->q.id, eqd);
		eqo->cur_eqd = eqd;
	}
}
1218
Sathya Perla3abcded2010-10-03 22:12:27 -07001219static void be_rx_stats_update(struct be_rx_obj *rxo,
Sathya Perla2e588f82011-03-11 02:49:26 +00001220 struct be_rx_compl_info *rxcp)
Sathya Perla4097f662009-03-24 16:40:13 -07001221{
Sathya Perlaac124ff2011-07-25 19:10:14 +00001222 struct be_rx_stats *stats = rx_stats(rxo);
Sathya Perla4097f662009-03-24 16:40:13 -07001223
Sathya Perlaab1594e2011-07-25 19:10:15 +00001224 u64_stats_update_begin(&stats->sync);
Sathya Perla3abcded2010-10-03 22:12:27 -07001225 stats->rx_compl++;
Sathya Perla2e588f82011-03-11 02:49:26 +00001226 stats->rx_bytes += rxcp->pkt_size;
Sathya Perla3abcded2010-10-03 22:12:27 -07001227 stats->rx_pkts++;
Sathya Perla2e588f82011-03-11 02:49:26 +00001228 if (rxcp->pkt_type == BE_MULTICAST_PACKET)
Sathya Perla3abcded2010-10-03 22:12:27 -07001229 stats->rx_mcast_pkts++;
Sathya Perla2e588f82011-03-11 02:49:26 +00001230 if (rxcp->err)
Sathya Perlaac124ff2011-07-25 19:10:14 +00001231 stats->rx_compl_err++;
Sathya Perlaab1594e2011-07-25 19:10:15 +00001232 u64_stats_update_end(&stats->sync);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001233}
1234
Sathya Perla2e588f82011-03-11 02:49:26 +00001235static inline bool csum_passed(struct be_rx_compl_info *rxcp)
Ajit Khaparde728a9972009-04-13 15:41:22 -07001236{
Padmanabh Ratnakar19fad862011-03-07 03:08:16 +00001237 /* L4 checksum is not reliable for non TCP/UDP packets.
1238 * Also ignore ipcksm for ipv6 pkts */
Sathya Perla2e588f82011-03-11 02:49:26 +00001239 return (rxcp->tcpf || rxcp->udpf) && rxcp->l4_csum &&
1240 (rxcp->ip_csum || rxcp->ipv6);
Ajit Khaparde728a9972009-04-13 15:41:22 -07001241}
1242
/* Pop the page_info for RX frag @frag_idx. The backing (large) page is
 * shared across frags and is DMA-unmapped only by its last user; the
 * rxq 'used' count is decremented either way.
 */
static struct be_rx_page_info *get_rx_page_info(struct be_rx_obj *rxo,
				u16 frag_idx)
{
	struct be_adapter *adapter = rxo->adapter;
	struct be_rx_page_info *rx_page_info;
	struct be_queue_info *rxq = &rxo->q;

	rx_page_info = &rxo->page_info_tbl[frag_idx];
	BUG_ON(!rx_page_info->page);

	if (rx_page_info->last_page_user) {
		dma_unmap_page(&adapter->pdev->dev,
			dma_unmap_addr(rx_page_info, bus),
			adapter->big_page_size, DMA_FROM_DEVICE);
		rx_page_info->last_page_user = false;
	}

	atomic_dec(&rxq->used);
	return rx_page_info;
}
1263
1264/* Throwaway the data in the Rx completion */
Sathya Perla10ef9ab2012-02-09 18:05:27 +00001265static void be_rx_compl_discard(struct be_rx_obj *rxo,
1266 struct be_rx_compl_info *rxcp)
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001267{
Sathya Perla3abcded2010-10-03 22:12:27 -07001268 struct be_queue_info *rxq = &rxo->q;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001269 struct be_rx_page_info *page_info;
Sathya Perla2e588f82011-03-11 02:49:26 +00001270 u16 i, num_rcvd = rxcp->num_rcvd;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001271
Padmanabh Ratnakare80d9da2011-03-07 03:07:58 +00001272 for (i = 0; i < num_rcvd; i++) {
Sathya Perla10ef9ab2012-02-09 18:05:27 +00001273 page_info = get_rx_page_info(rxo, rxcp->rxq_idx);
Padmanabh Ratnakare80d9da2011-03-07 03:07:58 +00001274 put_page(page_info->page);
1275 memset(page_info, 0, sizeof(*page_info));
Sathya Perla2e588f82011-03-11 02:49:26 +00001276 index_inc(&rxcp->rxq_idx, rxq->len);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001277 }
1278}
1279
/*
 * skb_fill_rx_data() builds a complete skb for the ether frame described
 * by the RX completion @rxcp.  The first fragment (or the whole frame, if
 * tiny) lands in the skb linear area; remaining RX fragments are attached
 * as page frags, coalescing sub-frags that share a physical page.
 */
static void skb_fill_rx_data(struct be_rx_obj *rxo, struct sk_buff *skb,
			struct be_rx_compl_info *rxcp)
{
	struct be_queue_info *rxq = &rxo->q;
	struct be_rx_page_info *page_info;
	u16 i, j;
	u16 hdr_len, curr_frag_len, remaining;
	u8 *start;

	page_info = get_rx_page_info(rxo, rxcp->rxq_idx);
	start = page_address(page_info->page) + page_info->page_offset;
	prefetch(start);

	/* Copy data in the first descriptor of this completion */
	curr_frag_len = min(rxcp->pkt_size, rx_frag_size);

	skb->len = curr_frag_len;
	if (curr_frag_len <= BE_HDR_LEN) { /* tiny packet */
		memcpy(skb->data, start, curr_frag_len);
		/* Complete packet has now been moved to data */
		put_page(page_info->page);
		skb->data_len = 0;
		skb->tail += curr_frag_len;
	} else {
		/* Copy only the ethernet header into the linear area; the
		 * rest of the first fragment stays in the page as frag[0].
		 */
		hdr_len = ETH_HLEN;
		memcpy(skb->data, start, hdr_len);
		skb_shinfo(skb)->nr_frags = 1;
		skb_frag_set_page(skb, 0, page_info->page);
		skb_shinfo(skb)->frags[0].page_offset =
					page_info->page_offset + hdr_len;
		skb_frag_size_set(&skb_shinfo(skb)->frags[0], curr_frag_len - hdr_len);
		skb->data_len = curr_frag_len - hdr_len;
		skb->truesize += rx_frag_size;
		skb->tail += hdr_len;
	}
	/* Ownership of the page (or its reference) moved to the skb */
	page_info->page = NULL;

	if (rxcp->pkt_size <= rx_frag_size) {
		BUG_ON(rxcp->num_rcvd != 1);
		return;
	}

	/* More frags present for this completion */
	index_inc(&rxcp->rxq_idx, rxq->len);
	remaining = rxcp->pkt_size - curr_frag_len;
	for (i = 1, j = 0; i < rxcp->num_rcvd; i++) {
		page_info = get_rx_page_info(rxo, rxcp->rxq_idx);
		curr_frag_len = min(remaining, rx_frag_size);

		/* Coalesce all frags from the same physical page in one slot */
		if (page_info->page_offset == 0) {
			/* Fresh page */
			j++;
			skb_frag_set_page(skb, j, page_info->page);
			skb_shinfo(skb)->frags[j].page_offset =
							page_info->page_offset;
			skb_frag_size_set(&skb_shinfo(skb)->frags[j], 0);
			skb_shinfo(skb)->nr_frags++;
		} else {
			/* Same page as previous frag: drop the extra page
			 * reference and just grow the current slot below.
			 */
			put_page(page_info->page);
		}

		skb_frag_size_add(&skb_shinfo(skb)->frags[j], curr_frag_len);
		skb->len += curr_frag_len;
		skb->data_len += curr_frag_len;
		skb->truesize += rx_frag_size;
		remaining -= curr_frag_len;
		index_inc(&rxcp->rxq_idx, rxq->len);
		page_info->page = NULL;
	}
	BUG_ON(j > MAX_SKB_FRAGS);
}
1356
Ajit Khaparde5be93b92009-07-21 12:36:19 -07001357/* Process the RX completion indicated by rxcp when GRO is disabled */
Sathya Perla10ef9ab2012-02-09 18:05:27 +00001358static void be_rx_compl_process(struct be_rx_obj *rxo,
1359 struct be_rx_compl_info *rxcp)
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001360{
Sathya Perla10ef9ab2012-02-09 18:05:27 +00001361 struct be_adapter *adapter = rxo->adapter;
Michał Mirosław6332c8d2011-04-07 02:43:48 +00001362 struct net_device *netdev = adapter->netdev;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001363 struct sk_buff *skb;
Sathya Perla89420422010-02-17 01:35:26 +00001364
Eric Dumazetbb349bb2012-01-25 03:56:30 +00001365 skb = netdev_alloc_skb_ip_align(netdev, BE_RX_SKB_ALLOC_SIZE);
Sathya Perlaa058a632010-02-17 01:34:22 +00001366 if (unlikely(!skb)) {
Sathya Perlaac124ff2011-07-25 19:10:14 +00001367 rx_stats(rxo)->rx_drops_no_skbs++;
Sathya Perla10ef9ab2012-02-09 18:05:27 +00001368 be_rx_compl_discard(rxo, rxcp);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001369 return;
1370 }
1371
Sathya Perla10ef9ab2012-02-09 18:05:27 +00001372 skb_fill_rx_data(rxo, skb, rxcp);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001373
Michał Mirosław6332c8d2011-04-07 02:43:48 +00001374 if (likely((netdev->features & NETIF_F_RXCSUM) && csum_passed(rxcp)))
Ajit Khaparde728a9972009-04-13 15:41:22 -07001375 skb->ip_summed = CHECKSUM_UNNECESSARY;
Somnath Koturc6ce2f42010-10-25 01:11:58 +00001376 else
1377 skb_checksum_none_assert(skb);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001378
Michał Mirosław6332c8d2011-04-07 02:43:48 +00001379 skb->protocol = eth_type_trans(skb, netdev);
Somnath Koturaaa6dae2012-05-02 03:40:49 +00001380 skb_record_rx_queue(skb, rxo - &adapter->rx_obj[0]);
Sathya Perla10ef9ab2012-02-09 18:05:27 +00001381 if (netdev->features & NETIF_F_RXHASH)
Ajit Khaparde4b972912011-04-06 18:07:43 +00001382 skb->rxhash = rxcp->rss_hash;
1383
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001384
Jiri Pirko343e43c2011-08-25 02:50:51 +00001385 if (rxcp->vlanf)
Ajit Khaparde4c5102f2011-07-12 22:10:01 -07001386 __vlan_hwaccel_put_tag(skb, rxcp->vlan_tag);
1387
1388 netif_receive_skb(skb);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001389}
1390
/* Process the RX completion indicated by rxcp when GRO is enabled */
void be_rx_compl_process_gro(struct be_rx_obj *rxo, struct napi_struct *napi,
			     struct be_rx_compl_info *rxcp)
{
	struct be_adapter *adapter = rxo->adapter;
	struct be_rx_page_info *page_info;
	struct sk_buff *skb = NULL;
	struct be_queue_info *rxq = &rxo->q;
	u16 remaining, curr_frag_len;
	u16 i, j;

	/* Borrow NAPI's pre-allocated skb; the RX page fragments are
	 * attached to it directly (no data copy) before GRO submission.
	 */
	skb = napi_get_frags(napi);
	if (!skb) {
		be_rx_compl_discard(rxo, rxcp);
		return;
	}

	remaining = rxcp->pkt_size;
	/* j is u16: the -1 wraps to 0xffff and the i == 0 branch below
	 * always increments it to 0 before frags[j] is first touched.
	 */
	for (i = 0, j = -1; i < rxcp->num_rcvd; i++) {
		page_info = get_rx_page_info(rxo, rxcp->rxq_idx);

		curr_frag_len = min(remaining, rx_frag_size);

		/* Coalesce all frags from the same physical page in one slot */
		if (i == 0 || page_info->page_offset == 0) {
			/* First frag or Fresh page */
			j++;
			skb_frag_set_page(skb, j, page_info->page);
			skb_shinfo(skb)->frags[j].page_offset =
							page_info->page_offset;
			skb_frag_size_set(&skb_shinfo(skb)->frags[j], 0);
		} else {
			/* Same page as previous frag: release the extra
			 * reference; the slot size is grown below.
			 */
			put_page(page_info->page);
		}
		skb_frag_size_add(&skb_shinfo(skb)->frags[j], curr_frag_len);
		skb->truesize += rx_frag_size;
		remaining -= curr_frag_len;
		index_inc(&rxcp->rxq_idx, rxq->len);
		memset(page_info, 0, sizeof(*page_info));
	}
	BUG_ON(j > MAX_SKB_FRAGS);

	skb_shinfo(skb)->nr_frags = j + 1;
	skb->len = rxcp->pkt_size;
	skb->data_len = rxcp->pkt_size;
	/* GRO path is taken only for csum-verified frames */
	skb->ip_summed = CHECKSUM_UNNECESSARY;
	skb_record_rx_queue(skb, rxo - &adapter->rx_obj[0]);
	if (adapter->netdev->features & NETIF_F_RXHASH)
		skb->rxhash = rxcp->rss_hash;

	if (rxcp->vlanf)
		__vlan_hwaccel_put_tag(skb, rxcp->vlan_tag);

	napi_gro_frags(napi);
}
1446
Sathya Perla10ef9ab2012-02-09 18:05:27 +00001447static void be_parse_rx_compl_v1(struct be_eth_rx_compl *compl,
1448 struct be_rx_compl_info *rxcp)
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001449{
Sathya Perla2e588f82011-03-11 02:49:26 +00001450 rxcp->pkt_size =
1451 AMAP_GET_BITS(struct amap_eth_rx_compl_v1, pktsize, compl);
1452 rxcp->vlanf = AMAP_GET_BITS(struct amap_eth_rx_compl_v1, vtp, compl);
1453 rxcp->err = AMAP_GET_BITS(struct amap_eth_rx_compl_v1, err, compl);
1454 rxcp->tcpf = AMAP_GET_BITS(struct amap_eth_rx_compl_v1, tcpf, compl);
Padmanabh Ratnakar9ecb42f2011-03-15 14:57:09 -07001455 rxcp->udpf = AMAP_GET_BITS(struct amap_eth_rx_compl_v1, udpf, compl);
Sathya Perla2e588f82011-03-11 02:49:26 +00001456 rxcp->ip_csum =
1457 AMAP_GET_BITS(struct amap_eth_rx_compl_v1, ipcksm, compl);
1458 rxcp->l4_csum =
1459 AMAP_GET_BITS(struct amap_eth_rx_compl_v1, l4_cksm, compl);
1460 rxcp->ipv6 =
1461 AMAP_GET_BITS(struct amap_eth_rx_compl_v1, ip_version, compl);
1462 rxcp->rxq_idx =
1463 AMAP_GET_BITS(struct amap_eth_rx_compl_v1, fragndx, compl);
1464 rxcp->num_rcvd =
1465 AMAP_GET_BITS(struct amap_eth_rx_compl_v1, numfrags, compl);
1466 rxcp->pkt_type =
1467 AMAP_GET_BITS(struct amap_eth_rx_compl_v1, cast_enc, compl);
Ajit Khaparde4b972912011-04-06 18:07:43 +00001468 rxcp->rss_hash =
Sarveshwar Bandic2979772012-07-25 21:29:50 +00001469 AMAP_GET_BITS(struct amap_eth_rx_compl_v1, rsshash, compl);
Sathya Perla15d72182011-03-21 20:49:26 +00001470 if (rxcp->vlanf) {
1471 rxcp->vtm = AMAP_GET_BITS(struct amap_eth_rx_compl_v1, vtm,
David S. Miller3c709f82011-05-11 14:26:15 -04001472 compl);
1473 rxcp->vlan_tag = AMAP_GET_BITS(struct amap_eth_rx_compl_v1, vlan_tag,
1474 compl);
Sathya Perla15d72182011-03-21 20:49:26 +00001475 }
Sathya Perla12004ae2011-08-02 19:57:46 +00001476 rxcp->port = AMAP_GET_BITS(struct amap_eth_rx_compl_v1, port, compl);
Sathya Perla2e588f82011-03-11 02:49:26 +00001477}
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001478
Sathya Perla10ef9ab2012-02-09 18:05:27 +00001479static void be_parse_rx_compl_v0(struct be_eth_rx_compl *compl,
1480 struct be_rx_compl_info *rxcp)
Sathya Perla2e588f82011-03-11 02:49:26 +00001481{
1482 rxcp->pkt_size =
1483 AMAP_GET_BITS(struct amap_eth_rx_compl_v0, pktsize, compl);
1484 rxcp->vlanf = AMAP_GET_BITS(struct amap_eth_rx_compl_v0, vtp, compl);
1485 rxcp->err = AMAP_GET_BITS(struct amap_eth_rx_compl_v0, err, compl);
1486 rxcp->tcpf = AMAP_GET_BITS(struct amap_eth_rx_compl_v0, tcpf, compl);
Padmanabh Ratnakar9ecb42f2011-03-15 14:57:09 -07001487 rxcp->udpf = AMAP_GET_BITS(struct amap_eth_rx_compl_v0, udpf, compl);
Sathya Perla2e588f82011-03-11 02:49:26 +00001488 rxcp->ip_csum =
1489 AMAP_GET_BITS(struct amap_eth_rx_compl_v0, ipcksm, compl);
1490 rxcp->l4_csum =
1491 AMAP_GET_BITS(struct amap_eth_rx_compl_v0, l4_cksm, compl);
1492 rxcp->ipv6 =
1493 AMAP_GET_BITS(struct amap_eth_rx_compl_v0, ip_version, compl);
1494 rxcp->rxq_idx =
1495 AMAP_GET_BITS(struct amap_eth_rx_compl_v0, fragndx, compl);
1496 rxcp->num_rcvd =
1497 AMAP_GET_BITS(struct amap_eth_rx_compl_v0, numfrags, compl);
1498 rxcp->pkt_type =
1499 AMAP_GET_BITS(struct amap_eth_rx_compl_v0, cast_enc, compl);
Ajit Khaparde4b972912011-04-06 18:07:43 +00001500 rxcp->rss_hash =
Sarveshwar Bandic2979772012-07-25 21:29:50 +00001501 AMAP_GET_BITS(struct amap_eth_rx_compl_v0, rsshash, compl);
Sathya Perla15d72182011-03-21 20:49:26 +00001502 if (rxcp->vlanf) {
1503 rxcp->vtm = AMAP_GET_BITS(struct amap_eth_rx_compl_v0, vtm,
David S. Miller3c709f82011-05-11 14:26:15 -04001504 compl);
1505 rxcp->vlan_tag = AMAP_GET_BITS(struct amap_eth_rx_compl_v0, vlan_tag,
1506 compl);
Sathya Perla15d72182011-03-21 20:49:26 +00001507 }
Sathya Perla12004ae2011-08-02 19:57:46 +00001508 rxcp->port = AMAP_GET_BITS(struct amap_eth_rx_compl_v0, port, compl);
Sathya Perla2e588f82011-03-11 02:49:26 +00001509}
1510
/* Fetch the next valid RX completion from rxo's CQ, or NULL if none is
 * pending.  The entry is parsed into rxo->rxcp (a single reusable buffer)
 * and its valid bit is cleared so it is consumed exactly once.
 */
static struct be_rx_compl_info *be_rx_compl_get(struct be_rx_obj *rxo)
{
	struct be_eth_rx_compl *compl = queue_tail_node(&rxo->cq);
	struct be_rx_compl_info *rxcp = &rxo->rxcp;
	struct be_adapter *adapter = rxo->adapter;

	/* For checking the valid bit it is Ok to use either definition as the
	 * valid bit is at the same position in both v0 and v1 Rx compl */
	if (compl->dw[offsetof(struct amap_eth_rx_compl_v1, valid) / 32] == 0)
		return NULL;

	/* Read the rest of the entry only after the valid bit is seen set */
	rmb();
	be_dws_le_to_cpu(compl, sizeof(*compl));

	if (adapter->be3_native)
		be_parse_rx_compl_v1(compl, rxcp);
	else
		be_parse_rx_compl_v0(compl, rxcp);

	if (rxcp->vlanf) {
		/* vlanf could be wrongly set in some cards.
		 * ignore if vtm is not set */
		if ((adapter->function_mode & FLEX10_MODE) && !rxcp->vtm)
			rxcp->vlanf = 0;

		/* Lancer reports the tag in host byte-order already */
		if (!lancer_chip(adapter))
			rxcp->vlan_tag = swab16(rxcp->vlan_tag);

		/* Hide the tag when it is the port's pvid and that vlan was
		 * not configured through the driver.
		 */
		if (adapter->pvid == (rxcp->vlan_tag & VLAN_VID_MASK) &&
		    !adapter->vlan_tag[rxcp->vlan_tag])
			rxcp->vlanf = 0;
	}

	/* As the compl has been parsed, reset it; we wont touch it again */
	compl->dw[offsetof(struct amap_eth_rx_compl_v1, valid) / 32] = 0;

	queue_tail_inc(&rxo->cq);
	return rxcp;
}
1550
Eric Dumazet1829b082011-03-01 05:48:12 +00001551static inline struct page *be_alloc_pages(u32 size, gfp_t gfp)
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001552{
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001553 u32 order = get_order(size);
Eric Dumazet1829b082011-03-01 05:48:12 +00001554
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001555 if (order > 0)
Eric Dumazet1829b082011-03-01 05:48:12 +00001556 gfp |= __GFP_COMP;
1557 return alloc_pages(gfp, order);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001558}
1559
1560/*
1561 * Allocate a page, split it to fragments of size rx_frag_size and post as
1562 * receive buffers to BE
1563 */
Eric Dumazet1829b082011-03-01 05:48:12 +00001564static void be_post_rx_frags(struct be_rx_obj *rxo, gfp_t gfp)
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001565{
Sathya Perla3abcded2010-10-03 22:12:27 -07001566 struct be_adapter *adapter = rxo->adapter;
Sathya Perla26d92f92010-01-21 22:52:08 -08001567 struct be_rx_page_info *page_info = NULL, *prev_page_info = NULL;
Sathya Perla3abcded2010-10-03 22:12:27 -07001568 struct be_queue_info *rxq = &rxo->q;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001569 struct page *pagep = NULL;
1570 struct be_eth_rx_d *rxd;
1571 u64 page_dmaaddr = 0, frag_dmaaddr;
1572 u32 posted, page_offset = 0;
1573
Sathya Perla3abcded2010-10-03 22:12:27 -07001574 page_info = &rxo->page_info_tbl[rxq->head];
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001575 for (posted = 0; posted < MAX_RX_POST && !page_info->page; posted++) {
1576 if (!pagep) {
Eric Dumazet1829b082011-03-01 05:48:12 +00001577 pagep = be_alloc_pages(adapter->big_page_size, gfp);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001578 if (unlikely(!pagep)) {
Sathya Perlaac124ff2011-07-25 19:10:14 +00001579 rx_stats(rxo)->rx_post_fail++;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001580 break;
1581 }
Ivan Vecera2b7bceb2011-02-02 08:05:12 +00001582 page_dmaaddr = dma_map_page(&adapter->pdev->dev, pagep,
1583 0, adapter->big_page_size,
1584 DMA_FROM_DEVICE);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001585 page_info->page_offset = 0;
1586 } else {
1587 get_page(pagep);
1588 page_info->page_offset = page_offset + rx_frag_size;
1589 }
1590 page_offset = page_info->page_offset;
1591 page_info->page = pagep;
FUJITA Tomonorifac6da52010-04-01 16:53:22 +00001592 dma_unmap_addr_set(page_info, bus, page_dmaaddr);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001593 frag_dmaaddr = page_dmaaddr + page_info->page_offset;
1594
1595 rxd = queue_head_node(rxq);
1596 rxd->fragpa_lo = cpu_to_le32(frag_dmaaddr & 0xFFFFFFFF);
1597 rxd->fragpa_hi = cpu_to_le32(upper_32_bits(frag_dmaaddr));
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001598
1599 /* Any space left in the current big page for another frag? */
1600 if ((page_offset + rx_frag_size + rx_frag_size) >
1601 adapter->big_page_size) {
1602 pagep = NULL;
1603 page_info->last_page_user = true;
1604 }
Sathya Perla26d92f92010-01-21 22:52:08 -08001605
1606 prev_page_info = page_info;
1607 queue_head_inc(rxq);
Sathya Perla10ef9ab2012-02-09 18:05:27 +00001608 page_info = &rxo->page_info_tbl[rxq->head];
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001609 }
1610 if (pagep)
Sathya Perla26d92f92010-01-21 22:52:08 -08001611 prev_page_info->last_page_user = true;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001612
1613 if (posted) {
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001614 atomic_add(posted, &rxq->used);
Sathya Perla8788fdc2009-07-27 22:52:03 +00001615 be_rxq_notify(adapter, rxq->id, posted);
Sathya Perlaea1dae12009-03-19 23:56:20 -07001616 } else if (atomic_read(&rxq->used) == 0) {
1617 /* Let be_worker replenish when memory is available */
Sathya Perla3abcded2010-10-03 22:12:27 -07001618 rxo->rx_post_starved = true;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001619 }
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001620}
1621
/* Fetch the next valid TX completion from @tx_cq, or NULL if none is
 * pending.  The entry's valid bit is cleared so it is consumed only once.
 */
static struct be_eth_tx_compl *be_tx_compl_get(struct be_queue_info *tx_cq)
{
	struct be_eth_tx_compl *txcp = queue_tail_node(tx_cq);

	if (txcp->dw[offsetof(struct amap_eth_tx_compl, valid) / 32] == 0)
		return NULL;

	/* Read the entry body only after the valid bit is seen set */
	rmb();
	be_dws_le_to_cpu(txcp, sizeof(*txcp));

	/* Reset the valid bit; this entry will not be looked at again */
	txcp->dw[offsetof(struct amap_eth_tx_compl, valid) / 32] = 0;

	queue_tail_inc(tx_cq);
	return txcp;
}
1637
/* Unmap and free the skb whose wrbs end at @last_index in txo's TX ring.
 * Walks the ring from the current tail (header wrb) through last_index,
 * unmapping each fragment, and returns the number of wrbs consumed so the
 * caller can credit the queue.
 */
static u16 be_tx_compl_process(struct be_adapter *adapter,
		struct be_tx_obj *txo, u16 last_index)
{
	struct be_queue_info *txq = &txo->q;
	struct be_eth_wrb *wrb;
	struct sk_buff **sent_skbs = txo->sent_skb_list;
	struct sk_buff *sent_skb;
	u16 cur_index, num_wrbs = 1; /* account for hdr wrb */
	bool unmap_skb_hdr = true;

	/* The skb was recorded at the position of its header wrb */
	sent_skb = sent_skbs[txq->tail];
	BUG_ON(!sent_skb);
	sent_skbs[txq->tail] = NULL;

	/* skip header wrb */
	queue_tail_inc(txq);

	do {
		cur_index = txq->tail;
		wrb = queue_tail_node(txq);
		/* The linear header is unmapped together with the first
		 * fragment wrb only (when the skb has linear data).
		 */
		unmap_tx_frag(&adapter->pdev->dev, wrb,
			      (unmap_skb_hdr && skb_headlen(sent_skb)));
		unmap_skb_hdr = false;

		num_wrbs++;
		queue_tail_inc(txq);
	} while (cur_index != last_index);

	kfree_skb(sent_skb);
	return num_wrbs;
}
1669
Sathya Perla10ef9ab2012-02-09 18:05:27 +00001670/* Return the number of events in the event queue */
1671static inline int events_get(struct be_eq_obj *eqo)
Sathya Perla859b1e42009-08-10 03:43:51 +00001672{
1673 struct be_eq_entry *eqe;
Sathya Perla10ef9ab2012-02-09 18:05:27 +00001674 int num = 0;
Sathya Perla859b1e42009-08-10 03:43:51 +00001675
Sathya Perla10ef9ab2012-02-09 18:05:27 +00001676 do {
1677 eqe = queue_tail_node(&eqo->q);
1678 if (eqe->evt == 0)
1679 break;
1680
1681 rmb();
Sathya Perla859b1e42009-08-10 03:43:51 +00001682 eqe->evt = 0;
1683 num++;
Sathya Perla10ef9ab2012-02-09 18:05:27 +00001684 queue_tail_inc(&eqo->q);
1685 } while (true);
Sathya Perla859b1e42009-08-10 03:43:51 +00001686
1687 return num;
1688}
1689
Sathya Perla10ef9ab2012-02-09 18:05:27 +00001690/* Leaves the EQ is disarmed state */
1691static void be_eq_clean(struct be_eq_obj *eqo)
1692{
1693 int num = events_get(eqo);
1694
1695 be_eq_notify(eqo->adapter, eqo->q.id, false, true, num);
1696}
1697
/* Drain an RX CQ during teardown: consume all pending completions, wait
 * for the HW flush completion, then release any posted-but-unused RX
 * buffers and reset the RX queue indices.
 */
static void be_rx_cq_clean(struct be_rx_obj *rxo)
{
	struct be_rx_page_info *page_info;
	struct be_queue_info *rxq = &rxo->q;
	struct be_queue_info *rx_cq = &rxo->cq;
	struct be_rx_compl_info *rxcp;
	struct be_adapter *adapter = rxo->adapter;
	int flush_wait = 0;
	u16 tail;

	/* Consume pending rx completions.
	 * Wait for the flush completion (identified by zero num_rcvd)
	 * to arrive. Notify CQ even when there are no more CQ entries
	 * for HW to flush partially coalesced CQ entries.
	 * In Lancer, there is no need to wait for flush compl.
	 */
	for (;;) {
		rxcp = be_rx_compl_get(rxo);
		if (rxcp == NULL) {
			if (lancer_chip(adapter))
				break;

			/* Give up after ~10ms or if the HW is in error */
			if (flush_wait++ > 10 || be_hw_error(adapter)) {
				dev_warn(&adapter->pdev->dev,
					 "did not receive flush compl\n");
				break;
			}
			be_cq_notify(adapter, rx_cq->id, true, 0);
			mdelay(1);
		} else {
			be_rx_compl_discard(rxo, rxcp);
			be_cq_notify(adapter, rx_cq->id, true, 1);
			if (rxcp->num_rcvd == 0)
				break;
		}
	}

	/* After cleanup, leave the CQ in unarmed state */
	be_cq_notify(adapter, rx_cq->id, false, 0);

	/* Then free posted rx buffers that were not used */
	tail = (rxq->head + rxq->len - atomic_read(&rxq->used)) % rxq->len;
	for (; atomic_read(&rxq->used) > 0; index_inc(&tail, rxq->len)) {
		page_info = get_rx_page_info(rxo, tail);
		put_page(page_info->page);
		memset(page_info, 0, sizeof(*page_info));
	}
	BUG_ON(atomic_read(&rxq->used));
	rxq->tail = rxq->head = 0;
}
1748
/* Drain all TX queues during teardown.  First waits (up to ~200ms) for
 * outstanding TX completions to arrive and processes them; then frees any
 * posted wrbs whose completions will never come.
 */
static void be_tx_compl_clean(struct be_adapter *adapter)
{
	struct be_tx_obj *txo;
	struct be_queue_info *txq;
	struct be_eth_tx_compl *txcp;
	u16 end_idx, cmpl = 0, timeo = 0, num_wrbs = 0;
	struct sk_buff *sent_skb;
	bool dummy_wrb;
	int i, pending_txqs;

	/* Wait for a max of 200ms for all the tx-completions to arrive. */
	do {
		pending_txqs = adapter->num_tx_qs;

		for_all_tx_queues(adapter, txo, i) {
			txq = &txo->q;
			while ((txcp = be_tx_compl_get(&txo->cq))) {
				end_idx =
					AMAP_GET_BITS(struct amap_eth_tx_compl,
						      wrb_index, txcp);
				num_wrbs += be_tx_compl_process(adapter, txo,
								end_idx);
				cmpl++;
			}
			if (cmpl) {
				/* Ack the completions and credit the wrbs
				 * back to the TX ring
				 */
				be_cq_notify(adapter, txo->cq.id, false, cmpl);
				atomic_sub(num_wrbs, &txq->used);
				cmpl = 0;
				num_wrbs = 0;
			}
			if (atomic_read(&txq->used) == 0)
				pending_txqs--;
		}

		if (pending_txqs == 0 || ++timeo > 200)
			break;

		mdelay(1);
	} while (true);

	for_all_tx_queues(adapter, txo, i) {
		txq = &txo->q;
		if (atomic_read(&txq->used))
			dev_err(&adapter->pdev->dev, "%d pending tx-compls\n",
				atomic_read(&txq->used));

		/* free posted tx for which compls will never arrive */
		while (atomic_read(&txq->used)) {
			sent_skb = txo->sent_skb_list[txq->tail];
			end_idx = txq->tail;
			/* Recompute the wrb span of this skb to advance
			 * the tail past it
			 */
			num_wrbs = wrb_cnt_for_skb(adapter, sent_skb,
						   &dummy_wrb);
			index_adv(&end_idx, num_wrbs - 1, txq->len);
			num_wrbs = be_tx_compl_process(adapter, txo, end_idx);
			atomic_sub(num_wrbs, &txq->used);
		}
	}
}
1807
Sathya Perla10ef9ab2012-02-09 18:05:27 +00001808static void be_evt_queues_destroy(struct be_adapter *adapter)
1809{
1810 struct be_eq_obj *eqo;
1811 int i;
1812
1813 for_all_evt_queues(adapter, eqo, i) {
Padmanabh Ratnakar19d59aa2012-07-12 03:57:21 +00001814 if (eqo->q.created) {
1815 be_eq_clean(eqo);
Sathya Perla10ef9ab2012-02-09 18:05:27 +00001816 be_cmd_q_destroy(adapter, &eqo->q, QTYPE_EQ);
Padmanabh Ratnakar19d59aa2012-07-12 03:57:21 +00001817 }
Sathya Perla10ef9ab2012-02-09 18:05:27 +00001818 be_queue_free(adapter, &eqo->q);
1819 }
1820}
1821
1822static int be_evt_queues_create(struct be_adapter *adapter)
1823{
1824 struct be_queue_info *eq;
1825 struct be_eq_obj *eqo;
1826 int i, rc;
1827
1828 adapter->num_evt_qs = num_irqs(adapter);
1829
1830 for_all_evt_queues(adapter, eqo, i) {
1831 eqo->adapter = adapter;
1832 eqo->tx_budget = BE_TX_BUDGET;
1833 eqo->idx = i;
1834 eqo->max_eqd = BE_MAX_EQD;
1835 eqo->enable_aic = true;
1836
1837 eq = &eqo->q;
1838 rc = be_queue_alloc(adapter, eq, EVNT_Q_LEN,
1839 sizeof(struct be_eq_entry));
1840 if (rc)
1841 return rc;
1842
1843 rc = be_cmd_eq_create(adapter, eq, eqo->cur_eqd);
1844 if (rc)
1845 return rc;
1846 }
Sathya Perla1cfafab2012-02-23 18:50:15 +00001847 return 0;
Sathya Perla10ef9ab2012-02-09 18:05:27 +00001848}
1849
Sathya Perla5fb379e2009-06-18 00:02:59 +00001850static void be_mcc_queues_destroy(struct be_adapter *adapter)
1851{
1852 struct be_queue_info *q;
Sathya Perla5fb379e2009-06-18 00:02:59 +00001853
Sathya Perla8788fdc2009-07-27 22:52:03 +00001854 q = &adapter->mcc_obj.q;
Sathya Perla5fb379e2009-06-18 00:02:59 +00001855 if (q->created)
Sathya Perla8788fdc2009-07-27 22:52:03 +00001856 be_cmd_q_destroy(adapter, q, QTYPE_MCCQ);
Sathya Perla5fb379e2009-06-18 00:02:59 +00001857 be_queue_free(adapter, q);
1858
Sathya Perla8788fdc2009-07-27 22:52:03 +00001859 q = &adapter->mcc_obj.cq;
Sathya Perla5fb379e2009-06-18 00:02:59 +00001860 if (q->created)
Sathya Perla8788fdc2009-07-27 22:52:03 +00001861 be_cmd_q_destroy(adapter, q, QTYPE_CQ);
Sathya Perla5fb379e2009-06-18 00:02:59 +00001862 be_queue_free(adapter, q);
1863}
1864
1865/* Must be called only after TX qs are created as MCC shares TX EQ */
1866static int be_mcc_queues_create(struct be_adapter *adapter)
1867{
1868 struct be_queue_info *q, *cq;
Sathya Perla5fb379e2009-06-18 00:02:59 +00001869
Sathya Perla8788fdc2009-07-27 22:52:03 +00001870 cq = &adapter->mcc_obj.cq;
Sathya Perla5fb379e2009-06-18 00:02:59 +00001871 if (be_queue_alloc(adapter, cq, MCC_CQ_LEN,
Sathya Perlaefd2e402009-07-27 22:53:10 +00001872 sizeof(struct be_mcc_compl)))
Sathya Perla5fb379e2009-06-18 00:02:59 +00001873 goto err;
1874
Sathya Perla10ef9ab2012-02-09 18:05:27 +00001875 /* Use the default EQ for MCC completions */
1876 if (be_cmd_cq_create(adapter, cq, &mcc_eqo(adapter)->q, true, 0))
Sathya Perla5fb379e2009-06-18 00:02:59 +00001877 goto mcc_cq_free;
1878
Sathya Perla8788fdc2009-07-27 22:52:03 +00001879 q = &adapter->mcc_obj.q;
Sathya Perla5fb379e2009-06-18 00:02:59 +00001880 if (be_queue_alloc(adapter, q, MCC_Q_LEN, sizeof(struct be_mcc_wrb)))
1881 goto mcc_cq_destroy;
1882
Sathya Perla8788fdc2009-07-27 22:52:03 +00001883 if (be_cmd_mccq_create(adapter, q, cq))
Sathya Perla5fb379e2009-06-18 00:02:59 +00001884 goto mcc_q_free;
1885
1886 return 0;
1887
1888mcc_q_free:
1889 be_queue_free(adapter, q);
1890mcc_cq_destroy:
Sathya Perla8788fdc2009-07-27 22:52:03 +00001891 be_cmd_q_destroy(adapter, cq, QTYPE_CQ);
Sathya Perla5fb379e2009-06-18 00:02:59 +00001892mcc_cq_free:
1893 be_queue_free(adapter, cq);
1894err:
1895 return -1;
1896}
1897
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001898static void be_tx_queues_destroy(struct be_adapter *adapter)
1899{
1900 struct be_queue_info *q;
Sathya Perla3c8def92011-06-12 20:01:58 +00001901 struct be_tx_obj *txo;
1902 u8 i;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001903
Sathya Perla3c8def92011-06-12 20:01:58 +00001904 for_all_tx_queues(adapter, txo, i) {
1905 q = &txo->q;
1906 if (q->created)
1907 be_cmd_q_destroy(adapter, q, QTYPE_TXQ);
1908 be_queue_free(adapter, q);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001909
Sathya Perla3c8def92011-06-12 20:01:58 +00001910 q = &txo->cq;
1911 if (q->created)
1912 be_cmd_q_destroy(adapter, q, QTYPE_CQ);
1913 be_queue_free(adapter, q);
1914 }
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001915}
1916
Sathya Perladafc0fe2011-10-24 02:45:02 +00001917static int be_num_txqs_want(struct be_adapter *adapter)
1918{
Padmanabh Ratnakarabb93952012-10-20 06:01:41 +00001919 if ((!lancer_chip(adapter) && sriov_want(adapter)) ||
1920 be_is_mc(adapter) ||
1921 (!lancer_chip(adapter) && !be_physfn(adapter)) ||
Sathya Perlaca34fe32012-11-06 17:48:56 +00001922 BE2_chip(adapter))
Sathya Perladafc0fe2011-10-24 02:45:02 +00001923 return 1;
1924 else
Padmanabh Ratnakarabb93952012-10-20 06:01:41 +00001925 return adapter->max_tx_queues;
Sathya Perladafc0fe2011-10-24 02:45:02 +00001926}
1927
/* Allocates and creates a TX completion queue per TX queue.
 * Also fixes up the netdev's real TXQ count (under RTNL) when fewer than
 * MAX_TX_QS are used. Returns 0 on success or a negative/firmware status;
 * partially-created queues are cleaned up by the caller via
 * be_tx_queues_destroy().
 */
Sathya Perla10ef9ab2012-02-09 18:05:27 +00001928static int be_tx_cqs_create(struct be_adapter *adapter)
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001929{
Sathya Perla10ef9ab2012-02-09 18:05:27 +00001930	struct be_queue_info *cq, *eq;
1931	int status;
Sathya Perla3c8def92011-06-12 20:01:58 +00001932	struct be_tx_obj *txo;
1933	u8 i;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001934
Sathya Perladafc0fe2011-10-24 02:45:02 +00001935	adapter->num_tx_qs = be_num_txqs_want(adapter);
Padmanabh Ratnakar3bb62f42011-11-25 05:48:06 +00001936	if (adapter->num_tx_qs != MAX_TX_QS) {
1937		rtnl_lock();
Sathya Perladafc0fe2011-10-24 02:45:02 +00001938		netif_set_real_num_tx_queues(adapter->netdev,
1939			adapter->num_tx_qs);
Padmanabh Ratnakar3bb62f42011-11-25 05:48:06 +00001940		rtnl_unlock();
1941	}
Sathya Perladafc0fe2011-10-24 02:45:02 +00001942
Sathya Perla3c8def92011-06-12 20:01:58 +00001943	for_all_tx_queues(adapter, txo, i) {
1944		cq = &txo->cq;
Sathya Perla10ef9ab2012-02-09 18:05:27 +00001945		status = be_queue_alloc(adapter, cq, TX_CQ_LEN,
1946					sizeof(struct be_eth_tx_compl));
1947		if (status)
1948			return status;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001949
Sathya Perla10ef9ab2012-02-09 18:05:27 +00001950		/* If num_evt_qs is less than num_tx_qs, then more than
1951		 * one txq share an eq
1952		 */
1953		eq = &adapter->eq_obj[i % adapter->num_evt_qs].q;
1954		status = be_cmd_cq_create(adapter, cq, eq, false, 3);
1955		if (status)
1956			return status;
Sathya Perla3c8def92011-06-12 20:01:58 +00001957	}
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001958	return 0;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001959}
1960
/* Allocates host memory for each TX work-request queue and asks the FW to
 * create it, binding it to the CQ created earlier by be_tx_cqs_create().
 * Returns 0 on success; on failure the caller is expected to clean up via
 * be_tx_queues_destroy().
 */
Sathya Perla10ef9ab2012-02-09 18:05:27 +00001961static int be_tx_qs_create(struct be_adapter *adapter)
1962{
1963	struct be_tx_obj *txo;
1964	int i, status;
1965
1966	for_all_tx_queues(adapter, txo, i) {
1967		status = be_queue_alloc(adapter, &txo->q, TX_Q_LEN,
1968					sizeof(struct be_eth_wrb));
1969		if (status)
1970			return status;
1971
1972		status = be_cmd_txq_create(adapter, &txo->q, &txo->cq);
1973		if (status)
1974			return status;
1975	}
1976
Sathya Perlad3791422012-09-28 04:39:44 +00001977	dev_info(&adapter->pdev->dev, "created %d TX queue(s)\n",
1978		 adapter->num_tx_qs);
Sathya Perla10ef9ab2012-02-09 18:05:27 +00001979	return 0;
1980}
1981
/* Destroys the RX completion queues in hardware (if created) and frees
 * their host-side memory, for every RX object.
 */
1982static void be_rx_cqs_destroy(struct be_adapter *adapter)
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001983{
1984	struct be_queue_info *q;
Sathya Perla3abcded2010-10-03 22:12:27 -07001985	struct be_rx_obj *rxo;
1986	int i;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001987
Sathya Perla3abcded2010-10-03 22:12:27 -07001988	for_all_rx_queues(adapter, rxo, i) {
Sathya Perla3abcded2010-10-03 22:12:27 -07001989		q = &rxo->cq;
1990		if (q->created)
1991			be_cmd_q_destroy(adapter, q, QTYPE_CQ);
1992		be_queue_free(adapter, q);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001993	}
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001994}
1995
/* Decides the number of RX queues (one default RXQ plus one RSS ring per
 * IRQ when multiple IRQs are available), adjusts the netdev's real RXQ
 * count under RTNL, then allocates and creates one RX completion queue per
 * RX object. EQs are shared round-robin when there are fewer EQs than RXQs.
 * Returns 0 on success or a firmware/allocation status.
 */
Sathya Perla10ef9ab2012-02-09 18:05:27 +00001996static int be_rx_cqs_create(struct be_adapter *adapter)
Sathya Perlaac6a0c42011-03-21 20:49:25 +00001997{
Sathya Perla10ef9ab2012-02-09 18:05:27 +00001998	struct be_queue_info *eq, *cq;
Sathya Perla3abcded2010-10-03 22:12:27 -07001999	struct be_rx_obj *rxo;
2000	int rc, i;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002001
Sathya Perla10ef9ab2012-02-09 18:05:27 +00002002	/* We'll create as many RSS rings as there are irqs.
2003	 * But when there's only one irq there's no use creating RSS rings
2004	 */
2005	adapter->num_rx_qs = (num_irqs(adapter) > 1) ?
2006				num_irqs(adapter) + 1 : 1;
Sathya Perla7f640062012-06-05 19:37:20 +00002007	if (adapter->num_rx_qs != MAX_RX_QS) {
2008		rtnl_lock();
2009		netif_set_real_num_rx_queues(adapter->netdev,
2010					     adapter->num_rx_qs);
2011		rtnl_unlock();
2012	}
Sathya Perlaac6a0c42011-03-21 20:49:25 +00002013
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002014	adapter->big_page_size = (1 << get_order(rx_frag_size)) * PAGE_SIZE;
Sathya Perla3abcded2010-10-03 22:12:27 -07002015	for_all_rx_queues(adapter, rxo, i) {
2016		rxo->adapter = adapter;
Sathya Perla3abcded2010-10-03 22:12:27 -07002017		cq = &rxo->cq;
2018		rc = be_queue_alloc(adapter, cq, RX_CQ_LEN,
2019				sizeof(struct be_eth_rx_compl));
2020		if (rc)
Sathya Perla10ef9ab2012-02-09 18:05:27 +00002021			return rc;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002022
Sathya Perla10ef9ab2012-02-09 18:05:27 +00002023		eq = &adapter->eq_obj[i % adapter->num_evt_qs].q;
2024		rc = be_cmd_cq_create(adapter, cq, eq, false, 3);
Sathya Perla3abcded2010-10-03 22:12:27 -07002025		if (rc)
Sathya Perla10ef9ab2012-02-09 18:05:27 +00002026			return rc;
Sathya Perla3abcded2010-10-03 22:12:27 -07002027	}
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002028
Sathya Perlad3791422012-09-28 04:39:44 +00002029	dev_info(&adapter->pdev->dev,
2030		 "created %d RSS queue(s) and 1 default RX queue\n",
2031		 adapter->num_rx_qs - 1);
Sathya Perla10ef9ab2012-02-09 18:05:27 +00002032	return 0;
Sathya Perlab628bde2009-08-17 00:58:26 +00002033}
2034
/* Legacy INTx interrupt handler (used only when MSI-X is unavailable;
 * only EQ0 is serviced in INTx mode). Counts pending EQ events and hands
 * processing to NAPI; spurious-interrupt accounting decides whether to
 * report IRQ_HANDLED or IRQ_NONE.
 */
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002035static irqreturn_t be_intx(int irq, void *dev)
2036{
Sathya Perlae49cc342012-11-27 19:50:02 +00002037	struct be_eq_obj *eqo = dev;
2038	struct be_adapter *adapter = eqo->adapter;
2039	int num_evts = 0;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002040
Sathya Perlad0b9cec2013-01-11 22:47:02 +00002041	/* IRQ is not expected when NAPI is scheduled as the EQ
2042	 * will not be armed.
2043	 * But, this can happen on Lancer INTx where it takes
2044	 * a while to de-assert INTx or in BE2 where occasionally
2045	 * an interrupt may be raised even when EQ is unarmed.
2046	 * If NAPI is already scheduled, then counting & notifying
2047	 * events will orphan them.
Sathya Perlae49cc342012-11-27 19:50:02 +00002048	 */
Sathya Perlad0b9cec2013-01-11 22:47:02 +00002049	if (napi_schedule_prep(&eqo->napi)) {
Sathya Perlae49cc342012-11-27 19:50:02 +00002050		num_evts = events_get(eqo);
Sathya Perlad0b9cec2013-01-11 22:47:02 +00002051		__napi_schedule(&eqo->napi);
2052		if (num_evts)
2053			eqo->spurious_intr = 0;
2054	}
Sathya Perlae49cc342012-11-27 19:50:02 +00002055	be_eq_notify(adapter, eqo->q.id, false, true, num_evts);
Sathya Perlad0b9cec2013-01-11 22:47:02 +00002056
2057	/* Return IRQ_HANDLED only for the first spurious intr
2058	 * after a valid intr to stop the kernel from branding
2059	 * this irq as a bad one!
2060	 */
2061	if (num_evts || eqo->spurious_intr++ == 0)
2062		return IRQ_HANDLED;
2063	else
2064		return IRQ_NONE;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002065}
2066
/* MSI-X interrupt handler: one per event queue. Notifies the EQ without
 * re-arming it (NAPI will re-arm on completion) and schedules NAPI.
 */
Sathya Perla10ef9ab2012-02-09 18:05:27 +00002067static irqreturn_t be_msix(int irq, void *dev)
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002068{
Sathya Perla10ef9ab2012-02-09 18:05:27 +00002069	struct be_eq_obj *eqo = dev;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002070
Sathya Perla0b545a62012-11-23 00:27:18 +00002071	be_eq_notify(eqo->adapter, eqo->q.id, false, true, 0);
2072	napi_schedule(&eqo->napi);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002073	return IRQ_HANDLED;
2074}
2075
/* True when an RX completion is eligible for GRO: a TCP frame with no
 * completion error flagged.
 */
Sathya Perla2e588f82011-03-11 02:49:26 +00002076static inline bool do_gro(struct be_rx_compl_info *rxcp)
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002077{
Sathya Perla2e588f82011-03-11 02:49:26 +00002078	return (rxcp->tcpf && !rxcp->err) ? true : false;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002079}
2080
/* NAPI RX processing for one RX queue: drains up to @budget completions,
 * discarding flush/partial/mis-filtered completions, and delivers the rest
 * via GRO or the regular receive path. Notifies the CQ for the work done
 * and replenishes RX buffers when the queue runs low.
 * Returns the number of completions processed.
 */
Sathya Perla10ef9ab2012-02-09 18:05:27 +00002081static int be_process_rx(struct be_rx_obj *rxo, struct napi_struct *napi,
2082			int budget)
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002083{
Sathya Perla3abcded2010-10-03 22:12:27 -07002084	struct be_adapter *adapter = rxo->adapter;
2085	struct be_queue_info *rx_cq = &rxo->cq;
Sathya Perla2e588f82011-03-11 02:49:26 +00002086	struct be_rx_compl_info *rxcp;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002087	u32 work_done;
2088
2089	for (work_done = 0; work_done < budget; work_done++) {
Sathya Perla3abcded2010-10-03 22:12:27 -07002090		rxcp = be_rx_compl_get(rxo);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002091		if (!rxcp)
2092			break;
2093
Sathya Perla12004ae2011-08-02 19:57:46 +00002094		/* Is it a flush compl that has no data */
2095		if (unlikely(rxcp->num_rcvd == 0))
2096			goto loop_continue;
2097
2098		/* Discard compl with partial DMA Lancer B0 */
2099		if (unlikely(!rxcp->pkt_size)) {
Sathya Perla10ef9ab2012-02-09 18:05:27 +00002100			be_rx_compl_discard(rxo, rxcp);
Sathya Perla12004ae2011-08-02 19:57:46 +00002101			goto loop_continue;
Sathya Perla64642812010-12-01 01:04:17 +00002102		}
Padmanabh Ratnakar009dd872011-05-10 05:12:17 +00002103
Sathya Perla12004ae2011-08-02 19:57:46 +00002104		/* On BE drop pkts that arrive due to imperfect filtering in
2105		 * promiscuous mode on some skews
2106		 */
2107		if (unlikely(rxcp->port != adapter->port_num &&
2108				!lancer_chip(adapter))) {
Sathya Perla10ef9ab2012-02-09 18:05:27 +00002109			be_rx_compl_discard(rxo, rxcp);
Sathya Perla12004ae2011-08-02 19:57:46 +00002110			goto loop_continue;
2111		}
2112
2113		if (do_gro(rxcp))
Sathya Perla10ef9ab2012-02-09 18:05:27 +00002114			be_rx_compl_process_gro(rxo, napi, rxcp);
Sathya Perla12004ae2011-08-02 19:57:46 +00002115		else
Sathya Perla10ef9ab2012-02-09 18:05:27 +00002116			be_rx_compl_process(rxo, rxcp);
Sathya Perla12004ae2011-08-02 19:57:46 +00002117loop_continue:
Sathya Perla2e588f82011-03-11 02:49:26 +00002118		be_rx_stats_update(rxo, rxcp);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002119	}
2120
Sathya Perla10ef9ab2012-02-09 18:05:27 +00002121	if (work_done) {
2122		be_cq_notify(adapter, rx_cq->id, true, work_done);
Padmanabh Ratnakar9372cac2011-11-03 01:49:55 +00002123
		/* Refill the RX queue when it falls below the watermark */
Sathya Perla10ef9ab2012-02-09 18:05:27 +00002124		if (atomic_read(&rxo->q.used) < RX_FRAGS_REFILL_WM)
2125			be_post_rx_frags(rxo, GFP_ATOMIC);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002126	}
Sathya Perla10ef9ab2012-02-09 18:05:27 +00002127
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002128	return work_done;
2129}
2130
/* NAPI TX-completion processing for one TX queue (@idx is the netdev
 * subqueue index). Reclaims up to @budget completions, returns their WRBs,
 * and wakes the subqueue when enough ring space is freed.
 * Returns true when fewer than @budget completions were found (i.e. done).
 */
Sathya Perla10ef9ab2012-02-09 18:05:27 +00002131static bool be_process_tx(struct be_adapter *adapter, struct be_tx_obj *txo,
2132			int budget, int idx)
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002133{
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002134	struct be_eth_tx_compl *txcp;
Sathya Perla10ef9ab2012-02-09 18:05:27 +00002135	int num_wrbs = 0, work_done;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002136
Sathya Perla10ef9ab2012-02-09 18:05:27 +00002137	for (work_done = 0; work_done < budget; work_done++) {
2138		txcp = be_tx_compl_get(&txo->cq);
2139		if (!txcp)
2140			break;
2141		num_wrbs += be_tx_compl_process(adapter, txo,
Sathya Perla3c8def92011-06-12 20:01:58 +00002142				AMAP_GET_BITS(struct amap_eth_tx_compl,
2143					wrb_index, txcp));
Sathya Perla10ef9ab2012-02-09 18:05:27 +00002144	}
2145
2146	if (work_done) {
2147		be_cq_notify(adapter, txo->cq.id, true, work_done);
2148		atomic_sub(num_wrbs, &txo->q.used);
2149
2150		/* As Tx wrbs have been freed up, wake up netdev queue
2151		 * if it was stopped due to lack of tx wrbs. */
2152		if (__netif_subqueue_stopped(adapter->netdev, idx) &&
2153			atomic_read(&txo->q.used) < txo->q.len / 2) {
2154			netif_wake_subqueue(adapter->netdev, idx);
Sathya Perla3c8def92011-06-12 20:01:58 +00002155		}
Sathya Perla3c8def92011-06-12 20:01:58 +00002156
Sathya Perla10ef9ab2012-02-09 18:05:27 +00002157		u64_stats_update_begin(&tx_stats(txo)->sync_compl);
2158		tx_stats(txo)->tx_compl += work_done;
2159		u64_stats_update_end(&tx_stats(txo)->sync_compl);
2160	}
2161	return (work_done < budget); /* Done */
2162}
Sathya Perla3c8def92011-06-12 20:01:58 +00002163
/* NAPI poll handler for one event queue: services all TXQs and RXQs mapped
 * to this EQ (round-robin by EQ index), plus MCC completions on the MCC EQ.
 * Re-arms the EQ only when the budget was not exhausted; otherwise just
 * clears the counted events and stays in polling mode.
 * Returns the RX work done (capped at @budget when TX is not yet done).
 */
Sathya Perla10ef9ab2012-02-09 18:05:27 +00002164int be_poll(struct napi_struct *napi, int budget)
2165{
2166	struct be_eq_obj *eqo = container_of(napi, struct be_eq_obj, napi);
2167	struct be_adapter *adapter = eqo->adapter;
Sathya Perla0b545a62012-11-23 00:27:18 +00002168	int max_work = 0, work, i, num_evts;
Sathya Perla10ef9ab2012-02-09 18:05:27 +00002169	bool tx_done;
Sathya Perla3c8def92011-06-12 20:01:58 +00002170
Sathya Perla0b545a62012-11-23 00:27:18 +00002171	num_evts = events_get(eqo);
2172
Sathya Perla10ef9ab2012-02-09 18:05:27 +00002173	/* Process all TXQs serviced by this EQ */
2174	for (i = eqo->idx; i < adapter->num_tx_qs; i += adapter->num_evt_qs) {
2175		tx_done = be_process_tx(adapter, &adapter->tx_obj[i],
2176					eqo->tx_budget, i);
2177		if (!tx_done)
2178			max_work = budget;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002179	}
2180
Sathya Perla10ef9ab2012-02-09 18:05:27 +00002181	/* This loop will iterate twice for EQ0 in which
2182	 * completions of the last RXQ (default one) are also processed
2183	 * For other EQs the loop iterates only once
2184	 */
2185	for (i = eqo->idx; i < adapter->num_rx_qs; i += adapter->num_evt_qs) {
2186		work = be_process_rx(&adapter->rx_obj[i], napi, budget);
2187		max_work = max(work, max_work);
Sathya Perlaf31e50a2010-03-02 03:56:39 -08002188	}
2189
Sathya Perla10ef9ab2012-02-09 18:05:27 +00002190	if (is_mcc_eqo(eqo))
2191		be_process_mcc(adapter);
Sathya Perla5fb379e2009-06-18 00:02:59 +00002192
Sathya Perla10ef9ab2012-02-09 18:05:27 +00002193	if (max_work < budget) {
2194		napi_complete(napi);
Sathya Perla0b545a62012-11-23 00:27:18 +00002195		be_eq_notify(adapter, eqo->q.id, true, false, num_evts);
Sathya Perla10ef9ab2012-02-09 18:05:27 +00002196	} else {
2197		/* As we'll continue in polling mode, count and clear events */
Sathya Perla0b545a62012-11-23 00:27:18 +00002198		be_eq_notify(adapter, eqo->q.id, false, false, num_evts);
Padmanabh Ratnakar93c86702011-12-19 01:53:35 +00002199	}
Sathya Perla10ef9ab2012-02-09 18:05:27 +00002200	return max_work;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002201}
2202
/* Polls the adapter for unrecoverable hardware errors.
 * On Lancer chips it reads the SLIPORT status/error registers; on BE chips
 * it reads the UE (unrecoverable error) status words from PCI config space
 * and masks out bits the platform declares ignorable. SLIPORT errors set
 * adapter->hw_error; BE UEs are only logged (spurious UEs are possible on
 * some platforms, so hw_error is deliberately not set for them).
 * No-op once a hardware error has already been latched.
 */
Padmanabh Ratnakarf67ef7b2012-07-12 03:57:09 +00002203void be_detect_error(struct be_adapter *adapter)
Ajit Khaparde7c185272010-07-29 06:16:33 +00002204{
Padmanabh Ratnakare1cfb672011-11-03 01:50:08 +00002205	u32 ue_lo = 0, ue_hi = 0, ue_lo_mask = 0, ue_hi_mask = 0;
2206	u32 sliport_status = 0, sliport_err1 = 0, sliport_err2 = 0;
Ajit Khaparde7c185272010-07-29 06:16:33 +00002207	u32 i;
2208
Sathya Perlad23e9462012-12-17 19:38:51 +00002209	if (be_hw_error(adapter))
Sathya Perla72f02482011-11-10 19:17:58 +00002210		return;
2211
Padmanabh Ratnakare1cfb672011-11-03 01:50:08 +00002212	if (lancer_chip(adapter)) {
2213		sliport_status = ioread32(adapter->db + SLIPORT_STATUS_OFFSET);
2214		if (sliport_status & SLIPORT_STATUS_ERR_MASK) {
2215			sliport_err1 = ioread32(adapter->db +
2216					SLIPORT_ERROR1_OFFSET);
2217			sliport_err2 = ioread32(adapter->db +
2218					SLIPORT_ERROR2_OFFSET);
2219		}
2220	} else {
2221		pci_read_config_dword(adapter->pdev,
2222				PCICFG_UE_STATUS_LOW, &ue_lo);
2223		pci_read_config_dword(adapter->pdev,
2224				PCICFG_UE_STATUS_HIGH, &ue_hi);
2225		pci_read_config_dword(adapter->pdev,
2226				PCICFG_UE_STATUS_LOW_MASK, &ue_lo_mask);
2227		pci_read_config_dword(adapter->pdev,
2228				PCICFG_UE_STATUS_HI_MASK, &ue_hi_mask);
Ajit Khaparde7c185272010-07-29 06:16:33 +00002229
		/* Keep only the UE bits not masked off by the platform */
Padmanabh Ratnakarf67ef7b2012-07-12 03:57:09 +00002230		ue_lo = (ue_lo & ~ue_lo_mask);
2231		ue_hi = (ue_hi & ~ue_hi_mask);
Padmanabh Ratnakare1cfb672011-11-03 01:50:08 +00002232	}
Ajit Khaparde7c185272010-07-29 06:16:33 +00002233
Ajit Khaparde1451ae62012-10-08 18:18:21 +00002234	/* On certain platforms BE hardware can indicate spurious UEs.
2235	 * Allow the h/w to stop working completely in case of a real UE.
2236	 * Hence not setting the hw_error for UE detection.
2237	 */
2238	if (sliport_status & SLIPORT_STATUS_ERR_MASK) {
Padmanabh Ratnakarf67ef7b2012-07-12 03:57:09 +00002239		adapter->hw_error = true;
Sathya Perla434b3642011-11-10 19:17:59 +00002240		dev_err(&adapter->pdev->dev,
Padmanabh Ratnakarf67ef7b2012-07-12 03:57:09 +00002241			"Error detected in the card\n");
2242	}
2243
2244	if (sliport_status & SLIPORT_STATUS_ERR_MASK) {
2245		dev_err(&adapter->pdev->dev,
2246			"ERR: sliport status 0x%x\n", sliport_status);
2247		dev_err(&adapter->pdev->dev,
2248			"ERR: sliport error1 0x%x\n", sliport_err1);
2249		dev_err(&adapter->pdev->dev,
2250			"ERR: sliport error2 0x%x\n", sliport_err2);
Ajit Khaparded053de92010-09-03 06:23:30 +00002251	}
2252
Padmanabh Ratnakare1cfb672011-11-03 01:50:08 +00002253	if (ue_lo) {
2254		for (i = 0; ue_lo; ue_lo >>= 1, i++) {
2255			if (ue_lo & 1)
Ajit Khaparde7c185272010-07-29 06:16:33 +00002256				dev_err(&adapter->pdev->dev,
2257				"UE: %s bit set\n", ue_status_low_desc[i]);
2258		}
2259	}
Padmanabh Ratnakarf67ef7b2012-07-12 03:57:09 +00002260
Padmanabh Ratnakare1cfb672011-11-03 01:50:08 +00002261	if (ue_hi) {
2262		for (i = 0; ue_hi; ue_hi >>= 1, i++) {
2263			if (ue_hi & 1)
Ajit Khaparde7c185272010-07-29 06:16:33 +00002264				dev_err(&adapter->pdev->dev,
2265				"UE: %s bit set\n", ue_status_hi_desc[i]);
2266		}
2267	}
2268
2269}
2270
/* Disables MSI-X if it was enabled and clears the cached vector count. */
Sathya Perla8d56ff12009-11-22 22:02:26 +00002271static void be_msix_disable(struct be_adapter *adapter)
2272{
Sathya Perlaac6a0c42011-03-21 20:49:25 +00002273	if (msix_enabled(adapter)) {
Sathya Perla8d56ff12009-11-22 22:02:26 +00002274		pci_disable_msix(adapter->pdev);
Sathya Perlaac6a0c42011-03-21 20:49:25 +00002275		adapter->num_msix_vec = 0;
Sathya Perla3abcded2010-10-03 22:12:27 -07002276	}
2277}
2278
/* Returns the number of RSS queues to aim for: the FW-advertised maximum,
 * capped by the kernel's default RSS queue count, when the function
 * supports RSS (Lancer, or a BE PF without SR-IOV wanted). 0 otherwise.
 */
Sathya Perla10ef9ab2012-02-09 18:05:27 +00002279static uint be_num_rss_want(struct be_adapter *adapter)
2280{
Yuval Mintz30e80b52012-07-01 03:19:00 +00002281	u32 num = 0;
Padmanabh Ratnakarabb93952012-10-20 06:01:41 +00002282
Sathya Perla10ef9ab2012-02-09 18:05:27 +00002283	if ((adapter->function_caps & BE_FUNCTION_CAPS_RSS) &&
Padmanabh Ratnakarabb93952012-10-20 06:01:41 +00002284	    (lancer_chip(adapter) ||
2285	     (!sriov_want(adapter) && be_physfn(adapter)))) {
2286		num = adapter->max_rss_queues;
Yuval Mintz30e80b52012-07-01 03:19:00 +00002287		num = min_t(u32, num, (u32)netif_get_num_default_rss_queues());
2288	}
2289	return num;
Sathya Perla10ef9ab2012-02-09 18:05:27 +00002290}
2291
/* Tries to enable MSI-X with one vector per wanted RSS queue (plus RoCE
 * vectors when RoCE is supported), capped by online CPUs. If the full
 * request fails but pci_enable_msix() reports a smaller available count,
 * retries with that count. On success, splits the granted vectors between
 * NIC and RoCE; on failure, leaves MSI-X disabled (INTx fallback is
 * handled elsewhere).
 */
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002292static void be_msix_enable(struct be_adapter *adapter)
2293{
Sathya Perla10ef9ab2012-02-09 18:05:27 +00002294#define BE_MIN_MSIX_VECTORS		1
Parav Pandit045508a2012-03-26 14:27:13 +00002295	int i, status, num_vec, num_roce_vec = 0;
Sathya Perlad3791422012-09-28 04:39:44 +00002296	struct device *dev = &adapter->pdev->dev;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002297
Sathya Perla10ef9ab2012-02-09 18:05:27 +00002298	/* If RSS queues are not used, need a vec for default RX Q */
2299	num_vec = min(be_num_rss_want(adapter), num_online_cpus());
Parav Pandit045508a2012-03-26 14:27:13 +00002300	if (be_roce_supported(adapter)) {
2301		num_roce_vec = min_t(u32, MAX_ROCE_MSIX_VECTORS,
2302					(num_online_cpus() + 1));
2303		num_roce_vec = min(num_roce_vec, MAX_ROCE_EQS);
2304		num_vec += num_roce_vec;
2305		num_vec = min(num_vec, MAX_MSIX_VECTORS);
2306	}
Sathya Perla10ef9ab2012-02-09 18:05:27 +00002307	num_vec = max(num_vec, BE_MIN_MSIX_VECTORS);
Sathya Perla3abcded2010-10-03 22:12:27 -07002308
Sathya Perlaac6a0c42011-03-21 20:49:25 +00002309	for (i = 0; i < num_vec; i++)
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002310		adapter->msix_entries[i].entry = i;
2311
Sathya Perlaac6a0c42011-03-21 20:49:25 +00002312	status = pci_enable_msix(adapter->pdev, adapter->msix_entries, num_vec);
Sathya Perla3abcded2010-10-03 22:12:27 -07002313	if (status == 0) {
2314		goto done;
2315	} else if (status >= BE_MIN_MSIX_VECTORS) {
		/* A positive return is the number of vectors available; retry */
Sathya Perlaac6a0c42011-03-21 20:49:25 +00002316		num_vec = status;
Sathya Perla3abcded2010-10-03 22:12:27 -07002317		if (pci_enable_msix(adapter->pdev, adapter->msix_entries,
Sathya Perlaac6a0c42011-03-21 20:49:25 +00002318				num_vec) == 0)
Sathya Perla3abcded2010-10-03 22:12:27 -07002319			goto done;
Sathya Perla3abcded2010-10-03 22:12:27 -07002320	}
Sathya Perlad3791422012-09-28 04:39:44 +00002321
2322	dev_warn(dev, "MSIx enable failed\n");
Sathya Perla3abcded2010-10-03 22:12:27 -07002323	return;
2324done:
Parav Pandit045508a2012-03-26 14:27:13 +00002325	if (be_roce_supported(adapter)) {
2326		if (num_vec > num_roce_vec) {
2327			adapter->num_msix_vec = num_vec - num_roce_vec;
2328			adapter->num_msix_roce_vec =
2329				num_vec - adapter->num_msix_vec;
2330		} else {
2331			adapter->num_msix_vec = num_vec;
2332			adapter->num_msix_roce_vec = 0;
2333		}
2334	} else
2335		adapter->num_msix_vec = num_vec;
Sathya Perlad3791422012-09-28 04:39:44 +00002336	dev_info(dev, "enabled %d MSI-x vector(s)\n", adapter->num_msix_vec);
Sathya Perlaac6a0c42011-03-21 20:49:25 +00002337	return;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002338}
2339
/* Returns the MSI-X vector (IRQ number) assigned to the given EQ object. */
Sathya Perlafe6d2a32010-11-21 23:25:50 +00002340static inline int be_msix_vec_get(struct be_adapter *adapter,
Sathya Perla10ef9ab2012-02-09 18:05:27 +00002341				struct be_eq_obj *eqo)
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002342{
Sathya Perla10ef9ab2012-02-09 18:05:27 +00002343	return adapter->msix_entries[eqo->idx].vector;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002344}
2345
/* Registers one MSI-X IRQ handler per event queue (named "<netdev>-q<i>").
 * On any failure, unwinds the IRQs registered so far and disables MSI-X.
 * Returns 0 on success or the request_irq() error.
 */
2346static int be_msix_register(struct be_adapter *adapter)
2347{
Sathya Perla10ef9ab2012-02-09 18:05:27 +00002348	struct net_device *netdev = adapter->netdev;
2349	struct be_eq_obj *eqo;
2350	int status, i, vec;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002351
Sathya Perla10ef9ab2012-02-09 18:05:27 +00002352	for_all_evt_queues(adapter, eqo, i) {
2353		sprintf(eqo->desc, "%s-q%d", netdev->name, i);
2354		vec = be_msix_vec_get(adapter, eqo);
2355		status = request_irq(vec, be_msix, 0, eqo->desc, eqo);
Sathya Perla3abcded2010-10-03 22:12:27 -07002356		if (status)
2357			goto err_msix;
2358	}
Sathya Perlab628bde2009-08-17 00:58:26 +00002359
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002360	return 0;
Sathya Perla3abcded2010-10-03 22:12:27 -07002361err_msix:
Sathya Perla10ef9ab2012-02-09 18:05:27 +00002362	for (i--, eqo = &adapter->eq_obj[i]; i >= 0; i--, eqo--)
2363		free_irq(be_msix_vec_get(adapter, eqo), eqo);
2364	dev_warn(&adapter->pdev->dev, "MSIX Request IRQ failed - err %d\n",
2365		status);
Sathya Perlaac6a0c42011-03-21 20:49:25 +00002366	be_msix_disable(adapter);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002367	return status;
2368}
2369
/* Registers the adapter's interrupt handler(s): MSI-X when enabled, with a
 * shared-INTx fallback on EQ0 for PFs (VFs do not support INTx). Sets
 * isr_registered on success. Returns 0 or a request_irq() error.
 */
2370static int be_irq_register(struct be_adapter *adapter)
2371{
2372	struct net_device *netdev = adapter->netdev;
2373	int status;
2374
Sathya Perlaac6a0c42011-03-21 20:49:25 +00002375	if (msix_enabled(adapter)) {
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002376		status = be_msix_register(adapter);
2377		if (status == 0)
2378			goto done;
Sarveshwar Bandiba343c72010-03-31 02:56:12 +00002379		/* INTx is not supported for VF */
2380		if (!be_physfn(adapter))
2381			return status;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002382	}
2383
Sathya Perlae49cc342012-11-27 19:50:02 +00002384	/* INTx: only the first EQ is used */
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002385	netdev->irq = adapter->pdev->irq;
2386	status = request_irq(netdev->irq, be_intx, IRQF_SHARED, netdev->name,
Sathya Perlae49cc342012-11-27 19:50:02 +00002387			     &adapter->eq_obj[0]);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002388	if (status) {
2389		dev_err(&adapter->pdev->dev,
2390			"INTx request IRQ failed - err %d\n", status);
2391		return status;
2392	}
2393done:
2394	adapter->isr_registered = true;
2395	return 0;
2396}
2397
/* Frees the registered IRQ(s): the shared INTx IRQ bound to EQ0, or every
 * per-EQ MSI-X vector. No-op if nothing was registered.
 */
2398static void be_irq_unregister(struct be_adapter *adapter)
2399{
2400	struct net_device *netdev = adapter->netdev;
Sathya Perla10ef9ab2012-02-09 18:05:27 +00002401	struct be_eq_obj *eqo;
Sathya Perla3abcded2010-10-03 22:12:27 -07002402	int i;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002403
2404	if (!adapter->isr_registered)
2405		return;
2406
2407	/* INTx */
Sathya Perlaac6a0c42011-03-21 20:49:25 +00002408	if (!msix_enabled(adapter)) {
Sathya Perlae49cc342012-11-27 19:50:02 +00002409		free_irq(netdev->irq, &adapter->eq_obj[0]);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002410		goto done;
2411	}
2412
2413	/* MSIx */
Sathya Perla10ef9ab2012-02-09 18:05:27 +00002414	for_all_evt_queues(adapter, eqo, i)
2415		free_irq(be_msix_vec_get(adapter, eqo), eqo);
Sathya Perla3abcded2010-10-03 22:12:27 -07002416
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002417done:
2418	adapter->isr_registered = false;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002419}
2420
/* Destroys all RX queues in hardware and frees host-side queue memory.
 * After the FW destroy, waits 1ms for in-flight DMA and the flush
 * completion before draining the CQ.
 */
Sathya Perla10ef9ab2012-02-09 18:05:27 +00002421static void be_rx_qs_destroy(struct be_adapter *adapter)
Sathya Perla482c9e72011-06-29 23:33:17 +00002422{
2423	struct be_queue_info *q;
2424	struct be_rx_obj *rxo;
2425	int i;
2426
2427	for_all_rx_queues(adapter, rxo, i) {
2428		q = &rxo->q;
2429		if (q->created) {
2430			be_cmd_rxq_destroy(adapter, q);
2431			/* After the rxq is invalidated, wait for a grace time
2432			 * of 1ms for all dma to end and the flush compl to
2433			 * arrive
2434			 */
2435			mdelay(1);
Sathya Perla10ef9ab2012-02-09 18:05:27 +00002436			be_rx_cq_clean(rxo);
Sathya Perla482c9e72011-06-29 23:33:17 +00002437		}
Sathya Perla10ef9ab2012-02-09 18:05:27 +00002438		be_queue_free(adapter, q);
Sathya Perla482c9e72011-06-29 23:33:17 +00002439	}
2440}
2441
/* ndo_stop handler: quiesces the device in a strict order — notify RoCE,
 * disable NAPI, stop async MCC, drain pending TX completions, destroy RX
 * queues, clean each EQ (synchronizing against its IRQ first), then
 * unregister interrupts. Always returns 0.
 */
Sathya Perla889cd4b2010-05-30 23:33:45 +00002442static int be_close(struct net_device *netdev)
2443{
2444	struct be_adapter *adapter = netdev_priv(netdev);
Sathya Perla10ef9ab2012-02-09 18:05:27 +00002445	struct be_eq_obj *eqo;
2446	int i;
Sathya Perla889cd4b2010-05-30 23:33:45 +00002447
Parav Pandit045508a2012-03-26 14:27:13 +00002448	be_roce_dev_close(adapter);
2449
Sathya Perlaa323d9b2012-12-17 19:38:50 +00002450	for_all_evt_queues(adapter, eqo, i)
Sathya Perla10ef9ab2012-02-09 18:05:27 +00002451		napi_disable(&eqo->napi);
Sathya Perlaa323d9b2012-12-17 19:38:50 +00002452
2453	be_async_mcc_disable(adapter);
2454
2455	/* Wait for all pending tx completions to arrive so that
2456	 * all tx skbs are freed.
2457	 */
2458	be_tx_compl_clean(adapter);
2459
2460	be_rx_qs_destroy(adapter);
2461
2462	for_all_evt_queues(adapter, eqo, i) {
Sathya Perla10ef9ab2012-02-09 18:05:27 +00002463		if (msix_enabled(adapter))
2464			synchronize_irq(be_msix_vec_get(adapter, eqo));
2465		else
2466			synchronize_irq(netdev->irq);
2467		be_eq_clean(eqo);
Padmanabh Ratnakar63fcb272011-03-07 03:09:17 +00002468	}
2469
Sathya Perla889cd4b2010-05-30 23:33:45 +00002470	be_irq_unregister(adapter);
2471
Sathya Perla482c9e72011-06-29 23:33:17 +00002472	return 0;
2473}
2474
/* Allocates and creates the RX queues: the default (non-RSS) RXQ first
 * (the FW expects this ordering), then the RSS rings. When multiple RXQs
 * exist, programs the 128-entry RSS indirection table with the RSS ring
 * ids in round-robin order. Finally posts the initial RX buffers.
 * Returns 0 on success or an allocation/firmware status.
 */
Sathya Perla10ef9ab2012-02-09 18:05:27 +00002475static int be_rx_qs_create(struct be_adapter *adapter)
Sathya Perla482c9e72011-06-29 23:33:17 +00002476{
2477	struct be_rx_obj *rxo;
Padmanabh Ratnakare9008ee2011-11-25 05:48:53 +00002478	int rc, i, j;
2479	u8 rsstable[128];
Sathya Perla482c9e72011-06-29 23:33:17 +00002480
2481	for_all_rx_queues(adapter, rxo, i) {
Sathya Perla10ef9ab2012-02-09 18:05:27 +00002482		rc = be_queue_alloc(adapter, &rxo->q, RX_Q_LEN,
2483				    sizeof(struct be_eth_rx_d));
2484		if (rc)
2485			return rc;
2486	}
2487
2488	/* The FW would like the default RXQ to be created first */
2489	rxo = default_rxo(adapter);
2490	rc = be_cmd_rxq_create(adapter, &rxo->q, rxo->cq.id, rx_frag_size,
2491			       adapter->if_handle, false, &rxo->rss_id);
2492	if (rc)
2493		return rc;
2494
2495	for_all_rss_queues(adapter, rxo, i) {
Sathya Perla482c9e72011-06-29 23:33:17 +00002496		rc = be_cmd_rxq_create(adapter, &rxo->q, rxo->cq.id,
Sathya Perla10ef9ab2012-02-09 18:05:27 +00002497				       rx_frag_size, adapter->if_handle,
2498				       true, &rxo->rss_id);
Sathya Perla482c9e72011-06-29 23:33:17 +00002499		if (rc)
2500			return rc;
2501	}
2502
2503	if (be_multi_rxq(adapter)) {
Padmanabh Ratnakare9008ee2011-11-25 05:48:53 +00002504		for (j = 0; j < 128; j += adapter->num_rx_qs - 1) {
2505			for_all_rss_queues(adapter, rxo, i) {
2506				if ((j + i) >= 128)
2507					break;
2508				rsstable[j + i] = rxo->rss_id;
2509			}
2510		}
2511		rc = be_cmd_rss_config(adapter, rsstable, 128);
Sathya Perla482c9e72011-06-29 23:33:17 +00002512		if (rc)
2513			return rc;
2514	}
2515
2516	/* First time posting */
Sathya Perla10ef9ab2012-02-09 18:05:27 +00002517	for_all_rx_queues(adapter, rxo, i)
Sathya Perla482c9e72011-06-29 23:33:17 +00002518		be_post_rx_frags(rxo, GFP_KERNEL);
Sathya Perla889cd4b2010-05-30 23:33:45 +00002519	return 0;
2520}
2521
/* ndo_open handler: creates RX queues, registers IRQs, arms all RX/TX CQs,
 * enables async MCC, enables NAPI and arms each EQ, reports the current
 * link state, and notifies RoCE. On failure, unwinds via be_close().
 * Returns 0 on success, -EIO on failure.
 */
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002522static int be_open(struct net_device *netdev)
2523{
2524	struct be_adapter *adapter = netdev_priv(netdev);
Sathya Perla10ef9ab2012-02-09 18:05:27 +00002525	struct be_eq_obj *eqo;
Sathya Perla3abcded2010-10-03 22:12:27 -07002526	struct be_rx_obj *rxo;
Sathya Perla10ef9ab2012-02-09 18:05:27 +00002527	struct be_tx_obj *txo;
Ajit Khapardeb236916a2011-12-30 12:15:40 +00002528	u8 link_status;
Sathya Perla3abcded2010-10-03 22:12:27 -07002529	int status, i;
Sathya Perla5fb379e2009-06-18 00:02:59 +00002530
Sathya Perla10ef9ab2012-02-09 18:05:27 +00002531	status = be_rx_qs_create(adapter);
Sathya Perla482c9e72011-06-29 23:33:17 +00002532	if (status)
2533		goto err;
2534
Sathya Perla5fb379e2009-06-18 00:02:59 +00002535	be_irq_register(adapter);
2536
Sathya Perla10ef9ab2012-02-09 18:05:27 +00002537	for_all_rx_queues(adapter, rxo, i)
Sathya Perla3abcded2010-10-03 22:12:27 -07002538		be_cq_notify(adapter, rxo->cq.id, true, 0);
Sathya Perla5fb379e2009-06-18 00:02:59 +00002539
Sathya Perla10ef9ab2012-02-09 18:05:27 +00002540	for_all_tx_queues(adapter, txo, i)
2541		be_cq_notify(adapter, txo->cq.id, true, 0);
2542
Sathya Perla7a1e9b22010-02-17 01:35:11 +00002543	be_async_mcc_enable(adapter);
2544
Sathya Perla10ef9ab2012-02-09 18:05:27 +00002545	for_all_evt_queues(adapter, eqo, i) {
2546		napi_enable(&eqo->napi);
2547		be_eq_notify(adapter, eqo->q.id, true, false, 0);
2548	}
2549
Sathya Perla323ff712012-09-28 04:39:43 +00002550	status = be_cmd_link_status_query(adapter, NULL, &link_status, 0);
Ajit Khapardeb236916a2011-12-30 12:15:40 +00002551	if (!status)
2552		be_link_status_update(adapter, link_status);
2553
Parav Pandit045508a2012-03-26 14:27:13 +00002554	be_roce_dev_open(adapter);
Sathya Perla889cd4b2010-05-30 23:33:45 +00002555	return 0;
2556err:
2557	be_close(adapter->netdev);
2558	return -EIO;
Sathya Perla5fb379e2009-06-18 00:02:59 +00002559}
2560
/* Enables or disables Wake-on-LAN (magic packet). When enabling, sets the
 * PM control bits in PCI config space, programs the netdev MAC as the
 * magic-WoL address, and arms PCI wake for D3hot/D3cold; when disabling,
 * clears the magic address (all-zero MAC) and disarms PCI wake. Uses a
 * temporary DMA-coherent buffer for the FW command.
 * Returns 0 on success, -1 on allocation failure, or a FW/PCI status.
 */
Ajit Khaparde71d8d1b2009-12-03 06:16:59 +00002561static int be_setup_wol(struct be_adapter *adapter, bool enable)
2562{
2563	struct be_dma_mem cmd;
2564	int status = 0;
2565	u8 mac[ETH_ALEN];
2566
2567	memset(mac, 0, ETH_ALEN);
2568
2569	cmd.size = sizeof(struct be_cmd_req_acpi_wol_magic_config);
Ivan Vecera2b7bceb2011-02-02 08:05:12 +00002570	cmd.va = dma_alloc_coherent(&adapter->pdev->dev, cmd.size, &cmd.dma,
Joe Perches1f9061d22013-03-15 07:23:58 +00002571				    GFP_KERNEL | __GFP_ZERO);
Ajit Khaparde71d8d1b2009-12-03 06:16:59 +00002572	if (cmd.va == NULL)
2573		return -1;
Ajit Khaparde71d8d1b2009-12-03 06:16:59 +00002574
2575	if (enable) {
2576		status = pci_write_config_dword(adapter->pdev,
2577			PCICFG_PM_CONTROL_OFFSET, PCICFG_PM_CONTROL_MASK);
2578		if (status) {
2579			dev_err(&adapter->pdev->dev,
Frans Pop2381a552010-03-24 07:57:36 +00002580				"Could not enable Wake-on-lan\n");
Ivan Vecera2b7bceb2011-02-02 08:05:12 +00002581			dma_free_coherent(&adapter->pdev->dev, cmd.size, cmd.va,
2582					  cmd.dma);
Ajit Khaparde71d8d1b2009-12-03 06:16:59 +00002583			return status;
2584		}
2585		status = be_cmd_enable_magic_wol(adapter,
2586				adapter->netdev->dev_addr, &cmd);
2587		pci_enable_wake(adapter->pdev, PCI_D3hot, 1);
2588		pci_enable_wake(adapter->pdev, PCI_D3cold, 1);
2589	} else {
2590		status = be_cmd_enable_magic_wol(adapter, mac, &cmd);
2591		pci_enable_wake(adapter->pdev, PCI_D3hot, 0);
2592		pci_enable_wake(adapter->pdev, PCI_D3cold, 0);
2593	}
2594
Ivan Vecera2b7bceb2011-02-02 08:05:12 +00002595	dma_free_coherent(&adapter->pdev->dev, cmd.size, cmd.va, cmd.dma);
Ajit Khaparde71d8d1b2009-12-03 06:16:59 +00002596	return status;
2597}
2598
Ajit Khaparde6d87f5c2010-08-25 00:32:33 +00002599/*
2600 * Generate a seed MAC address from the PF MAC Address using jhash.
2601 * MAC Address for VFs are assigned incrementally starting from the seed.
2602 * These addresses are programmed in the ASIC by the PF and the VF driver
2603 * queries for the MAC address during its probe.
2604 */
/* Programs a MAC address for every VF, derived incrementally from the
 * generated seed MAC (see the block comment above). Lancer uses the
 * set_mac_list command; BE chips add a PMAC entry on the VF's interface.
 * The last assigned MAC is cached in each vf_cfg. Returns the last
 * command status (failures are logged per-VF but do not stop the loop).
 */
Sathya Perla4c876612013-02-03 20:30:11 +00002605static int be_vf_eth_addr_config(struct be_adapter *adapter)
Ajit Khaparde6d87f5c2010-08-25 00:32:33 +00002606{
Sathya Perlaf9449ab2011-10-24 02:45:01 +00002607	u32 vf;
Sathya Perla3abcded2010-10-03 22:12:27 -07002608	int status = 0;
Ajit Khaparde6d87f5c2010-08-25 00:32:33 +00002609	u8 mac[ETH_ALEN];
Sathya Perla11ac75e2011-12-13 00:58:50 +00002610	struct be_vf_cfg *vf_cfg;
Ajit Khaparde6d87f5c2010-08-25 00:32:33 +00002611
2612	be_vf_eth_addr_generate(adapter, mac);
2613
Sathya Perla11ac75e2011-12-13 00:58:50 +00002614	for_all_vfs(adapter, vf_cfg, vf) {
Padmanabh Ratnakar590c3912011-11-25 05:47:26 +00002615		if (lancer_chip(adapter)) {
2616			status = be_cmd_set_mac_list(adapter,  mac, 1, vf + 1);
2617		} else {
2618			status = be_cmd_pmac_add(adapter, mac,
Sathya Perla11ac75e2011-12-13 00:58:50 +00002619						 vf_cfg->if_handle,
2620						 &vf_cfg->pmac_id, vf + 1);
Padmanabh Ratnakar590c3912011-11-25 05:47:26 +00002621		}
2622
Ajit Khaparde6d87f5c2010-08-25 00:32:33 +00002623		if (status)
2624			dev_err(&adapter->pdev->dev,
Padmanabh Ratnakar590c3912011-11-25 05:47:26 +00002625			"Mac address assignment failed for VF %d\n", vf);
Ajit Khaparde6d87f5c2010-08-25 00:32:33 +00002626		else
Sathya Perla11ac75e2011-12-13 00:58:50 +00002627			memcpy(vf_cfg->mac_addr, mac, ETH_ALEN);
Ajit Khaparde6d87f5c2010-08-25 00:32:33 +00002628
		/* Next VF gets the next sequential MAC */
2629		mac[5] += 1;
2630	}
2631	return status;
2632}
2633
/* Read back the MAC already programmed for each VF (used when the VFs
 * were enabled by a previous driver load) and cache it in vf_cfg.
 * Returns 0 on success or the first query error.
 */
static int be_vfs_mac_query(struct be_adapter *adapter)
{
	int status, vf;
	u8 mac[ETH_ALEN];
	struct be_vf_cfg *vf_cfg;
	bool active;

	for_all_vfs(adapter, vf_cfg, vf) {
		/* Fetches the pmac_id for the VF; its return value is
		 * deliberately ignored — the query below decides success.
		 */
		be_cmd_get_mac_from_list(adapter, mac, &active,
					 &vf_cfg->pmac_id, 0);

		status = be_cmd_mac_addr_query(adapter, mac, false,
					       vf_cfg->if_handle, 0);
		if (status)
			return status;
		memcpy(vf_cfg->mac_addr, mac, ETH_ALEN);
	}
	return 0;
}
2653
/* Undo be_vf_setup(): destroy per-VF MACs and interfaces and disable
 * SR-IOV. If any VF is still assigned to a VM, the hardware state is
 * left alone and only the driver-side bookkeeping is released.
 */
static void be_vf_clear(struct be_adapter *adapter)
{
	struct be_vf_cfg *vf_cfg;
	u32 vf;

	if (be_find_vfs(adapter, ASSIGNED)) {
		dev_warn(&adapter->pdev->dev,
			 "VFs are assigned to VMs: not disabling VFs\n");
		goto done;
	}

	for_all_vfs(adapter, vf_cfg, vf) {
		/* Remove the VF's MAC first, then destroy its interface */
		if (lancer_chip(adapter))
			be_cmd_set_mac_list(adapter, NULL, 0, vf + 1);
		else
			be_cmd_pmac_del(adapter, vf_cfg->if_handle,
					vf_cfg->pmac_id, vf + 1);

		be_cmd_if_destroy(adapter, vf_cfg->if_handle, vf + 1);
	}
	pci_disable_sriov(adapter->pdev);
done:
	kfree(adapter->vf_cfg);
	adapter->num_vfs = 0;
}
2679
/* Tear down everything be_setup() created, in reverse order:
 * worker, VFs, extra unicast MACs, the interface, then all queues,
 * the pmac_id table and finally MSI-X. Always returns 0.
 */
static int be_clear(struct be_adapter *adapter)
{
	/* pmac_id[0] is the primary MAC and is not deleted here;
	 * extra unicast entries start at index 1.
	 */
	int i = 1;

	if (adapter->flags & BE_FLAGS_WORKER_SCHEDULED) {
		cancel_delayed_work_sync(&adapter->work);
		adapter->flags &= ~BE_FLAGS_WORKER_SCHEDULED;
	}

	if (sriov_enabled(adapter))
		be_vf_clear(adapter);

	/* Delete the additional unicast MACs added at runtime */
	for (; adapter->uc_macs > 0; adapter->uc_macs--, i++)
		be_cmd_pmac_del(adapter, adapter->if_handle,
			adapter->pmac_id[i], 0);

	be_cmd_if_destroy(adapter, adapter->if_handle, 0);

	be_mcc_queues_destroy(adapter);
	be_rx_cqs_destroy(adapter);
	be_tx_queues_destroy(adapter);
	be_evt_queues_destroy(adapter);

	kfree(adapter->pmac_id);
	adapter->pmac_id = NULL;

	be_msix_disable(adapter);
	return 0;
}
2709
Sathya Perla4c876612013-02-03 20:30:11 +00002710static int be_vfs_if_create(struct be_adapter *adapter)
Padmanabh Ratnakarabb93952012-10-20 06:01:41 +00002711{
Sathya Perla4c876612013-02-03 20:30:11 +00002712 struct be_vf_cfg *vf_cfg;
2713 u32 cap_flags, en_flags, vf;
Padmanabh Ratnakarabb93952012-10-20 06:01:41 +00002714 int status;
2715
Sathya Perla4c876612013-02-03 20:30:11 +00002716 cap_flags = BE_IF_FLAGS_UNTAGGED | BE_IF_FLAGS_BROADCAST |
2717 BE_IF_FLAGS_MULTICAST;
Padmanabh Ratnakarabb93952012-10-20 06:01:41 +00002718
Sathya Perla4c876612013-02-03 20:30:11 +00002719 for_all_vfs(adapter, vf_cfg, vf) {
2720 if (!BE3_chip(adapter))
2721 be_cmd_get_profile_config(adapter, &cap_flags, vf + 1);
2722
2723 /* If a FW profile exists, then cap_flags are updated */
2724 en_flags = cap_flags & (BE_IF_FLAGS_UNTAGGED |
2725 BE_IF_FLAGS_BROADCAST | BE_IF_FLAGS_MULTICAST);
2726 status = be_cmd_if_create(adapter, cap_flags, en_flags,
2727 &vf_cfg->if_handle, vf + 1);
2728 if (status)
2729 goto err;
2730 }
2731err:
2732 return status;
Padmanabh Ratnakarabb93952012-10-20 06:01:41 +00002733}
2734
/* Allocate the per-VF config array and mark every entry's interface
 * handle and pmac_id as invalid (-1). Returns 0 or -ENOMEM.
 */
static int be_vf_setup_init(struct be_adapter *adapter)
{
	struct be_vf_cfg *vf_cfg;
	int vf;

	adapter->vf_cfg = kcalloc(adapter->num_vfs, sizeof(*vf_cfg),
				  GFP_KERNEL);
	if (!adapter->vf_cfg)
		return -ENOMEM;

	for_all_vfs(adapter, vf_cfg, vf) {
		vf_cfg->if_handle = -1;
		vf_cfg->pmac_id = -1;
	}
	return 0;
}
2751
/* Bring up SR-IOV: enable VFs in PCI (or adopt already-enabled ones),
 * create/lookup a FW interface per VF, program or query each VF's MAC,
 * then configure QoS, link speed, default VLAN, and enable the VF.
 * On any error after init, all VF state is torn down via be_vf_clear().
 */
static int be_vf_setup(struct be_adapter *adapter)
{
	struct be_vf_cfg *vf_cfg;
	u16 def_vlan, lnk_speed;
	int status, old_vfs, vf;
	struct device *dev = &adapter->pdev->dev;

	/* VFs may already be enabled from a previous driver load; in that
	 * case adopt the existing count and ignore the num_vfs parameter.
	 */
	old_vfs = be_find_vfs(adapter, ENABLED);
	if (old_vfs) {
		dev_info(dev, "%d VFs are already enabled\n", old_vfs);
		if (old_vfs != num_vfs)
			dev_warn(dev, "Ignoring num_vfs=%d setting\n", num_vfs);
		adapter->num_vfs = old_vfs;
	} else {
		if (num_vfs > adapter->dev_num_vfs)
			dev_info(dev, "Device supports %d VFs and not %d\n",
				 adapter->dev_num_vfs, num_vfs);
		adapter->num_vfs = min_t(u16, num_vfs, adapter->dev_num_vfs);

		/* SR-IOV enable failure is not fatal to PF setup:
		 * report it and continue with 0 VFs.
		 */
		status = pci_enable_sriov(adapter->pdev, num_vfs);
		if (status) {
			dev_err(dev, "SRIOV enable failed\n");
			adapter->num_vfs = 0;
			return 0;
		}
	}

	status = be_vf_setup_init(adapter);
	if (status)
		goto err;

	/* Pre-existing VFs already have interfaces: query their ids.
	 * Fresh VFs need interfaces created.
	 */
	if (old_vfs) {
		for_all_vfs(adapter, vf_cfg, vf) {
			status = be_cmd_get_if_id(adapter, vf_cfg, vf);
			if (status)
				goto err;
		}
	} else {
		status = be_vfs_if_create(adapter);
		if (status)
			goto err;
	}

	/* Likewise: query MACs of pre-existing VFs, assign fresh ones */
	if (old_vfs) {
		status = be_vfs_mac_query(adapter);
		if (status)
			goto err;
	} else {
		status = be_vf_eth_addr_config(adapter);
		if (status)
			goto err;
	}

	for_all_vfs(adapter, vf_cfg, vf) {
		/* BE3 FW, by default, caps VF TX-rate to 100mbps.
		 * Allow full available bandwidth
		 */
		if (BE3_chip(adapter) && !old_vfs)
			be_cmd_set_qos(adapter, 1000, vf+1);

		/* Link-speed query failure is tolerated; tx_rate is
		 * simply left unset for that VF.
		 */
		status = be_cmd_link_status_query(adapter, &lnk_speed,
						  NULL, vf + 1);
		if (!status)
			vf_cfg->tx_rate = lnk_speed;

		status = be_cmd_get_hsw_config(adapter, &def_vlan,
					       vf + 1, vf_cfg->if_handle);
		if (status)
			goto err;
		vf_cfg->def_vid = def_vlan;

		be_cmd_enable_vf(adapter, vf + 1);
	}
	return 0;
err:
	dev_err(dev, "VF setup failed\n");
	be_vf_clear(adapter);
	return status;
}
2831
Sathya Perla30128032011-11-10 19:17:57 +00002832static void be_setup_init(struct be_adapter *adapter)
2833{
2834 adapter->vlan_prio_bmap = 0xff;
Ajit Khaparde42f11cf2012-04-21 18:53:22 +00002835 adapter->phy.link_speed = -1;
Sathya Perla30128032011-11-10 19:17:57 +00002836 adapter->if_handle = -1;
2837 adapter->be3_native = false;
2838 adapter->promiscuous = false;
Padmanabh Ratnakarf25b1192012-10-20 06:02:52 +00002839 if (be_physfn(adapter))
2840 adapter->cmd_privileges = MAX_PRIVILEGES;
2841 else
2842 adapter->cmd_privileges = MIN_PRIVILEGES;
Sathya Perla30128032011-11-10 19:17:57 +00002843}
2844
/* Determine the MAC for the given interface and whether it is already
 * active (programmed) in the FW. If the netdev already has a permanent
 * MAC, reuse it; otherwise query the FW by chip/function type.
 * Returns 0 on success; *active_mac tells the caller whether a
 * be_cmd_pmac_add() is still needed.
 */
static int be_get_mac_addr(struct be_adapter *adapter, u8 *mac, u32 if_handle,
			   bool *active_mac, u32 *pmac_id)
{
	int status = 0;

	if (!is_zero_ether_addr(adapter->netdev->perm_addr)) {
		memcpy(mac, adapter->netdev->dev_addr, ETH_ALEN);
		/* On BE3 VFs the PF has already programmed the MAC;
		 * everywhere else the caller must still add it.
		 */
		if (!lancer_chip(adapter) && !be_physfn(adapter))
			*active_mac = true;
		else
			*active_mac = false;

		return status;
	}

	if (lancer_chip(adapter)) {
		status = be_cmd_get_mac_from_list(adapter, mac,
						  active_mac, pmac_id, 0);
		if (*active_mac) {
			status = be_cmd_mac_addr_query(adapter, mac, false,
						       if_handle, *pmac_id);
		}
	} else if (be_physfn(adapter)) {
		/* For BE3, for PF get permanent MAC */
		status = be_cmd_mac_addr_query(adapter, mac, true, 0, 0);
		*active_mac = false;
	} else {
		/* For BE3, for VF get soft MAC assigned by PF*/
		status = be_cmd_mac_addr_query(adapter, mac, false,
					       if_handle, 0);
		*active_mac = true;
	}
	return status;
}
2879
/* Populate the adapter's resource limits (MAC/VLAN/mcast counts, queue
 * counts, interface capability flags). On non-BEx chips a FW resource
 * profile is queried and its values are clamped to driver maximums;
 * otherwise fixed defaults are used. Also reads the SR-IOV capability
 * to learn how many VFs the device supports.
 */
static void be_get_resources(struct be_adapter *adapter)
{
	u16 dev_num_vfs;
	int pos, status;
	bool profile_present = false;

	if (!BEx_chip(adapter)) {
		status = be_cmd_get_func_config(adapter);
		if (!status)
			profile_present = true;
	}

	if (profile_present) {
		/* Sanity fixes for Lancer */
		adapter->max_pmac_cnt = min_t(u16, adapter->max_pmac_cnt,
					      BE_UC_PMAC_COUNT);
		adapter->max_vlans = min_t(u16, adapter->max_vlans,
					   BE_NUM_VLANS_SUPPORTED);
		adapter->max_mcast_mac = min_t(u16, adapter->max_mcast_mac,
					       BE_MAX_MC);
		adapter->max_tx_queues = min_t(u16, adapter->max_tx_queues,
					       MAX_TX_QS);
		adapter->max_rss_queues = min_t(u16, adapter->max_rss_queues,
						BE3_MAX_RSS_QS);
		adapter->max_event_queues = min_t(u16,
						  adapter->max_event_queues,
						  BE3_MAX_RSS_QS);

		/* Keep one RX queue out of the RSS set (presumably the
		 * default/non-RSS queue — TODO confirm against caller).
		 */
		if (adapter->max_rss_queues &&
		    adapter->max_rss_queues == adapter->max_rx_queues)
			adapter->max_rss_queues -= 1;

		/* Cannot have more RSS queues than event queues */
		if (adapter->max_event_queues < adapter->max_rss_queues)
			adapter->max_rss_queues = adapter->max_event_queues;

	} else {
		if (be_physfn(adapter))
			adapter->max_pmac_cnt = BE_UC_PMAC_COUNT;
		else
			adapter->max_pmac_cnt = BE_VF_UC_PMAC_COUNT;

		/* In FLEX10 (multi-channel) mode only 1/8th of the VLAN
		 * table belongs to this function.
		 */
		if (adapter->function_mode & FLEX10_MODE)
			adapter->max_vlans = BE_NUM_VLANS_SUPPORTED/8;
		else
			adapter->max_vlans = BE_NUM_VLANS_SUPPORTED;

		adapter->max_mcast_mac = BE_MAX_MC;
		adapter->max_tx_queues = MAX_TX_QS;
		adapter->max_rss_queues = (adapter->be3_native) ?
			BE3_MAX_RSS_QS : BE2_MAX_RSS_QS;
		adapter->max_event_queues = BE3_MAX_RSS_QS;

		adapter->if_cap_flags = BE_IF_FLAGS_UNTAGGED |
					BE_IF_FLAGS_BROADCAST |
					BE_IF_FLAGS_MULTICAST |
					BE_IF_FLAGS_PASS_L3L4_ERRORS |
					BE_IF_FLAGS_MCAST_PROMISCUOUS |
					BE_IF_FLAGS_VLAN_PROMISCUOUS |
					BE_IF_FLAGS_PROMISCUOUS;

		if (adapter->function_caps & BE_FUNCTION_CAPS_RSS)
			adapter->if_cap_flags |= BE_IF_FLAGS_RSS;
	}

	/* Read TotalVFs from the PCIe SR-IOV capability, if present */
	pos = pci_find_ext_capability(adapter->pdev, PCI_EXT_CAP_ID_SRIOV);
	if (pos) {
		pci_read_config_word(adapter->pdev, pos + PCI_SRIOV_TOTAL_VF,
				     &dev_num_vfs);
		if (BE3_chip(adapter))
			dev_num_vfs = min_t(u16, dev_num_vfs, MAX_VFS);
		adapter->dev_num_vfs = dev_num_vfs;
	}
}
2953
/* Routine to query per function resource limits */
static int be_get_config(struct be_adapter *adapter)
{
	int status;

	status = be_cmd_query_fw_cfg(adapter, &adapter->port_num,
				     &adapter->function_mode,
				     &adapter->function_caps);
	if (status)
		goto err;

	be_get_resources(adapter);

	/* primary mac needs 1 pmac entry */
	adapter->pmac_id = kcalloc(adapter->max_pmac_cnt + 1,
				   sizeof(u32), GFP_KERNEL);
	if (!adapter->pmac_id) {
		status = -ENOMEM;
		goto err;
	}

	/* success also falls through here with status == 0 */
err:
	return status;
}
2978
/* Main adapter bring-up path: query FW config/resources, enable MSI-X,
 * create event/CQ/MCC queues, create the interface, program the primary
 * MAC, create TX queues, apply VLAN/RX-mode/flow-control settings,
 * optionally set up SR-IOV, and schedule the periodic worker.
 * On any error, everything created so far is undone via be_clear().
 */
static int be_setup(struct be_adapter *adapter)
{
	struct device *dev = &adapter->pdev->dev;
	u32 en_flags;
	u32 tx_fc, rx_fc;
	int status;
	u8 mac[ETH_ALEN];
	bool active_mac;

	be_setup_init(adapter);

	if (!lancer_chip(adapter))
		be_cmd_req_native_mode(adapter);

	status = be_get_config(adapter);
	if (status)
		goto err;

	be_msix_enable(adapter);

	/* Event queues must exist before the CQs that bind to them */
	status = be_evt_queues_create(adapter);
	if (status)
		goto err;

	status = be_tx_cqs_create(adapter);
	if (status)
		goto err;

	status = be_rx_cqs_create(adapter);
	if (status)
		goto err;

	status = be_mcc_queues_create(adapter);
	if (status)
		goto err;

	be_cmd_get_fn_privileges(adapter, &adapter->cmd_privileges, 0);
	/* In UMC mode FW does not return right privileges.
	 * Override with correct privilege equivalent to PF.
	 */
	if (be_is_mc(adapter))
		adapter->cmd_privileges = MAX_PRIVILEGES;

	en_flags = BE_IF_FLAGS_UNTAGGED | BE_IF_FLAGS_BROADCAST |
		BE_IF_FLAGS_MULTICAST | BE_IF_FLAGS_PASS_L3L4_ERRORS;

	if (adapter->function_caps & BE_FUNCTION_CAPS_RSS)
		en_flags |= BE_IF_FLAGS_RSS;

	/* Enable only flags the interface is actually capable of */
	en_flags = en_flags & adapter->if_cap_flags;

	status = be_cmd_if_create(adapter, adapter->if_cap_flags, en_flags,
				  &adapter->if_handle, 0);
	if (status != 0)
		goto err;

	/* Resolve the primary MAC; add a pmac entry only if the FW does
	 * not already have it active.
	 */
	memset(mac, 0, ETH_ALEN);
	active_mac = false;
	status = be_get_mac_addr(adapter, mac, adapter->if_handle,
				 &active_mac, &adapter->pmac_id[0]);
	if (status != 0)
		goto err;

	if (!active_mac) {
		status = be_cmd_pmac_add(adapter, mac, adapter->if_handle,
					 &adapter->pmac_id[0], 0);
		if (status != 0)
			goto err;
	}

	if (is_zero_ether_addr(adapter->netdev->dev_addr)) {
		memcpy(adapter->netdev->dev_addr, mac, ETH_ALEN);
		memcpy(adapter->netdev->perm_addr, mac, ETH_ALEN);
	}

	status = be_tx_qs_create(adapter);
	if (status)
		goto err;

	be_cmd_get_fw_ver(adapter, adapter->fw_ver, NULL);

	/* Re-program VLAN filters that existed before a reset */
	if (adapter->vlans_added)
		be_vid_config(adapter);

	be_set_rx_mode(adapter->netdev);

	be_cmd_get_flow_control(adapter, &tx_fc, &rx_fc);

	/* Only issue the set command if FW state differs from ours */
	if (rx_fc != adapter->rx_fc || tx_fc != adapter->tx_fc)
		be_cmd_set_flow_control(adapter, adapter->tx_fc,
					adapter->rx_fc);

	if (be_physfn(adapter) && num_vfs) {
		if (adapter->dev_num_vfs)
			be_vf_setup(adapter);
		else
			dev_warn(dev, "device doesn't support SRIOV\n");
	}

	status = be_cmd_get_phy_info(adapter);
	if (!status && be_pause_supported(adapter))
		adapter->phy.fc_autoneg = 1;

	schedule_delayed_work(&adapter->work, msecs_to_jiffies(1000));
	adapter->flags |= BE_FLAGS_WORKER_SCHEDULED;
	return 0;
err:
	be_clear(adapter);
	return status;
}
3089
#ifdef CONFIG_NET_POLL_CONTROLLER
/* netpoll entry point: notify every event queue (re-arming it) and
 * schedule its NAPI context so completions are processed even with
 * interrupts unavailable.
 *
 * Cleanup: dropped the redundant bare 'return;' at the end of this
 * void function.
 */
static void be_netpoll(struct net_device *netdev)
{
	struct be_adapter *adapter = netdev_priv(netdev);
	struct be_eq_obj *eqo;
	int i;

	for_all_evt_queues(adapter, eqo, i) {
		be_eq_notify(eqo->adapter, eqo->q.id, false, true, 0);
		napi_schedule(&eqo->napi);
	}
}
#endif
3105
#define FW_FILE_HDR_SIGN "ServerEngines Corp. "
/* Cookie marking a flash section directory inside a UFI image;
 * matched against fsec->cookie by get_fsec_info().
 */
char flash_cookie[2][16] = {"*** SE FLAS", "H DIRECTORY *** "};
3108
Sarveshwar Bandifa9a6fe2009-11-20 14:23:47 -08003109static bool be_flash_redboot(struct be_adapter *adapter,
Ajit Khaparde3f0d4562010-02-09 01:30:35 +00003110 const u8 *p, u32 img_start, int image_size,
3111 int hdr_size)
Sarveshwar Bandifa9a6fe2009-11-20 14:23:47 -08003112{
3113 u32 crc_offset;
3114 u8 flashed_crc[4];
3115 int status;
Ajit Khaparde3f0d4562010-02-09 01:30:35 +00003116
3117 crc_offset = hdr_size + img_start + image_size - 4;
3118
Sarveshwar Bandifa9a6fe2009-11-20 14:23:47 -08003119 p += crc_offset;
Ajit Khaparde3f0d4562010-02-09 01:30:35 +00003120
3121 status = be_cmd_get_flash_crc(adapter, flashed_crc,
Ajit Khapardef510fc62010-03-31 01:47:45 +00003122 (image_size - 4));
Sarveshwar Bandifa9a6fe2009-11-20 14:23:47 -08003123 if (status) {
3124 dev_err(&adapter->pdev->dev,
3125 "could not get crc from flash, not flashing redboot\n");
3126 return false;
3127 }
3128
3129 /*update redboot only if crc does not match*/
3130 if (!memcmp(flashed_crc, p, 4))
3131 return false;
3132 else
3133 return true;
Sarveshwar Bandifa9a6fe2009-11-20 14:23:47 -08003134}
3135
Sathya Perla306f1342011-08-02 19:57:45 +00003136static bool phy_flashing_required(struct be_adapter *adapter)
3137{
Ajit Khaparde42f11cf2012-04-21 18:53:22 +00003138 return (adapter->phy.phy_type == TN_8022 &&
3139 adapter->phy.interface_type == PHY_TYPE_BASET_10GB);
Sathya Perla306f1342011-08-02 19:57:45 +00003140}
3141
Padmanabh Ratnakarc165541e2012-04-25 01:47:15 +00003142static bool is_comp_in_ufi(struct be_adapter *adapter,
3143 struct flash_section_info *fsec, int type)
3144{
3145 int i = 0, img_type = 0;
3146 struct flash_section_info_g2 *fsec_g2 = NULL;
3147
Sathya Perlaca34fe32012-11-06 17:48:56 +00003148 if (BE2_chip(adapter))
Padmanabh Ratnakarc165541e2012-04-25 01:47:15 +00003149 fsec_g2 = (struct flash_section_info_g2 *)fsec;
3150
3151 for (i = 0; i < MAX_FLASH_COMP; i++) {
3152 if (fsec_g2)
3153 img_type = le32_to_cpu(fsec_g2->fsec_entry[i].type);
3154 else
3155 img_type = le32_to_cpu(fsec->fsec_entry[i].type);
3156
3157 if (img_type == type)
3158 return true;
3159 }
3160 return false;
3161
3162}
3163
3164struct flash_section_info *get_fsec_info(struct be_adapter *adapter,
3165 int header_size,
3166 const struct firmware *fw)
3167{
3168 struct flash_section_info *fsec = NULL;
3169 const u8 *p = fw->data;
3170
3171 p += header_size;
3172 while (p < (fw->data + fw->size)) {
3173 fsec = (struct flash_section_info *)p;
3174 if (!memcmp(flash_cookie, fsec->cookie, sizeof(flash_cookie)))
3175 return fsec;
3176 p += 32;
3177 }
3178 return NULL;
3179}
3180
/* Write one firmware image to flash in 32KB chunks through the DMA
 * buffer in flash_cmd. Intermediate chunks use the SAVE op; the final
 * chunk uses the FLASH op (PHY_* variants for PHY firmware), which
 * presumably commits the image — TODO confirm against FW spec.
 * Returns 0 on success or the FW command error. For PHY firmware an
 * ILLEGAL_IOCTL_REQ is treated as "not supported" and ends the loop
 * without error.
 */
static int be_flash(struct be_adapter *adapter, const u8 *img,
		struct be_dma_mem *flash_cmd, int optype, int img_size)
{
	u32 total_bytes = 0, flash_op, num_bytes = 0;
	int status = 0;
	struct be_cmd_write_flashrom *req = flash_cmd->va;

	total_bytes = img_size;
	while (total_bytes) {
		/* Chunks are capped at 32KB */
		num_bytes = min_t(u32, 32*1024, total_bytes);

		total_bytes -= num_bytes;

		if (!total_bytes) {
			if (optype == OPTYPE_PHY_FW)
				flash_op = FLASHROM_OPER_PHY_FLASH;
			else
				flash_op = FLASHROM_OPER_FLASH;
		} else {
			if (optype == OPTYPE_PHY_FW)
				flash_op = FLASHROM_OPER_PHY_SAVE;
			else
				flash_op = FLASHROM_OPER_SAVE;
		}

		memcpy(req->data_buf, img, num_bytes);
		img += num_bytes;
		status = be_cmd_write_flashrom(adapter, flash_cmd, optype,
						flash_op, num_bytes);
		if (status) {
			if (status == ILLEGAL_IOCTL_REQ &&
			    optype == OPTYPE_PHY_FW)
				break;
			dev_err(&adapter->pdev->dev,
				"cmd to write to flash rom failed.\n");
			return status;
		}
	}
	return 0;
}
3221
/* For BE2 and BE3 */
/* Flash a UFI image on BE2/BE3 chips: walk a fixed table of known
 * component types (chosen per chip generation), skip components that
 * are absent from the UFI, not applicable (NCSI on old FW, PHY FW when
 * not required, redboot when its CRC already matches), and flash each
 * remaining component via be_flash(). Returns 0, -1 on a malformed
 * UFI, or the first flash error.
 */
static int be_flash_BEx(struct be_adapter *adapter,
			 const struct firmware *fw,
			 struct be_dma_mem *flash_cmd,
			 int num_of_images)

{
	int status = 0, i, filehdr_size = 0;
	int img_hdrs_size = (num_of_images * sizeof(struct image_hdr));
	const u8 *p = fw->data;
	const struct flash_comp *pflashcomp;
	int num_comp, redboot;
	struct flash_section_info *fsec = NULL;

	/* { start offset, op type, max size, image type } per component */
	struct flash_comp gen3_flash_types[] = {
		{ FLASH_iSCSI_PRIMARY_IMAGE_START_g3, OPTYPE_ISCSI_ACTIVE,
			FLASH_IMAGE_MAX_SIZE_g3, IMAGE_FIRMWARE_iSCSI},
		{ FLASH_REDBOOT_START_g3, OPTYPE_REDBOOT,
			FLASH_REDBOOT_IMAGE_MAX_SIZE_g3, IMAGE_BOOT_CODE},
		{ FLASH_iSCSI_BIOS_START_g3, OPTYPE_BIOS,
			FLASH_BIOS_IMAGE_MAX_SIZE_g3, IMAGE_OPTION_ROM_ISCSI},
		{ FLASH_PXE_BIOS_START_g3, OPTYPE_PXE_BIOS,
			FLASH_BIOS_IMAGE_MAX_SIZE_g3, IMAGE_OPTION_ROM_PXE},
		{ FLASH_FCoE_BIOS_START_g3, OPTYPE_FCOE_BIOS,
			FLASH_BIOS_IMAGE_MAX_SIZE_g3, IMAGE_OPTION_ROM_FCoE},
		{ FLASH_iSCSI_BACKUP_IMAGE_START_g3, OPTYPE_ISCSI_BACKUP,
			FLASH_IMAGE_MAX_SIZE_g3, IMAGE_FIRMWARE_BACKUP_iSCSI},
		{ FLASH_FCoE_PRIMARY_IMAGE_START_g3, OPTYPE_FCOE_FW_ACTIVE,
			FLASH_IMAGE_MAX_SIZE_g3, IMAGE_FIRMWARE_FCoE},
		{ FLASH_FCoE_BACKUP_IMAGE_START_g3, OPTYPE_FCOE_FW_BACKUP,
			FLASH_IMAGE_MAX_SIZE_g3, IMAGE_FIRMWARE_BACKUP_FCoE},
		{ FLASH_NCSI_START_g3, OPTYPE_NCSI_FW,
			FLASH_NCSI_IMAGE_MAX_SIZE_g3, IMAGE_NCSI},
		{ FLASH_PHY_FW_START_g3, OPTYPE_PHY_FW,
			FLASH_PHY_FW_IMAGE_MAX_SIZE_g3, IMAGE_FIRMWARE_PHY}
	};

	struct flash_comp gen2_flash_types[] = {
		{ FLASH_iSCSI_PRIMARY_IMAGE_START_g2, OPTYPE_ISCSI_ACTIVE,
			FLASH_IMAGE_MAX_SIZE_g2, IMAGE_FIRMWARE_iSCSI},
		{ FLASH_REDBOOT_START_g2, OPTYPE_REDBOOT,
			FLASH_REDBOOT_IMAGE_MAX_SIZE_g2, IMAGE_BOOT_CODE},
		{ FLASH_iSCSI_BIOS_START_g2, OPTYPE_BIOS,
			FLASH_BIOS_IMAGE_MAX_SIZE_g2, IMAGE_OPTION_ROM_ISCSI},
		{ FLASH_PXE_BIOS_START_g2, OPTYPE_PXE_BIOS,
			FLASH_BIOS_IMAGE_MAX_SIZE_g2, IMAGE_OPTION_ROM_PXE},
		{ FLASH_FCoE_BIOS_START_g2, OPTYPE_FCOE_BIOS,
			FLASH_BIOS_IMAGE_MAX_SIZE_g2, IMAGE_OPTION_ROM_FCoE},
		{ FLASH_iSCSI_BACKUP_IMAGE_START_g2, OPTYPE_ISCSI_BACKUP,
			FLASH_IMAGE_MAX_SIZE_g2, IMAGE_FIRMWARE_BACKUP_iSCSI},
		{ FLASH_FCoE_PRIMARY_IMAGE_START_g2, OPTYPE_FCOE_FW_ACTIVE,
			FLASH_IMAGE_MAX_SIZE_g2, IMAGE_FIRMWARE_FCoE},
		{ FLASH_FCoE_BACKUP_IMAGE_START_g2, OPTYPE_FCOE_FW_BACKUP,
			FLASH_IMAGE_MAX_SIZE_g2, IMAGE_FIRMWARE_BACKUP_FCoE}
	};

	if (BE3_chip(adapter)) {
		pflashcomp = gen3_flash_types;
		filehdr_size = sizeof(struct flash_file_hdr_g3);
		num_comp = ARRAY_SIZE(gen3_flash_types);
	} else {
		pflashcomp = gen2_flash_types;
		filehdr_size = sizeof(struct flash_file_hdr_g2);
		num_comp = ARRAY_SIZE(gen2_flash_types);
	}

	/* Get flash section info*/
	fsec = get_fsec_info(adapter, filehdr_size + img_hdrs_size, fw);
	if (!fsec) {
		dev_err(&adapter->pdev->dev,
			"Invalid Cookie. UFI corrupted ?\n");
		return -1;
	}
	for (i = 0; i < num_comp; i++) {
		if (!is_comp_in_ufi(adapter, fsec, pflashcomp[i].img_type))
			continue;

		/* NCSI FW needs base FW version >= 3.102.148.0 */
		if ((pflashcomp[i].optype == OPTYPE_NCSI_FW) &&
		    memcmp(adapter->fw_ver, "3.102.148.0", 11) < 0)
			continue;

		if (pflashcomp[i].optype == OPTYPE_PHY_FW &&
		    !phy_flashing_required(adapter))
			continue;

		if (pflashcomp[i].optype == OPTYPE_REDBOOT) {
			redboot = be_flash_redboot(adapter, fw->data,
				pflashcomp[i].offset, pflashcomp[i].size,
				filehdr_size + img_hdrs_size);
			if (!redboot)
				continue;
		}

		p = fw->data;
		p += filehdr_size + pflashcomp[i].offset + img_hdrs_size;
		/* Reject components that extend past the end of the file */
		if (p + pflashcomp[i].size > fw->data + fw->size)
			return -1;

		status = be_flash(adapter, p, flash_cmd, pflashcomp[i].optype,
					pflashcomp[i].size);
		if (status) {
			dev_err(&adapter->pdev->dev,
				"Flashing section type %d failed.\n",
				pflashcomp[i].img_type);
			return status;
		}
	}
	return 0;
}
3331
Padmanabh Ratnakar773a2d72012-10-20 06:04:16 +00003332static int be_flash_skyhawk(struct be_adapter *adapter,
3333 const struct firmware *fw,
3334 struct be_dma_mem *flash_cmd, int num_of_images)
Ajit Khaparde3f0d4562010-02-09 01:30:35 +00003335{
Padmanabh Ratnakar773a2d72012-10-20 06:04:16 +00003336 int status = 0, i, filehdr_size = 0;
3337 int img_offset, img_size, img_optype, redboot;
3338 int img_hdrs_size = num_of_images * sizeof(struct image_hdr);
3339 const u8 *p = fw->data;
3340 struct flash_section_info *fsec = NULL;
3341
3342 filehdr_size = sizeof(struct flash_file_hdr_g3);
3343 fsec = get_fsec_info(adapter, filehdr_size + img_hdrs_size, fw);
3344 if (!fsec) {
3345 dev_err(&adapter->pdev->dev,
3346 "Invalid Cookie. UFI corrupted ?\n");
3347 return -1;
3348 }
3349
3350 for (i = 0; i < le32_to_cpu(fsec->fsec_hdr.num_images); i++) {
3351 img_offset = le32_to_cpu(fsec->fsec_entry[i].offset);
3352 img_size = le32_to_cpu(fsec->fsec_entry[i].pad_size);
3353
3354 switch (le32_to_cpu(fsec->fsec_entry[i].type)) {
3355 case IMAGE_FIRMWARE_iSCSI:
3356 img_optype = OPTYPE_ISCSI_ACTIVE;
3357 break;
3358 case IMAGE_BOOT_CODE:
3359 img_optype = OPTYPE_REDBOOT;
3360 break;
3361 case IMAGE_OPTION_ROM_ISCSI:
3362 img_optype = OPTYPE_BIOS;
3363 break;
3364 case IMAGE_OPTION_ROM_PXE:
3365 img_optype = OPTYPE_PXE_BIOS;
3366 break;
3367 case IMAGE_OPTION_ROM_FCoE:
3368 img_optype = OPTYPE_FCOE_BIOS;
3369 break;
3370 case IMAGE_FIRMWARE_BACKUP_iSCSI:
3371 img_optype = OPTYPE_ISCSI_BACKUP;
3372 break;
3373 case IMAGE_NCSI:
3374 img_optype = OPTYPE_NCSI_FW;
3375 break;
3376 default:
3377 continue;
3378 }
3379
3380 if (img_optype == OPTYPE_REDBOOT) {
3381 redboot = be_flash_redboot(adapter, fw->data,
3382 img_offset, img_size,
3383 filehdr_size + img_hdrs_size);
3384 if (!redboot)
3385 continue;
3386 }
3387
3388 p = fw->data;
3389 p += filehdr_size + img_offset + img_hdrs_size;
3390 if (p + img_size > fw->data + fw->size)
3391 return -1;
3392
3393 status = be_flash(adapter, p, flash_cmd, img_optype, img_size);
3394 if (status) {
3395 dev_err(&adapter->pdev->dev,
3396 "Flashing section type %d failed.\n",
3397 fsec->fsec_entry[i].type);
3398 return status;
3399 }
3400 }
3401 return 0;
Ajit Khaparde3f0d4562010-02-09 01:30:35 +00003402}
3403
Padmanabh Ratnakarf67ef7b2012-07-12 03:57:09 +00003404static int lancer_wait_idle(struct be_adapter *adapter)
3405{
3406#define SLIPORT_IDLE_TIMEOUT 30
3407 u32 reg_val;
3408 int status = 0, i;
3409
3410 for (i = 0; i < SLIPORT_IDLE_TIMEOUT; i++) {
3411 reg_val = ioread32(adapter->db + PHYSDEV_CONTROL_OFFSET);
3412 if ((reg_val & PHYSDEV_CONTROL_INP_MASK) == 0)
3413 break;
3414
3415 ssleep(1);
3416 }
3417
3418 if (i == SLIPORT_IDLE_TIMEOUT)
3419 status = -1;
3420
3421 return status;
3422}
3423
3424static int lancer_fw_reset(struct be_adapter *adapter)
3425{
3426 int status = 0;
3427
3428 status = lancer_wait_idle(adapter);
3429 if (status)
3430 return status;
3431
3432 iowrite32(PHYSDEV_CONTROL_FW_RESET_MASK, adapter->db +
3433 PHYSDEV_CONTROL_OFFSET);
3434
3435 return status;
3436}
3437
/* Download a firmware image to a Lancer adapter.
 *
 * The image is streamed to the "/prg" flash object in 32KB chunks through a
 * single reusable DMA buffer, then committed with a zero-length write. If
 * the firmware reports that a reset is needed to activate the new image,
 * lancer_fw_reset() is issued here; otherwise the user is told a reboot is
 * required.
 *
 * Returns 0 on success or a negative errno / command status on failure.
 */
static int lancer_fw_download(struct be_adapter *adapter,
				const struct firmware *fw)
{
#define LANCER_FW_DOWNLOAD_CHUNK (32 * 1024)
#define LANCER_FW_DOWNLOAD_LOCATION "/prg"
	struct be_dma_mem flash_cmd;
	const u8 *data_ptr = NULL;
	u8 *dest_image_ptr = NULL;
	size_t image_size = 0;
	u32 chunk_size = 0;
	u32 data_written = 0;
	u32 offset = 0;
	int status = 0;
	u8 add_status = 0;
	u8 change_status;

	/* The write-object command transfers whole dwords */
	if (!IS_ALIGNED(fw->size, sizeof(u32))) {
		dev_err(&adapter->pdev->dev,
			"FW Image not properly aligned. "
			"Length must be 4 byte aligned.\n");
		status = -EINVAL;
		goto lancer_fw_exit;
	}

	/* One buffer holds the command header plus one image chunk */
	flash_cmd.size = sizeof(struct lancer_cmd_req_write_object)
				+ LANCER_FW_DOWNLOAD_CHUNK;
	flash_cmd.va = dma_alloc_coherent(&adapter->pdev->dev, flash_cmd.size,
					  &flash_cmd.dma, GFP_KERNEL);
	if (!flash_cmd.va) {
		status = -ENOMEM;
		goto lancer_fw_exit;
	}

	dest_image_ptr = flash_cmd.va +
				sizeof(struct lancer_cmd_req_write_object);
	image_size = fw->size;
	data_ptr = fw->data;

	/* Stream the image; the firmware reports how much it consumed each
	 * time, so advance by data_written rather than chunk_size.
	 */
	while (image_size) {
		chunk_size = min_t(u32, image_size, LANCER_FW_DOWNLOAD_CHUNK);

		/* Copy the image chunk content. */
		memcpy(dest_image_ptr, data_ptr, chunk_size);

		status = lancer_cmd_write_object(adapter, &flash_cmd,
						 chunk_size, offset,
						 LANCER_FW_DOWNLOAD_LOCATION,
						 &data_written, &change_status,
						 &add_status);
		if (status)
			break;

		offset += data_written;
		data_ptr += data_written;
		image_size -= data_written;
	}

	if (!status) {
		/* Commit the FW written: a zero-length write at the final
		 * offset tells the firmware the download is complete.
		 */
		status = lancer_cmd_write_object(adapter, &flash_cmd,
						 0, offset,
						 LANCER_FW_DOWNLOAD_LOCATION,
						 &data_written, &change_status,
						 &add_status);
	}

	dma_free_coherent(&adapter->pdev->dev, flash_cmd.size, flash_cmd.va,
			  flash_cmd.dma);
	if (status) {
		dev_err(&adapter->pdev->dev,
			"Firmware load error. "
			"Status code: 0x%x Additional Status: 0x%x\n",
			status, add_status);
		goto lancer_fw_exit;
	}

	/* Activate the new image: reset now if possible, else tell the user */
	if (change_status == LANCER_FW_RESET_NEEDED) {
		status = lancer_fw_reset(adapter);
		if (status) {
			dev_err(&adapter->pdev->dev,
				"Adapter busy for FW reset.\n"
				"New FW will not be active.\n");
			goto lancer_fw_exit;
		}
	} else if (change_status != LANCER_NO_RESET_NEEDED) {
		dev_err(&adapter->pdev->dev,
			"System reboot required for new FW"
			" to be active\n");
	}

	dev_info(&adapter->pdev->dev, "Firmware flashed successfully\n");
lancer_fw_exit:
	return status;
}
3532
Sathya Perlaca34fe32012-11-06 17:48:56 +00003533#define UFI_TYPE2 2
3534#define UFI_TYPE3 3
3535#define UFI_TYPE4 4
3536static int be_get_ufi_type(struct be_adapter *adapter,
3537 struct flash_file_hdr_g2 *fhdr)
Padmanabh Ratnakar773a2d72012-10-20 06:04:16 +00003538{
3539 if (fhdr == NULL)
3540 goto be_get_ufi_exit;
3541
Sathya Perlaca34fe32012-11-06 17:48:56 +00003542 if (skyhawk_chip(adapter) && fhdr->build[0] == '4')
3543 return UFI_TYPE4;
3544 else if (BE3_chip(adapter) && fhdr->build[0] == '3')
3545 return UFI_TYPE3;
3546 else if (BE2_chip(adapter) && fhdr->build[0] == '2')
3547 return UFI_TYPE2;
Padmanabh Ratnakar773a2d72012-10-20 06:04:16 +00003548
3549be_get_ufi_exit:
3550 dev_err(&adapter->pdev->dev,
3551 "UFI and Interface are not compatible for flashing\n");
3552 return -1;
3553}
3554
/* Flash a (non-Lancer) firmware image: validate the UFI type against the
 * chip, then dispatch each primary image to the generation-specific flash
 * routine (be_flash_skyhawk for gen-4, be_flash_BEx for gen-2/3).
 *
 * Returns 0 on success, -ENOMEM on allocation failure, -1 on a UFI/chip
 * mismatch, or the status of the failing flash routine.
 */
static int be_fw_download(struct be_adapter *adapter, const struct firmware* fw)
{
	struct flash_file_hdr_g2 *fhdr;
	struct flash_file_hdr_g3 *fhdr3;
	struct image_hdr *img_hdr_ptr = NULL;
	struct be_dma_mem flash_cmd;
	const u8 *p;
	int status = 0, i = 0, num_imgs = 0, ufi_type = 0;

	/* Single DMA buffer reused for every flashrom write command */
	flash_cmd.size = sizeof(struct be_cmd_write_flashrom);
	flash_cmd.va = dma_alloc_coherent(&adapter->pdev->dev, flash_cmd.size,
					  &flash_cmd.dma, GFP_KERNEL);
	if (!flash_cmd.va) {
		status = -ENOMEM;
		goto be_fw_exit;
	}

	/* The gen-2 header at the start of the file is enough to identify
	 * the UFI generation even for gen-3/4 files.
	 */
	p = fw->data;
	fhdr = (struct flash_file_hdr_g2 *)p;

	ufi_type = be_get_ufi_type(adapter, fhdr);

	/* Gen-3/4 files carry a table of per-image headers; flash every
	 * image whose imageid is 1 (the primary image).
	 */
	fhdr3 = (struct flash_file_hdr_g3 *)fw->data;
	num_imgs = le32_to_cpu(fhdr3->num_imgs);
	for (i = 0; i < num_imgs; i++) {
		img_hdr_ptr = (struct image_hdr *)(fw->data +
				(sizeof(struct flash_file_hdr_g3) +
				 i * sizeof(struct image_hdr)));
		if (le32_to_cpu(img_hdr_ptr->imageid) == 1) {
			if (ufi_type == UFI_TYPE4)
				status = be_flash_skyhawk(adapter, fw,
							&flash_cmd, num_imgs);
			else if (ufi_type == UFI_TYPE3)
				status = be_flash_BEx(adapter, fw, &flash_cmd,
						      num_imgs);
		}
	}

	/* Gen-2 files have no image-header table; flash directly */
	if (ufi_type == UFI_TYPE2)
		status = be_flash_BEx(adapter, fw, &flash_cmd, 0);
	else if (ufi_type == -1)
		status = -1;

	dma_free_coherent(&adapter->pdev->dev, flash_cmd.size, flash_cmd.va,
			  flash_cmd.dma);
	if (status) {
		dev_err(&adapter->pdev->dev, "Firmware load error\n");
		goto be_fw_exit;
	}

	dev_info(&adapter->pdev->dev, "Firmware flashed successfully\n");

be_fw_exit:
	return status;
}
3610
3611int be_load_fw(struct be_adapter *adapter, u8 *fw_file)
3612{
3613 const struct firmware *fw;
3614 int status;
3615
3616 if (!netif_running(adapter->netdev)) {
3617 dev_err(&adapter->pdev->dev,
3618 "Firmware load not allowed (interface is down)\n");
3619 return -1;
3620 }
3621
3622 status = request_firmware(&fw, fw_file, &adapter->pdev->dev);
3623 if (status)
3624 goto fw_exit;
3625
3626 dev_info(&adapter->pdev->dev, "Flashing firmware file %s\n", fw_file);
3627
3628 if (lancer_chip(adapter))
3629 status = lancer_fw_download(adapter, fw);
3630 else
3631 status = be_fw_download(adapter, fw);
3632
Ajit Khaparde84517482009-09-04 03:12:16 +00003633fw_exit:
3634 release_firmware(fw);
3635 return status;
3636}
3637
/* net_device callbacks wired into the stack for this driver, including the
 * SR-IOV VF management hooks (set_vf_mac/vlan/tx_rate, get_vf_config).
 */
static const struct net_device_ops be_netdev_ops = {
	.ndo_open = be_open,
	.ndo_stop = be_close,
	.ndo_start_xmit = be_xmit,
	.ndo_set_rx_mode = be_set_rx_mode,
	.ndo_set_mac_address = be_mac_addr_set,
	.ndo_change_mtu = be_change_mtu,
	.ndo_get_stats64 = be_get_stats64,
	.ndo_validate_addr = eth_validate_addr,
	.ndo_vlan_rx_add_vid = be_vlan_add_vid,
	.ndo_vlan_rx_kill_vid = be_vlan_rem_vid,
	.ndo_set_vf_mac = be_set_vf_mac,
	.ndo_set_vf_vlan = be_set_vf_vlan,
	.ndo_set_vf_tx_rate = be_set_vf_tx_rate,
	.ndo_get_vf_config = be_get_vf_config,
#ifdef CONFIG_NET_POLL_CONTROLLER
	.ndo_poll_controller = be_netpoll,
#endif
};
3657
3658static void be_netdev_init(struct net_device *netdev)
3659{
3660 struct be_adapter *adapter = netdev_priv(netdev);
Sathya Perla10ef9ab2012-02-09 18:05:27 +00003661 struct be_eq_obj *eqo;
Sathya Perla3abcded2010-10-03 22:12:27 -07003662 int i;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07003663
Michał Mirosław6332c8d2011-04-07 02:43:48 +00003664 netdev->hw_features |= NETIF_F_SG | NETIF_F_TSO | NETIF_F_TSO6 |
Michał Mirosław8b8ddc62011-04-08 02:38:47 +00003665 NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM | NETIF_F_RXCSUM |
Patrick McHardyf6469682013-04-19 02:04:27 +00003666 NETIF_F_HW_VLAN_CTAG_TX;
Michał Mirosław8b8ddc62011-04-08 02:38:47 +00003667 if (be_multi_rxq(adapter))
3668 netdev->hw_features |= NETIF_F_RXHASH;
Michał Mirosław6332c8d2011-04-07 02:43:48 +00003669
3670 netdev->features |= netdev->hw_features |
Patrick McHardyf6469682013-04-19 02:04:27 +00003671 NETIF_F_HW_VLAN_CTAG_RX | NETIF_F_HW_VLAN_CTAG_FILTER;
Ajit Khaparde4b972912011-04-06 18:07:43 +00003672
Padmanabh Ratnakareb8a50d2011-06-11 15:58:46 -07003673 netdev->vlan_features |= NETIF_F_SG | NETIF_F_TSO | NETIF_F_TSO6 |
Michał Mirosław79032642010-11-30 06:38:00 +00003674 NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM;
Ajit Khaparde51c59872009-11-29 17:54:54 +00003675
Ajit Khapardefbc13f02012-03-18 06:23:21 +00003676 netdev->priv_flags |= IFF_UNICAST_FLT;
3677
Sathya Perla6b7c5b92009-03-11 23:32:03 -07003678 netdev->flags |= IFF_MULTICAST;
3679
Sarveshwar Bandib7e58872012-06-13 19:51:43 +00003680 netif_set_gso_max_size(netdev, 65535 - ETH_HLEN);
Ajit Khapardec190e3c2009-09-04 03:12:29 +00003681
Sathya Perla10ef9ab2012-02-09 18:05:27 +00003682 netdev->netdev_ops = &be_netdev_ops;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07003683
3684 SET_ETHTOOL_OPS(netdev, &be_ethtool_ops);
3685
Sathya Perla10ef9ab2012-02-09 18:05:27 +00003686 for_all_evt_queues(adapter, eqo, i)
3687 netif_napi_add(netdev, &eqo->napi, be_poll, BE_NAPI_WEIGHT);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07003688}
3689
3690static void be_unmap_pci_bars(struct be_adapter *adapter)
3691{
Sathya Perlac5b3ad42013-03-05 22:23:20 +00003692 if (adapter->csr)
3693 pci_iounmap(adapter->pdev, adapter->csr);
Sathya Perla8788fdc2009-07-27 22:52:03 +00003694 if (adapter->db)
Sathya Perlace66f782012-11-06 17:48:58 +00003695 pci_iounmap(adapter->pdev, adapter->db);
Parav Pandit045508a2012-03-26 14:27:13 +00003696}
3697
/* BAR number holding the doorbell registers: Lancer chips and virtual
 * functions expose them in BAR 0, physical functions in BAR 4.
 */
static int db_bar(struct be_adapter *adapter)
{
	return (lancer_chip(adapter) || !be_physfn(adapter)) ? 0 : 4;
}
3705
3706static int be_roce_map_pci_bars(struct be_adapter *adapter)
Parav Pandit045508a2012-03-26 14:27:13 +00003707{
Sathya Perladbf0f2a2012-11-06 17:49:00 +00003708 if (skyhawk_chip(adapter)) {
Sathya Perlace66f782012-11-06 17:48:58 +00003709 adapter->roce_db.size = 4096;
3710 adapter->roce_db.io_addr = pci_resource_start(adapter->pdev,
3711 db_bar(adapter));
3712 adapter->roce_db.total_size = pci_resource_len(adapter->pdev,
3713 db_bar(adapter));
3714 }
Parav Pandit045508a2012-03-26 14:27:13 +00003715 return 0;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07003716}
3717
3718static int be_map_pci_bars(struct be_adapter *adapter)
3719{
3720 u8 __iomem *addr;
Sathya Perlace66f782012-11-06 17:48:58 +00003721 u32 sli_intf;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07003722
Sathya Perlace66f782012-11-06 17:48:58 +00003723 pci_read_config_dword(adapter->pdev, SLI_INTF_REG_OFFSET, &sli_intf);
3724 adapter->if_type = (sli_intf & SLI_INTF_IF_TYPE_MASK) >>
3725 SLI_INTF_IF_TYPE_SHIFT;
Sathya Perlafe6d2a32010-11-21 23:25:50 +00003726
Sathya Perlac5b3ad42013-03-05 22:23:20 +00003727 if (BEx_chip(adapter) && be_physfn(adapter)) {
3728 adapter->csr = pci_iomap(adapter->pdev, 2, 0);
3729 if (adapter->csr == NULL)
3730 return -ENOMEM;
3731 }
3732
Sathya Perlace66f782012-11-06 17:48:58 +00003733 addr = pci_iomap(adapter->pdev, db_bar(adapter), 0);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07003734 if (addr == NULL)
3735 goto pci_map_err;
Sathya Perla8788fdc2009-07-27 22:52:03 +00003736 adapter->db = addr;
Sathya Perlace66f782012-11-06 17:48:58 +00003737
3738 be_roce_map_pci_bars(adapter);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07003739 return 0;
Sathya Perlace66f782012-11-06 17:48:58 +00003740
Sathya Perla6b7c5b92009-03-11 23:32:03 -07003741pci_map_err:
3742 be_unmap_pci_bars(adapter);
3743 return -ENOMEM;
3744}
3745
Sathya Perla6b7c5b92009-03-11 23:32:03 -07003746static void be_ctrl_cleanup(struct be_adapter *adapter)
3747{
Sathya Perla8788fdc2009-07-27 22:52:03 +00003748 struct be_dma_mem *mem = &adapter->mbox_mem_alloced;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07003749
3750 be_unmap_pci_bars(adapter);
3751
3752 if (mem->va)
Ivan Vecera2b7bceb2011-02-02 08:05:12 +00003753 dma_free_coherent(&adapter->pdev->dev, mem->size, mem->va,
3754 mem->dma);
Sathya Perlae7b909a2009-11-22 22:01:10 +00003755
Sathya Perla5b8821b2011-08-02 19:57:44 +00003756 mem = &adapter->rx_filter;
Sathya Perlae7b909a2009-11-22 22:01:10 +00003757 if (mem->va)
Ivan Vecera2b7bceb2011-02-02 08:05:12 +00003758 dma_free_coherent(&adapter->pdev->dev, mem->size, mem->va,
3759 mem->dma);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07003760}
3761
/* One-time control-path setup: map BARs, allocate the 16-byte-aligned MCC
 * mailbox and the rx-filter DMA buffer, and initialize the command locks.
 *
 * On failure each label unwinds exactly the resources acquired before it
 * (classic goto-cleanup ordering); returns 0 or a negative errno.
 */
static int be_ctrl_init(struct be_adapter *adapter)
{
	struct be_dma_mem *mbox_mem_alloc = &adapter->mbox_mem_alloced;
	struct be_dma_mem *mbox_mem_align = &adapter->mbox_mem;
	struct be_dma_mem *rx_filter = &adapter->rx_filter;
	u32 sli_intf;
	int status;

	/* Derive SLI family / VF status from the interface config register */
	pci_read_config_dword(adapter->pdev, SLI_INTF_REG_OFFSET, &sli_intf);
	adapter->sli_family = (sli_intf & SLI_INTF_FAMILY_MASK) >>
				SLI_INTF_FAMILY_SHIFT;
	adapter->virtfn = (sli_intf & SLI_INTF_FT_MASK) ? 1 : 0;

	status = be_map_pci_bars(adapter);
	if (status)
		goto done;

	/* Over-allocate by 16 bytes so the mailbox can be aligned to 16 */
	mbox_mem_alloc->size = sizeof(struct be_mcc_mailbox) + 16;
	mbox_mem_alloc->va = dma_alloc_coherent(&adapter->pdev->dev,
						mbox_mem_alloc->size,
						&mbox_mem_alloc->dma,
						GFP_KERNEL);
	if (!mbox_mem_alloc->va) {
		status = -ENOMEM;
		goto unmap_pci_bars;
	}
	/* mbox_mem is an aligned view into mbox_mem_alloced; only the
	 * latter is ever freed.
	 */
	mbox_mem_align->size = sizeof(struct be_mcc_mailbox);
	mbox_mem_align->va = PTR_ALIGN(mbox_mem_alloc->va, 16);
	mbox_mem_align->dma = PTR_ALIGN(mbox_mem_alloc->dma, 16);
	memset(mbox_mem_align->va, 0, sizeof(struct be_mcc_mailbox));

	rx_filter->size = sizeof(struct be_cmd_req_rx_filter);
	rx_filter->va = dma_alloc_coherent(&adapter->pdev->dev, rx_filter->size,
					   &rx_filter->dma,
					   GFP_KERNEL | __GFP_ZERO);
	if (rx_filter->va == NULL) {
		status = -ENOMEM;
		goto free_mbox;
	}

	mutex_init(&adapter->mbox_lock);
	spin_lock_init(&adapter->mcc_lock);
	spin_lock_init(&adapter->mcc_cq_lock);

	init_completion(&adapter->flash_compl);
	/* Saved state is restored by the EEH/error-recovery path */
	pci_save_state(adapter->pdev);
	return 0;

free_mbox:
	dma_free_coherent(&adapter->pdev->dev, mbox_mem_alloc->size,
			  mbox_mem_alloc->va, mbox_mem_alloc->dma);

unmap_pci_bars:
	be_unmap_pci_bars(adapter);

done:
	return status;
}
3820
3821static void be_stats_cleanup(struct be_adapter *adapter)
3822{
Sathya Perla3abcded2010-10-03 22:12:27 -07003823 struct be_dma_mem *cmd = &adapter->stats_cmd;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07003824
3825 if (cmd->va)
Ivan Vecera2b7bceb2011-02-02 08:05:12 +00003826 dma_free_coherent(&adapter->pdev->dev, cmd->size,
3827 cmd->va, cmd->dma);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07003828}
3829
3830static int be_stats_init(struct be_adapter *adapter)
3831{
Sathya Perla3abcded2010-10-03 22:12:27 -07003832 struct be_dma_mem *cmd = &adapter->stats_cmd;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07003833
Sathya Perlaca34fe32012-11-06 17:48:56 +00003834 if (lancer_chip(adapter))
3835 cmd->size = sizeof(struct lancer_cmd_req_pport_stats);
3836 else if (BE2_chip(adapter))
Ajit Khaparde89a88ab2011-05-16 07:36:18 +00003837 cmd->size = sizeof(struct be_cmd_req_get_stats_v0);
Sathya Perlaca34fe32012-11-06 17:48:56 +00003838 else
3839 /* BE3 and Skyhawk */
3840 cmd->size = sizeof(struct be_cmd_req_get_stats_v1);
3841
Ivan Vecera2b7bceb2011-02-02 08:05:12 +00003842 cmd->va = dma_alloc_coherent(&adapter->pdev->dev, cmd->size, &cmd->dma,
Joe Perches1f9061d22013-03-15 07:23:58 +00003843 GFP_KERNEL | __GFP_ZERO);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07003844 if (cmd->va == NULL)
3845 return -1;
3846 return 0;
3847}
3848
/* PCI remove callback: tear down the adapter in the reverse order of probe.
 * The ordering matters — the recovery worker must be cancelled before the
 * netdev is unregistered, and the firmware must be told we are done before
 * the control path is dismantled.
 */
static void be_remove(struct pci_dev *pdev)
{
	struct be_adapter *adapter = pci_get_drvdata(pdev);

	if (!adapter)
		return;

	be_roce_dev_remove(adapter);
	/* Mask interrupts for all ULPs sharing this function */
	be_intr_set(adapter, false);

	cancel_delayed_work_sync(&adapter->func_recovery_work);

	unregister_netdev(adapter->netdev);

	be_clear(adapter);

	/* tell fw we're done with firing cmds */
	be_cmd_fw_clean(adapter);

	be_stats_cleanup(adapter);

	be_ctrl_cleanup(adapter);

	pci_disable_pcie_error_reporting(pdev);

	pci_set_drvdata(pdev, NULL);
	pci_release_regions(pdev);
	pci_disable_device(pdev);

	free_netdev(adapter->netdev);
}
3880
Ajit Khaparde4762f6c2012-03-18 06:23:11 +00003881bool be_is_wol_supported(struct be_adapter *adapter)
3882{
3883 return ((adapter->wol_cap & BE_WOL_CAP) &&
3884 !be_is_wol_excluded(adapter)) ? true : false;
3885}
3886
/* Query the firmware's UART trace level via the extended-FAT capabilities
 * command. Returns the level of the MODE_UART entry (the last one found),
 * or 0 on Lancer chips, allocation failure, or command failure.
 */
u32 be_get_fw_log_level(struct be_adapter *adapter)
{
	struct be_dma_mem extfat_cmd;
	struct be_fat_conf_params *cfgs;
	int status;
	u32 level = 0;
	int j;

	/* Lancer does not support this command */
	if (lancer_chip(adapter))
		return 0;

	memset(&extfat_cmd, 0, sizeof(struct be_dma_mem));
	extfat_cmd.size = sizeof(struct be_cmd_resp_get_ext_fat_caps);
	/* NOTE(review): legacy pci_alloc_consistent (implies GFP_ATOMIC);
	 * the rest of the file uses dma_alloc_coherent.
	 */
	extfat_cmd.va = pci_alloc_consistent(adapter->pdev, extfat_cmd.size,
					     &extfat_cmd.dma);

	if (!extfat_cmd.va) {
		dev_err(&adapter->pdev->dev, "%s: Memory allocation failure\n",
			__func__);
		goto err;
	}

	status = be_cmd_get_ext_fat_capabilites(adapter, &extfat_cmd);
	if (!status) {
		/* The config parameters follow the generic response header */
		cfgs = (struct be_fat_conf_params *)(extfat_cmd.va +
						sizeof(struct be_cmd_resp_hdr));
		for (j = 0; j < le32_to_cpu(cfgs->module[0].num_modes); j++) {
			if (cfgs->module[0].trace_lvl[j].mode == MODE_UART)
				level = cfgs->module[0].trace_lvl[j].dbg_lvl;
		}
	}
	pci_free_consistent(adapter->pdev, extfat_cmd.size, extfat_cmd.va,
			    extfat_cmd.dma);
err:
	return level;
}
Padmanabh Ratnakarabb93952012-10-20 06:01:41 +00003923
/* Fetch the initial device configuration from firmware: controller
 * attributes, Wake-on-LAN capability, die-temperature polling frequency
 * and the driver message level derived from the FW log level.
 *
 * Returns 0 on success or the failing command's status.
 */
static int be_get_initial_config(struct be_adapter *adapter)
{
	int status;
	u32 level;

	status = be_cmd_get_cntl_attributes(adapter);
	if (status)
		return status;

	status = be_cmd_get_acpi_wol_cap(adapter);
	if (status) {
		/* If querying WoL capabilities fails, fall back to the
		 * static exclusion list to decide WoL support.
		 */
		if (!be_is_wol_excluded(adapter))
			adapter->wol_cap |= BE_WOL_CAP;
	}

	if (be_is_wol_supported(adapter))
		adapter->wol = true;

	/* Must be a power of 2 or else MODULO will BUG_ON */
	adapter->be_get_temp_freq = 64;

	level = be_get_fw_log_level(adapter);
	adapter->msg_enable = level <= FW_LOG_LEVEL_DEFAULT ? NETIF_MSG_HW : 0;

	return 0;
}
3952
/* Recover a Lancer function after a SLIPORT error: wait for the chip to
 * become ready again, tear the function down, clear the error flags and
 * rebuild it, reopening the interface if it was running.
 *
 * Returns 0 on success; on failure returns the failing step's status (the
 * failure is only logged when it is not an EEH error, since EEH has its
 * own recovery path).
 */
static int lancer_recover_func(struct be_adapter *adapter)
{
	int status;

	status = lancer_test_and_set_rdy_state(adapter);
	if (status)
		goto err;

	if (netif_running(adapter->netdev))
		be_close(adapter->netdev);

	be_clear(adapter);

	/* Clear the error state before re-running setup commands */
	adapter->hw_error = false;
	adapter->fw_timeout = false;

	status = be_setup(adapter);
	if (status)
		goto err;

	if (netif_running(adapter->netdev)) {
		status = be_open(adapter->netdev);
		if (status)
			goto err;
	}

	dev_err(&adapter->pdev->dev,
		"Adapter SLIPORT recovery succeeded\n");
	return 0;
err:
	if (adapter->eeh_error)
		dev_err(&adapter->pdev->dev,
			"Adapter SLIPORT recovery failed\n");

	return status;
}
3989
/* Periodic (1s) error-detection worker. On a Lancer hardware error (and
 * when EEH is not already handling it) the netdev is detached under RTNL,
 * the function is recovered and the netdev reattached on success. The
 * work always reschedules itself.
 */
static void be_func_recovery_task(struct work_struct *work)
{
	struct be_adapter *adapter =
		container_of(work, struct be_adapter, func_recovery_work.work);
	int status;

	be_detect_error(adapter);

	if (adapter->hw_error && lancer_chip(adapter)) {

		/* EEH recovery owns the device; don't interfere */
		if (adapter->eeh_error)
			goto out;

		rtnl_lock();
		netif_device_detach(adapter->netdev);
		rtnl_unlock();

		status = lancer_recover_func(adapter);

		if (!status)
			netif_device_attach(adapter->netdev);
	}

out:
	schedule_delayed_work(&adapter->func_recovery_work,
			      msecs_to_jiffies(1000));
}
4017
/* Periodic (1s) housekeeping worker: reap MCC completions while the
 * interface is down; otherwise issue a stats query (if none is pending),
 * periodically read the die temperature, replenish any starved RX rings
 * and update the adaptive EQ delay. Always reschedules itself.
 */
static void be_worker(struct work_struct *work)
{
	struct be_adapter *adapter =
		container_of(work, struct be_adapter, work.work);
	struct be_rx_obj *rxo;
	struct be_eq_obj *eqo;
	int i;

	/* when interrupts are not yet enabled, just reap any pending
	 * mcc completions */
	if (!netif_running(adapter->netdev)) {
		local_bh_disable();
		be_process_mcc(adapter);
		local_bh_enable();
		goto reschedule;
	}

	/* Don't issue a new stats command while the previous one is in
	 * flight */
	if (!adapter->stats_cmd_sent) {
		if (lancer_chip(adapter))
			lancer_cmd_get_pport_stats(adapter,
						   &adapter->stats_cmd);
		else
			be_cmd_get_stats(adapter, &adapter->stats_cmd);
	}

	/* be_get_temp_freq is set in be_get_initial_config() */
	if (MODULO(adapter->work_counter, adapter->be_get_temp_freq) == 0)
		be_cmd_get_die_temperature(adapter);

	/* Refill RX rings that ran out of buffers under memory pressure */
	for_all_rx_queues(adapter, rxo, i) {
		if (rxo->rx_post_starved) {
			rxo->rx_post_starved = false;
			be_post_rx_frags(rxo, GFP_KERNEL);
		}
	}

	for_all_evt_queues(adapter, eqo, i)
		be_eqd_update(adapter, eqo);

reschedule:
	adapter->work_counter++;
	schedule_delayed_work(&adapter->work, msecs_to_jiffies(1000));
}
4060
Sathya Perla39f1d942012-05-08 19:41:24 +00004061static bool be_reset_required(struct be_adapter *adapter)
4062{
Sathya Perlad79c0a22012-06-05 19:37:22 +00004063 return be_find_vfs(adapter, ENABLED) > 0 ? false : true;
Sathya Perla39f1d942012-05-08 19:41:24 +00004064}
4065
Sathya Perlad3791422012-09-28 04:39:44 +00004066static char *mc_name(struct be_adapter *adapter)
4067{
4068 if (adapter->function_mode & FLEX10_MODE)
4069 return "FLEX10";
4070 else if (adapter->function_mode & VNIC_MODE)
4071 return "vNIC";
4072 else if (adapter->function_mode & UMC_ENABLED)
4073 return "UMC";
4074 else
4075 return "";
4076}
4077
/* "PF" for a physical function, "VF" for a virtual function */
static inline char *func_name(struct be_adapter *adapter)
{
	if (be_physfn(adapter))
		return "PF";

	return "VF";
}
4082
Greg Kroah-Hartman1dd06ae2012-12-06 14:30:56 +00004083static int be_probe(struct pci_dev *pdev, const struct pci_device_id *pdev_id)
Sathya Perla6b7c5b92009-03-11 23:32:03 -07004084{
4085 int status = 0;
4086 struct be_adapter *adapter;
4087 struct net_device *netdev;
Padmanabh Ratnakarb4e32a72012-07-12 03:57:35 +00004088 char port_name;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07004089
4090 status = pci_enable_device(pdev);
4091 if (status)
4092 goto do_none;
4093
4094 status = pci_request_regions(pdev, DRV_NAME);
4095 if (status)
4096 goto disable_dev;
4097 pci_set_master(pdev);
4098
Sathya Perla7f640062012-06-05 19:37:20 +00004099 netdev = alloc_etherdev_mqs(sizeof(*adapter), MAX_TX_QS, MAX_RX_QS);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07004100 if (netdev == NULL) {
4101 status = -ENOMEM;
4102 goto rel_reg;
4103 }
4104 adapter = netdev_priv(netdev);
4105 adapter->pdev = pdev;
4106 pci_set_drvdata(pdev, adapter);
4107 adapter->netdev = netdev;
Sathya Perla2243e2e2009-11-22 22:02:03 +00004108 SET_NETDEV_DEV(netdev, &pdev->dev);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07004109
Ivan Vecera2b7bceb2011-02-02 08:05:12 +00004110 status = dma_set_mask(&pdev->dev, DMA_BIT_MASK(64));
Sathya Perla6b7c5b92009-03-11 23:32:03 -07004111 if (!status) {
4112 netdev->features |= NETIF_F_HIGHDMA;
4113 } else {
Ivan Vecera2b7bceb2011-02-02 08:05:12 +00004114 status = dma_set_mask(&pdev->dev, DMA_BIT_MASK(32));
Sathya Perla6b7c5b92009-03-11 23:32:03 -07004115 if (status) {
4116 dev_err(&pdev->dev, "Could not set PCI DMA Mask\n");
4117 goto free_netdev;
4118 }
4119 }
4120
Sathya Perlad6b6d982012-09-05 01:56:48 +00004121 status = pci_enable_pcie_error_reporting(pdev);
4122 if (status)
4123 dev_err(&pdev->dev, "Could not use PCIe error reporting\n");
4124
Sathya Perla6b7c5b92009-03-11 23:32:03 -07004125 status = be_ctrl_init(adapter);
4126 if (status)
Sathya Perla39f1d942012-05-08 19:41:24 +00004127 goto free_netdev;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07004128
Sathya Perla2243e2e2009-11-22 22:02:03 +00004129 /* sync up with fw's ready state */
Sarveshwar Bandiba343c72010-03-31 02:56:12 +00004130 if (be_physfn(adapter)) {
Padmanabh Ratnakarbf99e502012-07-12 03:56:58 +00004131 status = be_fw_wait_ready(adapter);
Sarveshwar Bandiba343c72010-03-31 02:56:12 +00004132 if (status)
4133 goto ctrl_clean;
Sarveshwar Bandiba343c72010-03-31 02:56:12 +00004134 }
Sathya Perla2243e2e2009-11-22 22:02:03 +00004135
4136 /* tell fw we're ready to fire cmds */
4137 status = be_cmd_fw_init(adapter);
4138 if (status)
4139 goto ctrl_clean;
4140
Sathya Perla39f1d942012-05-08 19:41:24 +00004141 if (be_reset_required(adapter)) {
4142 status = be_cmd_reset_function(adapter);
4143 if (status)
4144 goto ctrl_clean;
4145 }
Sarveshwar Bandi556ae192010-05-24 18:38:25 -07004146
Somnath Kotur8cef7a72013-03-14 02:41:51 +00004147 /* Wait for interrupts to quiesce after an FLR */
4148 msleep(100);
4149
4150 /* Allow interrupts for other ULPs running on NIC function */
4151 be_intr_set(adapter, true);
Sathya Perla10ef9ab2012-02-09 18:05:27 +00004152
Sathya Perla6b7c5b92009-03-11 23:32:03 -07004153 status = be_stats_init(adapter);
4154 if (status)
4155 goto ctrl_clean;
4156
Sathya Perla39f1d942012-05-08 19:41:24 +00004157 status = be_get_initial_config(adapter);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07004158 if (status)
4159 goto stats_clean;
4160
Sathya Perla6b7c5b92009-03-11 23:32:03 -07004161 INIT_DELAYED_WORK(&adapter->work, be_worker);
Padmanabh Ratnakarf67ef7b2012-07-12 03:57:09 +00004162 INIT_DELAYED_WORK(&adapter->func_recovery_work, be_func_recovery_task);
Sathya Perlaa54769f2011-10-24 02:45:00 +00004163 adapter->rx_fc = adapter->tx_fc = true;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07004164
Sathya Perla5fb379e2009-06-18 00:02:59 +00004165 status = be_setup(adapter);
4166 if (status)
Sathya Perla55f5c3c2012-09-28 04:39:42 +00004167 goto stats_clean;
Sathya Perla2243e2e2009-11-22 22:02:03 +00004168
Sathya Perla3abcded2010-10-03 22:12:27 -07004169 be_netdev_init(netdev);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07004170 status = register_netdev(netdev);
4171 if (status != 0)
Sathya Perla5fb379e2009-06-18 00:02:59 +00004172 goto unsetup;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07004173
Parav Pandit045508a2012-03-26 14:27:13 +00004174 be_roce_dev_add(adapter);
4175
Padmanabh Ratnakarf67ef7b2012-07-12 03:57:09 +00004176 schedule_delayed_work(&adapter->func_recovery_work,
4177 msecs_to_jiffies(1000));
Padmanabh Ratnakarb4e32a72012-07-12 03:57:35 +00004178
4179 be_cmd_query_port_name(adapter, &port_name);
4180
Sathya Perlad3791422012-09-28 04:39:44 +00004181 dev_info(&pdev->dev, "%s: %s %s port %c\n", nic_name(pdev),
4182 func_name(adapter), mc_name(adapter), port_name);
Somnath Kotur34b1ef02011-06-01 00:33:22 +00004183
Sathya Perla6b7c5b92009-03-11 23:32:03 -07004184 return 0;
4185
Sathya Perla5fb379e2009-06-18 00:02:59 +00004186unsetup:
4187 be_clear(adapter);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07004188stats_clean:
4189 be_stats_cleanup(adapter);
4190ctrl_clean:
4191 be_ctrl_cleanup(adapter);
Sathya Perlaf9449ab2011-10-24 02:45:01 +00004192free_netdev:
Sathya Perlafe6d2a32010-11-21 23:25:50 +00004193 free_netdev(netdev);
Sathya Perla8d56ff12009-11-22 22:02:26 +00004194 pci_set_drvdata(pdev, NULL);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07004195rel_reg:
4196 pci_release_regions(pdev);
4197disable_dev:
4198 pci_disable_device(pdev);
4199do_none:
Ajit Khapardec4ca2372009-05-18 15:38:55 -07004200 dev_err(&pdev->dev, "%s initialization failed\n", nic_name(pdev));
Sathya Perla6b7c5b92009-03-11 23:32:03 -07004201 return status;
4202}
4203
/* Legacy PM suspend callback: arm wake-on-LAN if configured, stop the
 * recovery worker, close the interface and tear down all queues, then
 * put the PCI function into the requested low-power state.
 */
static int be_suspend(struct pci_dev *pdev, pm_message_t state)
{
	struct be_adapter *adapter = pci_get_drvdata(pdev);
	struct net_device *netdev = adapter->netdev;

	if (adapter->wol)
		be_setup_wol(adapter, true);

	/* make sure the periodic recovery task is not running/queued
	 * while we tear the function down
	 */
	cancel_delayed_work_sync(&adapter->func_recovery_work);

	netif_device_detach(netdev);
	if (netif_running(netdev)) {
		rtnl_lock();
		be_close(netdev);
		rtnl_unlock();
	}
	be_clear(adapter);

	pci_save_state(pdev);
	pci_disable_device(pdev);
	pci_set_power_state(pdev, pci_choose_state(pdev, state));
	return 0;
}
4227
4228static int be_resume(struct pci_dev *pdev)
4229{
4230 int status = 0;
4231 struct be_adapter *adapter = pci_get_drvdata(pdev);
4232 struct net_device *netdev = adapter->netdev;
4233
4234 netif_device_detach(netdev);
4235
4236 status = pci_enable_device(pdev);
4237 if (status)
4238 return status;
4239
4240 pci_set_power_state(pdev, 0);
4241 pci_restore_state(pdev);
4242
Sathya Perla2243e2e2009-11-22 22:02:03 +00004243 /* tell fw we're ready to fire cmds */
4244 status = be_cmd_fw_init(adapter);
4245 if (status)
4246 return status;
4247
Sarveshwar Bandi9b0365f2009-08-12 21:01:29 +00004248 be_setup(adapter);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07004249 if (netif_running(netdev)) {
4250 rtnl_lock();
Sathya Perla6b7c5b92009-03-11 23:32:03 -07004251 be_open(netdev);
4252 rtnl_unlock();
4253 }
Padmanabh Ratnakarf67ef7b2012-07-12 03:57:09 +00004254
4255 schedule_delayed_work(&adapter->func_recovery_work,
4256 msecs_to_jiffies(1000));
Sathya Perla6b7c5b92009-03-11 23:32:03 -07004257 netif_device_attach(netdev);
Ajit Khaparde71d8d1b2009-12-03 06:16:59 +00004258
4259 if (adapter->wol)
4260 be_setup_wol(adapter, false);
Ajit Khapardea4ca0552011-02-11 13:38:03 +00004261
Sathya Perla6b7c5b92009-03-11 23:32:03 -07004262 return 0;
4263}
4264
Sathya Perla82456b02010-02-17 01:35:37 +00004265/*
4266 * An FLR will stop BE from DMAing any data.
4267 */
4268static void be_shutdown(struct pci_dev *pdev)
4269{
4270 struct be_adapter *adapter = pci_get_drvdata(pdev);
Sathya Perla82456b02010-02-17 01:35:37 +00004271
Ajit Khaparde2d5d4152011-04-06 05:53:13 +00004272 if (!adapter)
4273 return;
Sathya Perla82456b02010-02-17 01:35:37 +00004274
Sathya Perla0f4a6822011-03-21 20:49:28 +00004275 cancel_delayed_work_sync(&adapter->work);
Padmanabh Ratnakarf67ef7b2012-07-12 03:57:09 +00004276 cancel_delayed_work_sync(&adapter->func_recovery_work);
Ajit Khapardea4ca0552011-02-11 13:38:03 +00004277
Ajit Khaparde2d5d4152011-04-06 05:53:13 +00004278 netif_device_detach(adapter->netdev);
Sathya Perla82456b02010-02-17 01:35:37 +00004279
Ajit Khaparde57841862011-04-06 18:08:43 +00004280 be_cmd_reset_function(adapter);
4281
Sathya Perla82456b02010-02-17 01:35:37 +00004282 pci_disable_device(pdev);
Sathya Perla82456b02010-02-17 01:35:37 +00004283}
4284
Sathya Perlacf588472010-02-14 21:22:01 +00004285static pci_ers_result_t be_eeh_err_detected(struct pci_dev *pdev,
4286 pci_channel_state_t state)
4287{
4288 struct be_adapter *adapter = pci_get_drvdata(pdev);
4289 struct net_device *netdev = adapter->netdev;
4290
4291 dev_err(&adapter->pdev->dev, "EEH error detected\n");
4292
Padmanabh Ratnakarf67ef7b2012-07-12 03:57:09 +00004293 adapter->eeh_error = true;
Sathya Perlacf588472010-02-14 21:22:01 +00004294
Padmanabh Ratnakarf67ef7b2012-07-12 03:57:09 +00004295 cancel_delayed_work_sync(&adapter->func_recovery_work);
4296
4297 rtnl_lock();
Sathya Perlacf588472010-02-14 21:22:01 +00004298 netif_device_detach(netdev);
Padmanabh Ratnakarf67ef7b2012-07-12 03:57:09 +00004299 rtnl_unlock();
Sathya Perlacf588472010-02-14 21:22:01 +00004300
4301 if (netif_running(netdev)) {
4302 rtnl_lock();
4303 be_close(netdev);
4304 rtnl_unlock();
4305 }
4306 be_clear(adapter);
4307
4308 if (state == pci_channel_io_perm_failure)
4309 return PCI_ERS_RESULT_DISCONNECT;
4310
4311 pci_disable_device(pdev);
4312
Somnath Kotureeb7fc72012-05-02 03:41:01 +00004313 /* The error could cause the FW to trigger a flash debug dump.
4314 * Resetting the card while flash dump is in progress
Padmanabh Ratnakarc8a54162012-10-20 06:03:37 +00004315 * can cause it not to recover; wait for it to finish.
4316 * Wait only for first function as it is needed only once per
4317 * adapter.
Somnath Kotureeb7fc72012-05-02 03:41:01 +00004318 */
Padmanabh Ratnakarc8a54162012-10-20 06:03:37 +00004319 if (pdev->devfn == 0)
4320 ssleep(30);
4321
Sathya Perlacf588472010-02-14 21:22:01 +00004322 return PCI_ERS_RESULT_NEED_RESET;
4323}
4324
/* EEH slot-reset callback: re-enable the PCI function after the slot
 * has been reset and wait for the FW to become ready again.  Returns
 * RECOVERED on success so that be_eeh_resume() is invoked next.
 */
static pci_ers_result_t be_eeh_reset(struct pci_dev *pdev)
{
	struct be_adapter *adapter = pci_get_drvdata(pdev);
	int status;

	dev_info(&adapter->pdev->dev, "EEH reset\n");
	/* clear the sticky error flags set in be_eeh_err_detected() */
	be_clear_all_error(adapter);

	status = pci_enable_device(pdev);
	if (status)
		return PCI_ERS_RESULT_DISCONNECT;

	pci_set_master(pdev);
	pci_set_power_state(pdev, 0);
	pci_restore_state(pdev);

	/* Check if card is ok and fw is ready */
	dev_info(&adapter->pdev->dev,
		 "Waiting for FW to be ready after EEH reset\n");
	status = be_fw_wait_ready(adapter);
	if (status)
		return PCI_ERS_RESULT_DISCONNECT;

	pci_cleanup_aer_uncorrect_error_status(pdev);
	return PCI_ERS_RESULT_RECOVERED;
}
4351
/* EEH resume callback: re-initialize the FW command path, reset and
 * re-setup the function, and bring the interface back up after a
 * successful slot reset.  Failures are only logged; EEH offers no
 * further recovery at this stage.
 */
static void be_eeh_resume(struct pci_dev *pdev)
{
	int status = 0;
	struct be_adapter *adapter = pci_get_drvdata(pdev);
	struct net_device *netdev = adapter->netdev;

	dev_info(&adapter->pdev->dev, "EEH resume\n");

	pci_save_state(pdev);

	/* tell fw we're ready to fire cmds */
	status = be_cmd_fw_init(adapter);
	if (status)
		goto err;

	status = be_cmd_reset_function(adapter);
	if (status)
		goto err;

	status = be_setup(adapter);
	if (status)
		goto err;

	if (netif_running(netdev)) {
		status = be_open(netdev);
		if (status)
			goto err;
	}

	/* restart the periodic function-recovery task */
	schedule_delayed_work(&adapter->func_recovery_work,
			      msecs_to_jiffies(1000));
	netif_device_attach(netdev);
	return;
err:
	dev_err(&adapter->pdev->dev, "EEH resume failed\n");
}
4388
Stephen Hemminger3646f0e2012-09-07 09:33:15 -07004389static const struct pci_error_handlers be_eeh_handlers = {
Sathya Perlacf588472010-02-14 21:22:01 +00004390 .error_detected = be_eeh_err_detected,
4391 .slot_reset = be_eeh_reset,
4392 .resume = be_eeh_resume,
4393};
4394
Sathya Perla6b7c5b92009-03-11 23:32:03 -07004395static struct pci_driver be_driver = {
4396 .name = DRV_NAME,
4397 .id_table = be_dev_ids,
4398 .probe = be_probe,
4399 .remove = be_remove,
4400 .suspend = be_suspend,
Sathya Perlacf588472010-02-14 21:22:01 +00004401 .resume = be_resume,
Sathya Perla82456b02010-02-17 01:35:37 +00004402 .shutdown = be_shutdown,
Sathya Perlacf588472010-02-14 21:22:01 +00004403 .err_handler = &be_eeh_handlers
Sathya Perla6b7c5b92009-03-11 23:32:03 -07004404};
4405
4406static int __init be_init_module(void)
4407{
Joe Perches8e95a202009-12-03 07:58:21 +00004408 if (rx_frag_size != 8192 && rx_frag_size != 4096 &&
4409 rx_frag_size != 2048) {
Sathya Perla6b7c5b92009-03-11 23:32:03 -07004410 printk(KERN_WARNING DRV_NAME
4411 " : Module param rx_frag_size must be 2048/4096/8192."
4412 " Using 2048\n");
4413 rx_frag_size = 2048;
4414 }
Sathya Perla6b7c5b92009-03-11 23:32:03 -07004415
Sathya Perla6b7c5b92009-03-11 23:32:03 -07004416 return pci_register_driver(&be_driver);
4417}
4418module_init(be_init_module);
4419
/* Module exit point: unregister the PCI driver, which in turn invokes
 * be_remove() for every bound device.
 */
static void __exit be_exit_module(void)
{
	pci_unregister_driver(&be_driver);
}
module_exit(be_exit_module);