blob: cd69ac79f565bb4490c55507d0696d24ba84c106 [file] [log] [blame]
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001/*
Vasundhara Volamc7bb15a2013-03-06 20:05:05 +00002 * Copyright (C) 2005 - 2013 Emulex
Sathya Perla6b7c5b92009-03-11 23:32:03 -07003 * All rights reserved.
4 *
5 * This program is free software; you can redistribute it and/or
6 * modify it under the terms of the GNU General Public License version 2
7 * as published by the Free Software Foundation. The full GNU General
8 * Public License is included in this distribution in the file called COPYING.
9 *
10 * Contact Information:
Ajit Khaparded2145cd2011-03-16 08:20:46 +000011 * linux-drivers@emulex.com
Sathya Perla6b7c5b92009-03-11 23:32:03 -070012 *
Ajit Khaparded2145cd2011-03-16 08:20:46 +000013 * Emulex
14 * 3333 Susan Street
15 * Costa Mesa, CA 92626
Sathya Perla6b7c5b92009-03-11 23:32:03 -070016 */
17
Paul Gortmaker70c71602011-05-22 16:47:17 -040018#include <linux/prefetch.h>
Paul Gortmaker9d9779e2011-07-03 15:21:01 -040019#include <linux/module.h>
Sathya Perla6b7c5b92009-03-11 23:32:03 -070020#include "be.h"
Sathya Perla8788fdc2009-07-27 22:52:03 +000021#include "be_cmds.h"
Stephen Hemminger65f71b82009-03-27 00:25:24 -070022#include <asm/div64.h>
Sathya Perlad6b6d982012-09-05 01:56:48 +000023#include <linux/aer.h>
Sathya Perla6b7c5b92009-03-11 23:32:03 -070024
25MODULE_VERSION(DRV_VER);
26MODULE_DEVICE_TABLE(pci, be_dev_ids);
27MODULE_DESCRIPTION(DRV_DESC " " DRV_VER);
Sarveshwar Bandi00d3d512013-01-28 04:17:01 +000028MODULE_AUTHOR("Emulex Corporation");
Sathya Perla6b7c5b92009-03-11 23:32:03 -070029MODULE_LICENSE("GPL");
30
Sarveshwar Bandiba343c72010-03-31 02:56:12 +000031static unsigned int num_vfs;
Sarveshwar Bandiba343c72010-03-31 02:56:12 +000032module_param(num_vfs, uint, S_IRUGO);
Sarveshwar Bandiba343c72010-03-31 02:56:12 +000033MODULE_PARM_DESC(num_vfs, "Number of PCI VFs to initialize");
Sathya Perla6b7c5b92009-03-11 23:32:03 -070034
Sathya Perla11ac75e2011-12-13 00:58:50 +000035static ushort rx_frag_size = 2048;
36module_param(rx_frag_size, ushort, S_IRUGO);
37MODULE_PARM_DESC(rx_frag_size, "Size of a fragment that holds rcvd data.");
38
Sathya Perla6b7c5b92009-03-11 23:32:03 -070039static DEFINE_PCI_DEVICE_TABLE(be_dev_ids) = {
Ajit Khapardec4ca2372009-05-18 15:38:55 -070040 { PCI_DEVICE(BE_VENDOR_ID, BE_DEVICE_ID1) },
Ajit Khaparde59fd5d82009-10-29 01:11:06 -070041 { PCI_DEVICE(BE_VENDOR_ID, BE_DEVICE_ID2) },
Ajit Khapardec4ca2372009-05-18 15:38:55 -070042 { PCI_DEVICE(BE_VENDOR_ID, OC_DEVICE_ID1) },
43 { PCI_DEVICE(BE_VENDOR_ID, OC_DEVICE_ID2) },
Sathya Perlafe6d2a32010-11-21 23:25:50 +000044 { PCI_DEVICE(EMULEX_VENDOR_ID, OC_DEVICE_ID3)},
Mammatha Edhala12f4d0a2011-05-18 03:26:22 +000045 { PCI_DEVICE(EMULEX_VENDOR_ID, OC_DEVICE_ID4)},
Ajit Khapardeecedb6a2011-12-15 06:31:38 +000046 { PCI_DEVICE(EMULEX_VENDOR_ID, OC_DEVICE_ID5)},
Padmanabh Ratnakar76b73532012-10-20 06:04:40 +000047 { PCI_DEVICE(EMULEX_VENDOR_ID, OC_DEVICE_ID6)},
Sathya Perla6b7c5b92009-03-11 23:32:03 -070048 { 0 }
49};
50MODULE_DEVICE_TABLE(pci, be_dev_ids);
Ajit Khaparde7c185272010-07-29 06:16:33 +000051/* UE Status Low CSR */
Joe Perches42c8b112011-07-09 02:56:56 -070052static const char * const ue_status_low_desc[] = {
Ajit Khaparde7c185272010-07-29 06:16:33 +000053 "CEV",
54 "CTX",
55 "DBUF",
56 "ERX",
57 "Host",
58 "MPU",
59 "NDMA",
60 "PTC ",
61 "RDMA ",
62 "RXF ",
63 "RXIPS ",
64 "RXULP0 ",
65 "RXULP1 ",
66 "RXULP2 ",
67 "TIM ",
68 "TPOST ",
69 "TPRE ",
70 "TXIPS ",
71 "TXULP0 ",
72 "TXULP1 ",
73 "UC ",
74 "WDMA ",
75 "TXULP2 ",
76 "HOST1 ",
77 "P0_OB_LINK ",
78 "P1_OB_LINK ",
79 "HOST_GPIO ",
80 "MBOX ",
81 "AXGMAC0",
82 "AXGMAC1",
83 "JTAG",
84 "MPU_INTPEND"
85};
/* UE Status High CSR: human-readable names for each bit of the
 * unrecoverable-error status high register, indexed by bit position.
 * Trailing "Unknown" entries pad the table to the full 32 bits.
 */
static const char * const ue_status_hi_desc[] = {
	"LPCMEMHOST",
	"MGMT_MAC",
	"PCS0ONLINE",
	"MPU_IRAM",
	"PCS1ONLINE",
	"PCTL0",
	"PCTL1",
	"PMEM",
	"RR",
	"TXPB",
	"RXPP",
	"XAUI",
	"TXP",
	"ARM",
	"IPC",
	"HOST2",
	"HOST3",
	"HOST4",
	"HOST5",
	"HOST6",
	"HOST7",
	"HOST8",
	"HOST9",
	"NETC",
	"Unknown",
	"Unknown",
	"Unknown",
	"Unknown",
	"Unknown",
	"Unknown",
	"Unknown",
	"Unknown"
};
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700121
Sathya Perla752961a2011-10-24 02:45:03 +0000122/* Is BE in a multi-channel mode */
123static inline bool be_is_mc(struct be_adapter *adapter) {
124 return (adapter->function_mode & FLEX10_MODE ||
125 adapter->function_mode & VNIC_MODE ||
126 adapter->function_mode & UMC_ENABLED);
127}
128
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700129static void be_queue_free(struct be_adapter *adapter, struct be_queue_info *q)
130{
131 struct be_dma_mem *mem = &q->dma_mem;
Sathya Perla1cfafab2012-02-23 18:50:15 +0000132 if (mem->va) {
Ivan Vecera2b7bceb2011-02-02 08:05:12 +0000133 dma_free_coherent(&adapter->pdev->dev, mem->size, mem->va,
134 mem->dma);
Sathya Perla1cfafab2012-02-23 18:50:15 +0000135 mem->va = NULL;
136 }
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700137}
138
139static int be_queue_alloc(struct be_adapter *adapter, struct be_queue_info *q,
140 u16 len, u16 entry_size)
141{
142 struct be_dma_mem *mem = &q->dma_mem;
143
144 memset(q, 0, sizeof(*q));
145 q->len = len;
146 q->entry_size = entry_size;
147 mem->size = len * entry_size;
Ivan Vecera2b7bceb2011-02-02 08:05:12 +0000148 mem->va = dma_alloc_coherent(&adapter->pdev->dev, mem->size, &mem->dma,
Joe Perches1f9061d22013-03-15 07:23:58 +0000149 GFP_KERNEL | __GFP_ZERO);
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700150 if (!mem->va)
Sathya Perla10ef9ab2012-02-09 18:05:27 +0000151 return -ENOMEM;
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700152 return 0;
153}
154
Somnath Kotur68c45a22013-03-14 02:42:07 +0000155static void be_reg_intr_set(struct be_adapter *adapter, bool enable)
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700156{
Sathya Perladb3ea782011-08-22 19:41:52 +0000157 u32 reg, enabled;
Sathya Perla5f0b8492009-07-27 22:52:56 +0000158
Sathya Perladb3ea782011-08-22 19:41:52 +0000159 pci_read_config_dword(adapter->pdev, PCICFG_MEMBAR_CTRL_INT_CTRL_OFFSET,
160 &reg);
161 enabled = reg & MEMBAR_CTRL_INT_CTRL_HOSTINTR_MASK;
162
Sathya Perla5f0b8492009-07-27 22:52:56 +0000163 if (!enabled && enable)
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700164 reg |= MEMBAR_CTRL_INT_CTRL_HOSTINTR_MASK;
Sathya Perla5f0b8492009-07-27 22:52:56 +0000165 else if (enabled && !enable)
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700166 reg &= ~MEMBAR_CTRL_INT_CTRL_HOSTINTR_MASK;
Sathya Perla5f0b8492009-07-27 22:52:56 +0000167 else
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700168 return;
Sathya Perla5f0b8492009-07-27 22:52:56 +0000169
Sathya Perladb3ea782011-08-22 19:41:52 +0000170 pci_write_config_dword(adapter->pdev,
171 PCICFG_MEMBAR_CTRL_INT_CTRL_OFFSET, reg);
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700172}
173
Somnath Kotur68c45a22013-03-14 02:42:07 +0000174static void be_intr_set(struct be_adapter *adapter, bool enable)
175{
176 int status = 0;
177
178 /* On lancer interrupts can't be controlled via this register */
179 if (lancer_chip(adapter))
180 return;
181
182 if (adapter->eeh_error)
183 return;
184
185 status = be_cmd_intr_set(adapter, enable);
186 if (status)
187 be_reg_intr_set(adapter, enable);
188}
189
Sathya Perla8788fdc2009-07-27 22:52:03 +0000190static void be_rxq_notify(struct be_adapter *adapter, u16 qid, u16 posted)
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700191{
192 u32 val = 0;
193 val |= qid & DB_RQ_RING_ID_MASK;
194 val |= posted << DB_RQ_NUM_POSTED_SHIFT;
Sathya Perlaf3eb62d2010-06-29 00:11:17 +0000195
196 wmb();
Sathya Perla8788fdc2009-07-27 22:52:03 +0000197 iowrite32(val, adapter->db + DB_RQ_OFFSET);
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700198}
199
Vasundhara Volam94d73aa2013-04-21 23:28:14 +0000200static void be_txq_notify(struct be_adapter *adapter, struct be_tx_obj *txo,
201 u16 posted)
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700202{
203 u32 val = 0;
Vasundhara Volam94d73aa2013-04-21 23:28:14 +0000204 val |= txo->q.id & DB_TXULP_RING_ID_MASK;
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700205 val |= (posted & DB_TXULP_NUM_POSTED_MASK) << DB_TXULP_NUM_POSTED_SHIFT;
Sathya Perlaf3eb62d2010-06-29 00:11:17 +0000206
207 wmb();
Vasundhara Volam94d73aa2013-04-21 23:28:14 +0000208 iowrite32(val, adapter->db + txo->db_offset);
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700209}
210
Sathya Perla8788fdc2009-07-27 22:52:03 +0000211static void be_eq_notify(struct be_adapter *adapter, u16 qid,
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700212 bool arm, bool clear_int, u16 num_popped)
213{
214 u32 val = 0;
215 val |= qid & DB_EQ_RING_ID_MASK;
Sathya Perlafe6d2a32010-11-21 23:25:50 +0000216 val |= ((qid & DB_EQ_RING_ID_EXT_MASK) <<
217 DB_EQ_RING_ID_EXT_MASK_SHIFT);
Sathya Perlacf588472010-02-14 21:22:01 +0000218
Padmanabh Ratnakarf67ef7b2012-07-12 03:57:09 +0000219 if (adapter->eeh_error)
Sathya Perlacf588472010-02-14 21:22:01 +0000220 return;
221
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700222 if (arm)
223 val |= 1 << DB_EQ_REARM_SHIFT;
224 if (clear_int)
225 val |= 1 << DB_EQ_CLR_SHIFT;
226 val |= 1 << DB_EQ_EVNT_SHIFT;
227 val |= num_popped << DB_EQ_NUM_POPPED_SHIFT;
Sathya Perla8788fdc2009-07-27 22:52:03 +0000228 iowrite32(val, adapter->db + DB_EQ_OFFSET);
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700229}
230
Sathya Perla8788fdc2009-07-27 22:52:03 +0000231void be_cq_notify(struct be_adapter *adapter, u16 qid, bool arm, u16 num_popped)
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700232{
233 u32 val = 0;
234 val |= qid & DB_CQ_RING_ID_MASK;
Sathya Perlafe6d2a32010-11-21 23:25:50 +0000235 val |= ((qid & DB_CQ_RING_ID_EXT_MASK) <<
236 DB_CQ_RING_ID_EXT_MASK_SHIFT);
Sathya Perlacf588472010-02-14 21:22:01 +0000237
Padmanabh Ratnakarf67ef7b2012-07-12 03:57:09 +0000238 if (adapter->eeh_error)
Sathya Perlacf588472010-02-14 21:22:01 +0000239 return;
240
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700241 if (arm)
242 val |= 1 << DB_CQ_REARM_SHIFT;
243 val |= num_popped << DB_CQ_NUM_POPPED_SHIFT;
Sathya Perla8788fdc2009-07-27 22:52:03 +0000244 iowrite32(val, adapter->db + DB_CQ_OFFSET);
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700245}
246
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700247static int be_mac_addr_set(struct net_device *netdev, void *p)
248{
249 struct be_adapter *adapter = netdev_priv(netdev);
250 struct sockaddr *addr = p;
251 int status = 0;
Somnath Koture3a7ae22011-10-27 07:14:05 +0000252 u8 current_mac[ETH_ALEN];
Ajit Khapardefbc13f02012-03-18 06:23:21 +0000253 u32 pmac_id = adapter->pmac_id[0];
Padmanabh Ratnakar704e4c82012-10-20 06:02:13 +0000254 bool active_mac = true;
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700255
Ajit Khapardeca9e4982009-11-29 17:56:26 +0000256 if (!is_valid_ether_addr(addr->sa_data))
257 return -EADDRNOTAVAIL;
258
Padmanabh Ratnakar704e4c82012-10-20 06:02:13 +0000259 /* For BE VF, MAC address is already activated by PF.
260 * Hence only operation left is updating netdev->devaddr.
261 * Update it if user is passing the same MAC which was used
262 * during configuring VF MAC from PF(Hypervisor).
263 */
264 if (!lancer_chip(adapter) && !be_physfn(adapter)) {
265 status = be_cmd_mac_addr_query(adapter, current_mac,
266 false, adapter->if_handle, 0);
267 if (!status && !memcmp(current_mac, addr->sa_data, ETH_ALEN))
268 goto done;
269 else
270 goto err;
271 }
272
273 if (!memcmp(addr->sa_data, netdev->dev_addr, ETH_ALEN))
274 goto done;
275
276 /* For Lancer check if any MAC is active.
277 * If active, get its mac id.
278 */
279 if (lancer_chip(adapter) && !be_physfn(adapter))
280 be_cmd_get_mac_from_list(adapter, current_mac, &active_mac,
281 &pmac_id, 0);
282
283 status = be_cmd_pmac_add(adapter, (u8 *)addr->sa_data,
284 adapter->if_handle,
285 &adapter->pmac_id[0], 0);
286
Sathya Perlaa65027e2009-08-17 00:58:04 +0000287 if (status)
Somnath Koture3a7ae22011-10-27 07:14:05 +0000288 goto err;
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700289
Padmanabh Ratnakar704e4c82012-10-20 06:02:13 +0000290 if (active_mac)
291 be_cmd_pmac_del(adapter, adapter->if_handle,
292 pmac_id, 0);
293done:
Somnath Koture3a7ae22011-10-27 07:14:05 +0000294 memcpy(netdev->dev_addr, addr->sa_data, netdev->addr_len);
295 return 0;
296err:
297 dev_err(&adapter->pdev->dev, "MAC %pM set Failed\n", addr->sa_data);
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700298 return status;
299}
300
Sathya Perlaca34fe32012-11-06 17:48:56 +0000301/* BE2 supports only v0 cmd */
302static void *hw_stats_from_cmd(struct be_adapter *adapter)
303{
304 if (BE2_chip(adapter)) {
305 struct be_cmd_resp_get_stats_v0 *cmd = adapter->stats_cmd.va;
306
307 return &cmd->hw_stats;
308 } else {
309 struct be_cmd_resp_get_stats_v1 *cmd = adapter->stats_cmd.va;
310
311 return &cmd->hw_stats;
312 }
313}
314
315/* BE2 supports only v0 cmd */
316static void *be_erx_stats_from_cmd(struct be_adapter *adapter)
317{
318 if (BE2_chip(adapter)) {
319 struct be_hw_stats_v0 *hw_stats = hw_stats_from_cmd(adapter);
320
321 return &hw_stats->erx;
322 } else {
323 struct be_hw_stats_v1 *hw_stats = hw_stats_from_cmd(adapter);
324
325 return &hw_stats->erx;
326 }
327}
328
/* Copy the v0-layout (BE2) hardware stats into the driver's
 * chip-independent drv_stats structure.  The DMA buffer is converted
 * from LE in place first, so this must run after a fresh stats query.
 */
static void populate_be_v0_stats(struct be_adapter *adapter)
{
	struct be_hw_stats_v0 *hw_stats = hw_stats_from_cmd(adapter);
	struct be_pmem_stats *pmem_sts = &hw_stats->pmem;
	struct be_rxf_stats_v0 *rxf_stats = &hw_stats->rxf;
	struct be_port_rxf_stats_v0 *port_stats =
			&rxf_stats->port[adapter->port_num];
	struct be_drv_stats *drvs = &adapter->drv_stats;

	be_dws_le_to_cpu(hw_stats, sizeof(*hw_stats));
	drvs->rx_pause_frames = port_stats->rx_pause_frames;
	drvs->rx_crc_errors = port_stats->rx_crc_errors;
	drvs->rx_control_frames = port_stats->rx_control_frames;
	drvs->rx_in_range_errors = port_stats->rx_in_range_errors;
	drvs->rx_frame_too_long = port_stats->rx_frame_too_long;
	drvs->rx_dropped_runt = port_stats->rx_dropped_runt;
	drvs->rx_ip_checksum_errs = port_stats->rx_ip_checksum_errs;
	drvs->rx_tcp_checksum_errs = port_stats->rx_tcp_checksum_errs;
	drvs->rx_udp_checksum_errs = port_stats->rx_udp_checksum_errs;
	drvs->rxpp_fifo_overflow_drop = port_stats->rx_fifo_overflow;
	drvs->rx_dropped_tcp_length = port_stats->rx_dropped_tcp_length;
	drvs->rx_dropped_too_small = port_stats->rx_dropped_too_small;
	drvs->rx_dropped_too_short = port_stats->rx_dropped_too_short;
	drvs->rx_out_range_errors = port_stats->rx_out_range_errors;
	drvs->rx_input_fifo_overflow_drop = port_stats->rx_input_fifo_overflow;
	drvs->rx_dropped_header_too_small =
		port_stats->rx_dropped_header_too_small;
	/* v0 HW splits address/vlan filtering; fold into one counter */
	drvs->rx_address_filtered =
					port_stats->rx_address_filtered +
					port_stats->rx_vlan_filtered;
	drvs->rx_alignment_symbol_errors =
		port_stats->rx_alignment_symbol_errors;

	drvs->tx_pauseframes = port_stats->tx_pauseframes;
	drvs->tx_controlframes = port_stats->tx_controlframes;

	/* jabber events are reported per physical port in v0 */
	if (adapter->port_num)
		drvs->jabber_events = rxf_stats->port1_jabber_events;
	else
		drvs->jabber_events = rxf_stats->port0_jabber_events;
	drvs->rx_drops_no_pbuf = rxf_stats->rx_drops_no_pbuf;
	drvs->rx_drops_no_erx_descr = rxf_stats->rx_drops_no_erx_descr;
	drvs->forwarded_packets = rxf_stats->forwarded_packets;
	drvs->rx_drops_mtu = rxf_stats->rx_drops_mtu;
	drvs->rx_drops_no_tpre_descr = rxf_stats->rx_drops_no_tpre_descr;
	drvs->rx_drops_too_many_frags = rxf_stats->rx_drops_too_many_frags;
	adapter->drv_stats.eth_red_drops = pmem_sts->eth_red_drops;
}
377
Sathya Perlaca34fe32012-11-06 17:48:56 +0000378static void populate_be_v1_stats(struct be_adapter *adapter)
Ajit Khaparde89a88ab2011-05-16 07:36:18 +0000379{
Sathya Perlaac124ff2011-07-25 19:10:14 +0000380 struct be_hw_stats_v1 *hw_stats = hw_stats_from_cmd(adapter);
381 struct be_pmem_stats *pmem_sts = &hw_stats->pmem;
382 struct be_rxf_stats_v1 *rxf_stats = &hw_stats->rxf;
Ajit Khaparde89a88ab2011-05-16 07:36:18 +0000383 struct be_port_rxf_stats_v1 *port_stats =
Sathya Perlaac124ff2011-07-25 19:10:14 +0000384 &rxf_stats->port[adapter->port_num];
385 struct be_drv_stats *drvs = &adapter->drv_stats;
Ajit Khaparde89a88ab2011-05-16 07:36:18 +0000386
Sathya Perlaac124ff2011-07-25 19:10:14 +0000387 be_dws_le_to_cpu(hw_stats, sizeof(*hw_stats));
Ajit Khaparde02fe7022011-12-09 13:53:09 +0000388 drvs->pmem_fifo_overflow_drop = port_stats->pmem_fifo_overflow_drop;
389 drvs->rx_priority_pause_frames = port_stats->rx_priority_pause_frames;
Ajit Khaparde89a88ab2011-05-16 07:36:18 +0000390 drvs->rx_pause_frames = port_stats->rx_pause_frames;
391 drvs->rx_crc_errors = port_stats->rx_crc_errors;
392 drvs->rx_control_frames = port_stats->rx_control_frames;
393 drvs->rx_in_range_errors = port_stats->rx_in_range_errors;
394 drvs->rx_frame_too_long = port_stats->rx_frame_too_long;
395 drvs->rx_dropped_runt = port_stats->rx_dropped_runt;
396 drvs->rx_ip_checksum_errs = port_stats->rx_ip_checksum_errs;
397 drvs->rx_tcp_checksum_errs = port_stats->rx_tcp_checksum_errs;
398 drvs->rx_udp_checksum_errs = port_stats->rx_udp_checksum_errs;
399 drvs->rx_dropped_tcp_length = port_stats->rx_dropped_tcp_length;
400 drvs->rx_dropped_too_small = port_stats->rx_dropped_too_small;
401 drvs->rx_dropped_too_short = port_stats->rx_dropped_too_short;
402 drvs->rx_out_range_errors = port_stats->rx_out_range_errors;
403 drvs->rx_dropped_header_too_small =
404 port_stats->rx_dropped_header_too_small;
405 drvs->rx_input_fifo_overflow_drop =
406 port_stats->rx_input_fifo_overflow_drop;
Suresh Reddy18fb06a2013-04-25 23:03:21 +0000407 drvs->rx_address_filtered = port_stats->rx_address_filtered;
Ajit Khaparde89a88ab2011-05-16 07:36:18 +0000408 drvs->rx_alignment_symbol_errors =
409 port_stats->rx_alignment_symbol_errors;
Sathya Perlaac124ff2011-07-25 19:10:14 +0000410 drvs->rxpp_fifo_overflow_drop = port_stats->rxpp_fifo_overflow_drop;
Ajit Khaparde89a88ab2011-05-16 07:36:18 +0000411 drvs->tx_pauseframes = port_stats->tx_pauseframes;
412 drvs->tx_controlframes = port_stats->tx_controlframes;
Ajit Khapardeb5adffc42013-05-01 09:38:00 +0000413 drvs->tx_priority_pauseframes = port_stats->tx_priority_pauseframes;
Ajit Khaparde89a88ab2011-05-16 07:36:18 +0000414 drvs->jabber_events = port_stats->jabber_events;
415 drvs->rx_drops_no_pbuf = rxf_stats->rx_drops_no_pbuf;
Ajit Khaparde89a88ab2011-05-16 07:36:18 +0000416 drvs->rx_drops_no_erx_descr = rxf_stats->rx_drops_no_erx_descr;
Ajit Khaparde89a88ab2011-05-16 07:36:18 +0000417 drvs->forwarded_packets = rxf_stats->forwarded_packets;
418 drvs->rx_drops_mtu = rxf_stats->rx_drops_mtu;
Sathya Perlaac124ff2011-07-25 19:10:14 +0000419 drvs->rx_drops_no_tpre_descr = rxf_stats->rx_drops_no_tpre_descr;
420 drvs->rx_drops_too_many_frags = rxf_stats->rx_drops_too_many_frags;
Ajit Khaparde89a88ab2011-05-16 07:36:18 +0000421 adapter->drv_stats.eth_red_drops = pmem_sts->eth_red_drops;
422}
423
Selvin Xavier005d5692011-05-16 07:36:35 +0000424static void populate_lancer_stats(struct be_adapter *adapter)
425{
Ajit Khaparde89a88ab2011-05-16 07:36:18 +0000426
Selvin Xavier005d5692011-05-16 07:36:35 +0000427 struct be_drv_stats *drvs = &adapter->drv_stats;
Sathya Perlaac124ff2011-07-25 19:10:14 +0000428 struct lancer_pport_stats *pport_stats =
429 pport_stats_from_cmd(adapter);
430
431 be_dws_le_to_cpu(pport_stats, sizeof(*pport_stats));
432 drvs->rx_pause_frames = pport_stats->rx_pause_frames_lo;
433 drvs->rx_crc_errors = pport_stats->rx_crc_errors_lo;
434 drvs->rx_control_frames = pport_stats->rx_control_frames_lo;
Selvin Xavier005d5692011-05-16 07:36:35 +0000435 drvs->rx_in_range_errors = pport_stats->rx_in_range_errors;
Sathya Perlaac124ff2011-07-25 19:10:14 +0000436 drvs->rx_frame_too_long = pport_stats->rx_frames_too_long_lo;
Selvin Xavier005d5692011-05-16 07:36:35 +0000437 drvs->rx_dropped_runt = pport_stats->rx_dropped_runt;
438 drvs->rx_ip_checksum_errs = pport_stats->rx_ip_checksum_errors;
439 drvs->rx_tcp_checksum_errs = pport_stats->rx_tcp_checksum_errors;
440 drvs->rx_udp_checksum_errs = pport_stats->rx_udp_checksum_errors;
441 drvs->rx_dropped_tcp_length =
442 pport_stats->rx_dropped_invalid_tcp_length;
443 drvs->rx_dropped_too_small = pport_stats->rx_dropped_too_small;
444 drvs->rx_dropped_too_short = pport_stats->rx_dropped_too_short;
445 drvs->rx_out_range_errors = pport_stats->rx_out_of_range_errors;
446 drvs->rx_dropped_header_too_small =
447 pport_stats->rx_dropped_header_too_small;
448 drvs->rx_input_fifo_overflow_drop = pport_stats->rx_fifo_overflow;
Suresh Reddy18fb06a2013-04-25 23:03:21 +0000449 drvs->rx_address_filtered =
450 pport_stats->rx_address_filtered +
451 pport_stats->rx_vlan_filtered;
Sathya Perlaac124ff2011-07-25 19:10:14 +0000452 drvs->rx_alignment_symbol_errors = pport_stats->rx_symbol_errors_lo;
Selvin Xavier005d5692011-05-16 07:36:35 +0000453 drvs->rxpp_fifo_overflow_drop = pport_stats->rx_fifo_overflow;
Sathya Perlaac124ff2011-07-25 19:10:14 +0000454 drvs->tx_pauseframes = pport_stats->tx_pause_frames_lo;
455 drvs->tx_controlframes = pport_stats->tx_control_frames_lo;
Selvin Xavier005d5692011-05-16 07:36:35 +0000456 drvs->jabber_events = pport_stats->rx_jabbers;
Sathya Perlaac124ff2011-07-25 19:10:14 +0000457 drvs->forwarded_packets = pport_stats->num_forwards_lo;
458 drvs->rx_drops_mtu = pport_stats->rx_drops_mtu_lo;
Selvin Xavier005d5692011-05-16 07:36:35 +0000459 drvs->rx_drops_too_many_frags =
Sathya Perlaac124ff2011-07-25 19:10:14 +0000460 pport_stats->rx_drops_too_many_frags_lo;
Selvin Xavier005d5692011-05-16 07:36:35 +0000461}
Ajit Khaparde89a88ab2011-05-16 07:36:18 +0000462
Sathya Perla09c1c682011-08-22 19:41:53 +0000463static void accumulate_16bit_val(u32 *acc, u16 val)
464{
465#define lo(x) (x & 0xFFFF)
466#define hi(x) (x & 0xFFFF0000)
467 bool wrapped = val < lo(*acc);
468 u32 newacc = hi(*acc) + val;
469
470 if (wrapped)
471 newacc += 65536;
472 ACCESS_ONCE(*acc) = newacc;
473}
474
Ajit Khapardea6c578e2013-05-01 09:37:27 +0000475void populate_erx_stats(struct be_adapter *adapter,
476 struct be_rx_obj *rxo,
477 u32 erx_stat)
478{
479 if (!BEx_chip(adapter))
480 rx_stats(rxo)->rx_drops_no_frags = erx_stat;
481 else
482 /* below erx HW counter can actually wrap around after
483 * 65535. Driver accumulates a 32-bit value
484 */
485 accumulate_16bit_val(&rx_stats(rxo)->rx_drops_no_frags,
486 (u16)erx_stat);
487}
488
Ajit Khaparde89a88ab2011-05-16 07:36:18 +0000489void be_parse_stats(struct be_adapter *adapter)
490{
Sathya Perlaac124ff2011-07-25 19:10:14 +0000491 struct be_erx_stats_v1 *erx = be_erx_stats_from_cmd(adapter);
492 struct be_rx_obj *rxo;
493 int i;
Ajit Khapardea6c578e2013-05-01 09:37:27 +0000494 u32 erx_stat;
Sathya Perlaac124ff2011-07-25 19:10:14 +0000495
Sathya Perlaca34fe32012-11-06 17:48:56 +0000496 if (lancer_chip(adapter)) {
497 populate_lancer_stats(adapter);
Selvin Xavier005d5692011-05-16 07:36:35 +0000498 } else {
Sathya Perlaca34fe32012-11-06 17:48:56 +0000499 if (BE2_chip(adapter))
500 populate_be_v0_stats(adapter);
501 else
502 /* for BE3 and Skyhawk */
503 populate_be_v1_stats(adapter);
Sathya Perlaac124ff2011-07-25 19:10:14 +0000504
Sathya Perlaca34fe32012-11-06 17:48:56 +0000505 /* as erx_v1 is longer than v0, ok to use v1 for v0 access */
506 for_all_rx_queues(adapter, rxo, i) {
Ajit Khapardea6c578e2013-05-01 09:37:27 +0000507 erx_stat = erx->rx_drops_no_fragments[rxo->q.id];
508 populate_erx_stats(adapter, rxo, erx_stat);
Sathya Perlaca34fe32012-11-06 17:48:56 +0000509 }
Sathya Perla09c1c682011-08-22 19:41:53 +0000510 }
Ajit Khaparde89a88ab2011-05-16 07:36:18 +0000511}
512
Sathya Perlaab1594e2011-07-25 19:10:15 +0000513static struct rtnl_link_stats64 *be_get_stats64(struct net_device *netdev,
514 struct rtnl_link_stats64 *stats)
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700515{
Sathya Perlaab1594e2011-07-25 19:10:15 +0000516 struct be_adapter *adapter = netdev_priv(netdev);
Ajit Khaparde89a88ab2011-05-16 07:36:18 +0000517 struct be_drv_stats *drvs = &adapter->drv_stats;
Sathya Perla3abcded2010-10-03 22:12:27 -0700518 struct be_rx_obj *rxo;
Sathya Perla3c8def92011-06-12 20:01:58 +0000519 struct be_tx_obj *txo;
Sathya Perlaab1594e2011-07-25 19:10:15 +0000520 u64 pkts, bytes;
521 unsigned int start;
Sathya Perla3abcded2010-10-03 22:12:27 -0700522 int i;
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700523
Sathya Perla3abcded2010-10-03 22:12:27 -0700524 for_all_rx_queues(adapter, rxo, i) {
Sathya Perlaab1594e2011-07-25 19:10:15 +0000525 const struct be_rx_stats *rx_stats = rx_stats(rxo);
526 do {
527 start = u64_stats_fetch_begin_bh(&rx_stats->sync);
528 pkts = rx_stats(rxo)->rx_pkts;
529 bytes = rx_stats(rxo)->rx_bytes;
530 } while (u64_stats_fetch_retry_bh(&rx_stats->sync, start));
531 stats->rx_packets += pkts;
532 stats->rx_bytes += bytes;
533 stats->multicast += rx_stats(rxo)->rx_mcast_pkts;
534 stats->rx_dropped += rx_stats(rxo)->rx_drops_no_skbs +
535 rx_stats(rxo)->rx_drops_no_frags;
Sathya Perla3abcded2010-10-03 22:12:27 -0700536 }
537
Sathya Perla3c8def92011-06-12 20:01:58 +0000538 for_all_tx_queues(adapter, txo, i) {
Sathya Perlaab1594e2011-07-25 19:10:15 +0000539 const struct be_tx_stats *tx_stats = tx_stats(txo);
540 do {
541 start = u64_stats_fetch_begin_bh(&tx_stats->sync);
542 pkts = tx_stats(txo)->tx_pkts;
543 bytes = tx_stats(txo)->tx_bytes;
544 } while (u64_stats_fetch_retry_bh(&tx_stats->sync, start));
545 stats->tx_packets += pkts;
546 stats->tx_bytes += bytes;
Sathya Perla3c8def92011-06-12 20:01:58 +0000547 }
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700548
549 /* bad pkts received */
Sathya Perlaab1594e2011-07-25 19:10:15 +0000550 stats->rx_errors = drvs->rx_crc_errors +
Ajit Khaparde89a88ab2011-05-16 07:36:18 +0000551 drvs->rx_alignment_symbol_errors +
552 drvs->rx_in_range_errors +
553 drvs->rx_out_range_errors +
554 drvs->rx_frame_too_long +
555 drvs->rx_dropped_too_small +
556 drvs->rx_dropped_too_short +
557 drvs->rx_dropped_header_too_small +
558 drvs->rx_dropped_tcp_length +
Sathya Perlaab1594e2011-07-25 19:10:15 +0000559 drvs->rx_dropped_runt;
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700560
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700561 /* detailed rx errors */
Sathya Perlaab1594e2011-07-25 19:10:15 +0000562 stats->rx_length_errors = drvs->rx_in_range_errors +
Ajit Khaparde89a88ab2011-05-16 07:36:18 +0000563 drvs->rx_out_range_errors +
564 drvs->rx_frame_too_long;
Sathya Perla68110862009-06-10 02:21:16 +0000565
Sathya Perlaab1594e2011-07-25 19:10:15 +0000566 stats->rx_crc_errors = drvs->rx_crc_errors;
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700567
568 /* frame alignment errors */
Sathya Perlaab1594e2011-07-25 19:10:15 +0000569 stats->rx_frame_errors = drvs->rx_alignment_symbol_errors;
Sathya Perla68110862009-06-10 02:21:16 +0000570
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700571 /* receiver fifo overrun */
572 /* drops_no_pbuf is no per i/f, it's per BE card */
Sathya Perlaab1594e2011-07-25 19:10:15 +0000573 stats->rx_fifo_errors = drvs->rxpp_fifo_overflow_drop +
Ajit Khaparde89a88ab2011-05-16 07:36:18 +0000574 drvs->rx_input_fifo_overflow_drop +
575 drvs->rx_drops_no_pbuf;
Sathya Perlaab1594e2011-07-25 19:10:15 +0000576 return stats;
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700577}
578
Ajit Khapardeb236916a2011-12-30 12:15:40 +0000579void be_link_status_update(struct be_adapter *adapter, u8 link_status)
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700580{
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700581 struct net_device *netdev = adapter->netdev;
582
Ajit Khapardeb236916a2011-12-30 12:15:40 +0000583 if (!(adapter->flags & BE_FLAGS_LINK_STATUS_INIT)) {
Sathya Perlaea172a02011-08-02 19:57:42 +0000584 netif_carrier_off(netdev);
Ajit Khapardeb236916a2011-12-30 12:15:40 +0000585 adapter->flags |= BE_FLAGS_LINK_STATUS_INIT;
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700586 }
Ajit Khapardeb236916a2011-12-30 12:15:40 +0000587
588 if ((link_status & LINK_STATUS_MASK) == LINK_UP)
589 netif_carrier_on(netdev);
590 else
591 netif_carrier_off(netdev);
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700592}
593
Sathya Perla3c8def92011-06-12 20:01:58 +0000594static void be_tx_stats_update(struct be_tx_obj *txo,
Ajit Khaparde91992e42010-02-19 13:57:12 +0000595 u32 wrb_cnt, u32 copied, u32 gso_segs, bool stopped)
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700596{
Sathya Perla3c8def92011-06-12 20:01:58 +0000597 struct be_tx_stats *stats = tx_stats(txo);
598
Sathya Perlaab1594e2011-07-25 19:10:15 +0000599 u64_stats_update_begin(&stats->sync);
Sathya Perlaac124ff2011-07-25 19:10:14 +0000600 stats->tx_reqs++;
601 stats->tx_wrbs += wrb_cnt;
602 stats->tx_bytes += copied;
603 stats->tx_pkts += (gso_segs ? gso_segs : 1);
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700604 if (stopped)
Sathya Perlaac124ff2011-07-25 19:10:14 +0000605 stats->tx_stops++;
Sathya Perlaab1594e2011-07-25 19:10:15 +0000606 u64_stats_update_end(&stats->sync);
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700607}
608
609/* Determine number of WRB entries needed to xmit data in an skb */
Sathya Perlafe6d2a32010-11-21 23:25:50 +0000610static u32 wrb_cnt_for_skb(struct be_adapter *adapter, struct sk_buff *skb,
611 bool *dummy)
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700612{
David S. Millerebc8d2a2009-06-09 01:01:31 -0700613 int cnt = (skb->len > skb->data_len);
614
615 cnt += skb_shinfo(skb)->nr_frags;
616
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700617 /* to account for hdr wrb */
618 cnt++;
Sathya Perlafe6d2a32010-11-21 23:25:50 +0000619 if (lancer_chip(adapter) || !(cnt & 1)) {
620 *dummy = false;
621 } else {
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700622 /* add a dummy to make it an even num */
623 cnt++;
624 *dummy = true;
Sathya Perlafe6d2a32010-11-21 23:25:50 +0000625 }
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700626 BUG_ON(cnt > BE_MAX_TX_FRAG_COUNT);
627 return cnt;
628}
629
630static inline void wrb_fill(struct be_eth_wrb *wrb, u64 addr, int len)
631{
632 wrb->frag_pa_hi = upper_32_bits(addr);
633 wrb->frag_pa_lo = addr & 0xFFFFFFFF;
634 wrb->frag_len = len & ETH_WRB_FRAG_LEN_MASK;
Somnath Kotur89b1f492012-06-24 19:40:55 +0000635 wrb->rsvd0 = 0;
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700636}
637
Ajit Khaparde1ded1322011-12-09 13:53:17 +0000638static inline u16 be_get_tx_vlan_tag(struct be_adapter *adapter,
639 struct sk_buff *skb)
640{
641 u8 vlan_prio;
642 u16 vlan_tag;
643
644 vlan_tag = vlan_tx_tag_get(skb);
645 vlan_prio = (vlan_tag & VLAN_PRIO_MASK) >> VLAN_PRIO_SHIFT;
646 /* If vlan priority provided by OS is NOT in available bmap */
647 if (!(adapter->vlan_prio_bmap & (1 << vlan_prio)))
648 vlan_tag = (vlan_tag & ~VLAN_PRIO_MASK) |
649 adapter->recommended_prio;
650
651 return vlan_tag;
652}
653
Somnath Koturcc4ce022010-10-21 07:11:14 -0700654static void wrb_fill_hdr(struct be_adapter *adapter, struct be_eth_hdr_wrb *hdr,
Ajit Khapardebc0c3402013-04-24 11:52:50 +0000655 struct sk_buff *skb, u32 wrb_cnt, u32 len, bool skip_hw_vlan)
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700656{
Ajit Khaparde1ded1322011-12-09 13:53:17 +0000657 u16 vlan_tag;
Somnath Koturcc4ce022010-10-21 07:11:14 -0700658
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700659 memset(hdr, 0, sizeof(*hdr));
660
661 AMAP_SET_BITS(struct amap_eth_hdr_wrb, crc, hdr, 1);
662
Ajit Khaparde49e4b8472010-06-14 04:56:07 +0000663 if (skb_is_gso(skb)) {
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700664 AMAP_SET_BITS(struct amap_eth_hdr_wrb, lso, hdr, 1);
665 AMAP_SET_BITS(struct amap_eth_hdr_wrb, lso_mss,
666 hdr, skb_shinfo(skb)->gso_size);
Sathya Perlafe6d2a32010-11-21 23:25:50 +0000667 if (skb_is_gso_v6(skb) && !lancer_chip(adapter))
Ajit Khaparde49e4b8472010-06-14 04:56:07 +0000668 AMAP_SET_BITS(struct amap_eth_hdr_wrb, lso6, hdr, 1);
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700669 } else if (skb->ip_summed == CHECKSUM_PARTIAL) {
670 if (is_tcp_pkt(skb))
671 AMAP_SET_BITS(struct amap_eth_hdr_wrb, tcpcs, hdr, 1);
672 else if (is_udp_pkt(skb))
673 AMAP_SET_BITS(struct amap_eth_hdr_wrb, udpcs, hdr, 1);
674 }
675
Ajit Khaparde4c5102f2011-07-12 22:10:01 -0700676 if (vlan_tx_tag_present(skb)) {
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700677 AMAP_SET_BITS(struct amap_eth_hdr_wrb, vlan, hdr, 1);
Ajit Khaparde1ded1322011-12-09 13:53:17 +0000678 vlan_tag = be_get_tx_vlan_tag(adapter, skb);
Somnath Koturcc4ce022010-10-21 07:11:14 -0700679 AMAP_SET_BITS(struct amap_eth_hdr_wrb, vlan_tag, hdr, vlan_tag);
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700680 }
681
Ajit Khapardebc0c3402013-04-24 11:52:50 +0000682 /* To skip HW VLAN tagging: evt = 1, compl = 0 */
683 AMAP_SET_BITS(struct amap_eth_hdr_wrb, complete, hdr, !skip_hw_vlan);
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700684 AMAP_SET_BITS(struct amap_eth_hdr_wrb, event, hdr, 1);
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700685 AMAP_SET_BITS(struct amap_eth_hdr_wrb, num_wrb, hdr, wrb_cnt);
686 AMAP_SET_BITS(struct amap_eth_hdr_wrb, len, hdr, len);
687}
688
Ivan Vecera2b7bceb2011-02-02 08:05:12 +0000689static void unmap_tx_frag(struct device *dev, struct be_eth_wrb *wrb,
Sathya Perla7101e112010-03-22 20:41:12 +0000690 bool unmap_single)
691{
692 dma_addr_t dma;
693
694 be_dws_le_to_cpu(wrb, sizeof(*wrb));
695
696 dma = (u64)wrb->frag_pa_hi << 32 | (u64)wrb->frag_pa_lo;
FUJITA Tomonorib681ee72010-04-04 21:40:18 +0000697 if (wrb->frag_len) {
Sathya Perla7101e112010-03-22 20:41:12 +0000698 if (unmap_single)
Ivan Vecera2b7bceb2011-02-02 08:05:12 +0000699 dma_unmap_single(dev, dma, wrb->frag_len,
700 DMA_TO_DEVICE);
Sathya Perla7101e112010-03-22 20:41:12 +0000701 else
Ivan Vecera2b7bceb2011-02-02 08:05:12 +0000702 dma_unmap_page(dev, dma, wrb->frag_len, DMA_TO_DEVICE);
Sathya Perla7101e112010-03-22 20:41:12 +0000703 }
704}
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700705
/* DMA-map @skb and post its WRBs (header, head fragment, page fragments
 * and an optional dummy pad WRB) onto @txq.
 * Returns the number of payload bytes queued, or 0 on a DMA mapping
 * failure, in which case every mapping made so far is undone and the
 * queue head is rewound — the caller still owns the skb.
 */
static int make_tx_wrbs(struct be_adapter *adapter, struct be_queue_info *txq,
		struct sk_buff *skb, u32 wrb_cnt, bool dummy_wrb,
		bool skip_hw_vlan)
{
	dma_addr_t busaddr;
	int i, copied = 0;
	struct device *dev = &adapter->pdev->dev;
	struct sk_buff *first_skb = skb;
	struct be_eth_wrb *wrb;
	struct be_eth_hdr_wrb *hdr;
	bool map_single = false;
	u16 map_head;

	/* Reserve the first slot for the header WRB; it is filled last,
	 * once the total copied length is known.
	 */
	hdr = queue_head_node(txq);
	queue_head_inc(txq);
	/* Remember where the fragment WRBs start, for unwinding on error */
	map_head = txq->head;

	/* Linear (head) portion of the skb, if any */
	if (skb->len > skb->data_len) {
		int len = skb_headlen(skb);
		busaddr = dma_map_single(dev, skb->data, len, DMA_TO_DEVICE);
		if (dma_mapping_error(dev, busaddr))
			goto dma_err;
		map_single = true;
		wrb = queue_head_node(txq);
		wrb_fill(wrb, busaddr, len);
		be_dws_cpu_to_le(wrb, sizeof(*wrb));
		queue_head_inc(txq);
		copied += len;
	}

	/* One WRB per page fragment */
	for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
		const struct skb_frag_struct *frag =
			&skb_shinfo(skb)->frags[i];
		busaddr = skb_frag_dma_map(dev, frag, 0,
					   skb_frag_size(frag), DMA_TO_DEVICE);
		if (dma_mapping_error(dev, busaddr))
			goto dma_err;
		wrb = queue_head_node(txq);
		wrb_fill(wrb, busaddr, skb_frag_size(frag));
		be_dws_cpu_to_le(wrb, sizeof(*wrb));
		queue_head_inc(txq);
		copied += skb_frag_size(frag);
	}

	/* Pad to an even WRB count with a zero-length dummy, if required */
	if (dummy_wrb) {
		wrb = queue_head_node(txq);
		wrb_fill(wrb, 0, 0);
		be_dws_cpu_to_le(wrb, sizeof(*wrb));
		queue_head_inc(txq);
	}

	wrb_fill_hdr(adapter, hdr, first_skb, wrb_cnt, copied, skip_hw_vlan);
	be_dws_cpu_to_le(hdr, sizeof(*hdr));

	return copied;
dma_err:
	/* Unwind: rewind to the first fragment WRB and unmap each one.
	 * Only the first WRB may have been a single mapping (skb head),
	 * hence map_single is cleared after the first iteration.
	 */
	txq->head = map_head;
	while (copied) {
		wrb = queue_head_node(txq);
		unmap_tx_frag(dev, wrb, map_single);
		map_single = false;
		copied -= wrb->frag_len;
		queue_head_inc(txq);
	}
	return 0;
}
772
/* Insert VLAN tag(s) into the packet payload in software instead of
 * relying on HW tagging (used to work around HW VLAN errata).
 * May replace @skb (share-check / tag insertion reallocates); returns
 * the (possibly new) skb, or NULL if allocation failed — in that case
 * the original skb has already been consumed.
 * Sets *skip_hw_vlan (if non-NULL) whenever a tag was inlined, so the
 * caller suppresses HW tagging for this packet.
 */
static struct sk_buff *be_insert_vlan_in_pkt(struct be_adapter *adapter,
					     struct sk_buff *skb,
					     bool *skip_hw_vlan)
{
	u16 vlan_tag = 0;

	/* We are about to modify the payload; get a private copy if shared */
	skb = skb_share_check(skb, GFP_ATOMIC);
	if (unlikely(!skb))
		return skb;

	if (vlan_tx_tag_present(skb))
		vlan_tag = be_get_tx_vlan_tag(adapter, skb);
	else if (qnq_async_evt_rcvd(adapter) && adapter->pvid)
		vlan_tag = adapter->pvid;

	if (vlan_tag) {
		skb = __vlan_put_tag(skb, htons(ETH_P_8021Q), vlan_tag);
		if (unlikely(!skb))
			return skb;
		/* Tag is now in the payload; clear the out-of-band tci so
		 * HW does not insert it a second time.
		 */
		skb->vlan_tci = 0;
		if (skip_hw_vlan)
			*skip_hw_vlan = true;
	}

	/* Insert the outer VLAN, if any */
	if (adapter->qnq_vid) {
		vlan_tag = adapter->qnq_vid;
		skb = __vlan_put_tag(skb, htons(ETH_P_8021Q), vlan_tag);
		if (unlikely(!skb))
			return skb;
		if (skip_hw_vlan)
			*skip_hw_vlan = true;
	}

	return skb;
}
809
Ajit Khapardebc0c3402013-04-24 11:52:50 +0000810static bool be_ipv6_exthdr_check(struct sk_buff *skb)
811{
812 struct ethhdr *eh = (struct ethhdr *)skb->data;
813 u16 offset = ETH_HLEN;
814
815 if (eh->h_proto == htons(ETH_P_IPV6)) {
816 struct ipv6hdr *ip6h = (struct ipv6hdr *)(skb->data + offset);
817
818 offset += sizeof(struct ipv6hdr);
819 if (ip6h->nexthdr != NEXTHDR_TCP &&
820 ip6h->nexthdr != NEXTHDR_UDP) {
821 struct ipv6_opt_hdr *ehdr =
822 (struct ipv6_opt_hdr *) (skb->data + offset);
823
824 /* offending pkt: 2nd byte following IPv6 hdr is 0xff */
825 if (ehdr->hdrlen == 0xff)
826 return true;
827 }
828 }
829 return false;
830}
831
832static int be_vlan_tag_tx_chk(struct be_adapter *adapter, struct sk_buff *skb)
833{
834 return vlan_tx_tag_present(skb) || adapter->pvid || adapter->qnq_vid;
835}
836
Sathya Perlaee9c7992013-05-22 23:04:55 +0000837static int be_ipv6_tx_stall_chk(struct be_adapter *adapter,
838 struct sk_buff *skb)
Ajit Khapardebc0c3402013-04-24 11:52:50 +0000839{
Sathya Perlaee9c7992013-05-22 23:04:55 +0000840 return BE3_chip(adapter) && be_ipv6_exthdr_check(skb);
Ajit Khapardebc0c3402013-04-24 11:52:50 +0000841}
842
/* Apply the chain of per-chip TX errata workarounds to @skb before it
 * is turned into WRBs. May replace or consume the skb: returns the
 * (possibly new) skb, or NULL if the packet was dropped/freed.
 * Sets *skip_hw_vlan when a workaround inlined the VLAN tag in SW.
 */
static struct sk_buff *be_xmit_workarounds(struct be_adapter *adapter,
					   struct sk_buff *skb,
					   bool *skip_hw_vlan)
{
	struct vlan_ethhdr *veh = (struct vlan_ethhdr *)skb->data;
	unsigned int eth_hdr_len;
	struct iphdr *ip;

	/* Lancer ASIC has a bug wherein packets that are 32 bytes or less
	 * may cause a transmit stall on that port. So the work-around is to
	 * pad such packets to a 36-byte length.
	 */
	if (unlikely(lancer_chip(adapter) && skb->len <= 32)) {
		if (skb_padto(skb, 36))
			goto tx_drop;
		/* skb_padto() grows the buffer but not skb->len */
		skb->len = 36;
	}

	/* For padded packets, BE HW modifies tot_len field in IP header
	 * incorrectly when VLAN tag is inserted by HW.
	 * For padded packets, Lancer computes incorrect checksum.
	 * Work around both by trimming the skb back to the IP datagram
	 * length so no padding reaches the HW.
	 */
	eth_hdr_len = ntohs(skb->protocol) == ETH_P_8021Q ?
						VLAN_ETH_HLEN : ETH_HLEN;
	if (skb->len <= 60 &&
	    (lancer_chip(adapter) || vlan_tx_tag_present(skb)) &&
	    is_ipv4_pkt(skb)) {
		ip = (struct iphdr *)ip_hdr(skb);
		pskb_trim(skb, eth_hdr_len + ntohs(ip->tot_len));
	}

	/* If vlan tag is already inlined in the packet, skip HW VLAN
	 * tagging in UMC mode
	 */
	if ((adapter->function_mode & UMC_ENABLED) &&
	    veh->h_vlan_proto == htons(ETH_P_8021Q))
			*skip_hw_vlan = true;

	/* HW has a bug wherein it will calculate CSUM for VLAN
	 * pkts even though it is disabled.
	 * Manually insert VLAN in pkt.
	 */
	if (skb->ip_summed != CHECKSUM_PARTIAL &&
	    vlan_tx_tag_present(skb)) {
		skb = be_insert_vlan_in_pkt(adapter, skb, skip_hw_vlan);
		if (unlikely(!skb))
			goto tx_drop;
	}

	/* HW may lockup when VLAN HW tagging is requested on
	 * certain ipv6 packets. Drop such pkts if the HW workaround to
	 * skip HW tagging is not enabled by FW.
	 */
	if (unlikely(be_ipv6_tx_stall_chk(adapter, skb) &&
	    (adapter->pvid || adapter->qnq_vid) &&
	    !qnq_async_evt_rcvd(adapter)))
		goto tx_drop;

	/* Manual VLAN tag insertion to prevent:
	 * ASIC lockup when the ASIC inserts VLAN tag into
	 * certain ipv6 packets. Insert VLAN tags in driver,
	 * and set event, completion, vlan bits accordingly
	 * in the Tx WRB.
	 */
	if (be_ipv6_tx_stall_chk(adapter, skb) &&
	    be_vlan_tag_tx_chk(adapter, skb)) {
		skb = be_insert_vlan_in_pkt(adapter, skb, skip_hw_vlan);
		if (unlikely(!skb))
			goto tx_drop;
	}

	return skb;
tx_drop:
	dev_kfree_skb_any(skb);
	return NULL;
}
919
/* ndo_start_xmit handler: apply workarounds, build the WRBs for @skb on
 * the TX queue selected by the stack, and ring the doorbell.
 * Always returns NETDEV_TX_OK — on any failure the skb is freed here
 * rather than requeued.
 */
static netdev_tx_t be_xmit(struct sk_buff *skb, struct net_device *netdev)
{
	struct be_adapter *adapter = netdev_priv(netdev);
	struct be_tx_obj *txo = &adapter->tx_obj[skb_get_queue_mapping(skb)];
	struct be_queue_info *txq = &txo->q;
	bool dummy_wrb, stopped = false;
	u32 wrb_cnt = 0, copied = 0;
	bool skip_hw_vlan = false;
	/* Saved so the queue head can be rewound if WRB creation fails */
	u32 start = txq->head;

	skb = be_xmit_workarounds(adapter, skb, &skip_hw_vlan);
	if (!skb)
		return NETDEV_TX_OK;	/* workaround path dropped the skb */

	wrb_cnt = wrb_cnt_for_skb(adapter, skb, &dummy_wrb);

	copied = make_tx_wrbs(adapter, txq, skb, wrb_cnt, dummy_wrb,
			      skip_hw_vlan);
	if (copied) {
		int gso_segs = skb_shinfo(skb)->gso_segs;

		/* record the sent skb in the sent_skb table */
		BUG_ON(txo->sent_skb_list[start]);
		txo->sent_skb_list[start] = skb;

		/* Ensure txq has space for the next skb; Else stop the queue
		 * *BEFORE* ringing the tx doorbell, so that we serialize the
		 * tx compls of the current transmit which'll wake up the queue
		 */
		atomic_add(wrb_cnt, &txq->used);
		if ((BE_MAX_TX_FRAG_COUNT + atomic_read(&txq->used)) >=
								txq->len) {
			netif_stop_subqueue(netdev, skb_get_queue_mapping(skb));
			stopped = true;
		}

		be_txq_notify(adapter, txo, wrb_cnt);

		be_tx_stats_update(txo, wrb_cnt, copied, gso_segs, stopped);
	} else {
		/* DMA mapping failed: rewind the queue and drop the skb */
		txq->head = start;
		dev_kfree_skb_any(skb);
	}
	return NETDEV_TX_OK;
}
965
966static int be_change_mtu(struct net_device *netdev, int new_mtu)
967{
968 struct be_adapter *adapter = netdev_priv(netdev);
969 if (new_mtu < BE_MIN_MTU ||
Ajit Khaparde34a89b82010-02-09 01:32:43 +0000970 new_mtu > (BE_MAX_JUMBO_FRAME_SIZE -
971 (ETH_HLEN + ETH_FCS_LEN))) {
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700972 dev_info(&adapter->pdev->dev,
973 "MTU must be between %d and %d bytes\n",
Ajit Khaparde34a89b82010-02-09 01:32:43 +0000974 BE_MIN_MTU,
975 (BE_MAX_JUMBO_FRAME_SIZE - (ETH_HLEN + ETH_FCS_LEN)));
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700976 return -EINVAL;
977 }
978 dev_info(&adapter->pdev->dev, "MTU changed from %d to %d bytes\n",
979 netdev->mtu, new_mtu);
980 netdev->mtu = new_mtu;
981 return 0;
982}
983
/*
 * A max of 64 (BE_NUM_VLANS_SUPPORTED) vlans can be configured in BE.
 * If the user configures more, place BE in vlan promiscuous mode.
 * Returns the status of the last firmware command issued.
 */
static int be_vid_config(struct be_adapter *adapter)
{
	u16 vids[BE_NUM_VLANS_SUPPORTED];
	u16 num = 0, i;
	int status = 0;

	/* No need to further configure vids if in promiscuous mode */
	if (adapter->promiscuous)
		return 0;

	/* More VLANs than HW filters: fall back to VLAN promiscuous */
	if (adapter->vlans_added > adapter->max_vlans)
		goto set_vlan_promisc;

	/* Construct VLAN Table to give to HW */
	for (i = 0; i < VLAN_N_VID; i++)
		if (adapter->vlan_tag[i])
			vids[num++] = cpu_to_le16(i);

	status = be_cmd_vlan_config(adapter, adapter->if_handle,
				    vids, num, 1, 0);

	/* Set to VLAN promisc mode as setting VLAN filter failed */
	if (status) {
		dev_info(&adapter->pdev->dev, "Exhausted VLAN HW filters.\n");
		dev_info(&adapter->pdev->dev, "Disabling HW VLAN filtering.\n");
		goto set_vlan_promisc;
	}

	return status;

set_vlan_promisc:
	/* NULL vid list + promisc flag = VLAN promiscuous mode */
	status = be_cmd_vlan_config(adapter, adapter->if_handle,
				    NULL, 0, 1, 1);
	return status;
}
1023
/* ndo_vlan_rx_add_vid handler: mark @vid as active and reprogram the
 * HW VLAN filter table. On failure the local table entry is rolled
 * back. @proto is unused (only 802.1Q is supported by this HW).
 */
static int be_vlan_add_vid(struct net_device *netdev, __be16 proto, u16 vid)
{
	struct be_adapter *adapter = netdev_priv(netdev);
	int status = 0;

	/* On BE-family chips only the PF may program VLAN filters */
	if (!lancer_chip(adapter) && !be_physfn(adapter)) {
		status = -EINVAL;
		goto ret;
	}

	/* Packets with VID 0 are always received by Lancer by default */
	if (lancer_chip(adapter) && vid == 0)
		goto ret;

	adapter->vlan_tag[vid] = 1;
	/* NOTE(review): threshold is max_vlans + 1 here but max_vlans in
	 * be_vlan_rem_vid() — looks asymmetric; confirm intended off-by-one
	 * handling of the not-yet-incremented vlans_added count.
	 */
	if (adapter->vlans_added <= (adapter->max_vlans + 1))
		status = be_vid_config(adapter);

	if (!status)
		adapter->vlans_added++;
	else
		adapter->vlan_tag[vid] = 0;	/* roll back on FW failure */
ret:
	return status;
}
1049
/* ndo_vlan_rx_kill_vid handler: clear @vid from the local table and
 * reprogram the HW VLAN filter table. On failure the local table entry
 * is restored. @proto is unused (only 802.1Q is supported by this HW).
 */
static int be_vlan_rem_vid(struct net_device *netdev, __be16 proto, u16 vid)
{
	struct be_adapter *adapter = netdev_priv(netdev);
	int status = 0;

	/* On BE-family chips only the PF may program VLAN filters */
	if (!lancer_chip(adapter) && !be_physfn(adapter)) {
		status = -EINVAL;
		goto ret;
	}

	/* Packets with VID 0 are always received by Lancer by default */
	if (lancer_chip(adapter) && vid == 0)
		goto ret;

	adapter->vlan_tag[vid] = 0;
	if (adapter->vlans_added <= adapter->max_vlans)
		status = be_vid_config(adapter);

	if (!status)
		adapter->vlans_added--;
	else
		adapter->vlan_tag[vid] = 1;	/* restore on FW failure */
ret:
	return status;
}
1075
/* ndo_set_rx_mode handler: program the RX filtering state (promiscuous,
 * all-multi, unicast and multicast MAC filters) to match the netdev
 * flags and address lists. Falls back to promiscuous modes whenever the
 * HW filter capacity is exceeded or a filter command fails.
 */
static void be_set_rx_mode(struct net_device *netdev)
{
	struct be_adapter *adapter = netdev_priv(netdev);
	int status;

	if (netdev->flags & IFF_PROMISC) {
		be_cmd_rx_filter(adapter, IFF_PROMISC, ON);
		adapter->promiscuous = true;
		goto done;
	}

	/* BE was previously in promiscuous mode; disable it */
	if (adapter->promiscuous) {
		adapter->promiscuous = false;
		be_cmd_rx_filter(adapter, IFF_PROMISC, OFF);

		/* Re-program VLAN filters lost while promiscuous */
		if (adapter->vlans_added)
			be_vid_config(adapter);
	}

	/* Enable multicast promisc if num configured exceeds what we support */
	if (netdev->flags & IFF_ALLMULTI ||
	    netdev_mc_count(netdev) > adapter->max_mcast_mac) {
		be_cmd_rx_filter(adapter, IFF_ALLMULTI, ON);
		goto done;
	}

	/* Unicast list changed: rebuild the secondary-MAC filters */
	if (netdev_uc_count(netdev) != adapter->uc_macs) {
		struct netdev_hw_addr *ha;
		int i = 1; /* First slot is claimed by the Primary MAC */

		/* Drop all previously programmed secondary MACs */
		for (; adapter->uc_macs > 0; adapter->uc_macs--, i++) {
			be_cmd_pmac_del(adapter, adapter->if_handle,
					adapter->pmac_id[i], 0);
		}

		/* Too many unicast addresses for HW: go promiscuous */
		if (netdev_uc_count(netdev) > adapter->max_pmac_cnt) {
			be_cmd_rx_filter(adapter, IFF_PROMISC, ON);
			adapter->promiscuous = true;
			goto done;
		}

		netdev_for_each_uc_addr(ha, adapter->netdev) {
			adapter->uc_macs++; /* First slot is for Primary MAC */
			be_cmd_pmac_add(adapter, (u8 *)ha->addr,
					adapter->if_handle,
					&adapter->pmac_id[adapter->uc_macs], 0);
		}
	}

	status = be_cmd_rx_filter(adapter, IFF_MULTICAST, ON);

	/* Set to MCAST promisc mode if setting MULTICAST address fails */
	if (status) {
		dev_info(&adapter->pdev->dev, "Exhausted multicast HW filters.\n");
		dev_info(&adapter->pdev->dev, "Disabling HW multicast filtering.\n");
		be_cmd_rx_filter(adapter, IFF_ALLMULTI, ON);
	}
done:
	return;
}
1137
/* ndo_set_vf_mac handler: program @mac as the MAC address of VF @vf.
 * On Lancer the MAC is set via the MAC-list commands; on BE chips the
 * old PMAC entry is deleted and a new one added.
 * The cached vf_cfg->mac_addr is updated only on success.
 */
static int be_set_vf_mac(struct net_device *netdev, int vf, u8 *mac)
{
	struct be_adapter *adapter = netdev_priv(netdev);
	struct be_vf_cfg *vf_cfg = &adapter->vf_cfg[vf];
	int status;
	bool active_mac = false;
	u32 pmac_id;
	u8 old_mac[ETH_ALEN];

	if (!sriov_enabled(adapter))
		return -EPERM;

	if (!is_valid_ether_addr(mac) || vf >= adapter->num_vfs)
		return -EINVAL;

	if (lancer_chip(adapter)) {
		/* Remove the currently-active MAC, if any, before setting
		 * the new one via the MAC list.
		 */
		status = be_cmd_get_mac_from_list(adapter, old_mac, &active_mac,
						  &pmac_id, vf + 1);
		if (!status && active_mac)
			be_cmd_pmac_del(adapter, vf_cfg->if_handle,
					pmac_id, vf + 1);

		status = be_cmd_set_mac_list(adapter, mac, 1, vf + 1);
	} else {
		/* NOTE(review): the pmac_del status is immediately
		 * overwritten by pmac_add — a delete failure is silently
		 * ignored; confirm this is intentional (best-effort delete).
		 */
		status = be_cmd_pmac_del(adapter, vf_cfg->if_handle,
					 vf_cfg->pmac_id, vf + 1);

		status = be_cmd_pmac_add(adapter, mac, vf_cfg->if_handle,
					 &vf_cfg->pmac_id, vf + 1);
	}

	if (status)
		dev_err(&adapter->pdev->dev, "MAC %pM set on VF %d Failed\n",
			mac, vf);
	else
		memcpy(vf_cfg->mac_addr, mac, ETH_ALEN);

	return status;
}
1177
Ajit Khaparde64600ea2010-07-23 01:50:34 +00001178static int be_get_vf_config(struct net_device *netdev, int vf,
1179 struct ifla_vf_info *vi)
1180{
1181 struct be_adapter *adapter = netdev_priv(netdev);
Sathya Perla11ac75e2011-12-13 00:58:50 +00001182 struct be_vf_cfg *vf_cfg = &adapter->vf_cfg[vf];
Ajit Khaparde64600ea2010-07-23 01:50:34 +00001183
Sathya Perla11ac75e2011-12-13 00:58:50 +00001184 if (!sriov_enabled(adapter))
Ajit Khaparde64600ea2010-07-23 01:50:34 +00001185 return -EPERM;
1186
Sathya Perla11ac75e2011-12-13 00:58:50 +00001187 if (vf >= adapter->num_vfs)
Ajit Khaparde64600ea2010-07-23 01:50:34 +00001188 return -EINVAL;
1189
1190 vi->vf = vf;
Sathya Perla11ac75e2011-12-13 00:58:50 +00001191 vi->tx_rate = vf_cfg->tx_rate;
1192 vi->vlan = vf_cfg->vlan_tag;
Ajit Khaparde64600ea2010-07-23 01:50:34 +00001193 vi->qos = 0;
Sathya Perla11ac75e2011-12-13 00:58:50 +00001194 memcpy(&vi->mac, vf_cfg->mac_addr, ETH_ALEN);
Ajit Khaparde64600ea2010-07-23 01:50:34 +00001195
1196 return 0;
1197}
1198
/* ndo_set_vf_vlan handler: enable (vlan != 0) or reset (vlan == 0)
 * transparent VLAN tagging for VF @vf via the hybrid-switch config.
 * NOTE(review): the @qos argument is accepted but never validated or
 * programmed — confirm whether non-zero qos should be rejected.
 */
static int be_set_vf_vlan(struct net_device *netdev,
			int vf, u16 vlan, u8 qos)
{
	struct be_adapter *adapter = netdev_priv(netdev);
	int status = 0;

	if (!sriov_enabled(adapter))
		return -EPERM;

	if (vf >= adapter->num_vfs || vlan > 4095)
		return -EINVAL;

	if (vlan) {
		if (adapter->vf_cfg[vf].vlan_tag != vlan) {
			/* If this is new value, program it. Else skip. */
			adapter->vf_cfg[vf].vlan_tag = vlan;

			status = be_cmd_set_hsw_config(adapter, vlan,
				vf + 1, adapter->vf_cfg[vf].if_handle);
		}
	} else {
		/* Reset Transparent Vlan Tagging. */
		adapter->vf_cfg[vf].vlan_tag = 0;
		/* Fall back to the default VID for this VF */
		vlan = adapter->vf_cfg[vf].def_vid;
		status = be_cmd_set_hsw_config(adapter, vlan, vf + 1,
					       adapter->vf_cfg[vf].if_handle);
	}

	if (status)
		dev_info(&adapter->pdev->dev,
			 "VLAN %d config on VF %d failed\n", vlan, vf);
	return status;
}
1233
Ajit Khapardee1d18732010-07-23 01:52:13 +00001234static int be_set_vf_tx_rate(struct net_device *netdev,
1235 int vf, int rate)
1236{
1237 struct be_adapter *adapter = netdev_priv(netdev);
1238 int status = 0;
1239
Sathya Perla11ac75e2011-12-13 00:58:50 +00001240 if (!sriov_enabled(adapter))
Ajit Khapardee1d18732010-07-23 01:52:13 +00001241 return -EPERM;
1242
Ajit Khaparde94f434c2011-12-30 12:15:30 +00001243 if (vf >= adapter->num_vfs)
Ajit Khapardee1d18732010-07-23 01:52:13 +00001244 return -EINVAL;
1245
Ajit Khaparde94f434c2011-12-30 12:15:30 +00001246 if (rate < 100 || rate > 10000) {
1247 dev_err(&adapter->pdev->dev,
1248 "tx rate must be between 100 and 10000 Mbps\n");
1249 return -EINVAL;
1250 }
Ajit Khapardee1d18732010-07-23 01:52:13 +00001251
Padmanabh Ratnakard5c18472012-10-20 06:01:53 +00001252 if (lancer_chip(adapter))
1253 status = be_cmd_set_profile_config(adapter, rate / 10, vf + 1);
1254 else
1255 status = be_cmd_set_qos(adapter, rate / 10, vf + 1);
Ajit Khapardee1d18732010-07-23 01:52:13 +00001256
1257 if (status)
Ajit Khaparde94f434c2011-12-30 12:15:30 +00001258 dev_err(&adapter->pdev->dev,
Ajit Khapardee1d18732010-07-23 01:52:13 +00001259 "tx rate %d on VF %d failed\n", rate, vf);
Ajit Khaparde94f434c2011-12-30 12:15:30 +00001260 else
1261 adapter->vf_cfg[vf].tx_rate = rate;
Ajit Khapardee1d18732010-07-23 01:52:13 +00001262 return status;
1263}
1264
Sathya Perla10ef9ab2012-02-09 18:05:27 +00001265static void be_eqd_update(struct be_adapter *adapter, struct be_eq_obj *eqo)
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001266{
Sathya Perla10ef9ab2012-02-09 18:05:27 +00001267 struct be_rx_stats *stats = rx_stats(&adapter->rx_obj[eqo->idx]);
Sathya Perla4097f662009-03-24 16:40:13 -07001268 ulong now = jiffies;
Sathya Perlaac124ff2011-07-25 19:10:14 +00001269 ulong delta = now - stats->rx_jiffies;
Sathya Perlaab1594e2011-07-25 19:10:15 +00001270 u64 pkts;
1271 unsigned int start, eqd;
Sathya Perlaac124ff2011-07-25 19:10:14 +00001272
Sathya Perla10ef9ab2012-02-09 18:05:27 +00001273 if (!eqo->enable_aic) {
1274 eqd = eqo->eqd;
1275 goto modify_eqd;
1276 }
1277
1278 if (eqo->idx >= adapter->num_rx_qs)
Sathya Perlaac124ff2011-07-25 19:10:14 +00001279 return;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001280
Sathya Perla10ef9ab2012-02-09 18:05:27 +00001281 stats = rx_stats(&adapter->rx_obj[eqo->idx]);
1282
Sathya Perla4097f662009-03-24 16:40:13 -07001283 /* Wrapped around */
Sathya Perla3abcded2010-10-03 22:12:27 -07001284 if (time_before(now, stats->rx_jiffies)) {
1285 stats->rx_jiffies = now;
Sathya Perla4097f662009-03-24 16:40:13 -07001286 return;
1287 }
1288
Sathya Perlaac124ff2011-07-25 19:10:14 +00001289 /* Update once a second */
1290 if (delta < HZ)
Sathya Perla4097f662009-03-24 16:40:13 -07001291 return;
1292
Sathya Perlaab1594e2011-07-25 19:10:15 +00001293 do {
1294 start = u64_stats_fetch_begin_bh(&stats->sync);
1295 pkts = stats->rx_pkts;
1296 } while (u64_stats_fetch_retry_bh(&stats->sync, start));
1297
Eric Dumazet68c3e5a2011-08-09 06:23:07 +00001298 stats->rx_pps = (unsigned long)(pkts - stats->rx_pkts_prev) / (delta / HZ);
Sathya Perlaab1594e2011-07-25 19:10:15 +00001299 stats->rx_pkts_prev = pkts;
Sathya Perla3abcded2010-10-03 22:12:27 -07001300 stats->rx_jiffies = now;
Sathya Perla10ef9ab2012-02-09 18:05:27 +00001301 eqd = (stats->rx_pps / 110000) << 3;
1302 eqd = min(eqd, eqo->max_eqd);
1303 eqd = max(eqd, eqo->min_eqd);
Sathya Perlaac124ff2011-07-25 19:10:14 +00001304 if (eqd < 10)
1305 eqd = 0;
Sathya Perla10ef9ab2012-02-09 18:05:27 +00001306
1307modify_eqd:
1308 if (eqd != eqo->cur_eqd) {
1309 be_cmd_modify_eqd(adapter, eqo->q.id, eqd);
1310 eqo->cur_eqd = eqd;
Sathya Perlaac124ff2011-07-25 19:10:14 +00001311 }
Sathya Perla4097f662009-03-24 16:40:13 -07001312}
1313
/* Accumulate per-RX-queue statistics for one RX completion.
 * All counter updates are bracketed by u64_stats_update_begin/end so
 * 64-bit readers on 32-bit hosts see a consistent snapshot; keep every
 * increment inside the bracket.
 */
static void be_rx_stats_update(struct be_rx_obj *rxo,
		struct be_rx_compl_info *rxcp)
{
	struct be_rx_stats *stats = rx_stats(rxo);

	u64_stats_update_begin(&stats->sync);
	stats->rx_compl++;
	stats->rx_bytes += rxcp->pkt_size;
	stats->rx_pkts++;
	if (rxcp->pkt_type == BE_MULTICAST_PACKET)
		stats->rx_mcast_pkts++;
	if (rxcp->err)
		stats->rx_compl_err++;
	u64_stats_update_end(&stats->sync);
}
1329
Sathya Perla2e588f82011-03-11 02:49:26 +00001330static inline bool csum_passed(struct be_rx_compl_info *rxcp)
Ajit Khaparde728a9972009-04-13 15:41:22 -07001331{
Padmanabh Ratnakar19fad862011-03-07 03:08:16 +00001332 /* L4 checksum is not reliable for non TCP/UDP packets.
1333 * Also ignore ipcksm for ipv6 pkts */
Sathya Perla2e588f82011-03-11 02:49:26 +00001334 return (rxcp->tcpf || rxcp->udpf) && rxcp->l4_csum &&
1335 (rxcp->ip_csum || rxcp->ipv6);
Ajit Khaparde728a9972009-04-13 15:41:22 -07001336}
1337
/* Fetch the page_info entry for rxq slot @frag_idx and account for one
 * consumed rx buffer.  The backing (big) page is dma-unmapped only when the
 * frag flagged as the page's last user is reaped.
 */
static struct be_rx_page_info *get_rx_page_info(struct be_rx_obj *rxo,
				u16 frag_idx)
{
	struct be_adapter *adapter = rxo->adapter;
	struct be_rx_page_info *rx_page_info;
	struct be_queue_info *rxq = &rxo->q;

	rx_page_info = &rxo->page_info_tbl[frag_idx];
	BUG_ON(!rx_page_info->page);

	/* A big page is shared by several frags; unmap once, at the last one */
	if (rx_page_info->last_page_user) {
		dma_unmap_page(&adapter->pdev->dev,
			       dma_unmap_addr(rx_page_info, bus),
			       adapter->big_page_size, DMA_FROM_DEVICE);
		rx_page_info->last_page_user = false;
	}

	/* One less posted buffer in the rxq */
	atomic_dec(&rxq->used);
	return rx_page_info;
}
1358
/* Throw away the data in the Rx completion: release the page reference of
 * every frag the completion covers and clear their page_info slots.
 */
static void be_rx_compl_discard(struct be_rx_obj *rxo,
				struct be_rx_compl_info *rxcp)
{
	struct be_queue_info *rxq = &rxo->q;
	struct be_rx_page_info *page_info;
	u16 i, num_rcvd = rxcp->num_rcvd;

	for (i = 0; i < num_rcvd; i++) {
		page_info = get_rx_page_info(rxo, rxcp->rxq_idx);
		put_page(page_info->page);
		memset(page_info, 0, sizeof(*page_info));
		/* advance to the next frag index of this completion */
		index_inc(&rxcp->rxq_idx, rxq->len);
	}
}
1374
/*
 * skb_fill_rx_data forms a complete skb for an ether frame
 * indicated by rxcp: the first rx_frag_size bytes (or the whole frame, if
 * tiny) go into the linear area; the rest is attached as page frags, with
 * frags from the same physical page coalesced into one slot.
 */
static void skb_fill_rx_data(struct be_rx_obj *rxo, struct sk_buff *skb,
			struct be_rx_compl_info *rxcp)
{
	struct be_queue_info *rxq = &rxo->q;
	struct be_rx_page_info *page_info;
	u16 i, j;
	u16 hdr_len, curr_frag_len, remaining;
	u8 *start;

	page_info = get_rx_page_info(rxo, rxcp->rxq_idx);
	start = page_address(page_info->page) + page_info->page_offset;
	prefetch(start);

	/* Copy data in the first descriptor of this completion */
	curr_frag_len = min(rxcp->pkt_size, rx_frag_size);

	skb->len = curr_frag_len;
	if (curr_frag_len <= BE_HDR_LEN) { /* tiny packet */
		memcpy(skb->data, start, curr_frag_len);
		/* Complete packet has now been moved to data */
		put_page(page_info->page);
		skb->data_len = 0;
		skb->tail += curr_frag_len;
	} else {
		/* Copy only the Ethernet header into the linear area and
		 * keep the payload in the page as frag 0
		 */
		hdr_len = ETH_HLEN;
		memcpy(skb->data, start, hdr_len);
		skb_shinfo(skb)->nr_frags = 1;
		skb_frag_set_page(skb, 0, page_info->page);
		skb_shinfo(skb)->frags[0].page_offset =
					page_info->page_offset + hdr_len;
		skb_frag_size_set(&skb_shinfo(skb)->frags[0], curr_frag_len - hdr_len);
		skb->data_len = curr_frag_len - hdr_len;
		skb->truesize += rx_frag_size;
		skb->tail += hdr_len;
	}
	page_info->page = NULL;

	/* Single-frag frame: nothing more to attach */
	if (rxcp->pkt_size <= rx_frag_size) {
		BUG_ON(rxcp->num_rcvd != 1);
		return;
	}

	/* More frags present for this completion */
	index_inc(&rxcp->rxq_idx, rxq->len);
	remaining = rxcp->pkt_size - curr_frag_len;
	for (i = 1, j = 0; i < rxcp->num_rcvd; i++) {
		page_info = get_rx_page_info(rxo, rxcp->rxq_idx);
		curr_frag_len = min(remaining, rx_frag_size);

		/* Coalesce all frags from the same physical page in one slot */
		if (page_info->page_offset == 0) {
			/* Fresh page */
			j++;
			skb_frag_set_page(skb, j, page_info->page);
			skb_shinfo(skb)->frags[j].page_offset =
							page_info->page_offset;
			skb_frag_size_set(&skb_shinfo(skb)->frags[j], 0);
			skb_shinfo(skb)->nr_frags++;
		} else {
			/* Same page as previous frag: drop the extra ref */
			put_page(page_info->page);
		}

		skb_frag_size_add(&skb_shinfo(skb)->frags[j], curr_frag_len);
		skb->len += curr_frag_len;
		skb->data_len += curr_frag_len;
		skb->truesize += rx_frag_size;
		remaining -= curr_frag_len;
		index_inc(&rxcp->rxq_idx, rxq->len);
		page_info->page = NULL;
	}
	BUG_ON(j > MAX_SKB_FRAGS);
}
1451
/* Process the RX completion indicated by rxcp when GRO is disabled:
 * allocate an skb, fill it from the rx page frags, set checksum/queue/
 * rxhash/vlan metadata and hand it to the stack.
 */
static void be_rx_compl_process(struct be_rx_obj *rxo,
				struct be_rx_compl_info *rxcp)
{
	struct be_adapter *adapter = rxo->adapter;
	struct net_device *netdev = adapter->netdev;
	struct sk_buff *skb;

	skb = netdev_alloc_skb_ip_align(netdev, BE_RX_SKB_ALLOC_SIZE);
	if (unlikely(!skb)) {
		/* No skb available: drop the frame, reclaim its rx buffers */
		rx_stats(rxo)->rx_drops_no_skbs++;
		be_rx_compl_discard(rxo, rxcp);
		return;
	}

	skb_fill_rx_data(rxo, skb, rxcp);

	if (likely((netdev->features & NETIF_F_RXCSUM) && csum_passed(rxcp)))
		skb->ip_summed = CHECKSUM_UNNECESSARY;
	else
		skb_checksum_none_assert(skb);

	skb->protocol = eth_type_trans(skb, netdev);
	skb_record_rx_queue(skb, rxo - &adapter->rx_obj[0]);
	if (netdev->features & NETIF_F_RXHASH)
		skb->rxhash = rxcp->rss_hash;


	if (rxcp->vlanf)
		__vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), rxcp->vlan_tag);

	netif_receive_skb(skb);
}
1485
/* Process the RX completion indicated by rxcp when GRO is enabled:
 * attach the rx page frags directly to the napi skb (no copy) and feed it
 * to napi_gro_frags().
 */
void be_rx_compl_process_gro(struct be_rx_obj *rxo, struct napi_struct *napi,
			     struct be_rx_compl_info *rxcp)
{
	struct be_adapter *adapter = rxo->adapter;
	struct be_rx_page_info *page_info;
	struct sk_buff *skb = NULL;
	struct be_queue_info *rxq = &rxo->q;
	u16 remaining, curr_frag_len;
	u16 i, j;

	skb = napi_get_frags(napi);
	if (!skb) {
		/* No skb: drop the frame, reclaim its rx buffers */
		be_rx_compl_discard(rxo, rxcp);
		return;
	}

	remaining = rxcp->pkt_size;
	/* j is u16: -1 wraps to 0xffff; the first iteration (i == 0) always
	 * takes the fresh-page branch and increments it back to 0.
	 */
	for (i = 0, j = -1; i < rxcp->num_rcvd; i++) {
		page_info = get_rx_page_info(rxo, rxcp->rxq_idx);

		curr_frag_len = min(remaining, rx_frag_size);

		/* Coalesce all frags from the same physical page in one slot */
		if (i == 0 || page_info->page_offset == 0) {
			/* First frag or Fresh page */
			j++;
			skb_frag_set_page(skb, j, page_info->page);
			skb_shinfo(skb)->frags[j].page_offset =
							page_info->page_offset;
			skb_frag_size_set(&skb_shinfo(skb)->frags[j], 0);
		} else {
			/* Same page as previous frag: drop the extra ref */
			put_page(page_info->page);
		}
		skb_frag_size_add(&skb_shinfo(skb)->frags[j], curr_frag_len);
		skb->truesize += rx_frag_size;
		remaining -= curr_frag_len;
		index_inc(&rxcp->rxq_idx, rxq->len);
		memset(page_info, 0, sizeof(*page_info));
	}
	BUG_ON(j > MAX_SKB_FRAGS);

	skb_shinfo(skb)->nr_frags = j + 1;
	skb->len = rxcp->pkt_size;
	skb->data_len = rxcp->pkt_size;
	skb->ip_summed = CHECKSUM_UNNECESSARY;
	skb_record_rx_queue(skb, rxo - &adapter->rx_obj[0]);
	if (adapter->netdev->features & NETIF_F_RXHASH)
		skb->rxhash = rxcp->rss_hash;

	if (rxcp->vlanf)
		__vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), rxcp->vlan_tag);

	napi_gro_frags(napi);
}
1541
/* Parse a v1 (BE3 native mode) Rx completion entry into the HW-independent
 * be_rx_compl_info format.  Note: unlike the v0 parser, no ip_frag bit is
 * extracted here; be_rx_compl_get()'s ip_frag workaround applies only to
 * the v0 (non-native) path.
 */
static void be_parse_rx_compl_v1(struct be_eth_rx_compl *compl,
				struct be_rx_compl_info *rxcp)
{
	rxcp->pkt_size =
		AMAP_GET_BITS(struct amap_eth_rx_compl_v1, pktsize, compl);
	rxcp->vlanf = AMAP_GET_BITS(struct amap_eth_rx_compl_v1, vtp, compl);
	rxcp->err = AMAP_GET_BITS(struct amap_eth_rx_compl_v1, err, compl);
	rxcp->tcpf = AMAP_GET_BITS(struct amap_eth_rx_compl_v1, tcpf, compl);
	rxcp->udpf = AMAP_GET_BITS(struct amap_eth_rx_compl_v1, udpf, compl);
	rxcp->ip_csum =
		AMAP_GET_BITS(struct amap_eth_rx_compl_v1, ipcksm, compl);
	rxcp->l4_csum =
		AMAP_GET_BITS(struct amap_eth_rx_compl_v1, l4_cksm, compl);
	rxcp->ipv6 =
		AMAP_GET_BITS(struct amap_eth_rx_compl_v1, ip_version, compl);
	rxcp->rxq_idx =
		AMAP_GET_BITS(struct amap_eth_rx_compl_v1, fragndx, compl);
	rxcp->num_rcvd =
		AMAP_GET_BITS(struct amap_eth_rx_compl_v1, numfrags, compl);
	rxcp->pkt_type =
		AMAP_GET_BITS(struct amap_eth_rx_compl_v1, cast_enc, compl);
	rxcp->rss_hash =
		AMAP_GET_BITS(struct amap_eth_rx_compl_v1, rsshash, compl);
	/* vlan fields are valid only when the vtp bit is set */
	if (rxcp->vlanf) {
		rxcp->vtm = AMAP_GET_BITS(struct amap_eth_rx_compl_v1, vtm,
					  compl);
		rxcp->vlan_tag = AMAP_GET_BITS(struct amap_eth_rx_compl_v1, vlan_tag,
					       compl);
	}
	rxcp->port = AMAP_GET_BITS(struct amap_eth_rx_compl_v1, port, compl);
}
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001573
/* Parse a v0 (legacy/non-native mode) Rx completion entry into the
 * HW-independent be_rx_compl_info format.  This variant also reports the
 * ip_frag bit, which be_rx_compl_get() uses to ignore the L4 checksum of
 * IP fragments.
 */
static void be_parse_rx_compl_v0(struct be_eth_rx_compl *compl,
				struct be_rx_compl_info *rxcp)
{
	rxcp->pkt_size =
		AMAP_GET_BITS(struct amap_eth_rx_compl_v0, pktsize, compl);
	rxcp->vlanf = AMAP_GET_BITS(struct amap_eth_rx_compl_v0, vtp, compl);
	rxcp->err = AMAP_GET_BITS(struct amap_eth_rx_compl_v0, err, compl);
	rxcp->tcpf = AMAP_GET_BITS(struct amap_eth_rx_compl_v0, tcpf, compl);
	rxcp->udpf = AMAP_GET_BITS(struct amap_eth_rx_compl_v0, udpf, compl);
	rxcp->ip_csum =
		AMAP_GET_BITS(struct amap_eth_rx_compl_v0, ipcksm, compl);
	rxcp->l4_csum =
		AMAP_GET_BITS(struct amap_eth_rx_compl_v0, l4_cksm, compl);
	rxcp->ipv6 =
		AMAP_GET_BITS(struct amap_eth_rx_compl_v0, ip_version, compl);
	rxcp->rxq_idx =
		AMAP_GET_BITS(struct amap_eth_rx_compl_v0, fragndx, compl);
	rxcp->num_rcvd =
		AMAP_GET_BITS(struct amap_eth_rx_compl_v0, numfrags, compl);
	rxcp->pkt_type =
		AMAP_GET_BITS(struct amap_eth_rx_compl_v0, cast_enc, compl);
	rxcp->rss_hash =
		AMAP_GET_BITS(struct amap_eth_rx_compl_v0, rsshash, compl);
	/* vlan fields are valid only when the vtp bit is set */
	if (rxcp->vlanf) {
		rxcp->vtm = AMAP_GET_BITS(struct amap_eth_rx_compl_v0, vtm,
					  compl);
		rxcp->vlan_tag = AMAP_GET_BITS(struct amap_eth_rx_compl_v0, vlan_tag,
					       compl);
	}
	rxcp->port = AMAP_GET_BITS(struct amap_eth_rx_compl_v0, port, compl);
	rxcp->ip_frag = AMAP_GET_BITS(struct amap_eth_rx_compl_v0,
				      ip_frag, compl);
}
1607
/* Fetch the next valid entry from the Rx CQ, parse it into rxo->rxcp and
 * consume it.  Returns NULL when no valid completion is pending.
 */
static struct be_rx_compl_info *be_rx_compl_get(struct be_rx_obj *rxo)
{
	struct be_eth_rx_compl *compl = queue_tail_node(&rxo->cq);
	struct be_rx_compl_info *rxcp = &rxo->rxcp;
	struct be_adapter *adapter = rxo->adapter;

	/* For checking the valid bit it is Ok to use either definition as the
	 * valid bit is at the same position in both v0 and v1 Rx compl */
	if (compl->dw[offsetof(struct amap_eth_rx_compl_v1, valid) / 32] == 0)
		return NULL;

	/* Read the rest of the entry only after the valid bit is observed */
	rmb();
	be_dws_le_to_cpu(compl, sizeof(*compl));

	if (adapter->be3_native)
		be_parse_rx_compl_v1(compl, rxcp);
	else
		be_parse_rx_compl_v0(compl, rxcp);

	/* The L4 checksum verdict is not meaningful for IP fragments */
	if (rxcp->ip_frag)
		rxcp->l4_csum = 0;

	if (rxcp->vlanf) {
		/* vlanf could be wrongly set in some cards.
		 * ignore if vtm is not set */
		if ((adapter->function_mode & FLEX10_MODE) && !rxcp->vtm)
			rxcp->vlanf = 0;

		if (!lancer_chip(adapter))
			rxcp->vlan_tag = swab16(rxcp->vlan_tag);

		/* Don't report the port's PVID to the stack unless that vlan
		 * has been explicitly configured on the interface */
		if (adapter->pvid == (rxcp->vlan_tag & VLAN_VID_MASK) &&
		    !adapter->vlan_tag[rxcp->vlan_tag])
			rxcp->vlanf = 0;
	}

	/* As the compl has been parsed, reset it; we wont touch it again */
	compl->dw[offsetof(struct amap_eth_rx_compl_v1, valid) / 32] = 0;

	queue_tail_inc(&rxo->cq);
	return rxcp;
}
1650
Eric Dumazet1829b082011-03-01 05:48:12 +00001651static inline struct page *be_alloc_pages(u32 size, gfp_t gfp)
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001652{
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001653 u32 order = get_order(size);
Eric Dumazet1829b082011-03-01 05:48:12 +00001654
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001655 if (order > 0)
Eric Dumazet1829b082011-03-01 05:48:12 +00001656 gfp |= __GFP_COMP;
1657 return alloc_pages(gfp, order);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001658}
1659
1660/*
1661 * Allocate a page, split it to fragments of size rx_frag_size and post as
1662 * receive buffers to BE
1663 */
Eric Dumazet1829b082011-03-01 05:48:12 +00001664static void be_post_rx_frags(struct be_rx_obj *rxo, gfp_t gfp)
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001665{
Sathya Perla3abcded2010-10-03 22:12:27 -07001666 struct be_adapter *adapter = rxo->adapter;
Sathya Perla26d92f92010-01-21 22:52:08 -08001667 struct be_rx_page_info *page_info = NULL, *prev_page_info = NULL;
Sathya Perla3abcded2010-10-03 22:12:27 -07001668 struct be_queue_info *rxq = &rxo->q;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001669 struct page *pagep = NULL;
1670 struct be_eth_rx_d *rxd;
1671 u64 page_dmaaddr = 0, frag_dmaaddr;
1672 u32 posted, page_offset = 0;
1673
Sathya Perla3abcded2010-10-03 22:12:27 -07001674 page_info = &rxo->page_info_tbl[rxq->head];
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001675 for (posted = 0; posted < MAX_RX_POST && !page_info->page; posted++) {
1676 if (!pagep) {
Eric Dumazet1829b082011-03-01 05:48:12 +00001677 pagep = be_alloc_pages(adapter->big_page_size, gfp);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001678 if (unlikely(!pagep)) {
Sathya Perlaac124ff2011-07-25 19:10:14 +00001679 rx_stats(rxo)->rx_post_fail++;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001680 break;
1681 }
Ivan Vecera2b7bceb2011-02-02 08:05:12 +00001682 page_dmaaddr = dma_map_page(&adapter->pdev->dev, pagep,
1683 0, adapter->big_page_size,
1684 DMA_FROM_DEVICE);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001685 page_info->page_offset = 0;
1686 } else {
1687 get_page(pagep);
1688 page_info->page_offset = page_offset + rx_frag_size;
1689 }
1690 page_offset = page_info->page_offset;
1691 page_info->page = pagep;
FUJITA Tomonorifac6da52010-04-01 16:53:22 +00001692 dma_unmap_addr_set(page_info, bus, page_dmaaddr);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001693 frag_dmaaddr = page_dmaaddr + page_info->page_offset;
1694
1695 rxd = queue_head_node(rxq);
1696 rxd->fragpa_lo = cpu_to_le32(frag_dmaaddr & 0xFFFFFFFF);
1697 rxd->fragpa_hi = cpu_to_le32(upper_32_bits(frag_dmaaddr));
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001698
1699 /* Any space left in the current big page for another frag? */
1700 if ((page_offset + rx_frag_size + rx_frag_size) >
1701 adapter->big_page_size) {
1702 pagep = NULL;
1703 page_info->last_page_user = true;
1704 }
Sathya Perla26d92f92010-01-21 22:52:08 -08001705
1706 prev_page_info = page_info;
1707 queue_head_inc(rxq);
Sathya Perla10ef9ab2012-02-09 18:05:27 +00001708 page_info = &rxo->page_info_tbl[rxq->head];
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001709 }
1710 if (pagep)
Sathya Perla26d92f92010-01-21 22:52:08 -08001711 prev_page_info->last_page_user = true;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001712
1713 if (posted) {
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001714 atomic_add(posted, &rxq->used);
Sathya Perla8788fdc2009-07-27 22:52:03 +00001715 be_rxq_notify(adapter, rxq->id, posted);
Sathya Perlaea1dae12009-03-19 23:56:20 -07001716 } else if (atomic_read(&rxq->used) == 0) {
1717 /* Let be_worker replenish when memory is available */
Sathya Perla3abcded2010-10-03 22:12:27 -07001718 rxo->rx_post_starved = true;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001719 }
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001720}
1721
/* Fetch, consume and invalidate the next valid Tx completion from @tx_cq.
 * Returns NULL when none is pending.
 */
static struct be_eth_tx_compl *be_tx_compl_get(struct be_queue_info *tx_cq)
{
	struct be_eth_tx_compl *txcp = queue_tail_node(tx_cq);

	if (txcp->dw[offsetof(struct amap_eth_tx_compl, valid) / 32] == 0)
		return NULL;

	/* Read the rest of the entry only after the valid bit is observed */
	rmb();
	be_dws_le_to_cpu(txcp, sizeof(*txcp));

	/* Invalidate so this slot is not re-processed on the next pass */
	txcp->dw[offsetof(struct amap_eth_tx_compl, valid) / 32] = 0;

	queue_tail_inc(tx_cq);
	return txcp;
}
1737
/* Reclaim the wrbs of one transmitted skb: unmap its data frags and free
 * the skb.  @last_index is the txq index of the skb's last wrb.  Returns
 * the number of wrbs reclaimed, including the header wrb.
 */
static u16 be_tx_compl_process(struct be_adapter *adapter,
		struct be_tx_obj *txo, u16 last_index)
{
	struct be_queue_info *txq = &txo->q;
	struct be_eth_wrb *wrb;
	struct sk_buff **sent_skbs = txo->sent_skb_list;
	struct sk_buff *sent_skb;
	u16 cur_index, num_wrbs = 1; /* account for hdr wrb */
	bool unmap_skb_hdr = true;

	sent_skb = sent_skbs[txq->tail];
	BUG_ON(!sent_skb);
	sent_skbs[txq->tail] = NULL;

	/* skip header wrb */
	queue_tail_inc(txq);

	do {
		cur_index = txq->tail;
		wrb = queue_tail_node(txq);
		/* Only the first data wrb may carry the linear-header mapping */
		unmap_tx_frag(&adapter->pdev->dev, wrb,
			      (unmap_skb_hdr && skb_headlen(sent_skb)));
		unmap_skb_hdr = false;

		num_wrbs++;
		queue_tail_inc(txq);
	} while (cur_index != last_index);

	kfree_skb(sent_skb);
	return num_wrbs;
}
1769
/* Return the number of events in the event queue.  Each pending entry is
 * consumed (its evt word cleared) and the EQ tail advanced.
 */
static inline int events_get(struct be_eq_obj *eqo)
{
	struct be_eq_entry *eqe;
	int num = 0;

	do {
		eqe = queue_tail_node(&eqo->q);
		if (eqe->evt == 0)
			break;

		/* Process the entry only after the evt word is observed */
		rmb();
		eqe->evt = 0;
		num++;
		queue_tail_inc(&eqo->q);
	} while (true);

	return num;
}
1789
Sathya Perla10ef9ab2012-02-09 18:05:27 +00001790/* Leaves the EQ is disarmed state */
1791static void be_eq_clean(struct be_eq_obj *eqo)
1792{
1793 int num = events_get(eqo);
1794
1795 be_eq_notify(eqo->adapter, eqo->q.id, false, true, num);
1796}
1797
/* Drain the Rx CQ and free every still-posted Rx buffer; used at queue
 * teardown so the HW and driver rings end up empty and in sync.
 */
static void be_rx_cq_clean(struct be_rx_obj *rxo)
{
	struct be_rx_page_info *page_info;
	struct be_queue_info *rxq = &rxo->q;
	struct be_queue_info *rx_cq = &rxo->cq;
	struct be_rx_compl_info *rxcp;
	struct be_adapter *adapter = rxo->adapter;
	int flush_wait = 0;
	u16 tail;

	/* Consume pending rx completions.
	 * Wait for the flush completion (identified by zero num_rcvd)
	 * to arrive. Notify CQ even when there are no more CQ entries
	 * for HW to flush partially coalesced CQ entries.
	 * In Lancer, there is no need to wait for flush compl.
	 */
	for (;;) {
		rxcp = be_rx_compl_get(rxo);
		if (rxcp == NULL) {
			if (lancer_chip(adapter))
				break;

			/* Give up after ~10ms or on a wedged adapter */
			if (flush_wait++ > 10 || be_hw_error(adapter)) {
				dev_warn(&adapter->pdev->dev,
					 "did not receive flush compl\n");
				break;
			}
			be_cq_notify(adapter, rx_cq->id, true, 0);
			mdelay(1);
		} else {
			be_rx_compl_discard(rxo, rxcp);
			be_cq_notify(adapter, rx_cq->id, false, 1);
			if (rxcp->num_rcvd == 0)
				break;
		}
	}

	/* After cleanup, leave the CQ in unarmed state */
	be_cq_notify(adapter, rx_cq->id, false, 0);

	/* Then free posted rx buffers that were not used */
	tail = (rxq->head + rxq->len - atomic_read(&rxq->used)) % rxq->len;
	for (; atomic_read(&rxq->used) > 0; index_inc(&tail, rxq->len)) {
		page_info = get_rx_page_info(rxo, tail);
		put_page(page_info->page);
		memset(page_info, 0, sizeof(*page_info));
	}
	BUG_ON(atomic_read(&rxq->used));
	rxq->tail = rxq->head = 0;
}
1848
/* Reap all outstanding Tx completions (waiting up to ~200ms across all tx
 * queues), then forcibly unmap/free any posted wrbs for which a completion
 * will never arrive.
 */
static void be_tx_compl_clean(struct be_adapter *adapter)
{
	struct be_tx_obj *txo;
	struct be_queue_info *txq;
	struct be_eth_tx_compl *txcp;
	u16 end_idx, cmpl = 0, timeo = 0, num_wrbs = 0;
	struct sk_buff *sent_skb;
	bool dummy_wrb;
	int i, pending_txqs;

	/* Wait for a max of 200ms for all the tx-completions to arrive. */
	do {
		pending_txqs = adapter->num_tx_qs;

		for_all_tx_queues(adapter, txo, i) {
			txq = &txo->q;
			while ((txcp = be_tx_compl_get(&txo->cq))) {
				end_idx =
					AMAP_GET_BITS(struct amap_eth_tx_compl,
						      wrb_index, txcp);
				num_wrbs += be_tx_compl_process(adapter, txo,
								end_idx);
				cmpl++;
			}
			if (cmpl) {
				/* Ack what we reaped; leave the CQ unarmed */
				be_cq_notify(adapter, txo->cq.id, false, cmpl);
				atomic_sub(num_wrbs, &txq->used);
				cmpl = 0;
				num_wrbs = 0;
			}
			if (atomic_read(&txq->used) == 0)
				pending_txqs--;
		}

		if (pending_txqs == 0 || ++timeo > 200)
			break;

		mdelay(1);
	} while (true);

	for_all_tx_queues(adapter, txo, i) {
		txq = &txo->q;
		if (atomic_read(&txq->used))
			dev_err(&adapter->pdev->dev, "%d pending tx-compls\n",
				atomic_read(&txq->used));

		/* free posted tx for which compls will never arrive */
		while (atomic_read(&txq->used)) {
			sent_skb = txo->sent_skb_list[txq->tail];
			end_idx = txq->tail;
			/* Recompute the wrb span of this skb to reclaim it */
			num_wrbs = wrb_cnt_for_skb(adapter, sent_skb,
						   &dummy_wrb);
			index_adv(&end_idx, num_wrbs - 1, txq->len);
			num_wrbs = be_tx_compl_process(adapter, txo, end_idx);
			atomic_sub(num_wrbs, &txq->used);
		}
	}
}
1907
/* Drain, destroy and free every event queue of the adapter.  Safe to call
 * with partially created EQs: only created queues are cleaned/destroyed,
 * but the memory of every EQ is freed.
 */
static void be_evt_queues_destroy(struct be_adapter *adapter)
{
	struct be_eq_obj *eqo;
	int i;

	for_all_evt_queues(adapter, eqo, i) {
		if (eqo->q.created) {
			be_eq_clean(eqo);
			be_cmd_q_destroy(adapter, &eqo->q, QTYPE_EQ);
		}
		be_queue_free(adapter, &eqo->q);
	}
}
1921
/* Allocate and create in FW one event queue per interrupt vector.
 * Returns 0 on success or the first non-zero error from queue
 * allocation/creation; partially created EQs are cleaned up by the
 * caller's error path (be_evt_queues_destroy tolerates !created queues).
 */
static int be_evt_queues_create(struct be_adapter *adapter)
{
	struct be_queue_info *eq;
	struct be_eq_obj *eqo;
	int i, rc;

	/* One EQ per irq vector */
	adapter->num_evt_qs = num_irqs(adapter);

	for_all_evt_queues(adapter, eqo, i) {
		eqo->adapter = adapter;
		eqo->tx_budget = BE_TX_BUDGET;
		eqo->idx = i;
		eqo->max_eqd = BE_MAX_EQD;
		eqo->enable_aic = true;	/* adaptive interrupt coalescing on */

		eq = &eqo->q;
		rc = be_queue_alloc(adapter, eq, EVNT_Q_LEN,
					sizeof(struct be_eq_entry));
		if (rc)
			return rc;

		rc = be_cmd_eq_create(adapter, eq, eqo->cur_eqd);
		if (rc)
			return rc;
	}
	return 0;
}
1949
/* Destroy the MCC (mailbox command channel) queue pair.
 * The MCC WRB queue is destroyed before its completion queue, mirroring
 * the reverse of the creation order in be_mcc_queues_create().
 */
static void be_mcc_queues_destroy(struct be_adapter *adapter)
{
	struct be_queue_info *q;

	q = &adapter->mcc_obj.q;
	if (q->created)
		be_cmd_q_destroy(adapter, q, QTYPE_MCCQ);
	be_queue_free(adapter, q);

	q = &adapter->mcc_obj.cq;
	if (q->created)
		be_cmd_q_destroy(adapter, q, QTYPE_CQ);
	be_queue_free(adapter, q);
}
1964
/* Must be called only after TX qs are created as MCC shares TX EQ */
/* Create the MCC completion queue and WRB queue.
 * Resources are acquired in order (CQ alloc -> CQ create -> Q alloc ->
 * Q create) and the goto chain below unwinds them in reverse on failure.
 * Returns 0 on success, -1 on any failure.
 */
static int be_mcc_queues_create(struct be_adapter *adapter)
{
	struct be_queue_info *q, *cq;

	cq = &adapter->mcc_obj.cq;
	if (be_queue_alloc(adapter, cq, MCC_CQ_LEN,
			sizeof(struct be_mcc_compl)))
		goto err;

	/* Use the default EQ for MCC completions */
	if (be_cmd_cq_create(adapter, cq, &mcc_eqo(adapter)->q, true, 0))
		goto mcc_cq_free;

	q = &adapter->mcc_obj.q;
	if (be_queue_alloc(adapter, q, MCC_Q_LEN, sizeof(struct be_mcc_wrb)))
		goto mcc_cq_destroy;

	if (be_cmd_mccq_create(adapter, q, cq))
		goto mcc_q_free;

	return 0;

mcc_q_free:
	be_queue_free(adapter, q);
mcc_cq_destroy:
	be_cmd_q_destroy(adapter, cq, QTYPE_CQ);
mcc_cq_free:
	be_queue_free(adapter, cq);
err:
	return -1;
}
1997
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001998static void be_tx_queues_destroy(struct be_adapter *adapter)
1999{
2000 struct be_queue_info *q;
Sathya Perla3c8def92011-06-12 20:01:58 +00002001 struct be_tx_obj *txo;
2002 u8 i;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002003
Sathya Perla3c8def92011-06-12 20:01:58 +00002004 for_all_tx_queues(adapter, txo, i) {
2005 q = &txo->q;
2006 if (q->created)
2007 be_cmd_q_destroy(adapter, q, QTYPE_TXQ);
2008 be_queue_free(adapter, q);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002009
Sathya Perla3c8def92011-06-12 20:01:58 +00002010 q = &txo->cq;
2011 if (q->created)
2012 be_cmd_q_destroy(adapter, q, QTYPE_CQ);
2013 be_queue_free(adapter, q);
2014 }
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002015}
2016
Sathya Perladafc0fe2011-10-24 02:45:02 +00002017static int be_num_txqs_want(struct be_adapter *adapter)
2018{
Padmanabh Ratnakarabb93952012-10-20 06:01:41 +00002019 if ((!lancer_chip(adapter) && sriov_want(adapter)) ||
2020 be_is_mc(adapter) ||
2021 (!lancer_chip(adapter) && !be_physfn(adapter)) ||
Sathya Perlaca34fe32012-11-06 17:48:56 +00002022 BE2_chip(adapter))
Sathya Perladafc0fe2011-10-24 02:45:02 +00002023 return 1;
2024 else
Padmanabh Ratnakarabb93952012-10-20 06:01:41 +00002025 return adapter->max_tx_queues;
Sathya Perladafc0fe2011-10-24 02:45:02 +00002026}
2027
/* Allocate and create the TX completion queues.
 * Also fixes adapter->num_tx_qs (via be_num_txqs_want) and publishes it
 * to the stack with netif_set_real_num_tx_queues under rtnl_lock.
 * Returns 0 or the first failing status.
 */
static int be_tx_cqs_create(struct be_adapter *adapter)
{
	struct be_queue_info *cq, *eq;
	int status;
	struct be_tx_obj *txo;
	u8 i;

	adapter->num_tx_qs = be_num_txqs_want(adapter);
	if (adapter->num_tx_qs != MAX_TX_QS) {
		rtnl_lock();
		netif_set_real_num_tx_queues(adapter->netdev,
			adapter->num_tx_qs);
		rtnl_unlock();
	}

	for_all_tx_queues(adapter, txo, i) {
		cq = &txo->cq;
		status = be_queue_alloc(adapter, cq, TX_CQ_LEN,
					sizeof(struct be_eth_tx_compl));
		if (status)
			return status;

		/* If num_evt_qs is less than num_tx_qs, then more than
		 * one txq share an eq
		 */
		eq = &adapter->eq_obj[i % adapter->num_evt_qs].q;
		status = be_cmd_cq_create(adapter, cq, eq, false, 3);
		if (status)
			return status;
	}
	return 0;
}
2060
/* Allocate the TX WRB queues and create them in FW, one per TX object.
 * Must run after be_tx_cqs_create() so each txo already has its CQ.
 * Returns 0 or the first failing status; partially created queues are
 * reclaimed by the caller's error path.
 */
static int be_tx_qs_create(struct be_adapter *adapter)
{
	struct be_tx_obj *txo;
	int i, status;

	for_all_tx_queues(adapter, txo, i) {
		status = be_queue_alloc(adapter, &txo->q, TX_Q_LEN,
					sizeof(struct be_eth_wrb));
		if (status)
			return status;

		status = be_cmd_txq_create(adapter, txo);
		if (status)
			return status;
	}

	dev_info(&adapter->pdev->dev, "created %d TX queue(s)\n",
		 adapter->num_tx_qs);
	return 0;
}
2081
2082static void be_rx_cqs_destroy(struct be_adapter *adapter)
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002083{
2084 struct be_queue_info *q;
Sathya Perla3abcded2010-10-03 22:12:27 -07002085 struct be_rx_obj *rxo;
2086 int i;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002087
Sathya Perla3abcded2010-10-03 22:12:27 -07002088 for_all_rx_queues(adapter, rxo, i) {
Sathya Perla3abcded2010-10-03 22:12:27 -07002089 q = &rxo->cq;
2090 if (q->created)
2091 be_cmd_q_destroy(adapter, q, QTYPE_CQ);
2092 be_queue_free(adapter, q);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002093 }
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002094}
2095
/* Allocate and create the RX completion queues.
 * With more than one irq, num_irqs RSS rings plus one default RX queue
 * are used; with a single irq only the default queue is created. The
 * chosen count is published via netif_set_real_num_rx_queues under
 * rtnl_lock. Returns 0 or the first failing status.
 */
static int be_rx_cqs_create(struct be_adapter *adapter)
{
	struct be_queue_info *eq, *cq;
	struct be_rx_obj *rxo;
	int rc, i;

	/* We'll create as many RSS rings as there are irqs.
	 * But when there's only one irq there's no use creating RSS rings
	 */
	adapter->num_rx_qs = (num_irqs(adapter) > 1) ?
				num_irqs(adapter) + 1 : 1;
	if (adapter->num_rx_qs != MAX_RX_QS) {
		rtnl_lock();
		netif_set_real_num_rx_queues(adapter->netdev,
				adapter->num_rx_qs);
		rtnl_unlock();
	}

	/* big_page_size: contiguous chunk rx frags are carved from */
	adapter->big_page_size = (1 << get_order(rx_frag_size)) * PAGE_SIZE;
	for_all_rx_queues(adapter, rxo, i) {
		rxo->adapter = adapter;
		cq = &rxo->cq;
		rc = be_queue_alloc(adapter, cq, RX_CQ_LEN,
				sizeof(struct be_eth_rx_compl));
		if (rc)
			return rc;

		/* RX CQs may share EQs when there are fewer EQs than RXQs */
		eq = &adapter->eq_obj[i % adapter->num_evt_qs].q;
		rc = be_cmd_cq_create(adapter, cq, eq, false, 3);
		if (rc)
			return rc;
	}

	dev_info(&adapter->pdev->dev,
		 "created %d RSS queue(s) and 1 default RX queue\n",
		 adapter->num_rx_qs - 1);
	return 0;
}
2134
/* Legacy INTx interrupt handler; 'dev' is the first (and only) EQ object
 * registered for INTx in be_irq_register(). Schedules NAPI and counts
 * events so the EQ can be re-armed with the right count.
 */
static irqreturn_t be_intx(int irq, void *dev)
{
	struct be_eq_obj *eqo = dev;
	struct be_adapter *adapter = eqo->adapter;
	int num_evts = 0;

	/* IRQ is not expected when NAPI is scheduled as the EQ
	 * will not be armed.
	 * But, this can happen on Lancer INTx where it takes
	 * a while to de-assert INTx or in BE2 where occasionally
	 * an interrupt may be raised even when EQ is unarmed.
	 * If NAPI is already scheduled, then counting & notifying
	 * events will orphan them.
	 */
	if (napi_schedule_prep(&eqo->napi)) {
		num_evts = events_get(eqo);
		__napi_schedule(&eqo->napi);
		if (num_evts)
			eqo->spurious_intr = 0;
	}
	be_eq_notify(adapter, eqo->q.id, false, true, num_evts);

	/* Return IRQ_HANDLED only for the first spurious intr
	 * after a valid intr to stop the kernel from branding
	 * this irq as a bad one!
	 */
	if (num_evts || eqo->spurious_intr++ == 0)
		return IRQ_HANDLED;
	else
		return IRQ_NONE;
}
2166
/* MSI-X interrupt handler: stop further event delivery for this EQ
 * (notify with 0 events, no rearm) and hand processing to NAPI.
 */
static irqreturn_t be_msix(int irq, void *dev)
{
	struct be_eq_obj *eqo = dev;

	be_eq_notify(eqo->adapter, eqo->q.id, false, true, 0);
	napi_schedule(&eqo->napi);
	return IRQ_HANDLED;
}
2175
Sathya Perla2e588f82011-03-11 02:49:26 +00002176static inline bool do_gro(struct be_rx_compl_info *rxcp)
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002177{
Somnath Koture38b1702013-05-29 22:55:56 +00002178 return (rxcp->tcpf && !rxcp->err && rxcp->l4_csum) ? true : false;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002179}
2180
/* NAPI RX processing for one RX ring: consume up to 'budget' completions,
 * route each to GRO or the regular receive path, then notify the CQ and
 * replenish RX frags if the ring has drained below the watermark.
 * Returns the number of completions processed.
 */
static int be_process_rx(struct be_rx_obj *rxo, struct napi_struct *napi,
			int budget)
{
	struct be_adapter *adapter = rxo->adapter;
	struct be_queue_info *rx_cq = &rxo->cq;
	struct be_rx_compl_info *rxcp;
	u32 work_done;

	for (work_done = 0; work_done < budget; work_done++) {
		rxcp = be_rx_compl_get(rxo);
		if (!rxcp)
			break;

		/* Is it a flush compl that has no data */
		if (unlikely(rxcp->num_rcvd == 0))
			goto loop_continue;

		/* Discard compl with partial DMA Lancer B0 */
		if (unlikely(!rxcp->pkt_size)) {
			be_rx_compl_discard(rxo, rxcp);
			goto loop_continue;
		}

		/* On BE drop pkts that arrive due to imperfect filtering in
		 * promiscuous mode on some skews
		 */
		if (unlikely(rxcp->port != adapter->port_num &&
				!lancer_chip(adapter))) {
			be_rx_compl_discard(rxo, rxcp);
			goto loop_continue;
		}

		if (do_gro(rxcp))
			be_rx_compl_process_gro(rxo, napi, rxcp);
		else
			be_rx_compl_process(rxo, rxcp);
loop_continue:
		/* stats are updated even for discarded/flush completions */
		be_rx_stats_update(rxo, rxcp);
	}

	if (work_done) {
		be_cq_notify(adapter, rx_cq->id, true, work_done);

		/* refill the ring when it runs low on posted frags */
		if (atomic_read(&rxo->q.used) < RX_FRAGS_REFILL_WM)
			be_post_rx_frags(rxo, GFP_ATOMIC);
	}

	return work_done;
}
2230
/* NAPI TX completion processing for one TX ring: reap up to 'budget'
 * completions, release the consumed WRBs, wake the subqueue 'idx' if it
 * was flow-stopped and the ring is now at most half full, and update
 * per-queue stats. Returns true when the ring is fully drained (i.e.
 * fewer than 'budget' completions were found).
 */
static bool be_process_tx(struct be_adapter *adapter, struct be_tx_obj *txo,
			int budget, int idx)
{
	struct be_eth_tx_compl *txcp;
	int num_wrbs = 0, work_done;

	for (work_done = 0; work_done < budget; work_done++) {
		txcp = be_tx_compl_get(&txo->cq);
		if (!txcp)
			break;
		num_wrbs += be_tx_compl_process(adapter, txo,
				AMAP_GET_BITS(struct amap_eth_tx_compl,
					wrb_index, txcp));
	}

	if (work_done) {
		be_cq_notify(adapter, txo->cq.id, true, work_done);
		atomic_sub(num_wrbs, &txo->q.used);

		/* As Tx wrbs have been freed up, wake up netdev queue
		 * if it was stopped due to lack of tx wrbs. */
		if (__netif_subqueue_stopped(adapter->netdev, idx) &&
			atomic_read(&txo->q.used) < txo->q.len / 2) {
			netif_wake_subqueue(adapter->netdev, idx);
		}

		u64_stats_update_begin(&tx_stats(txo)->sync_compl);
		tx_stats(txo)->tx_compl += work_done;
		u64_stats_update_end(&tx_stats(txo)->sync_compl);
	}
	return (work_done < budget); /* Done */
}
Sathya Perla3c8def92011-06-12 20:01:58 +00002263
/* NAPI poll routine for one EQ: services all TX and RX queues mapped to
 * this EQ (queues are striped across EQs by index modulo num_evt_qs),
 * processes MCC completions on the MCC EQ, and either completes NAPI and
 * rearms the EQ or stays in polling mode. Returns the work done, capped
 * at 'budget' when more work remains.
 */
int be_poll(struct napi_struct *napi, int budget)
{
	struct be_eq_obj *eqo = container_of(napi, struct be_eq_obj, napi);
	struct be_adapter *adapter = eqo->adapter;
	int max_work = 0, work, i, num_evts;
	bool tx_done;

	/* count events up-front; they are acked on the be_eq_notify below */
	num_evts = events_get(eqo);

	/* Process all TXQs serviced by this EQ */
	for (i = eqo->idx; i < adapter->num_tx_qs; i += adapter->num_evt_qs) {
		tx_done = be_process_tx(adapter, &adapter->tx_obj[i],
					eqo->tx_budget, i);
		if (!tx_done)
			max_work = budget;	/* force re-poll */
	}

	/* This loop will iterate twice for EQ0 in which
	 * completions of the last RXQ (default one) are also processed
	 * For other EQs the loop iterates only once
	 */
	for (i = eqo->idx; i < adapter->num_rx_qs; i += adapter->num_evt_qs) {
		work = be_process_rx(&adapter->rx_obj[i], napi, budget);
		max_work = max(work, max_work);
	}

	if (is_mcc_eqo(eqo))
		be_process_mcc(adapter);

	if (max_work < budget) {
		napi_complete(napi);
		be_eq_notify(adapter, eqo->q.id, true, false, num_evts);
	} else {
		/* As we'll continue in polling mode, count and clear events */
		be_eq_notify(adapter, eqo->q.id, false, false, num_evts);
	}
	return max_work;
}
2302
Padmanabh Ratnakarf67ef7b2012-07-12 03:57:09 +00002303void be_detect_error(struct be_adapter *adapter)
Ajit Khaparde7c185272010-07-29 06:16:33 +00002304{
Padmanabh Ratnakare1cfb672011-11-03 01:50:08 +00002305 u32 ue_lo = 0, ue_hi = 0, ue_lo_mask = 0, ue_hi_mask = 0;
2306 u32 sliport_status = 0, sliport_err1 = 0, sliport_err2 = 0;
Ajit Khaparde7c185272010-07-29 06:16:33 +00002307 u32 i;
2308
Sathya Perlad23e9462012-12-17 19:38:51 +00002309 if (be_hw_error(adapter))
Sathya Perla72f02482011-11-10 19:17:58 +00002310 return;
2311
Padmanabh Ratnakare1cfb672011-11-03 01:50:08 +00002312 if (lancer_chip(adapter)) {
2313 sliport_status = ioread32(adapter->db + SLIPORT_STATUS_OFFSET);
2314 if (sliport_status & SLIPORT_STATUS_ERR_MASK) {
2315 sliport_err1 = ioread32(adapter->db +
2316 SLIPORT_ERROR1_OFFSET);
2317 sliport_err2 = ioread32(adapter->db +
2318 SLIPORT_ERROR2_OFFSET);
2319 }
2320 } else {
2321 pci_read_config_dword(adapter->pdev,
2322 PCICFG_UE_STATUS_LOW, &ue_lo);
2323 pci_read_config_dword(adapter->pdev,
2324 PCICFG_UE_STATUS_HIGH, &ue_hi);
2325 pci_read_config_dword(adapter->pdev,
2326 PCICFG_UE_STATUS_LOW_MASK, &ue_lo_mask);
2327 pci_read_config_dword(adapter->pdev,
2328 PCICFG_UE_STATUS_HI_MASK, &ue_hi_mask);
Ajit Khaparde7c185272010-07-29 06:16:33 +00002329
Padmanabh Ratnakarf67ef7b2012-07-12 03:57:09 +00002330 ue_lo = (ue_lo & ~ue_lo_mask);
2331 ue_hi = (ue_hi & ~ue_hi_mask);
Padmanabh Ratnakare1cfb672011-11-03 01:50:08 +00002332 }
Ajit Khaparde7c185272010-07-29 06:16:33 +00002333
Ajit Khaparde1451ae62012-10-08 18:18:21 +00002334 /* On certain platforms BE hardware can indicate spurious UEs.
2335 * Allow the h/w to stop working completely in case of a real UE.
2336 * Hence not setting the hw_error for UE detection.
2337 */
2338 if (sliport_status & SLIPORT_STATUS_ERR_MASK) {
Padmanabh Ratnakarf67ef7b2012-07-12 03:57:09 +00002339 adapter->hw_error = true;
Sathya Perla434b3642011-11-10 19:17:59 +00002340 dev_err(&adapter->pdev->dev,
Padmanabh Ratnakarf67ef7b2012-07-12 03:57:09 +00002341 "Error detected in the card\n");
2342 }
2343
2344 if (sliport_status & SLIPORT_STATUS_ERR_MASK) {
2345 dev_err(&adapter->pdev->dev,
2346 "ERR: sliport status 0x%x\n", sliport_status);
2347 dev_err(&adapter->pdev->dev,
2348 "ERR: sliport error1 0x%x\n", sliport_err1);
2349 dev_err(&adapter->pdev->dev,
2350 "ERR: sliport error2 0x%x\n", sliport_err2);
Ajit Khaparded053de92010-09-03 06:23:30 +00002351 }
2352
Padmanabh Ratnakare1cfb672011-11-03 01:50:08 +00002353 if (ue_lo) {
2354 for (i = 0; ue_lo; ue_lo >>= 1, i++) {
2355 if (ue_lo & 1)
Ajit Khaparde7c185272010-07-29 06:16:33 +00002356 dev_err(&adapter->pdev->dev,
2357 "UE: %s bit set\n", ue_status_low_desc[i]);
2358 }
2359 }
Padmanabh Ratnakarf67ef7b2012-07-12 03:57:09 +00002360
Padmanabh Ratnakare1cfb672011-11-03 01:50:08 +00002361 if (ue_hi) {
2362 for (i = 0; ue_hi; ue_hi >>= 1, i++) {
2363 if (ue_hi & 1)
Ajit Khaparde7c185272010-07-29 06:16:33 +00002364 dev_err(&adapter->pdev->dev,
2365 "UE: %s bit set\n", ue_status_hi_desc[i]);
2366 }
2367 }
2368
2369}
2370
Sathya Perla8d56ff12009-11-22 22:02:26 +00002371static void be_msix_disable(struct be_adapter *adapter)
2372{
Sathya Perlaac6a0c42011-03-21 20:49:25 +00002373 if (msix_enabled(adapter)) {
Sathya Perla8d56ff12009-11-22 22:02:26 +00002374 pci_disable_msix(adapter->pdev);
Sathya Perlaac6a0c42011-03-21 20:49:25 +00002375 adapter->num_msix_vec = 0;
Sathya Perla3abcded2010-10-03 22:12:27 -07002376 }
2377}
2378
Sathya Perla10ef9ab2012-02-09 18:05:27 +00002379static uint be_num_rss_want(struct be_adapter *adapter)
2380{
Yuval Mintz30e80b52012-07-01 03:19:00 +00002381 u32 num = 0;
Padmanabh Ratnakarabb93952012-10-20 06:01:41 +00002382
Sathya Perla10ef9ab2012-02-09 18:05:27 +00002383 if ((adapter->function_caps & BE_FUNCTION_CAPS_RSS) &&
Padmanabh Ratnakarabb93952012-10-20 06:01:41 +00002384 (lancer_chip(adapter) ||
2385 (!sriov_want(adapter) && be_physfn(adapter)))) {
2386 num = adapter->max_rss_queues;
Yuval Mintz30e80b52012-07-01 03:19:00 +00002387 num = min_t(u32, num, (u32)netif_get_num_default_rss_queues());
2388 }
2389 return num;
Sathya Perla10ef9ab2012-02-09 18:05:27 +00002390}
2391
/* Enable MSI-X with as many vectors as the RSS configuration (and RoCE,
 * if supported) calls for. pci_enable_msix() returning a positive value
 * means "only this many vectors available", so one retry is made with
 * the reduced count. On total failure: VFs must fail the probe (INTx is
 * not supported there), PFs fall back to INTx by returning 0.
 */
static int be_msix_enable(struct be_adapter *adapter)
{
#define BE_MIN_MSIX_VECTORS 1
	int i, status, num_vec, num_roce_vec = 0;
	struct device *dev = &adapter->pdev->dev;

	/* If RSS queues are not used, need a vec for default RX Q */
	num_vec = min(be_num_rss_want(adapter), num_online_cpus());
	if (be_roce_supported(adapter)) {
		num_roce_vec = min_t(u32, MAX_ROCE_MSIX_VECTORS,
					(num_online_cpus() + 1));
		num_roce_vec = min(num_roce_vec, MAX_ROCE_EQS);
		num_vec += num_roce_vec;
		num_vec = min(num_vec, MAX_MSIX_VECTORS);
	}
	num_vec = max(num_vec, BE_MIN_MSIX_VECTORS);

	for (i = 0; i < num_vec; i++)
		adapter->msix_entries[i].entry = i;

	status = pci_enable_msix(adapter->pdev, adapter->msix_entries, num_vec);
	if (status == 0) {
		goto done;
	} else if (status >= BE_MIN_MSIX_VECTORS) {
		/* positive status = number of vectors actually available */
		num_vec = status;
		status = pci_enable_msix(adapter->pdev, adapter->msix_entries,
					 num_vec);
		if (!status)
			goto done;
	}

	dev_warn(dev, "MSIx enable failed\n");
	/* INTx is not supported in VFs, so fail probe if enable_msix fails */
	if (!be_physfn(adapter))
		return status;
	return 0;
done:
	/* split the granted vectors between NIC and RoCE usage */
	if (be_roce_supported(adapter)) {
		if (num_vec > num_roce_vec) {
			adapter->num_msix_vec = num_vec - num_roce_vec;
			adapter->num_msix_roce_vec =
				num_vec - adapter->num_msix_vec;
		} else {
			adapter->num_msix_vec = num_vec;
			adapter->num_msix_roce_vec = 0;
		}
	} else
		adapter->num_msix_vec = num_vec;
	dev_info(dev, "enabled %d MSI-x vector(s)\n", adapter->num_msix_vec);
	return 0;
}
2443
/* Return the MSI-X vector assigned to the given EQ (indexed by eqo->idx). */
static inline int be_msix_vec_get(struct be_adapter *adapter,
				struct be_eq_obj *eqo)
{
	return adapter->msix_entries[eqo->idx].vector;
}
2449
/* Request one MSI-X irq per event queue, naming each "<netdev>-q<i>".
 * On failure, frees the irqs already requested (walking back from the
 * failing index) and disables MSI-X before returning the error.
 */
static int be_msix_register(struct be_adapter *adapter)
{
	struct net_device *netdev = adapter->netdev;
	struct be_eq_obj *eqo;
	int status, i, vec;

	for_all_evt_queues(adapter, eqo, i) {
		sprintf(eqo->desc, "%s-q%d", netdev->name, i);
		vec = be_msix_vec_get(adapter, eqo);
		status = request_irq(vec, be_msix, 0, eqo->desc, eqo);
		if (status)
			goto err_msix;
	}

	return 0;
err_msix:
	/* unwind: release irqs granted before the failure */
	for (i--, eqo = &adapter->eq_obj[i]; i >= 0; i--, eqo--)
		free_irq(be_msix_vec_get(adapter, eqo), eqo);
	dev_warn(&adapter->pdev->dev, "MSIX Request IRQ failed - err %d\n",
		status);
	be_msix_disable(adapter);
	return status;
}
2473
/* Register interrupts: prefer MSI-X, falling back to shared INTx on the
 * PF when MSI-X registration fails (VFs have no INTx and must error out).
 * Sets adapter->isr_registered on success.
 */
static int be_irq_register(struct be_adapter *adapter)
{
	struct net_device *netdev = adapter->netdev;
	int status;

	if (msix_enabled(adapter)) {
		status = be_msix_register(adapter);
		if (status == 0)
			goto done;
		/* INTx is not supported for VF */
		if (!be_physfn(adapter))
			return status;
	}

	/* INTx: only the first EQ is used */
	netdev->irq = adapter->pdev->irq;
	status = request_irq(netdev->irq, be_intx, IRQF_SHARED, netdev->name,
			     &adapter->eq_obj[0]);
	if (status) {
		dev_err(&adapter->pdev->dev,
			"INTx request IRQ failed - err %d\n", status);
		return status;
	}
done:
	adapter->isr_registered = true;
	return 0;
}
2501
/* Release whatever interrupts be_irq_register() acquired: the single
 * shared INTx line, or one MSI-X irq per event queue. No-op when no ISR
 * is registered. Clears adapter->isr_registered.
 */
static void be_irq_unregister(struct be_adapter *adapter)
{
	struct net_device *netdev = adapter->netdev;
	struct be_eq_obj *eqo;
	int i;

	if (!adapter->isr_registered)
		return;

	/* INTx */
	if (!msix_enabled(adapter)) {
		free_irq(netdev->irq, &adapter->eq_obj[0]);
		goto done;
	}

	/* MSIx */
	for_all_evt_queues(adapter, eqo, i)
		free_irq(be_msix_vec_get(adapter, eqo), eqo);

done:
	adapter->isr_registered = false;
}
2524
/* Destroy all RX WRB queues. For queues created in FW, the FW destroy is
 * issued first and the corresponding CQ is drained (be_rx_cq_clean)
 * before the host memory is freed.
 */
static void be_rx_qs_destroy(struct be_adapter *adapter)
{
	struct be_queue_info *q;
	struct be_rx_obj *rxo;
	int i;

	for_all_rx_queues(adapter, rxo, i) {
		q = &rxo->q;
		if (q->created) {
			be_cmd_rxq_destroy(adapter, q);
			be_rx_cq_clean(rxo);
		}
		be_queue_free(adapter, q);
	}
}
2540
/* ndo_stop: quiesce the interface in careful order — RoCE first, then
 * NAPI, async MCC, TX drain, RX teardown, per-EQ irq synchronization and
 * cleanup, and finally irq release. Always returns 0.
 */
static int be_close(struct net_device *netdev)
{
	struct be_adapter *adapter = netdev_priv(netdev);
	struct be_eq_obj *eqo;
	int i;

	be_roce_dev_close(adapter);

	/* stop NAPI only if be_open() actually enabled it */
	if (adapter->flags & BE_FLAGS_NAPI_ENABLED) {
		for_all_evt_queues(adapter, eqo, i)
			napi_disable(&eqo->napi);
		adapter->flags &= ~BE_FLAGS_NAPI_ENABLED;
	}

	be_async_mcc_disable(adapter);

	/* Wait for all pending tx completions to arrive so that
	 * all tx skbs are freed.
	 */
	be_tx_compl_clean(adapter);
	netif_tx_disable(netdev);

	be_rx_qs_destroy(adapter);

	for_all_evt_queues(adapter, eqo, i) {
		/* make sure no in-flight handler still references the EQ */
		if (msix_enabled(adapter))
			synchronize_irq(be_msix_vec_get(adapter, eqo));
		else
			synchronize_irq(netdev->irq);
		be_eq_clean(eqo);
	}

	be_irq_unregister(adapter);

	return 0;
}
2577
/* Allocate and create all RX WRB queues, program the 128-entry RSS
 * indirection table (RSS queue ids striped round-robin across the RSS
 * rings) when more than one RX queue exists, and post the initial RX
 * frags. Returns 0 or the first failing status.
 */
static int be_rx_qs_create(struct be_adapter *adapter)
{
	struct be_rx_obj *rxo;
	int rc, i, j;
	u8 rsstable[128];	/* RSS indirection table, 128 entries */

	for_all_rx_queues(adapter, rxo, i) {
		rc = be_queue_alloc(adapter, &rxo->q, RX_Q_LEN,
				sizeof(struct be_eth_rx_d));
		if (rc)
			return rc;
	}

	/* The FW would like the default RXQ to be created first */
	rxo = default_rxo(adapter);
	rc = be_cmd_rxq_create(adapter, &rxo->q, rxo->cq.id, rx_frag_size,
			adapter->if_handle, false, &rxo->rss_id);
	if (rc)
		return rc;

	for_all_rss_queues(adapter, rxo, i) {
		rc = be_cmd_rxq_create(adapter, &rxo->q, rxo->cq.id,
				rx_frag_size, adapter->if_handle,
				true, &rxo->rss_id);
		if (rc)
			return rc;
	}

	if (be_multi_rxq(adapter)) {
		/* fill the indirection table with the rss_ids, cycling
		 * through the RSS queues until all 128 slots are set
		 */
		for (j = 0; j < 128; j += adapter->num_rx_qs - 1) {
			for_all_rss_queues(adapter, rxo, i) {
				if ((j + i) >= 128)
					break;
				rsstable[j + i] = rxo->rss_id;
			}
		}
		adapter->rss_flags = RSS_ENABLE_TCP_IPV4 | RSS_ENABLE_IPV4 |
					RSS_ENABLE_TCP_IPV6 | RSS_ENABLE_IPV6;

		/* UDP RSS is enabled only on non-BEx chips */
		if (!BEx_chip(adapter))
			adapter->rss_flags |= RSS_ENABLE_UDP_IPV4 |
						RSS_ENABLE_UDP_IPV6;

		rc = be_cmd_rss_config(adapter, rsstable, adapter->rss_flags,
				       128);
		if (rc) {
			adapter->rss_flags = 0;
			return rc;
		}
	}

	/* First time posting */
	for_all_rx_queues(adapter, rxo, i)
		be_post_rx_frags(rxo, GFP_KERNEL);
	return 0;
}
2634
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002635static int be_open(struct net_device *netdev)
2636{
2637 struct be_adapter *adapter = netdev_priv(netdev);
Sathya Perla10ef9ab2012-02-09 18:05:27 +00002638 struct be_eq_obj *eqo;
Sathya Perla3abcded2010-10-03 22:12:27 -07002639 struct be_rx_obj *rxo;
Sathya Perla10ef9ab2012-02-09 18:05:27 +00002640 struct be_tx_obj *txo;
Ajit Khapardeb236916a2011-12-30 12:15:40 +00002641 u8 link_status;
Sathya Perla3abcded2010-10-03 22:12:27 -07002642 int status, i;
Sathya Perla5fb379e2009-06-18 00:02:59 +00002643
Sathya Perla10ef9ab2012-02-09 18:05:27 +00002644 status = be_rx_qs_create(adapter);
Sathya Perla482c9e72011-06-29 23:33:17 +00002645 if (status)
2646 goto err;
2647
Somnath Koturc2bba3d2013-05-02 03:37:08 +00002648 status = be_irq_register(adapter);
2649 if (status)
2650 goto err;
Sathya Perla5fb379e2009-06-18 00:02:59 +00002651
Sathya Perla10ef9ab2012-02-09 18:05:27 +00002652 for_all_rx_queues(adapter, rxo, i)
Sathya Perla3abcded2010-10-03 22:12:27 -07002653 be_cq_notify(adapter, rxo->cq.id, true, 0);
Sathya Perla5fb379e2009-06-18 00:02:59 +00002654
Sathya Perla10ef9ab2012-02-09 18:05:27 +00002655 for_all_tx_queues(adapter, txo, i)
2656 be_cq_notify(adapter, txo->cq.id, true, 0);
2657
Sathya Perla7a1e9b22010-02-17 01:35:11 +00002658 be_async_mcc_enable(adapter);
2659
Sathya Perla10ef9ab2012-02-09 18:05:27 +00002660 for_all_evt_queues(adapter, eqo, i) {
2661 napi_enable(&eqo->napi);
2662 be_eq_notify(adapter, eqo->q.id, true, false, 0);
2663 }
Somnath Kotur04d3d622013-05-02 03:36:55 +00002664 adapter->flags |= BE_FLAGS_NAPI_ENABLED;
Sathya Perla10ef9ab2012-02-09 18:05:27 +00002665
Sathya Perla323ff712012-09-28 04:39:43 +00002666 status = be_cmd_link_status_query(adapter, NULL, &link_status, 0);
Ajit Khapardeb236916a2011-12-30 12:15:40 +00002667 if (!status)
2668 be_link_status_update(adapter, link_status);
2669
Sathya Perlafba87552013-05-08 02:05:50 +00002670 netif_tx_start_all_queues(netdev);
Parav Pandit045508a2012-03-26 14:27:13 +00002671 be_roce_dev_open(adapter);
Sathya Perla889cd4b2010-05-30 23:33:45 +00002672 return 0;
2673err:
2674 be_close(adapter->netdev);
2675 return -EIO;
Sathya Perla5fb379e2009-06-18 00:02:59 +00002676}
2677
Ajit Khaparde71d8d1b2009-12-03 06:16:59 +00002678static int be_setup_wol(struct be_adapter *adapter, bool enable)
2679{
2680 struct be_dma_mem cmd;
2681 int status = 0;
2682 u8 mac[ETH_ALEN];
2683
2684 memset(mac, 0, ETH_ALEN);
2685
2686 cmd.size = sizeof(struct be_cmd_req_acpi_wol_magic_config);
Ivan Vecera2b7bceb2011-02-02 08:05:12 +00002687 cmd.va = dma_alloc_coherent(&adapter->pdev->dev, cmd.size, &cmd.dma,
Joe Perches1f9061d22013-03-15 07:23:58 +00002688 GFP_KERNEL | __GFP_ZERO);
Ajit Khaparde71d8d1b2009-12-03 06:16:59 +00002689 if (cmd.va == NULL)
2690 return -1;
Ajit Khaparde71d8d1b2009-12-03 06:16:59 +00002691
2692 if (enable) {
2693 status = pci_write_config_dword(adapter->pdev,
2694 PCICFG_PM_CONTROL_OFFSET, PCICFG_PM_CONTROL_MASK);
2695 if (status) {
2696 dev_err(&adapter->pdev->dev,
Frans Pop2381a552010-03-24 07:57:36 +00002697 "Could not enable Wake-on-lan\n");
Ivan Vecera2b7bceb2011-02-02 08:05:12 +00002698 dma_free_coherent(&adapter->pdev->dev, cmd.size, cmd.va,
2699 cmd.dma);
Ajit Khaparde71d8d1b2009-12-03 06:16:59 +00002700 return status;
2701 }
2702 status = be_cmd_enable_magic_wol(adapter,
2703 adapter->netdev->dev_addr, &cmd);
2704 pci_enable_wake(adapter->pdev, PCI_D3hot, 1);
2705 pci_enable_wake(adapter->pdev, PCI_D3cold, 1);
2706 } else {
2707 status = be_cmd_enable_magic_wol(adapter, mac, &cmd);
2708 pci_enable_wake(adapter->pdev, PCI_D3hot, 0);
2709 pci_enable_wake(adapter->pdev, PCI_D3cold, 0);
2710 }
2711
Ivan Vecera2b7bceb2011-02-02 08:05:12 +00002712 dma_free_coherent(&adapter->pdev->dev, cmd.size, cmd.va, cmd.dma);
Ajit Khaparde71d8d1b2009-12-03 06:16:59 +00002713 return status;
2714}
2715
Ajit Khaparde6d87f5c2010-08-25 00:32:33 +00002716/*
2717 * Generate a seed MAC address from the PF MAC Address using jhash.
2718 * MAC Address for VFs are assigned incrementally starting from the seed.
2719 * These addresses are programmed in the ASIC by the PF and the VF driver
2720 * queries for the MAC address during its probe.
2721 */
Sathya Perla4c876612013-02-03 20:30:11 +00002722static int be_vf_eth_addr_config(struct be_adapter *adapter)
Ajit Khaparde6d87f5c2010-08-25 00:32:33 +00002723{
Sathya Perlaf9449ab2011-10-24 02:45:01 +00002724 u32 vf;
Sathya Perla3abcded2010-10-03 22:12:27 -07002725 int status = 0;
Ajit Khaparde6d87f5c2010-08-25 00:32:33 +00002726 u8 mac[ETH_ALEN];
Sathya Perla11ac75e2011-12-13 00:58:50 +00002727 struct be_vf_cfg *vf_cfg;
Ajit Khaparde6d87f5c2010-08-25 00:32:33 +00002728
2729 be_vf_eth_addr_generate(adapter, mac);
2730
Sathya Perla11ac75e2011-12-13 00:58:50 +00002731 for_all_vfs(adapter, vf_cfg, vf) {
Padmanabh Ratnakar590c3912011-11-25 05:47:26 +00002732 if (lancer_chip(adapter)) {
2733 status = be_cmd_set_mac_list(adapter, mac, 1, vf + 1);
2734 } else {
2735 status = be_cmd_pmac_add(adapter, mac,
Sathya Perla11ac75e2011-12-13 00:58:50 +00002736 vf_cfg->if_handle,
2737 &vf_cfg->pmac_id, vf + 1);
Padmanabh Ratnakar590c3912011-11-25 05:47:26 +00002738 }
2739
Ajit Khaparde6d87f5c2010-08-25 00:32:33 +00002740 if (status)
2741 dev_err(&adapter->pdev->dev,
Padmanabh Ratnakar590c3912011-11-25 05:47:26 +00002742 "Mac address assignment failed for VF %d\n", vf);
Ajit Khaparde6d87f5c2010-08-25 00:32:33 +00002743 else
Sathya Perla11ac75e2011-12-13 00:58:50 +00002744 memcpy(vf_cfg->mac_addr, mac, ETH_ALEN);
Ajit Khaparde6d87f5c2010-08-25 00:32:33 +00002745
2746 mac[5] += 1;
2747 }
2748 return status;
2749}
2750
Sathya Perla4c876612013-02-03 20:30:11 +00002751static int be_vfs_mac_query(struct be_adapter *adapter)
2752{
2753 int status, vf;
2754 u8 mac[ETH_ALEN];
2755 struct be_vf_cfg *vf_cfg;
2756 bool active;
2757
2758 for_all_vfs(adapter, vf_cfg, vf) {
2759 be_cmd_get_mac_from_list(adapter, mac, &active,
2760 &vf_cfg->pmac_id, 0);
2761
2762 status = be_cmd_mac_addr_query(adapter, mac, false,
2763 vf_cfg->if_handle, 0);
2764 if (status)
2765 return status;
2766 memcpy(vf_cfg->mac_addr, mac, ETH_ALEN);
2767 }
2768 return 0;
2769}
2770
Sathya Perlaf9449ab2011-10-24 02:45:01 +00002771static void be_vf_clear(struct be_adapter *adapter)
Ajit Khaparde6d87f5c2010-08-25 00:32:33 +00002772{
Sathya Perla11ac75e2011-12-13 00:58:50 +00002773 struct be_vf_cfg *vf_cfg;
Ajit Khaparde6d87f5c2010-08-25 00:32:33 +00002774 u32 vf;
2775
Sathya Perla257a3fe2013-06-14 15:54:51 +05302776 if (pci_vfs_assigned(adapter->pdev)) {
Sathya Perla4c876612013-02-03 20:30:11 +00002777 dev_warn(&adapter->pdev->dev,
2778 "VFs are assigned to VMs: not disabling VFs\n");
Sathya Perla39f1d942012-05-08 19:41:24 +00002779 goto done;
2780 }
2781
Sathya Perlab4c1df92013-05-08 02:05:47 +00002782 pci_disable_sriov(adapter->pdev);
2783
Sathya Perla11ac75e2011-12-13 00:58:50 +00002784 for_all_vfs(adapter, vf_cfg, vf) {
Padmanabh Ratnakar590c3912011-11-25 05:47:26 +00002785 if (lancer_chip(adapter))
2786 be_cmd_set_mac_list(adapter, NULL, 0, vf + 1);
2787 else
Sathya Perla11ac75e2011-12-13 00:58:50 +00002788 be_cmd_pmac_del(adapter, vf_cfg->if_handle,
2789 vf_cfg->pmac_id, vf + 1);
Sathya Perlaf9449ab2011-10-24 02:45:01 +00002790
Sathya Perla11ac75e2011-12-13 00:58:50 +00002791 be_cmd_if_destroy(adapter, vf_cfg->if_handle, vf + 1);
2792 }
Sathya Perla39f1d942012-05-08 19:41:24 +00002793done:
2794 kfree(adapter->vf_cfg);
2795 adapter->num_vfs = 0;
Ajit Khaparde6d87f5c2010-08-25 00:32:33 +00002796}
2797
Sathya Perlaa54769f2011-10-24 02:45:00 +00002798static int be_clear(struct be_adapter *adapter)
2799{
Ajit Khapardefbc13f02012-03-18 06:23:21 +00002800 int i = 1;
2801
Sathya Perla191eb752012-02-23 18:50:13 +00002802 if (adapter->flags & BE_FLAGS_WORKER_SCHEDULED) {
2803 cancel_delayed_work_sync(&adapter->work);
2804 adapter->flags &= ~BE_FLAGS_WORKER_SCHEDULED;
2805 }
2806
Sathya Perla11ac75e2011-12-13 00:58:50 +00002807 if (sriov_enabled(adapter))
Sathya Perlaf9449ab2011-10-24 02:45:01 +00002808 be_vf_clear(adapter);
2809
Ajit Khapardefbc13f02012-03-18 06:23:21 +00002810 for (; adapter->uc_macs > 0; adapter->uc_macs--, i++)
2811 be_cmd_pmac_del(adapter, adapter->if_handle,
2812 adapter->pmac_id[i], 0);
2813
Sathya Perlaf9449ab2011-10-24 02:45:01 +00002814 be_cmd_if_destroy(adapter, adapter->if_handle, 0);
Sathya Perlaa54769f2011-10-24 02:45:00 +00002815
2816 be_mcc_queues_destroy(adapter);
Sathya Perla10ef9ab2012-02-09 18:05:27 +00002817 be_rx_cqs_destroy(adapter);
Sathya Perlaa54769f2011-10-24 02:45:00 +00002818 be_tx_queues_destroy(adapter);
Sathya Perla10ef9ab2012-02-09 18:05:27 +00002819 be_evt_queues_destroy(adapter);
Sathya Perlaa54769f2011-10-24 02:45:00 +00002820
Padmanabh Ratnakarabb93952012-10-20 06:01:41 +00002821 kfree(adapter->pmac_id);
2822 adapter->pmac_id = NULL;
2823
Sathya Perla10ef9ab2012-02-09 18:05:27 +00002824 be_msix_disable(adapter);
Sathya Perlaa54769f2011-10-24 02:45:00 +00002825 return 0;
2826}
2827
Sathya Perla4c876612013-02-03 20:30:11 +00002828static int be_vfs_if_create(struct be_adapter *adapter)
Padmanabh Ratnakarabb93952012-10-20 06:01:41 +00002829{
Sathya Perla4c876612013-02-03 20:30:11 +00002830 struct be_vf_cfg *vf_cfg;
2831 u32 cap_flags, en_flags, vf;
Padmanabh Ratnakarabb93952012-10-20 06:01:41 +00002832 int status;
2833
Sathya Perla4c876612013-02-03 20:30:11 +00002834 cap_flags = BE_IF_FLAGS_UNTAGGED | BE_IF_FLAGS_BROADCAST |
2835 BE_IF_FLAGS_MULTICAST;
Padmanabh Ratnakarabb93952012-10-20 06:01:41 +00002836
Sathya Perla4c876612013-02-03 20:30:11 +00002837 for_all_vfs(adapter, vf_cfg, vf) {
2838 if (!BE3_chip(adapter))
Vasundhara Volama05f99d2013-04-21 23:28:17 +00002839 be_cmd_get_profile_config(adapter, &cap_flags,
2840 NULL, vf + 1);
Sathya Perla4c876612013-02-03 20:30:11 +00002841
2842 /* If a FW profile exists, then cap_flags are updated */
2843 en_flags = cap_flags & (BE_IF_FLAGS_UNTAGGED |
2844 BE_IF_FLAGS_BROADCAST | BE_IF_FLAGS_MULTICAST);
2845 status = be_cmd_if_create(adapter, cap_flags, en_flags,
2846 &vf_cfg->if_handle, vf + 1);
2847 if (status)
2848 goto err;
2849 }
2850err:
2851 return status;
Padmanabh Ratnakarabb93952012-10-20 06:01:41 +00002852}
2853
Sathya Perla39f1d942012-05-08 19:41:24 +00002854static int be_vf_setup_init(struct be_adapter *adapter)
Sathya Perla30128032011-11-10 19:17:57 +00002855{
Sathya Perla11ac75e2011-12-13 00:58:50 +00002856 struct be_vf_cfg *vf_cfg;
Sathya Perla30128032011-11-10 19:17:57 +00002857 int vf;
2858
Sathya Perla39f1d942012-05-08 19:41:24 +00002859 adapter->vf_cfg = kcalloc(adapter->num_vfs, sizeof(*vf_cfg),
2860 GFP_KERNEL);
2861 if (!adapter->vf_cfg)
2862 return -ENOMEM;
2863
Sathya Perla11ac75e2011-12-13 00:58:50 +00002864 for_all_vfs(adapter, vf_cfg, vf) {
2865 vf_cfg->if_handle = -1;
2866 vf_cfg->pmac_id = -1;
Sathya Perla30128032011-11-10 19:17:57 +00002867 }
Sathya Perla39f1d942012-05-08 19:41:24 +00002868 return 0;
Sathya Perla30128032011-11-10 19:17:57 +00002869}
2870
Sathya Perlaf9449ab2011-10-24 02:45:01 +00002871static int be_vf_setup(struct be_adapter *adapter)
2872{
Sathya Perla11ac75e2011-12-13 00:58:50 +00002873 struct be_vf_cfg *vf_cfg;
Ajit Khapardef1f3ee12012-03-18 06:23:41 +00002874 u16 def_vlan, lnk_speed;
Sathya Perla4c876612013-02-03 20:30:11 +00002875 int status, old_vfs, vf;
2876 struct device *dev = &adapter->pdev->dev;
Sathya Perlaf9449ab2011-10-24 02:45:01 +00002877
Sathya Perla257a3fe2013-06-14 15:54:51 +05302878 old_vfs = pci_num_vf(adapter->pdev);
Sathya Perla4c876612013-02-03 20:30:11 +00002879 if (old_vfs) {
2880 dev_info(dev, "%d VFs are already enabled\n", old_vfs);
2881 if (old_vfs != num_vfs)
2882 dev_warn(dev, "Ignoring num_vfs=%d setting\n", num_vfs);
2883 adapter->num_vfs = old_vfs;
Sathya Perla39f1d942012-05-08 19:41:24 +00002884 } else {
Sathya Perla4c876612013-02-03 20:30:11 +00002885 if (num_vfs > adapter->dev_num_vfs)
2886 dev_info(dev, "Device supports %d VFs and not %d\n",
2887 adapter->dev_num_vfs, num_vfs);
2888 adapter->num_vfs = min_t(u16, num_vfs, adapter->dev_num_vfs);
Sathya Perlab4c1df92013-05-08 02:05:47 +00002889 if (!adapter->num_vfs)
Sathya Perla4c876612013-02-03 20:30:11 +00002890 return 0;
Sathya Perla39f1d942012-05-08 19:41:24 +00002891 }
2892
2893 status = be_vf_setup_init(adapter);
2894 if (status)
2895 goto err;
Sathya Perla30128032011-11-10 19:17:57 +00002896
Sathya Perla4c876612013-02-03 20:30:11 +00002897 if (old_vfs) {
2898 for_all_vfs(adapter, vf_cfg, vf) {
2899 status = be_cmd_get_if_id(adapter, vf_cfg, vf);
2900 if (status)
2901 goto err;
2902 }
2903 } else {
2904 status = be_vfs_if_create(adapter);
Sathya Perlaf9449ab2011-10-24 02:45:01 +00002905 if (status)
2906 goto err;
Sathya Perlaf9449ab2011-10-24 02:45:01 +00002907 }
2908
Sathya Perla4c876612013-02-03 20:30:11 +00002909 if (old_vfs) {
2910 status = be_vfs_mac_query(adapter);
2911 if (status)
2912 goto err;
2913 } else {
Sathya Perla39f1d942012-05-08 19:41:24 +00002914 status = be_vf_eth_addr_config(adapter);
2915 if (status)
2916 goto err;
2917 }
Sathya Perlaf9449ab2011-10-24 02:45:01 +00002918
Sathya Perla11ac75e2011-12-13 00:58:50 +00002919 for_all_vfs(adapter, vf_cfg, vf) {
Sathya Perla4c876612013-02-03 20:30:11 +00002920 /* BE3 FW, by default, caps VF TX-rate to 100mbps.
2921 * Allow full available bandwidth
2922 */
2923 if (BE3_chip(adapter) && !old_vfs)
2924 be_cmd_set_qos(adapter, 1000, vf+1);
2925
2926 status = be_cmd_link_status_query(adapter, &lnk_speed,
2927 NULL, vf + 1);
2928 if (!status)
2929 vf_cfg->tx_rate = lnk_speed;
Ajit Khapardef1f3ee12012-03-18 06:23:41 +00002930
2931 status = be_cmd_get_hsw_config(adapter, &def_vlan,
Sathya Perla4c876612013-02-03 20:30:11 +00002932 vf + 1, vf_cfg->if_handle);
Ajit Khapardef1f3ee12012-03-18 06:23:41 +00002933 if (status)
2934 goto err;
2935 vf_cfg->def_vid = def_vlan;
Padmanabh Ratnakardcf7ebb2012-10-20 06:03:49 +00002936
2937 be_cmd_enable_vf(adapter, vf + 1);
Sathya Perlaf9449ab2011-10-24 02:45:01 +00002938 }
Sathya Perlab4c1df92013-05-08 02:05:47 +00002939
2940 if (!old_vfs) {
2941 status = pci_enable_sriov(adapter->pdev, adapter->num_vfs);
2942 if (status) {
2943 dev_err(dev, "SRIOV enable failed\n");
2944 adapter->num_vfs = 0;
2945 goto err;
2946 }
2947 }
Sathya Perlaf9449ab2011-10-24 02:45:01 +00002948 return 0;
2949err:
Sathya Perla4c876612013-02-03 20:30:11 +00002950 dev_err(dev, "VF setup failed\n");
2951 be_vf_clear(adapter);
Sathya Perlaf9449ab2011-10-24 02:45:01 +00002952 return status;
2953}
2954
Sathya Perla30128032011-11-10 19:17:57 +00002955static void be_setup_init(struct be_adapter *adapter)
2956{
2957 adapter->vlan_prio_bmap = 0xff;
Ajit Khaparde42f11cf2012-04-21 18:53:22 +00002958 adapter->phy.link_speed = -1;
Sathya Perla30128032011-11-10 19:17:57 +00002959 adapter->if_handle = -1;
2960 adapter->be3_native = false;
2961 adapter->promiscuous = false;
Padmanabh Ratnakarf25b1192012-10-20 06:02:52 +00002962 if (be_physfn(adapter))
2963 adapter->cmd_privileges = MAX_PRIVILEGES;
2964 else
2965 adapter->cmd_privileges = MIN_PRIVILEGES;
Sathya Perla30128032011-11-10 19:17:57 +00002966}
2967
Padmanabh Ratnakar1578e772012-06-07 04:37:08 +00002968static int be_get_mac_addr(struct be_adapter *adapter, u8 *mac, u32 if_handle,
2969 bool *active_mac, u32 *pmac_id)
Padmanabh Ratnakar590c3912011-11-25 05:47:26 +00002970{
Padmanabh Ratnakar1578e772012-06-07 04:37:08 +00002971 int status = 0;
Padmanabh Ratnakare5e1ee82012-02-03 09:50:17 +00002972
Padmanabh Ratnakar1578e772012-06-07 04:37:08 +00002973 if (!is_zero_ether_addr(adapter->netdev->perm_addr)) {
2974 memcpy(mac, adapter->netdev->dev_addr, ETH_ALEN);
2975 if (!lancer_chip(adapter) && !be_physfn(adapter))
2976 *active_mac = true;
2977 else
2978 *active_mac = false;
Padmanabh Ratnakare5e1ee82012-02-03 09:50:17 +00002979
Padmanabh Ratnakar1578e772012-06-07 04:37:08 +00002980 return status;
Padmanabh Ratnakare5e1ee82012-02-03 09:50:17 +00002981 }
Padmanabh Ratnakar1578e772012-06-07 04:37:08 +00002982
2983 if (lancer_chip(adapter)) {
2984 status = be_cmd_get_mac_from_list(adapter, mac,
2985 active_mac, pmac_id, 0);
2986 if (*active_mac) {
Sathya Perla5ee49792012-09-28 04:39:41 +00002987 status = be_cmd_mac_addr_query(adapter, mac, false,
2988 if_handle, *pmac_id);
Padmanabh Ratnakar1578e772012-06-07 04:37:08 +00002989 }
2990 } else if (be_physfn(adapter)) {
2991 /* For BE3, for PF get permanent MAC */
Sathya Perla5ee49792012-09-28 04:39:41 +00002992 status = be_cmd_mac_addr_query(adapter, mac, true, 0, 0);
Padmanabh Ratnakar1578e772012-06-07 04:37:08 +00002993 *active_mac = false;
2994 } else {
2995 /* For BE3, for VF get soft MAC assigned by PF*/
Sathya Perla5ee49792012-09-28 04:39:41 +00002996 status = be_cmd_mac_addr_query(adapter, mac, false,
Padmanabh Ratnakar1578e772012-06-07 04:37:08 +00002997 if_handle, 0);
2998 *active_mac = true;
2999 }
Padmanabh Ratnakar590c3912011-11-25 05:47:26 +00003000 return status;
3001}
3002
Padmanabh Ratnakarabb93952012-10-20 06:01:41 +00003003static void be_get_resources(struct be_adapter *adapter)
3004{
Sathya Perla4c876612013-02-03 20:30:11 +00003005 u16 dev_num_vfs;
3006 int pos, status;
Padmanabh Ratnakarabb93952012-10-20 06:01:41 +00003007 bool profile_present = false;
Vasundhara Volama05f99d2013-04-21 23:28:17 +00003008 u16 txq_count = 0;
Padmanabh Ratnakarabb93952012-10-20 06:01:41 +00003009
Sathya Perla4c876612013-02-03 20:30:11 +00003010 if (!BEx_chip(adapter)) {
Padmanabh Ratnakarabb93952012-10-20 06:01:41 +00003011 status = be_cmd_get_func_config(adapter);
Padmanabh Ratnakarabb93952012-10-20 06:01:41 +00003012 if (!status)
3013 profile_present = true;
Vasundhara Volama05f99d2013-04-21 23:28:17 +00003014 } else if (BE3_chip(adapter) && be_physfn(adapter)) {
3015 be_cmd_get_profile_config(adapter, NULL, &txq_count, 0);
Padmanabh Ratnakarabb93952012-10-20 06:01:41 +00003016 }
3017
3018 if (profile_present) {
3019 /* Sanity fixes for Lancer */
3020 adapter->max_pmac_cnt = min_t(u16, adapter->max_pmac_cnt,
3021 BE_UC_PMAC_COUNT);
3022 adapter->max_vlans = min_t(u16, adapter->max_vlans,
3023 BE_NUM_VLANS_SUPPORTED);
3024 adapter->max_mcast_mac = min_t(u16, adapter->max_mcast_mac,
3025 BE_MAX_MC);
3026 adapter->max_tx_queues = min_t(u16, adapter->max_tx_queues,
3027 MAX_TX_QS);
3028 adapter->max_rss_queues = min_t(u16, adapter->max_rss_queues,
3029 BE3_MAX_RSS_QS);
3030 adapter->max_event_queues = min_t(u16,
3031 adapter->max_event_queues,
3032 BE3_MAX_RSS_QS);
3033
3034 if (adapter->max_rss_queues &&
3035 adapter->max_rss_queues == adapter->max_rx_queues)
3036 adapter->max_rss_queues -= 1;
3037
3038 if (adapter->max_event_queues < adapter->max_rss_queues)
3039 adapter->max_rss_queues = adapter->max_event_queues;
3040
3041 } else {
3042 if (be_physfn(adapter))
3043 adapter->max_pmac_cnt = BE_UC_PMAC_COUNT;
3044 else
3045 adapter->max_pmac_cnt = BE_VF_UC_PMAC_COUNT;
3046
3047 if (adapter->function_mode & FLEX10_MODE)
3048 adapter->max_vlans = BE_NUM_VLANS_SUPPORTED/8;
3049 else
3050 adapter->max_vlans = BE_NUM_VLANS_SUPPORTED;
3051
3052 adapter->max_mcast_mac = BE_MAX_MC;
Vasundhara Volama05f99d2013-04-21 23:28:17 +00003053 adapter->max_tx_queues = txq_count ? txq_count : MAX_TX_QS;
3054 adapter->max_tx_queues = min_t(u16, adapter->max_tx_queues,
3055 MAX_TX_QS);
Padmanabh Ratnakarabb93952012-10-20 06:01:41 +00003056 adapter->max_rss_queues = (adapter->be3_native) ?
3057 BE3_MAX_RSS_QS : BE2_MAX_RSS_QS;
3058 adapter->max_event_queues = BE3_MAX_RSS_QS;
3059
3060 adapter->if_cap_flags = BE_IF_FLAGS_UNTAGGED |
3061 BE_IF_FLAGS_BROADCAST |
3062 BE_IF_FLAGS_MULTICAST |
3063 BE_IF_FLAGS_PASS_L3L4_ERRORS |
3064 BE_IF_FLAGS_MCAST_PROMISCUOUS |
3065 BE_IF_FLAGS_VLAN_PROMISCUOUS |
3066 BE_IF_FLAGS_PROMISCUOUS;
3067
3068 if (adapter->function_caps & BE_FUNCTION_CAPS_RSS)
3069 adapter->if_cap_flags |= BE_IF_FLAGS_RSS;
3070 }
Sathya Perla4c876612013-02-03 20:30:11 +00003071
3072 pos = pci_find_ext_capability(adapter->pdev, PCI_EXT_CAP_ID_SRIOV);
3073 if (pos) {
3074 pci_read_config_word(adapter->pdev, pos + PCI_SRIOV_TOTAL_VF,
3075 &dev_num_vfs);
3076 if (BE3_chip(adapter))
3077 dev_num_vfs = min_t(u16, dev_num_vfs, MAX_VFS);
3078 adapter->dev_num_vfs = dev_num_vfs;
3079 }
Padmanabh Ratnakarabb93952012-10-20 06:01:41 +00003080}
3081
Sathya Perla39f1d942012-05-08 19:41:24 +00003082/* Routine to query per function resource limits */
3083static int be_get_config(struct be_adapter *adapter)
3084{
Sathya Perla4c876612013-02-03 20:30:11 +00003085 int status;
Sathya Perla39f1d942012-05-08 19:41:24 +00003086
Padmanabh Ratnakarabb93952012-10-20 06:01:41 +00003087 status = be_cmd_query_fw_cfg(adapter, &adapter->port_num,
3088 &adapter->function_mode,
Vasundhara Volam0ad31572013-04-21 23:28:16 +00003089 &adapter->function_caps,
3090 &adapter->asic_rev);
Padmanabh Ratnakarabb93952012-10-20 06:01:41 +00003091 if (status)
3092 goto err;
3093
3094 be_get_resources(adapter);
3095
3096 /* primary mac needs 1 pmac entry */
3097 adapter->pmac_id = kcalloc(adapter->max_pmac_cnt + 1,
3098 sizeof(u32), GFP_KERNEL);
3099 if (!adapter->pmac_id) {
3100 status = -ENOMEM;
3101 goto err;
3102 }
3103
Padmanabh Ratnakarabb93952012-10-20 06:01:41 +00003104err:
3105 return status;
Sathya Perla39f1d942012-05-08 19:41:24 +00003106}
3107
Sathya Perla5fb379e2009-06-18 00:02:59 +00003108static int be_setup(struct be_adapter *adapter)
3109{
Sathya Perla39f1d942012-05-08 19:41:24 +00003110 struct device *dev = &adapter->pdev->dev;
Padmanabh Ratnakarabb93952012-10-20 06:01:41 +00003111 u32 en_flags;
Sathya Perlaa54769f2011-10-24 02:45:00 +00003112 u32 tx_fc, rx_fc;
Sathya Perla10ef9ab2012-02-09 18:05:27 +00003113 int status;
Sarveshwar Bandiba343c72010-03-31 02:56:12 +00003114 u8 mac[ETH_ALEN];
Padmanabh Ratnakar1578e772012-06-07 04:37:08 +00003115 bool active_mac;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07003116
Sathya Perla30128032011-11-10 19:17:57 +00003117 be_setup_init(adapter);
Sathya Perlaf9449ab2011-10-24 02:45:01 +00003118
Padmanabh Ratnakarabb93952012-10-20 06:01:41 +00003119 if (!lancer_chip(adapter))
3120 be_cmd_req_native_mode(adapter);
Sathya Perla39f1d942012-05-08 19:41:24 +00003121
Padmanabh Ratnakarabb93952012-10-20 06:01:41 +00003122 status = be_get_config(adapter);
3123 if (status)
3124 goto err;
Sathya Perla2dc1deb2011-07-19 19:52:33 +00003125
Somnath Koturc2bba3d2013-05-02 03:37:08 +00003126 status = be_msix_enable(adapter);
3127 if (status)
3128 goto err;
Sathya Perla10ef9ab2012-02-09 18:05:27 +00003129
3130 status = be_evt_queues_create(adapter);
3131 if (status)
Sathya Perlaa54769f2011-10-24 02:45:00 +00003132 goto err;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07003133
Sathya Perla10ef9ab2012-02-09 18:05:27 +00003134 status = be_tx_cqs_create(adapter);
3135 if (status)
3136 goto err;
3137
3138 status = be_rx_cqs_create(adapter);
3139 if (status)
Sathya Perlaa54769f2011-10-24 02:45:00 +00003140 goto err;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07003141
Sathya Perla5fb379e2009-06-18 00:02:59 +00003142 status = be_mcc_queues_create(adapter);
Sathya Perla10ef9ab2012-02-09 18:05:27 +00003143 if (status)
Sathya Perlaa54769f2011-10-24 02:45:00 +00003144 goto err;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07003145
Padmanabh Ratnakarf25b1192012-10-20 06:02:52 +00003146 be_cmd_get_fn_privileges(adapter, &adapter->cmd_privileges, 0);
3147 /* In UMC mode FW does not return right privileges.
3148 * Override with correct privilege equivalent to PF.
3149 */
3150 if (be_is_mc(adapter))
3151 adapter->cmd_privileges = MAX_PRIVILEGES;
3152
Sathya Perlaf9449ab2011-10-24 02:45:01 +00003153 en_flags = BE_IF_FLAGS_UNTAGGED | BE_IF_FLAGS_BROADCAST |
3154 BE_IF_FLAGS_MULTICAST | BE_IF_FLAGS_PASS_L3L4_ERRORS;
Padmanabh Ratnakar5d5adb92011-11-16 02:03:32 +00003155
Padmanabh Ratnakarabb93952012-10-20 06:01:41 +00003156 if (adapter->function_caps & BE_FUNCTION_CAPS_RSS)
Sathya Perlaf9449ab2011-10-24 02:45:01 +00003157 en_flags |= BE_IF_FLAGS_RSS;
Padmanabh Ratnakar1578e772012-06-07 04:37:08 +00003158
Padmanabh Ratnakarabb93952012-10-20 06:01:41 +00003159 en_flags = en_flags & adapter->if_cap_flags;
Padmanabh Ratnakar0b13fb42012-07-18 02:51:58 +00003160
Padmanabh Ratnakarabb93952012-10-20 06:01:41 +00003161 status = be_cmd_if_create(adapter, adapter->if_cap_flags, en_flags,
Padmanabh Ratnakar1578e772012-06-07 04:37:08 +00003162 &adapter->if_handle, 0);
Sathya Perlaf9449ab2011-10-24 02:45:01 +00003163 if (status != 0)
3164 goto err;
3165
Padmanabh Ratnakar1578e772012-06-07 04:37:08 +00003166 memset(mac, 0, ETH_ALEN);
3167 active_mac = false;
3168 status = be_get_mac_addr(adapter, mac, adapter->if_handle,
3169 &active_mac, &adapter->pmac_id[0]);
3170 if (status != 0)
3171 goto err;
3172
3173 if (!active_mac) {
3174 status = be_cmd_pmac_add(adapter, mac, adapter->if_handle,
3175 &adapter->pmac_id[0], 0);
3176 if (status != 0)
3177 goto err;
3178 }
3179
3180 if (is_zero_ether_addr(adapter->netdev->dev_addr)) {
3181 memcpy(adapter->netdev->dev_addr, mac, ETH_ALEN);
3182 memcpy(adapter->netdev->perm_addr, mac, ETH_ALEN);
Sathya Perlaf9449ab2011-10-24 02:45:01 +00003183 }
Ajit Khaparde0dffc832009-11-29 17:57:46 +00003184
Sathya Perla10ef9ab2012-02-09 18:05:27 +00003185 status = be_tx_qs_create(adapter);
3186 if (status)
3187 goto err;
3188
Somnath Kotureeb65ce2013-05-26 21:08:36 +00003189 be_cmd_get_fw_ver(adapter, adapter->fw_ver, adapter->fw_on_flash);
Somnath Kotur5a56eb12011-09-30 07:24:28 +00003190
Sathya Perla1d1e9a42012-06-05 19:37:17 +00003191 if (adapter->vlans_added)
Sathya Perla10329df2012-06-05 19:37:18 +00003192 be_vid_config(adapter);
Sathya Perlaa54769f2011-10-24 02:45:00 +00003193
3194 be_set_rx_mode(adapter->netdev);
3195
Ajit Khapardeddc3f5c2012-04-26 15:42:31 +00003196 be_cmd_get_flow_control(adapter, &tx_fc, &rx_fc);
Padmanabh Ratnakar590c3912011-11-25 05:47:26 +00003197
Ajit Khapardeddc3f5c2012-04-26 15:42:31 +00003198 if (rx_fc != adapter->rx_fc || tx_fc != adapter->tx_fc)
3199 be_cmd_set_flow_control(adapter, adapter->tx_fc,
Sathya Perlaa54769f2011-10-24 02:45:00 +00003200 adapter->rx_fc);
Sathya Perlaa54769f2011-10-24 02:45:00 +00003201
Sathya Perlab4c1df92013-05-08 02:05:47 +00003202 if (be_physfn(adapter)) {
Sathya Perla39f1d942012-05-08 19:41:24 +00003203 if (adapter->dev_num_vfs)
3204 be_vf_setup(adapter);
3205 else
3206 dev_warn(dev, "device doesn't support SRIOV\n");
Sathya Perlaf9449ab2011-10-24 02:45:01 +00003207 }
3208
Padmanabh Ratnakarf25b1192012-10-20 06:02:52 +00003209 status = be_cmd_get_phy_info(adapter);
3210 if (!status && be_pause_supported(adapter))
Ajit Khaparde42f11cf2012-04-21 18:53:22 +00003211 adapter->phy.fc_autoneg = 1;
3212
Sathya Perla191eb752012-02-23 18:50:13 +00003213 schedule_delayed_work(&adapter->work, msecs_to_jiffies(1000));
3214 adapter->flags |= BE_FLAGS_WORKER_SCHEDULED;
Sathya Perlaf9449ab2011-10-24 02:45:01 +00003215 return 0;
Sathya Perlaa54769f2011-10-24 02:45:00 +00003216err:
3217 be_clear(adapter);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07003218 return status;
3219}
3220
Ivan Vecera66268732011-12-08 01:31:21 +00003221#ifdef CONFIG_NET_POLL_CONTROLLER
3222static void be_netpoll(struct net_device *netdev)
3223{
3224 struct be_adapter *adapter = netdev_priv(netdev);
Sathya Perla10ef9ab2012-02-09 18:05:27 +00003225 struct be_eq_obj *eqo;
Ivan Vecera66268732011-12-08 01:31:21 +00003226 int i;
3227
Sathya Perlae49cc342012-11-27 19:50:02 +00003228 for_all_evt_queues(adapter, eqo, i) {
3229 be_eq_notify(eqo->adapter, eqo->q.id, false, true, 0);
3230 napi_schedule(&eqo->napi);
3231 }
Sathya Perla10ef9ab2012-02-09 18:05:27 +00003232
3233 return;
Ivan Vecera66268732011-12-08 01:31:21 +00003234}
3235#endif
3236
Ajit Khaparde84517482009-09-04 03:12:16 +00003237#define FW_FILE_HDR_SIGN "ServerEngines Corp. "
Padmanabh Ratnakarc165541e2012-04-25 01:47:15 +00003238char flash_cookie[2][16] = {"*** SE FLAS", "H DIRECTORY *** "};
3239
Sarveshwar Bandifa9a6fe2009-11-20 14:23:47 -08003240static bool be_flash_redboot(struct be_adapter *adapter,
Ajit Khaparde3f0d4562010-02-09 01:30:35 +00003241 const u8 *p, u32 img_start, int image_size,
3242 int hdr_size)
Sarveshwar Bandifa9a6fe2009-11-20 14:23:47 -08003243{
3244 u32 crc_offset;
3245 u8 flashed_crc[4];
3246 int status;
Ajit Khaparde3f0d4562010-02-09 01:30:35 +00003247
3248 crc_offset = hdr_size + img_start + image_size - 4;
3249
Sarveshwar Bandifa9a6fe2009-11-20 14:23:47 -08003250 p += crc_offset;
Ajit Khaparde3f0d4562010-02-09 01:30:35 +00003251
3252 status = be_cmd_get_flash_crc(adapter, flashed_crc,
Ajit Khapardef510fc62010-03-31 01:47:45 +00003253 (image_size - 4));
Sarveshwar Bandifa9a6fe2009-11-20 14:23:47 -08003254 if (status) {
3255 dev_err(&adapter->pdev->dev,
3256 "could not get crc from flash, not flashing redboot\n");
3257 return false;
3258 }
3259
3260 /*update redboot only if crc does not match*/
3261 if (!memcmp(flashed_crc, p, 4))
3262 return false;
3263 else
3264 return true;
Sarveshwar Bandifa9a6fe2009-11-20 14:23:47 -08003265}
3266
Sathya Perla306f1342011-08-02 19:57:45 +00003267static bool phy_flashing_required(struct be_adapter *adapter)
3268{
Ajit Khaparde42f11cf2012-04-21 18:53:22 +00003269 return (adapter->phy.phy_type == TN_8022 &&
3270 adapter->phy.interface_type == PHY_TYPE_BASET_10GB);
Sathya Perla306f1342011-08-02 19:57:45 +00003271}
3272
Padmanabh Ratnakarc165541e2012-04-25 01:47:15 +00003273static bool is_comp_in_ufi(struct be_adapter *adapter,
3274 struct flash_section_info *fsec, int type)
3275{
3276 int i = 0, img_type = 0;
3277 struct flash_section_info_g2 *fsec_g2 = NULL;
3278
Sathya Perlaca34fe32012-11-06 17:48:56 +00003279 if (BE2_chip(adapter))
Padmanabh Ratnakarc165541e2012-04-25 01:47:15 +00003280 fsec_g2 = (struct flash_section_info_g2 *)fsec;
3281
3282 for (i = 0; i < MAX_FLASH_COMP; i++) {
3283 if (fsec_g2)
3284 img_type = le32_to_cpu(fsec_g2->fsec_entry[i].type);
3285 else
3286 img_type = le32_to_cpu(fsec->fsec_entry[i].type);
3287
3288 if (img_type == type)
3289 return true;
3290 }
3291 return false;
3292
3293}
3294
3295struct flash_section_info *get_fsec_info(struct be_adapter *adapter,
3296 int header_size,
3297 const struct firmware *fw)
3298{
3299 struct flash_section_info *fsec = NULL;
3300 const u8 *p = fw->data;
3301
3302 p += header_size;
3303 while (p < (fw->data + fw->size)) {
3304 fsec = (struct flash_section_info *)p;
3305 if (!memcmp(flash_cookie, fsec->cookie, sizeof(flash_cookie)))
3306 return fsec;
3307 p += 32;
3308 }
3309 return NULL;
3310}
3311
Padmanabh Ratnakar773a2d72012-10-20 06:04:16 +00003312static int be_flash(struct be_adapter *adapter, const u8 *img,
3313 struct be_dma_mem *flash_cmd, int optype, int img_size)
3314{
3315 u32 total_bytes = 0, flash_op, num_bytes = 0;
3316 int status = 0;
3317 struct be_cmd_write_flashrom *req = flash_cmd->va;
3318
3319 total_bytes = img_size;
3320 while (total_bytes) {
3321 num_bytes = min_t(u32, 32*1024, total_bytes);
3322
3323 total_bytes -= num_bytes;
3324
3325 if (!total_bytes) {
3326 if (optype == OPTYPE_PHY_FW)
3327 flash_op = FLASHROM_OPER_PHY_FLASH;
3328 else
3329 flash_op = FLASHROM_OPER_FLASH;
3330 } else {
3331 if (optype == OPTYPE_PHY_FW)
3332 flash_op = FLASHROM_OPER_PHY_SAVE;
3333 else
3334 flash_op = FLASHROM_OPER_SAVE;
3335 }
3336
Padmanabh Ratnakarbe716442012-10-22 23:02:44 +00003337 memcpy(req->data_buf, img, num_bytes);
Padmanabh Ratnakar773a2d72012-10-20 06:04:16 +00003338 img += num_bytes;
3339 status = be_cmd_write_flashrom(adapter, flash_cmd, optype,
3340 flash_op, num_bytes);
3341 if (status) {
3342 if (status == ILLEGAL_IOCTL_REQ &&
3343 optype == OPTYPE_PHY_FW)
3344 break;
3345 dev_err(&adapter->pdev->dev,
3346 "cmd to write to flash rom failed.\n");
3347 return status;
3348 }
3349 }
3350 return 0;
3351}
3352
/* For BE2, BE3 and BE3-R */
/* Flash a gen2/gen3 UFI: walk a fixed table of known flash components
 * and program every one that is actually present in the UFI's flash
 * section table.
 *
 * @fw:            complete UFI file from request_firmware()
 * @flash_cmd:     DMA buffer handed down to be_flash()
 * @num_of_images: number of image headers preceding the section table
 *
 * Returns 0 on success, -1 for a corrupted/overrunning UFI, or the
 * status of the first failed flash operation.
 */
static int be_flash_BEx(struct be_adapter *adapter,
			 const struct firmware *fw,
			 struct be_dma_mem *flash_cmd,
			 int num_of_images)

{
	int status = 0, i, filehdr_size = 0;
	int img_hdrs_size = (num_of_images * sizeof(struct image_hdr));
	const u8 *p = fw->data;
	const struct flash_comp *pflashcomp;
	int num_comp, redboot;
	struct flash_section_info *fsec = NULL;

	/* Component tables: { flash offset, optype, max size, img type } */
	struct flash_comp gen3_flash_types[] = {
		{ FLASH_iSCSI_PRIMARY_IMAGE_START_g3, OPTYPE_ISCSI_ACTIVE,
			FLASH_IMAGE_MAX_SIZE_g3, IMAGE_FIRMWARE_iSCSI},
		{ FLASH_REDBOOT_START_g3, OPTYPE_REDBOOT,
			FLASH_REDBOOT_IMAGE_MAX_SIZE_g3, IMAGE_BOOT_CODE},
		{ FLASH_iSCSI_BIOS_START_g3, OPTYPE_BIOS,
			FLASH_BIOS_IMAGE_MAX_SIZE_g3, IMAGE_OPTION_ROM_ISCSI},
		{ FLASH_PXE_BIOS_START_g3, OPTYPE_PXE_BIOS,
			FLASH_BIOS_IMAGE_MAX_SIZE_g3, IMAGE_OPTION_ROM_PXE},
		{ FLASH_FCoE_BIOS_START_g3, OPTYPE_FCOE_BIOS,
			FLASH_BIOS_IMAGE_MAX_SIZE_g3, IMAGE_OPTION_ROM_FCoE},
		{ FLASH_iSCSI_BACKUP_IMAGE_START_g3, OPTYPE_ISCSI_BACKUP,
			FLASH_IMAGE_MAX_SIZE_g3, IMAGE_FIRMWARE_BACKUP_iSCSI},
		{ FLASH_FCoE_PRIMARY_IMAGE_START_g3, OPTYPE_FCOE_FW_ACTIVE,
			FLASH_IMAGE_MAX_SIZE_g3, IMAGE_FIRMWARE_FCoE},
		{ FLASH_FCoE_BACKUP_IMAGE_START_g3, OPTYPE_FCOE_FW_BACKUP,
			FLASH_IMAGE_MAX_SIZE_g3, IMAGE_FIRMWARE_BACKUP_FCoE},
		{ FLASH_NCSI_START_g3, OPTYPE_NCSI_FW,
			FLASH_NCSI_IMAGE_MAX_SIZE_g3, IMAGE_NCSI},
		{ FLASH_PHY_FW_START_g3, OPTYPE_PHY_FW,
			FLASH_PHY_FW_IMAGE_MAX_SIZE_g3, IMAGE_FIRMWARE_PHY}
	};

	struct flash_comp gen2_flash_types[] = {
		{ FLASH_iSCSI_PRIMARY_IMAGE_START_g2, OPTYPE_ISCSI_ACTIVE,
			FLASH_IMAGE_MAX_SIZE_g2, IMAGE_FIRMWARE_iSCSI},
		{ FLASH_REDBOOT_START_g2, OPTYPE_REDBOOT,
			FLASH_REDBOOT_IMAGE_MAX_SIZE_g2, IMAGE_BOOT_CODE},
		{ FLASH_iSCSI_BIOS_START_g2, OPTYPE_BIOS,
			FLASH_BIOS_IMAGE_MAX_SIZE_g2, IMAGE_OPTION_ROM_ISCSI},
		{ FLASH_PXE_BIOS_START_g2, OPTYPE_PXE_BIOS,
			FLASH_BIOS_IMAGE_MAX_SIZE_g2, IMAGE_OPTION_ROM_PXE},
		{ FLASH_FCoE_BIOS_START_g2, OPTYPE_FCOE_BIOS,
			FLASH_BIOS_IMAGE_MAX_SIZE_g2, IMAGE_OPTION_ROM_FCoE},
		{ FLASH_iSCSI_BACKUP_IMAGE_START_g2, OPTYPE_ISCSI_BACKUP,
			FLASH_IMAGE_MAX_SIZE_g2, IMAGE_FIRMWARE_BACKUP_iSCSI},
		{ FLASH_FCoE_PRIMARY_IMAGE_START_g2, OPTYPE_FCOE_FW_ACTIVE,
			FLASH_IMAGE_MAX_SIZE_g2, IMAGE_FIRMWARE_FCoE},
		{ FLASH_FCoE_BACKUP_IMAGE_START_g2, OPTYPE_FCOE_FW_BACKUP,
			FLASH_IMAGE_MAX_SIZE_g2, IMAGE_FIRMWARE_BACKUP_FCoE}
	};

	/* BE3 uses the gen3 table/header; everything else here is BE2 */
	if (BE3_chip(adapter)) {
		pflashcomp = gen3_flash_types;
		filehdr_size = sizeof(struct flash_file_hdr_g3);
		num_comp = ARRAY_SIZE(gen3_flash_types);
	} else {
		pflashcomp = gen2_flash_types;
		filehdr_size = sizeof(struct flash_file_hdr_g2);
		num_comp = ARRAY_SIZE(gen2_flash_types);
	}

	/* Get flash section info*/
	fsec = get_fsec_info(adapter, filehdr_size + img_hdrs_size, fw);
	if (!fsec) {
		dev_err(&adapter->pdev->dev,
			"Invalid Cookie. UFI corrupted ?\n");
		return -1;
	}
	for (i = 0; i < num_comp; i++) {
		/* skip components not present in this UFI */
		if (!is_comp_in_ufi(adapter, fsec, pflashcomp[i].img_type))
			continue;

		/* NCSI fw is flashed only on sufficiently new base fw */
		if ((pflashcomp[i].optype == OPTYPE_NCSI_FW) &&
		    memcmp(adapter->fw_ver, "3.102.148.0", 11) < 0)
			continue;

		if (pflashcomp[i].optype == OPTYPE_PHY_FW &&
		    !phy_flashing_required(adapter))
			continue;

		/* boot code is flashed only if be_flash_redboot() says so */
		if (pflashcomp[i].optype == OPTYPE_REDBOOT) {
			redboot = be_flash_redboot(adapter, fw->data,
				pflashcomp[i].offset, pflashcomp[i].size,
				filehdr_size + img_hdrs_size);
			if (!redboot)
				continue;
		}

		p = fw->data;
		p += filehdr_size + pflashcomp[i].offset + img_hdrs_size;
		/* reject sections that would read past the end of the file */
		if (p + pflashcomp[i].size > fw->data + fw->size)
			return -1;

		status = be_flash(adapter, p, flash_cmd, pflashcomp[i].optype,
					pflashcomp[i].size);
		if (status) {
			dev_err(&adapter->pdev->dev,
				"Flashing section type %d failed.\n",
				pflashcomp[i].img_type);
			return status;
		}
	}
	return 0;
}
3462
Padmanabh Ratnakar773a2d72012-10-20 06:04:16 +00003463static int be_flash_skyhawk(struct be_adapter *adapter,
3464 const struct firmware *fw,
3465 struct be_dma_mem *flash_cmd, int num_of_images)
Ajit Khaparde3f0d4562010-02-09 01:30:35 +00003466{
Padmanabh Ratnakar773a2d72012-10-20 06:04:16 +00003467 int status = 0, i, filehdr_size = 0;
3468 int img_offset, img_size, img_optype, redboot;
3469 int img_hdrs_size = num_of_images * sizeof(struct image_hdr);
3470 const u8 *p = fw->data;
3471 struct flash_section_info *fsec = NULL;
3472
3473 filehdr_size = sizeof(struct flash_file_hdr_g3);
3474 fsec = get_fsec_info(adapter, filehdr_size + img_hdrs_size, fw);
3475 if (!fsec) {
3476 dev_err(&adapter->pdev->dev,
3477 "Invalid Cookie. UFI corrupted ?\n");
3478 return -1;
3479 }
3480
3481 for (i = 0; i < le32_to_cpu(fsec->fsec_hdr.num_images); i++) {
3482 img_offset = le32_to_cpu(fsec->fsec_entry[i].offset);
3483 img_size = le32_to_cpu(fsec->fsec_entry[i].pad_size);
3484
3485 switch (le32_to_cpu(fsec->fsec_entry[i].type)) {
3486 case IMAGE_FIRMWARE_iSCSI:
3487 img_optype = OPTYPE_ISCSI_ACTIVE;
3488 break;
3489 case IMAGE_BOOT_CODE:
3490 img_optype = OPTYPE_REDBOOT;
3491 break;
3492 case IMAGE_OPTION_ROM_ISCSI:
3493 img_optype = OPTYPE_BIOS;
3494 break;
3495 case IMAGE_OPTION_ROM_PXE:
3496 img_optype = OPTYPE_PXE_BIOS;
3497 break;
3498 case IMAGE_OPTION_ROM_FCoE:
3499 img_optype = OPTYPE_FCOE_BIOS;
3500 break;
3501 case IMAGE_FIRMWARE_BACKUP_iSCSI:
3502 img_optype = OPTYPE_ISCSI_BACKUP;
3503 break;
3504 case IMAGE_NCSI:
3505 img_optype = OPTYPE_NCSI_FW;
3506 break;
3507 default:
3508 continue;
3509 }
3510
3511 if (img_optype == OPTYPE_REDBOOT) {
3512 redboot = be_flash_redboot(adapter, fw->data,
3513 img_offset, img_size,
3514 filehdr_size + img_hdrs_size);
3515 if (!redboot)
3516 continue;
3517 }
3518
3519 p = fw->data;
3520 p += filehdr_size + img_offset + img_hdrs_size;
3521 if (p + img_size > fw->data + fw->size)
3522 return -1;
3523
3524 status = be_flash(adapter, p, flash_cmd, img_optype, img_size);
3525 if (status) {
3526 dev_err(&adapter->pdev->dev,
3527 "Flashing section type %d failed.\n",
3528 fsec->fsec_entry[i].type);
3529 return status;
3530 }
3531 }
3532 return 0;
Ajit Khaparde3f0d4562010-02-09 01:30:35 +00003533}
3534
/* Download a firmware image to a Lancer adapter via WRITE_OBJECT
 * commands: the image is streamed in 32KB chunks into "/prg", then a
 * zero-length write at the final offset commits it.  Depending on
 * @change_status, the new firmware is activated with a function reset
 * or the user is told a reboot is needed.
 *
 * Returns 0 on success or a negative/command status on failure.
 */
static int lancer_fw_download(struct be_adapter *adapter,
				const struct firmware *fw)
{
#define LANCER_FW_DOWNLOAD_CHUNK      (32 * 1024)
#define LANCER_FW_DOWNLOAD_LOCATION   "/prg"
	struct be_dma_mem flash_cmd;
	const u8 *data_ptr = NULL;
	u8 *dest_image_ptr = NULL;
	size_t image_size = 0;
	u32 chunk_size = 0;
	u32 data_written = 0;
	u32 offset = 0;
	int status = 0;
	u8 add_status = 0;
	u8 change_status;

	/* firmware rejects images that are not 4-byte aligned */
	if (!IS_ALIGNED(fw->size, sizeof(u32))) {
		dev_err(&adapter->pdev->dev,
			"FW Image not properly aligned. "
			"Length must be 4 byte aligned.\n");
		status = -EINVAL;
		goto lancer_fw_exit;
	}

	/* one DMA buffer holds the command header plus one chunk */
	flash_cmd.size = sizeof(struct lancer_cmd_req_write_object)
				+ LANCER_FW_DOWNLOAD_CHUNK;
	flash_cmd.va = dma_alloc_coherent(&adapter->pdev->dev, flash_cmd.size,
					  &flash_cmd.dma, GFP_KERNEL);
	if (!flash_cmd.va) {
		status = -ENOMEM;
		goto lancer_fw_exit;
	}

	dest_image_ptr = flash_cmd.va +
				sizeof(struct lancer_cmd_req_write_object);
	image_size = fw->size;
	data_ptr = fw->data;

	while (image_size) {
		chunk_size = min_t(u32, image_size, LANCER_FW_DOWNLOAD_CHUNK);

		/* Copy the image chunk content. */
		memcpy(dest_image_ptr, data_ptr, chunk_size);

		status = lancer_cmd_write_object(adapter, &flash_cmd,
						 chunk_size, offset,
						 LANCER_FW_DOWNLOAD_LOCATION,
						 &data_written, &change_status,
						 &add_status);
		if (status)
			break;

		/* advance by what the firmware actually consumed */
		offset += data_written;
		data_ptr += data_written;
		image_size -= data_written;
	}

	if (!status) {
		/* Commit the FW written */
		status = lancer_cmd_write_object(adapter, &flash_cmd,
						 0, offset,
						 LANCER_FW_DOWNLOAD_LOCATION,
						 &data_written, &change_status,
						 &add_status);
	}

	dma_free_coherent(&adapter->pdev->dev, flash_cmd.size, flash_cmd.va,
			  flash_cmd.dma);
	if (status) {
		dev_err(&adapter->pdev->dev,
			"Firmware load error. "
			"Status code: 0x%x Additional Status: 0x%x\n",
			status, add_status);
		goto lancer_fw_exit;
	}

	if (change_status == LANCER_FW_RESET_NEEDED) {
		/* firmware asked for a function reset to activate the image */
		status = lancer_physdev_ctrl(adapter,
					     PHYSDEV_CONTROL_FW_RESET_MASK);
		if (status) {
			dev_err(&adapter->pdev->dev,
				"Adapter busy for FW reset.\n"
				"New FW will not be active.\n");
			goto lancer_fw_exit;
		}
	} else if (change_status != LANCER_NO_RESET_NEEDED) {
		dev_err(&adapter->pdev->dev,
			"System reboot required for new FW"
			" to be active\n");
	}

	dev_info(&adapter->pdev->dev, "Firmware flashed successfully\n");
lancer_fw_exit:
	return status;
}
3630
#define UFI_TYPE2		2
#define UFI_TYPE3		3
#define UFI_TYPE3R		10
#define UFI_TYPE4		4
/* Classify the UFI from its file header and confirm it matches the
 * chip family of this adapter.  The first character of the build
 * string encodes the UFI generation; asic_type_rev distinguishes the
 * BE3-R variant.  Returns one of the UFI_TYPE* values, or -1 (with an
 * error logged) when the image does not match the adapter.
 */
static int be_get_ufi_type(struct be_adapter *adapter,
			   struct flash_file_hdr_g3 *fhdr)
{
	if (fhdr == NULL)
		goto be_get_ufi_exit;

	if (skyhawk_chip(adapter) && fhdr->build[0] == '4')
		return UFI_TYPE4;
	else if (BE3_chip(adapter) && fhdr->build[0] == '3') {
		if (fhdr->asic_type_rev == 0x10)
			return UFI_TYPE3R;
		else
			return UFI_TYPE3;
	} else if (BE2_chip(adapter) && fhdr->build[0] == '2')
		return UFI_TYPE2;

be_get_ufi_exit:
	dev_err(&adapter->pdev->dev,
		"UFI and Interface are not compatible for flashing\n");
	return -1;
}
3656
/* Flash a non-Lancer firmware image: determine the UFI type, then for
 * each image header with imageid 1 dispatch to the flash routine for
 * that generation.  Gen2 (UFI_TYPE2) images carry no image headers, so
 * they are handled after the loop with num_of_images == 0.
 *
 * Returns 0 on success, -ENOMEM if the DMA buffer cannot be allocated,
 * or -1 / the flash routine's status on failure.
 */
static int be_fw_download(struct be_adapter *adapter, const struct firmware* fw)
{
	struct flash_file_hdr_g3 *fhdr3;
	struct image_hdr *img_hdr_ptr = NULL;
	struct be_dma_mem flash_cmd;
	const u8 *p;
	int status = 0, i = 0, num_imgs = 0, ufi_type = 0;

	flash_cmd.size = sizeof(struct be_cmd_write_flashrom);
	flash_cmd.va = dma_alloc_coherent(&adapter->pdev->dev, flash_cmd.size,
					  &flash_cmd.dma, GFP_KERNEL);
	if (!flash_cmd.va) {
		status = -ENOMEM;
		goto be_fw_exit;
	}

	p = fw->data;
	fhdr3 = (struct flash_file_hdr_g3 *)p;

	ufi_type = be_get_ufi_type(adapter, fhdr3);

	num_imgs = le32_to_cpu(fhdr3->num_imgs);
	for (i = 0; i < num_imgs; i++) {
		img_hdr_ptr = (struct image_hdr *)(fw->data +
				(sizeof(struct flash_file_hdr_g3) +
				 i * sizeof(struct image_hdr)));
		if (le32_to_cpu(img_hdr_ptr->imageid) == 1) {
			switch (ufi_type) {
			case UFI_TYPE4:
				status = be_flash_skyhawk(adapter, fw,
							&flash_cmd, num_imgs);
				break;
			case UFI_TYPE3R:
				status = be_flash_BEx(adapter, fw, &flash_cmd,
						      num_imgs);
				break;
			case UFI_TYPE3:
				/* Do not flash this ufi on BE3-R cards */
				if (adapter->asic_rev < 0x10)
					status = be_flash_BEx(adapter, fw,
							      &flash_cmd,
							      num_imgs);
				else {
					status = -1;
					dev_err(&adapter->pdev->dev,
						"Can't load BE3 UFI on BE3R\n");
				}
			}
		}
	}

	/* gen2 UFIs have no image headers, so the loop above never runs */
	if (ufi_type == UFI_TYPE2)
		status = be_flash_BEx(adapter, fw, &flash_cmd, 0);
	else if (ufi_type == -1)
		status = -1;

	dma_free_coherent(&adapter->pdev->dev, flash_cmd.size, flash_cmd.va,
			  flash_cmd.dma);
	if (status) {
		dev_err(&adapter->pdev->dev, "Firmware load error\n");
		goto be_fw_exit;
	}

	dev_info(&adapter->pdev->dev, "Firmware flashed successfully\n");

be_fw_exit:
	return status;
}
3725
/* Entry point for firmware flashing (invoked from ethtool).  Loads the
 * image file via the firmware loader and dispatches to the Lancer or
 * BE/Skyhawk download path; on success the cached firmware version is
 * refreshed.  Flashing requires the interface to be up.
 *
 * Returns 0 on success, -1 if the interface is down, or the status of
 * the firmware load/flash operation.
 */
int be_load_fw(struct be_adapter *adapter, u8 *fw_file)
{
	const struct firmware *fw;
	int status;

	if (!netif_running(adapter->netdev)) {
		dev_err(&adapter->pdev->dev,
			"Firmware load not allowed (interface is down)\n");
		return -1;
	}

	status = request_firmware(&fw, fw_file, &adapter->pdev->dev);
	if (status)
		goto fw_exit;

	dev_info(&adapter->pdev->dev, "Flashing firmware file %s\n", fw_file);

	if (lancer_chip(adapter))
		status = lancer_fw_download(adapter, fw);
	else
		status = be_fw_download(adapter, fw);

	/* re-read the version now stored on flash */
	if (!status)
		be_cmd_get_fw_ver(adapter, adapter->fw_ver,
				  adapter->fw_on_flash);

fw_exit:
	release_firmware(fw);
	return status;
}
3756
/* net_device callbacks for benet interfaces; installed on every netdev
 * by be_netdev_init() below.
 */
static const struct net_device_ops be_netdev_ops = {
	.ndo_open		= be_open,
	.ndo_stop		= be_close,
	.ndo_start_xmit		= be_xmit,
	.ndo_set_rx_mode	= be_set_rx_mode,
	.ndo_set_mac_address	= be_mac_addr_set,
	.ndo_change_mtu		= be_change_mtu,
	.ndo_get_stats64	= be_get_stats64,
	.ndo_validate_addr	= eth_validate_addr,
	.ndo_vlan_rx_add_vid	= be_vlan_add_vid,
	.ndo_vlan_rx_kill_vid	= be_vlan_rem_vid,
	.ndo_set_vf_mac		= be_set_vf_mac,
	.ndo_set_vf_vlan	= be_set_vf_vlan,
	.ndo_set_vf_tx_rate	= be_set_vf_tx_rate,
	.ndo_get_vf_config	= be_get_vf_config,
#ifdef CONFIG_NET_POLL_CONTROLLER
	.ndo_poll_controller	= be_netpoll,
#endif
};
3776
/* Initialize netdev feature flags, ops, ethtool ops and per-EQ NAPI
 * contexts for a freshly allocated benet net_device.
 */
static void be_netdev_init(struct net_device *netdev)
{
	struct be_adapter *adapter = netdev_priv(netdev);
	struct be_eq_obj *eqo;
	int i;

	/* user-toggleable offloads */
	netdev->hw_features |= NETIF_F_SG | NETIF_F_TSO | NETIF_F_TSO6 |
		NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM | NETIF_F_RXCSUM |
		NETIF_F_HW_VLAN_CTAG_TX;
	if (be_multi_rxq(adapter))
		netdev->hw_features |= NETIF_F_RXHASH;

	/* everything above is also enabled by default */
	netdev->features |= netdev->hw_features |
		NETIF_F_HW_VLAN_CTAG_RX | NETIF_F_HW_VLAN_CTAG_FILTER;

	netdev->vlan_features |= NETIF_F_SG | NETIF_F_TSO | NETIF_F_TSO6 |
		NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM;

	netdev->priv_flags |= IFF_UNICAST_FLT;

	netdev->flags |= IFF_MULTICAST;

	netif_set_gso_max_size(netdev, 65535 - ETH_HLEN);

	netdev->netdev_ops = &be_netdev_ops;

	SET_ETHTOOL_OPS(netdev, &be_ethtool_ops);

	/* one NAPI context per event queue */
	for_all_evt_queues(adapter, eqo, i)
		netif_napi_add(netdev, &eqo->napi, be_poll, BE_NAPI_WEIGHT);
}
3808
3809static void be_unmap_pci_bars(struct be_adapter *adapter)
3810{
Sathya Perlac5b3ad42013-03-05 22:23:20 +00003811 if (adapter->csr)
3812 pci_iounmap(adapter->pdev, adapter->csr);
Sathya Perla8788fdc2009-07-27 22:52:03 +00003813 if (adapter->db)
Sathya Perlace66f782012-11-06 17:48:58 +00003814 pci_iounmap(adapter->pdev, adapter->db);
Parav Pandit045508a2012-03-26 14:27:13 +00003815}
3816
/* BAR number holding the doorbell registers: BAR 0 on Lancer chips and
 * on virtual functions, BAR 4 otherwise.
 */
static int db_bar(struct be_adapter *adapter)
{
	return (lancer_chip(adapter) || !be_physfn(adapter)) ? 0 : 4;
}
3824
3825static int be_roce_map_pci_bars(struct be_adapter *adapter)
Parav Pandit045508a2012-03-26 14:27:13 +00003826{
Sathya Perladbf0f2a2012-11-06 17:49:00 +00003827 if (skyhawk_chip(adapter)) {
Sathya Perlace66f782012-11-06 17:48:58 +00003828 adapter->roce_db.size = 4096;
3829 adapter->roce_db.io_addr = pci_resource_start(adapter->pdev,
3830 db_bar(adapter));
3831 adapter->roce_db.total_size = pci_resource_len(adapter->pdev,
3832 db_bar(adapter));
3833 }
Parav Pandit045508a2012-03-26 14:27:13 +00003834 return 0;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07003835}
3836
/* Map the PCI BARs used by the driver: CSR (BAR 2, BE-x PFs only) and
 * the doorbell BAR, then record the RoCE doorbell window.  Also derives
 * adapter->if_type from the SLI_INTF config register.
 * Returns 0 on success or -ENOMEM if a mapping fails (any partial
 * mappings are undone).
 */
static int be_map_pci_bars(struct be_adapter *adapter)
{
	u8 __iomem *addr;
	u32 sli_intf;

	pci_read_config_dword(adapter->pdev, SLI_INTF_REG_OFFSET, &sli_intf);
	adapter->if_type = (sli_intf & SLI_INTF_IF_TYPE_MASK) >>
				SLI_INTF_IF_TYPE_SHIFT;

	/* CSR space exists only on BE2/BE3 physical functions */
	if (BEx_chip(adapter) && be_physfn(adapter)) {
		adapter->csr = pci_iomap(adapter->pdev, 2, 0);
		if (adapter->csr == NULL)
			return -ENOMEM;
	}

	addr = pci_iomap(adapter->pdev, db_bar(adapter), 0);
	if (addr == NULL)
		goto pci_map_err;
	adapter->db = addr;

	be_roce_map_pci_bars(adapter);
	return 0;

pci_map_err:
	be_unmap_pci_bars(adapter);
	return -ENOMEM;
}
3864
Sathya Perla6b7c5b92009-03-11 23:32:03 -07003865static void be_ctrl_cleanup(struct be_adapter *adapter)
3866{
Sathya Perla8788fdc2009-07-27 22:52:03 +00003867 struct be_dma_mem *mem = &adapter->mbox_mem_alloced;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07003868
3869 be_unmap_pci_bars(adapter);
3870
3871 if (mem->va)
Ivan Vecera2b7bceb2011-02-02 08:05:12 +00003872 dma_free_coherent(&adapter->pdev->dev, mem->size, mem->va,
3873 mem->dma);
Sathya Perlae7b909a2009-11-22 22:01:10 +00003874
Sathya Perla5b8821b2011-08-02 19:57:44 +00003875 mem = &adapter->rx_filter;
Sathya Perlae7b909a2009-11-22 22:01:10 +00003876 if (mem->va)
Ivan Vecera2b7bceb2011-02-02 08:05:12 +00003877 dma_free_coherent(&adapter->pdev->dev, mem->size, mem->va,
3878 mem->dma);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07003879}
3880
/* One-time controller setup during probe: read the SLI_INTF register,
 * map the PCI BARs, allocate the mailbox and rx-filter DMA buffers and
 * initialize the command locks.  On failure all partial allocations
 * are released via the goto ladder.
 * Returns 0 on success or a negative errno.
 */
static int be_ctrl_init(struct be_adapter *adapter)
{
	struct be_dma_mem *mbox_mem_alloc = &adapter->mbox_mem_alloced;
	struct be_dma_mem *mbox_mem_align = &adapter->mbox_mem;
	struct be_dma_mem *rx_filter = &adapter->rx_filter;
	u32 sli_intf;
	int status;

	pci_read_config_dword(adapter->pdev, SLI_INTF_REG_OFFSET, &sli_intf);
	adapter->sli_family = (sli_intf & SLI_INTF_FAMILY_MASK) >>
				SLI_INTF_FAMILY_SHIFT;
	adapter->virtfn = (sli_intf & SLI_INTF_FT_MASK) ? 1 : 0;

	status = be_map_pci_bars(adapter);
	if (status)
		goto done;

	/* over-allocate by 16 bytes so the mailbox can be 16-byte aligned */
	mbox_mem_alloc->size = sizeof(struct be_mcc_mailbox) + 16;
	mbox_mem_alloc->va = dma_alloc_coherent(&adapter->pdev->dev,
						mbox_mem_alloc->size,
						&mbox_mem_alloc->dma,
						GFP_KERNEL);
	if (!mbox_mem_alloc->va) {
		status = -ENOMEM;
		goto unmap_pci_bars;
	}
	/* mbox_mem is the aligned view into the raw allocation above */
	mbox_mem_align->size = sizeof(struct be_mcc_mailbox);
	mbox_mem_align->va = PTR_ALIGN(mbox_mem_alloc->va, 16);
	mbox_mem_align->dma = PTR_ALIGN(mbox_mem_alloc->dma, 16);
	memset(mbox_mem_align->va, 0, sizeof(struct be_mcc_mailbox));

	rx_filter->size = sizeof(struct be_cmd_req_rx_filter);
	rx_filter->va = dma_alloc_coherent(&adapter->pdev->dev, rx_filter->size,
					   &rx_filter->dma,
					   GFP_KERNEL | __GFP_ZERO);
	if (rx_filter->va == NULL) {
		status = -ENOMEM;
		goto free_mbox;
	}

	mutex_init(&adapter->mbox_lock);
	spin_lock_init(&adapter->mcc_lock);
	spin_lock_init(&adapter->mcc_cq_lock);

	init_completion(&adapter->flash_compl);
	pci_save_state(adapter->pdev);
	return 0;

free_mbox:
	dma_free_coherent(&adapter->pdev->dev, mbox_mem_alloc->size,
			  mbox_mem_alloc->va, mbox_mem_alloc->dma);

unmap_pci_bars:
	be_unmap_pci_bars(adapter);

done:
	return status;
}
3939
3940static void be_stats_cleanup(struct be_adapter *adapter)
3941{
Sathya Perla3abcded2010-10-03 22:12:27 -07003942 struct be_dma_mem *cmd = &adapter->stats_cmd;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07003943
3944 if (cmd->va)
Ivan Vecera2b7bceb2011-02-02 08:05:12 +00003945 dma_free_coherent(&adapter->pdev->dev, cmd->size,
3946 cmd->va, cmd->dma);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07003947}
3948
/* Allocate the DMA buffer used for firmware statistics commands; the
 * request size depends on the chip generation.
 * Returns 0 on success, -1 on allocation failure.
 */
static int be_stats_init(struct be_adapter *adapter)
{
	struct be_dma_mem *cmd = &adapter->stats_cmd;

	if (lancer_chip(adapter))
		cmd->size = sizeof(struct lancer_cmd_req_pport_stats);
	else if (BE2_chip(adapter))
		cmd->size = sizeof(struct be_cmd_req_get_stats_v0);
	else
		/* BE3 and Skyhawk */
		cmd->size = sizeof(struct be_cmd_req_get_stats_v1);

	cmd->va = dma_alloc_coherent(&adapter->pdev->dev, cmd->size, &cmd->dma,
				     GFP_KERNEL | __GFP_ZERO);
	if (cmd->va == NULL)
		return -1;
	return 0;
}
3967
/* PCI remove callback: tear down the adapter in the reverse order of
 * probe.  The ordering matters: interrupts are disabled and the
 * recovery worker cancelled before the netdev is unregistered and the
 * hardware resources are released.
 */
static void be_remove(struct pci_dev *pdev)
{
	struct be_adapter *adapter = pci_get_drvdata(pdev);

	/* probe may have failed before drvdata was set */
	if (!adapter)
		return;

	be_roce_dev_remove(adapter);
	be_intr_set(adapter, false);

	cancel_delayed_work_sync(&adapter->func_recovery_work);

	unregister_netdev(adapter->netdev);

	be_clear(adapter);

	/* tell fw we're done with firing cmds */
	be_cmd_fw_clean(adapter);

	be_stats_cleanup(adapter);

	be_ctrl_cleanup(adapter);

	pci_disable_pcie_error_reporting(pdev);

	pci_set_drvdata(pdev, NULL);
	pci_release_regions(pdev);
	pci_disable_device(pdev);

	free_netdev(adapter->netdev);
}
3999
Ajit Khaparde4762f6c2012-03-18 06:23:11 +00004000bool be_is_wol_supported(struct be_adapter *adapter)
4001{
4002 return ((adapter->wol_cap & BE_WOL_CAP) &&
4003 !be_is_wol_excluded(adapter)) ? true : false;
4004}
4005
/* Query the firmware's UART trace level via the extended-FAT
 * capabilities command.  Returns the configured UART debug level, or 0
 * on Lancer chips, on allocation failure, or when the command fails.
 */
u32 be_get_fw_log_level(struct be_adapter *adapter)
{
	struct be_dma_mem extfat_cmd;
	struct be_fat_conf_params *cfgs;
	int status;
	u32 level = 0;
	int j;

	/* Lancer does not support this command */
	if (lancer_chip(adapter))
		return 0;

	memset(&extfat_cmd, 0, sizeof(struct be_dma_mem));
	extfat_cmd.size = sizeof(struct be_cmd_resp_get_ext_fat_caps);
	extfat_cmd.va = pci_alloc_consistent(adapter->pdev, extfat_cmd.size,
					     &extfat_cmd.dma);

	if (!extfat_cmd.va) {
		dev_err(&adapter->pdev->dev, "%s: Memory allocation failure\n",
			__func__);
		goto err;
	}

	status = be_cmd_get_ext_fat_capabilites(adapter, &extfat_cmd);
	if (!status) {
		/* FAT config params follow the generic response header */
		cfgs = (struct be_fat_conf_params *)(extfat_cmd.va +
						sizeof(struct be_cmd_resp_hdr));
		/* pick the debug level of the UART trace mode entry */
		for (j = 0; j < le32_to_cpu(cfgs->module[0].num_modes); j++) {
			if (cfgs->module[0].trace_lvl[j].mode == MODE_UART)
				level = cfgs->module[0].trace_lvl[j].dbg_lvl;
		}
	}
	pci_free_consistent(adapter->pdev, extfat_cmd.size, extfat_cmd.va,
			    extfat_cmd.dma);
err:
	return level;
}
Padmanabh Ratnakarabb93952012-10-20 06:01:41 +00004042
/* Fetch the adapter's initial configuration at probe time: controller
 * attributes, WOL capability (with an exclusion-list fallback), the
 * temperature-query interval and the message-enable level derived from
 * the firmware log level.
 * Returns 0 on success or the failing command's status.
 */
static int be_get_initial_config(struct be_adapter *adapter)
{
	int status;
	u32 level;

	status = be_cmd_get_cntl_attributes(adapter);
	if (status)
		return status;

	status = be_cmd_get_acpi_wol_cap(adapter);
	if (status) {
		/* in case of a failure to get wol capabillities
		 * check the exclusion list to determine WOL capability */
		if (!be_is_wol_excluded(adapter))
			adapter->wol_cap |= BE_WOL_CAP;
	}

	if (be_is_wol_supported(adapter))
		adapter->wol = true;

	/* Must be a power of 2 or else MODULO will BUG_ON */
	adapter->be_get_temp_freq = 64;

	level = be_get_fw_log_level(adapter);
	adapter->msg_enable = level <= FW_LOG_LEVEL_DEFAULT ? NETIF_MSG_HW : 0;

	return 0;
}
4071
Padmanabh Ratnakarf67ef7b2012-07-12 03:57:09 +00004072static int lancer_recover_func(struct be_adapter *adapter)
Padmanabh Ratnakard8110f62011-11-25 05:48:23 +00004073{
Somnath Kotur01e5b2c2013-05-29 22:56:17 +00004074 struct device *dev = &adapter->pdev->dev;
Padmanabh Ratnakard8110f62011-11-25 05:48:23 +00004075 int status;
Padmanabh Ratnakard8110f62011-11-25 05:48:23 +00004076
Padmanabh Ratnakarf67ef7b2012-07-12 03:57:09 +00004077 status = lancer_test_and_set_rdy_state(adapter);
4078 if (status)
4079 goto err;
Padmanabh Ratnakard8110f62011-11-25 05:48:23 +00004080
Padmanabh Ratnakarf67ef7b2012-07-12 03:57:09 +00004081 if (netif_running(adapter->netdev))
4082 be_close(adapter->netdev);
Padmanabh Ratnakard8110f62011-11-25 05:48:23 +00004083
Padmanabh Ratnakarf67ef7b2012-07-12 03:57:09 +00004084 be_clear(adapter);
Padmanabh Ratnakard8110f62011-11-25 05:48:23 +00004085
Somnath Kotur01e5b2c2013-05-29 22:56:17 +00004086 be_clear_all_error(adapter);
Padmanabh Ratnakarf67ef7b2012-07-12 03:57:09 +00004087
4088 status = be_setup(adapter);
4089 if (status)
4090 goto err;
4091
4092 if (netif_running(adapter->netdev)) {
4093 status = be_open(adapter->netdev);
Padmanabh Ratnakard8110f62011-11-25 05:48:23 +00004094 if (status)
4095 goto err;
Padmanabh Ratnakard8110f62011-11-25 05:48:23 +00004096 }
Padmanabh Ratnakarf67ef7b2012-07-12 03:57:09 +00004097
Somnath Kotur01e5b2c2013-05-29 22:56:17 +00004098 dev_err(dev, "Error recovery successful\n");
Padmanabh Ratnakarf67ef7b2012-07-12 03:57:09 +00004099 return 0;
Padmanabh Ratnakard8110f62011-11-25 05:48:23 +00004100err:
Somnath Kotur01e5b2c2013-05-29 22:56:17 +00004101 if (status == -EAGAIN)
4102 dev_err(dev, "Waiting for resource provisioning\n");
4103 else
4104 dev_err(dev, "Error recovery failed\n");
Padmanabh Ratnakarf67ef7b2012-07-12 03:57:09 +00004105
4106 return status;
4107}
4108
4109static void be_func_recovery_task(struct work_struct *work)
4110{
4111 struct be_adapter *adapter =
4112 container_of(work, struct be_adapter, func_recovery_work.work);
Somnath Kotur01e5b2c2013-05-29 22:56:17 +00004113 int status = 0;
Padmanabh Ratnakarf67ef7b2012-07-12 03:57:09 +00004114
4115 be_detect_error(adapter);
4116
4117 if (adapter->hw_error && lancer_chip(adapter)) {
4118
Padmanabh Ratnakarf67ef7b2012-07-12 03:57:09 +00004119 rtnl_lock();
4120 netif_device_detach(adapter->netdev);
4121 rtnl_unlock();
4122
4123 status = lancer_recover_func(adapter);
Padmanabh Ratnakarf67ef7b2012-07-12 03:57:09 +00004124 if (!status)
4125 netif_device_attach(adapter->netdev);
4126 }
4127
Somnath Kotur01e5b2c2013-05-29 22:56:17 +00004128 /* In Lancer, for all errors other than provisioning error (-EAGAIN),
4129 * no need to attempt further recovery.
4130 */
4131 if (!status || status == -EAGAIN)
4132 schedule_delayed_work(&adapter->func_recovery_work,
4133 msecs_to_jiffies(1000));
Padmanabh Ratnakard8110f62011-11-25 05:48:23 +00004134}
4135
4136static void be_worker(struct work_struct *work)
4137{
4138 struct be_adapter *adapter =
4139 container_of(work, struct be_adapter, work.work);
4140 struct be_rx_obj *rxo;
Sathya Perla10ef9ab2012-02-09 18:05:27 +00004141 struct be_eq_obj *eqo;
Padmanabh Ratnakard8110f62011-11-25 05:48:23 +00004142 int i;
4143
Padmanabh Ratnakard8110f62011-11-25 05:48:23 +00004144 /* when interrupts are not yet enabled, just reap any pending
4145 * mcc completions */
4146 if (!netif_running(adapter->netdev)) {
Amerigo Wang072a9c42012-08-24 21:41:11 +00004147 local_bh_disable();
Sathya Perla10ef9ab2012-02-09 18:05:27 +00004148 be_process_mcc(adapter);
Amerigo Wang072a9c42012-08-24 21:41:11 +00004149 local_bh_enable();
Padmanabh Ratnakard8110f62011-11-25 05:48:23 +00004150 goto reschedule;
4151 }
4152
4153 if (!adapter->stats_cmd_sent) {
4154 if (lancer_chip(adapter))
4155 lancer_cmd_get_pport_stats(adapter,
4156 &adapter->stats_cmd);
4157 else
4158 be_cmd_get_stats(adapter, &adapter->stats_cmd);
4159 }
4160
Padmanabh Ratnakar7aeb2152012-07-12 03:56:46 +00004161 if (MODULO(adapter->work_counter, adapter->be_get_temp_freq) == 0)
4162 be_cmd_get_die_temperature(adapter);
4163
Padmanabh Ratnakard8110f62011-11-25 05:48:23 +00004164 for_all_rx_queues(adapter, rxo, i) {
Padmanabh Ratnakard8110f62011-11-25 05:48:23 +00004165 if (rxo->rx_post_starved) {
4166 rxo->rx_post_starved = false;
4167 be_post_rx_frags(rxo, GFP_KERNEL);
4168 }
4169 }
4170
Sathya Perla10ef9ab2012-02-09 18:05:27 +00004171 for_all_evt_queues(adapter, eqo, i)
4172 be_eqd_update(adapter, eqo);
4173
Padmanabh Ratnakard8110f62011-11-25 05:48:23 +00004174reschedule:
4175 adapter->work_counter++;
4176 schedule_delayed_work(&adapter->work, msecs_to_jiffies(1000));
4177}
4178
Sathya Perla257a3fe2013-06-14 15:54:51 +05304179/* If any VFs are already enabled don't FLR the PF */
Sathya Perla39f1d942012-05-08 19:41:24 +00004180static bool be_reset_required(struct be_adapter *adapter)
4181{
Sathya Perla257a3fe2013-06-14 15:54:51 +05304182 return pci_num_vf(adapter->pdev) ? false : true;
Sathya Perla39f1d942012-05-08 19:41:24 +00004183}
4184
Sathya Perlad3791422012-09-28 04:39:44 +00004185static char *mc_name(struct be_adapter *adapter)
4186{
4187 if (adapter->function_mode & FLEX10_MODE)
4188 return "FLEX10";
4189 else if (adapter->function_mode & VNIC_MODE)
4190 return "vNIC";
4191 else if (adapter->function_mode & UMC_ENABLED)
4192 return "UMC";
4193 else
4194 return "";
4195}
4196
/* "PF" for the physical function, "VF" for a virtual function */
static inline char *func_name(struct be_adapter *adapter)
{
	if (be_physfn(adapter))
		return "PF";
	return "VF";
}
4201
/* PCI probe: bring up one BE/Lancer NIC function.
 *
 * Ordering matters throughout: PCI/DMA setup, mailbox init (be_ctrl_init),
 * f/w-ready wait, optional FLR, interrupt enable, fw_init handshake, stats
 * DMA buffer, initial config, then be_setup() and netdev registration.
 * Errors unwind in reverse via the goto-label chain at the bottom.
 */
static int be_probe(struct pci_dev *pdev, const struct pci_device_id *pdev_id)
{
	int status = 0;
	struct be_adapter *adapter;
	struct net_device *netdev;
	char port_name;

	status = pci_enable_device(pdev);
	if (status)
		goto do_none;

	status = pci_request_regions(pdev, DRV_NAME);
	if (status)
		goto disable_dev;
	pci_set_master(pdev);

	/* multiqueue netdev sized for the h/w maximums; actual queue counts
	 * are set later in be_setup() */
	netdev = alloc_etherdev_mqs(sizeof(*adapter), MAX_TX_QS, MAX_RX_QS);
	if (netdev == NULL) {
		status = -ENOMEM;
		goto rel_reg;
	}
	adapter = netdev_priv(netdev);
	adapter->pdev = pdev;
	pci_set_drvdata(pdev, adapter);
	adapter->netdev = netdev;
	SET_NETDEV_DEV(netdev, &pdev->dev);

	/* prefer 64-bit DMA; fall back to 32-bit masks if unavailable */
	status = dma_set_mask(&pdev->dev, DMA_BIT_MASK(64));
	if (!status) {
		status = dma_set_coherent_mask(&pdev->dev, DMA_BIT_MASK(64));
		if (status < 0) {
			dev_err(&pdev->dev, "dma_set_coherent_mask failed\n");
			goto free_netdev;
		}
		netdev->features |= NETIF_F_HIGHDMA;
	} else {
		status = dma_set_mask(&pdev->dev, DMA_BIT_MASK(32));
		if (!status)
			status = dma_set_coherent_mask(&pdev->dev,
						       DMA_BIT_MASK(32));
		if (status) {
			dev_err(&pdev->dev, "Could not set PCI DMA Mask\n");
			goto free_netdev;
		}
	}

	/* AER is best-effort: log the failure but continue probing */
	status = pci_enable_pcie_error_reporting(pdev);
	if (status)
		dev_err(&pdev->dev, "Could not use PCIe error reporting\n");

	status = be_ctrl_init(adapter);
	if (status)
		goto free_netdev;

	/* sync up with fw's ready state */
	if (be_physfn(adapter)) {
		status = be_fw_wait_ready(adapter);
		if (status)
			goto ctrl_clean;
	}

	/* FLR the function unless VFs are already enabled (see
	 * be_reset_required()) */
	if (be_reset_required(adapter)) {
		status = be_cmd_reset_function(adapter);
		if (status)
			goto ctrl_clean;

		/* Wait for interrupts to quiesce after an FLR */
		msleep(100);
	}

	/* Allow interrupts for other ULPs running on NIC function */
	be_intr_set(adapter, true);

	/* tell fw we're ready to fire cmds */
	status = be_cmd_fw_init(adapter);
	if (status)
		goto ctrl_clean;

	status = be_stats_init(adapter);
	if (status)
		goto ctrl_clean;

	status = be_get_initial_config(adapter);
	if (status)
		goto stats_clean;

	INIT_DELAYED_WORK(&adapter->work, be_worker);
	INIT_DELAYED_WORK(&adapter->func_recovery_work, be_func_recovery_task);
	adapter->rx_fc = adapter->tx_fc = true;

	status = be_setup(adapter);
	if (status)
		goto stats_clean;

	be_netdev_init(netdev);
	status = register_netdev(netdev);
	if (status != 0)
		goto unsetup;

	/* expose this function to the RoCE driver */
	be_roce_dev_add(adapter);

	schedule_delayed_work(&adapter->func_recovery_work,
			      msecs_to_jiffies(1000));

	be_cmd_query_port_name(adapter, &port_name);

	dev_info(&pdev->dev, "%s: %s %s port %c\n", nic_name(pdev),
		 func_name(adapter), mc_name(adapter), port_name);

	return 0;

unsetup:
	be_clear(adapter);
stats_clean:
	be_stats_cleanup(adapter);
ctrl_clean:
	be_ctrl_cleanup(adapter);
free_netdev:
	free_netdev(netdev);
	pci_set_drvdata(pdev, NULL);
rel_reg:
	pci_release_regions(pdev);
disable_dev:
	pci_disable_device(pdev);
do_none:
	dev_err(&pdev->dev, "%s initialization failed\n", nic_name(pdev));
	return status;
}
4330
/* PM suspend hook: arm WoL if enabled, stop the recovery worker, close
 * the interface, release h/w resources and power the device down.
 */
static int be_suspend(struct pci_dev *pdev, pm_message_t state)
{
	struct be_adapter *adapter = pci_get_drvdata(pdev);
	struct net_device *netdev = adapter->netdev;

	if (adapter->wol)
		be_setup_wol(adapter, true);

	/* stop the recovery worker before tearing the interface down */
	cancel_delayed_work_sync(&adapter->func_recovery_work);

	netif_device_detach(netdev);
	if (netif_running(netdev)) {
		rtnl_lock();
		be_close(netdev);
		rtnl_unlock();
	}
	be_clear(adapter);

	pci_save_state(pdev);
	pci_disable_device(pdev);
	pci_set_power_state(pdev, pci_choose_state(pdev, state));
	return 0;
}
4354
/* PM resume hook: restore PCI state, re-handshake with the f/w, rebuild
 * the interface (be_setup), reopen it if it was running, re-arm the
 * recovery worker and disarm WoL.
 */
static int be_resume(struct pci_dev *pdev)
{
	int status = 0;
	struct be_adapter *adapter = pci_get_drvdata(pdev);
	struct net_device *netdev = adapter->netdev;

	netif_device_detach(netdev);

	status = pci_enable_device(pdev);
	if (status)
		return status;

	pci_set_power_state(pdev, 0);
	pci_restore_state(pdev);

	/* tell fw we're ready to fire cmds */
	status = be_cmd_fw_init(adapter);
	if (status)
		return status;

	/* NOTE(review): be_setup()'s return value is ignored here, unlike
	 * in be_probe()/be_eeh_resume() — presumably intentional
	 * best-effort resume, but worth confirming */
	be_setup(adapter);
	if (netif_running(netdev)) {
		rtnl_lock();
		be_open(netdev);
		rtnl_unlock();
	}

	schedule_delayed_work(&adapter->func_recovery_work,
			      msecs_to_jiffies(1000));
	netif_device_attach(netdev);

	if (adapter->wol)
		be_setup_wol(adapter, false);

	return 0;
}
4391
/*
 * An FLR will stop BE from DMAing any data.
 */
static void be_shutdown(struct pci_dev *pdev)
{
	struct be_adapter *adapter = pci_get_drvdata(pdev);

	/* probe may have failed before drvdata was set */
	if (!adapter)
		return;

	/* quiesce both periodic workers before resetting the function */
	cancel_delayed_work_sync(&adapter->work);
	cancel_delayed_work_sync(&adapter->func_recovery_work);

	netif_device_detach(adapter->netdev);

	/* FLR the function so no DMA is in flight across kexec/reboot */
	be_cmd_reset_function(adapter);

	pci_disable_device(pdev);
}
4411
/* EEH/AER callback: a PCI channel error was detected.  Tear the interface
 * down once (guarded by adapter->eeh_error) and tell the PCI core whether
 * a slot reset should be attempted.
 */
static pci_ers_result_t be_eeh_err_detected(struct pci_dev *pdev,
				pci_channel_state_t state)
{
	struct be_adapter *adapter = pci_get_drvdata(pdev);
	struct net_device *netdev = adapter->netdev;

	dev_err(&adapter->pdev->dev, "EEH error detected\n");

	/* this callback can fire more than once; only tear down once */
	if (!adapter->eeh_error) {
		adapter->eeh_error = true;

		cancel_delayed_work_sync(&adapter->func_recovery_work);

		rtnl_lock();
		netif_device_detach(netdev);
		if (netif_running(netdev))
			be_close(netdev);
		rtnl_unlock();

		be_clear(adapter);
	}

	if (state == pci_channel_io_perm_failure)
		return PCI_ERS_RESULT_DISCONNECT;

	pci_disable_device(pdev);

	/* The error could cause the FW to trigger a flash debug dump.
	 * Resetting the card while flash dump is in progress
	 * can cause it not to recover; wait for it to finish.
	 * Wait only for first function as it is needed only once per
	 * adapter.
	 */
	if (pdev->devfn == 0)
		ssleep(30);

	return PCI_ERS_RESULT_NEED_RESET;
}
4450
/* EEH/AER callback: the slot has been reset.  Re-enable the device, wait
 * for the f/w to come back and clear the recorded error state so the
 * subsequent resume callback can rebuild the interface.
 */
static pci_ers_result_t be_eeh_reset(struct pci_dev *pdev)
{
	struct be_adapter *adapter = pci_get_drvdata(pdev);
	int status;

	dev_info(&adapter->pdev->dev, "EEH reset\n");

	status = pci_enable_device(pdev);
	if (status)
		return PCI_ERS_RESULT_DISCONNECT;

	pci_set_master(pdev);
	pci_set_power_state(pdev, 0);
	pci_restore_state(pdev);

	/* Check if card is ok and fw is ready */
	dev_info(&adapter->pdev->dev,
		 "Waiting for FW to be ready after EEH reset\n");
	status = be_fw_wait_ready(adapter);
	if (status)
		return PCI_ERS_RESULT_DISCONNECT;

	/* clear any pending uncorrectable AER status before resuming */
	pci_cleanup_aer_uncorrect_error_status(pdev);
	be_clear_all_error(adapter);
	return PCI_ERS_RESULT_RECOVERED;
}
4477
/* EEH/AER callback: recovery finished.  Reset the function, re-handshake
 * with the f/w and rebuild/reopen the interface torn down in
 * be_eeh_err_detected().
 */
static void be_eeh_resume(struct pci_dev *pdev)
{
	int status = 0;
	struct be_adapter *adapter = pci_get_drvdata(pdev);
	struct net_device *netdev = adapter->netdev;

	dev_info(&adapter->pdev->dev, "EEH resume\n");

	pci_save_state(pdev);

	status = be_cmd_reset_function(adapter);
	if (status)
		goto err;

	/* tell fw we're ready to fire cmds */
	status = be_cmd_fw_init(adapter);
	if (status)
		goto err;

	status = be_setup(adapter);
	if (status)
		goto err;

	if (netif_running(netdev)) {
		status = be_open(netdev);
		if (status)
			goto err;
	}

	/* restart the recovery worker cancelled at error detection */
	schedule_delayed_work(&adapter->func_recovery_work,
			      msecs_to_jiffies(1000));
	netif_device_attach(netdev);
	return;
err:
	dev_err(&adapter->pdev->dev, "EEH resume failed\n");
}
4514
/* PCI error-recovery (EEH/AER) callbacks wired into be_driver below */
static const struct pci_error_handlers be_eeh_handlers = {
	.error_detected = be_eeh_err_detected,
	.slot_reset = be_eeh_reset,
	.resume = be_eeh_resume,
};
4520
/* PCI driver descriptor: probe/remove, legacy PM hooks, shutdown and
 * EEH/AER error handling for all BE2/BE3/Lancer device IDs.
 */
static struct pci_driver be_driver = {
	.name = DRV_NAME,
	.id_table = be_dev_ids,
	.probe = be_probe,
	.remove = be_remove,
	.suspend = be_suspend,
	.resume = be_resume,
	.shutdown = be_shutdown,
	.err_handler = &be_eeh_handlers
};
4531
4532static int __init be_init_module(void)
4533{
Joe Perches8e95a202009-12-03 07:58:21 +00004534 if (rx_frag_size != 8192 && rx_frag_size != 4096 &&
4535 rx_frag_size != 2048) {
Sathya Perla6b7c5b92009-03-11 23:32:03 -07004536 printk(KERN_WARNING DRV_NAME
4537 " : Module param rx_frag_size must be 2048/4096/8192."
4538 " Using 2048\n");
4539 rx_frag_size = 2048;
4540 }
Sathya Perla6b7c5b92009-03-11 23:32:03 -07004541
Sathya Perla6b7c5b92009-03-11 23:32:03 -07004542 return pci_register_driver(&be_driver);
4543}
4544module_init(be_init_module);
4545
/* Module unload: unregister the PCI driver (invokes be_remove per device) */
static void __exit be_exit_module(void)
{
	pci_unregister_driver(&be_driver);
}
module_exit(be_exit_module);