blob: ca2967b0f18b672bb0af9ca25eaeb586f3baa47e [file] [log] [blame]
/*
 * Copyright (C) 2005 - 2013 Emulex
 * All rights reserved.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License version 2
 * as published by the Free Software Foundation. The full GNU General
 * Public License is included in this distribution in the file called COPYING.
 *
 * Contact Information:
 * linux-drivers@emulex.com
 *
 * Emulex
 * 3333 Susan Street
 * Costa Mesa, CA 92626
 */
17
Paul Gortmaker70c71602011-05-22 16:47:17 -040018#include <linux/prefetch.h>
Paul Gortmaker9d9779e2011-07-03 15:21:01 -040019#include <linux/module.h>
Sathya Perla6b7c5b92009-03-11 23:32:03 -070020#include "be.h"
Sathya Perla8788fdc2009-07-27 22:52:03 +000021#include "be_cmds.h"
Stephen Hemminger65f71b82009-03-27 00:25:24 -070022#include <asm/div64.h>
Sathya Perlad6b6d982012-09-05 01:56:48 +000023#include <linux/aer.h>
Sathya Perla6b7c5b92009-03-11 23:32:03 -070024
25MODULE_VERSION(DRV_VER);
26MODULE_DEVICE_TABLE(pci, be_dev_ids);
27MODULE_DESCRIPTION(DRV_DESC " " DRV_VER);
Sarveshwar Bandi00d3d512013-01-28 04:17:01 +000028MODULE_AUTHOR("Emulex Corporation");
Sathya Perla6b7c5b92009-03-11 23:32:03 -070029MODULE_LICENSE("GPL");
30
Sarveshwar Bandiba343c72010-03-31 02:56:12 +000031static unsigned int num_vfs;
Sarveshwar Bandiba343c72010-03-31 02:56:12 +000032module_param(num_vfs, uint, S_IRUGO);
Sarveshwar Bandiba343c72010-03-31 02:56:12 +000033MODULE_PARM_DESC(num_vfs, "Number of PCI VFs to initialize");
Sathya Perla6b7c5b92009-03-11 23:32:03 -070034
Sathya Perla11ac75e2011-12-13 00:58:50 +000035static ushort rx_frag_size = 2048;
36module_param(rx_frag_size, ushort, S_IRUGO);
37MODULE_PARM_DESC(rx_frag_size, "Size of a fragment that holds rcvd data.");
38
Sathya Perla6b7c5b92009-03-11 23:32:03 -070039static DEFINE_PCI_DEVICE_TABLE(be_dev_ids) = {
Ajit Khapardec4ca2372009-05-18 15:38:55 -070040 { PCI_DEVICE(BE_VENDOR_ID, BE_DEVICE_ID1) },
Ajit Khaparde59fd5d82009-10-29 01:11:06 -070041 { PCI_DEVICE(BE_VENDOR_ID, BE_DEVICE_ID2) },
Ajit Khapardec4ca2372009-05-18 15:38:55 -070042 { PCI_DEVICE(BE_VENDOR_ID, OC_DEVICE_ID1) },
43 { PCI_DEVICE(BE_VENDOR_ID, OC_DEVICE_ID2) },
Sathya Perlafe6d2a32010-11-21 23:25:50 +000044 { PCI_DEVICE(EMULEX_VENDOR_ID, OC_DEVICE_ID3)},
Mammatha Edhala12f4d0a2011-05-18 03:26:22 +000045 { PCI_DEVICE(EMULEX_VENDOR_ID, OC_DEVICE_ID4)},
Ajit Khapardeecedb6a2011-12-15 06:31:38 +000046 { PCI_DEVICE(EMULEX_VENDOR_ID, OC_DEVICE_ID5)},
Padmanabh Ratnakar76b73532012-10-20 06:04:40 +000047 { PCI_DEVICE(EMULEX_VENDOR_ID, OC_DEVICE_ID6)},
Sathya Perla6b7c5b92009-03-11 23:32:03 -070048 { 0 }
49};
50MODULE_DEVICE_TABLE(pci, be_dev_ids);
/* Decode table for the UE Status Low CSR: entry i names the HW block
 * that corresponds to bit i of the register.
 */
static const char * const ue_status_low_desc[] = {
	"CEV",
	"CTX",
	"DBUF",
	"ERX",
	"Host",
	"MPU",
	"NDMA",
	"PTC ",
	"RDMA ",
	"RXF ",
	"RXIPS ",
	"RXULP0 ",
	"RXULP1 ",
	"RXULP2 ",
	"TIM ",
	"TPOST ",
	"TPRE ",
	"TXIPS ",
	"TXULP0 ",
	"TXULP1 ",
	"UC ",
	"WDMA ",
	"TXULP2 ",
	"HOST1 ",
	"P0_OB_LINK ",
	"P1_OB_LINK ",
	"HOST_GPIO ",
	"MBOX ",
	"AXGMAC0",
	"AXGMAC1",
	"JTAG",
	"MPU_INTPEND"
};
/* Decode table for the UE Status High CSR: entry i names the HW block
 * that corresponds to bit i of the register.
 */
static const char * const ue_status_hi_desc[] = {
	"LPCMEMHOST",
	"MGMT_MAC",
	"PCS0ONLINE",
	"MPU_IRAM",
	"PCS1ONLINE",
	"PCTL0",
	"PCTL1",
	"PMEM",
	"RR",
	"TXPB",
	"RXPP",
	"XAUI",
	"TXP",
	"ARM",
	"IPC",
	"HOST2",
	"HOST3",
	"HOST4",
	"HOST5",
	"HOST6",
	"HOST7",
	"HOST8",
	"HOST9",
	"NETC",
	"Unknown",
	"Unknown",
	"Unknown",
	"Unknown",
	"Unknown",
	"Unknown",
	"Unknown",
	"Unknown"
};
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700121
Sathya Perla752961a2011-10-24 02:45:03 +0000122/* Is BE in a multi-channel mode */
123static inline bool be_is_mc(struct be_adapter *adapter) {
124 return (adapter->function_mode & FLEX10_MODE ||
125 adapter->function_mode & VNIC_MODE ||
126 adapter->function_mode & UMC_ENABLED);
127}
128
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700129static void be_queue_free(struct be_adapter *adapter, struct be_queue_info *q)
130{
131 struct be_dma_mem *mem = &q->dma_mem;
Sathya Perla1cfafab2012-02-23 18:50:15 +0000132 if (mem->va) {
Ivan Vecera2b7bceb2011-02-02 08:05:12 +0000133 dma_free_coherent(&adapter->pdev->dev, mem->size, mem->va,
134 mem->dma);
Sathya Perla1cfafab2012-02-23 18:50:15 +0000135 mem->va = NULL;
136 }
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700137}
138
139static int be_queue_alloc(struct be_adapter *adapter, struct be_queue_info *q,
140 u16 len, u16 entry_size)
141{
142 struct be_dma_mem *mem = &q->dma_mem;
143
144 memset(q, 0, sizeof(*q));
145 q->len = len;
146 q->entry_size = entry_size;
147 mem->size = len * entry_size;
Ivan Vecera2b7bceb2011-02-02 08:05:12 +0000148 mem->va = dma_alloc_coherent(&adapter->pdev->dev, mem->size, &mem->dma,
Joe Perches1f9061d22013-03-15 07:23:58 +0000149 GFP_KERNEL | __GFP_ZERO);
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700150 if (!mem->va)
Sathya Perla10ef9ab2012-02-09 18:05:27 +0000151 return -ENOMEM;
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700152 return 0;
153}
154
Somnath Kotur68c45a22013-03-14 02:42:07 +0000155static void be_reg_intr_set(struct be_adapter *adapter, bool enable)
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700156{
Sathya Perladb3ea782011-08-22 19:41:52 +0000157 u32 reg, enabled;
Sathya Perla5f0b8492009-07-27 22:52:56 +0000158
Sathya Perladb3ea782011-08-22 19:41:52 +0000159 pci_read_config_dword(adapter->pdev, PCICFG_MEMBAR_CTRL_INT_CTRL_OFFSET,
160 &reg);
161 enabled = reg & MEMBAR_CTRL_INT_CTRL_HOSTINTR_MASK;
162
Sathya Perla5f0b8492009-07-27 22:52:56 +0000163 if (!enabled && enable)
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700164 reg |= MEMBAR_CTRL_INT_CTRL_HOSTINTR_MASK;
Sathya Perla5f0b8492009-07-27 22:52:56 +0000165 else if (enabled && !enable)
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700166 reg &= ~MEMBAR_CTRL_INT_CTRL_HOSTINTR_MASK;
Sathya Perla5f0b8492009-07-27 22:52:56 +0000167 else
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700168 return;
Sathya Perla5f0b8492009-07-27 22:52:56 +0000169
Sathya Perladb3ea782011-08-22 19:41:52 +0000170 pci_write_config_dword(adapter->pdev,
171 PCICFG_MEMBAR_CTRL_INT_CTRL_OFFSET, reg);
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700172}
173
Somnath Kotur68c45a22013-03-14 02:42:07 +0000174static void be_intr_set(struct be_adapter *adapter, bool enable)
175{
176 int status = 0;
177
178 /* On lancer interrupts can't be controlled via this register */
179 if (lancer_chip(adapter))
180 return;
181
182 if (adapter->eeh_error)
183 return;
184
185 status = be_cmd_intr_set(adapter, enable);
186 if (status)
187 be_reg_intr_set(adapter, enable);
188}
189
Sathya Perla8788fdc2009-07-27 22:52:03 +0000190static void be_rxq_notify(struct be_adapter *adapter, u16 qid, u16 posted)
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700191{
192 u32 val = 0;
193 val |= qid & DB_RQ_RING_ID_MASK;
194 val |= posted << DB_RQ_NUM_POSTED_SHIFT;
Sathya Perlaf3eb62d2010-06-29 00:11:17 +0000195
196 wmb();
Sathya Perla8788fdc2009-07-27 22:52:03 +0000197 iowrite32(val, adapter->db + DB_RQ_OFFSET);
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700198}
199
Vasundhara Volam94d73aa2013-04-21 23:28:14 +0000200static void be_txq_notify(struct be_adapter *adapter, struct be_tx_obj *txo,
201 u16 posted)
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700202{
203 u32 val = 0;
Vasundhara Volam94d73aa2013-04-21 23:28:14 +0000204 val |= txo->q.id & DB_TXULP_RING_ID_MASK;
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700205 val |= (posted & DB_TXULP_NUM_POSTED_MASK) << DB_TXULP_NUM_POSTED_SHIFT;
Sathya Perlaf3eb62d2010-06-29 00:11:17 +0000206
207 wmb();
Vasundhara Volam94d73aa2013-04-21 23:28:14 +0000208 iowrite32(val, adapter->db + txo->db_offset);
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700209}
210
Sathya Perla8788fdc2009-07-27 22:52:03 +0000211static void be_eq_notify(struct be_adapter *adapter, u16 qid,
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700212 bool arm, bool clear_int, u16 num_popped)
213{
214 u32 val = 0;
215 val |= qid & DB_EQ_RING_ID_MASK;
Sathya Perlafe6d2a32010-11-21 23:25:50 +0000216 val |= ((qid & DB_EQ_RING_ID_EXT_MASK) <<
217 DB_EQ_RING_ID_EXT_MASK_SHIFT);
Sathya Perlacf588472010-02-14 21:22:01 +0000218
Padmanabh Ratnakarf67ef7b2012-07-12 03:57:09 +0000219 if (adapter->eeh_error)
Sathya Perlacf588472010-02-14 21:22:01 +0000220 return;
221
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700222 if (arm)
223 val |= 1 << DB_EQ_REARM_SHIFT;
224 if (clear_int)
225 val |= 1 << DB_EQ_CLR_SHIFT;
226 val |= 1 << DB_EQ_EVNT_SHIFT;
227 val |= num_popped << DB_EQ_NUM_POPPED_SHIFT;
Sathya Perla8788fdc2009-07-27 22:52:03 +0000228 iowrite32(val, adapter->db + DB_EQ_OFFSET);
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700229}
230
Sathya Perla8788fdc2009-07-27 22:52:03 +0000231void be_cq_notify(struct be_adapter *adapter, u16 qid, bool arm, u16 num_popped)
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700232{
233 u32 val = 0;
234 val |= qid & DB_CQ_RING_ID_MASK;
Sathya Perlafe6d2a32010-11-21 23:25:50 +0000235 val |= ((qid & DB_CQ_RING_ID_EXT_MASK) <<
236 DB_CQ_RING_ID_EXT_MASK_SHIFT);
Sathya Perlacf588472010-02-14 21:22:01 +0000237
Padmanabh Ratnakarf67ef7b2012-07-12 03:57:09 +0000238 if (adapter->eeh_error)
Sathya Perlacf588472010-02-14 21:22:01 +0000239 return;
240
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700241 if (arm)
242 val |= 1 << DB_CQ_REARM_SHIFT;
243 val |= num_popped << DB_CQ_NUM_POPPED_SHIFT;
Sathya Perla8788fdc2009-07-27 22:52:03 +0000244 iowrite32(val, adapter->db + DB_CQ_OFFSET);
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700245}
246
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700247static int be_mac_addr_set(struct net_device *netdev, void *p)
248{
249 struct be_adapter *adapter = netdev_priv(netdev);
250 struct sockaddr *addr = p;
251 int status = 0;
Somnath Koture3a7ae22011-10-27 07:14:05 +0000252 u8 current_mac[ETH_ALEN];
Ajit Khapardefbc13f02012-03-18 06:23:21 +0000253 u32 pmac_id = adapter->pmac_id[0];
Padmanabh Ratnakar704e4c82012-10-20 06:02:13 +0000254 bool active_mac = true;
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700255
Ajit Khapardeca9e4982009-11-29 17:56:26 +0000256 if (!is_valid_ether_addr(addr->sa_data))
257 return -EADDRNOTAVAIL;
258
Padmanabh Ratnakar704e4c82012-10-20 06:02:13 +0000259 /* For BE VF, MAC address is already activated by PF.
260 * Hence only operation left is updating netdev->devaddr.
261 * Update it if user is passing the same MAC which was used
262 * during configuring VF MAC from PF(Hypervisor).
263 */
264 if (!lancer_chip(adapter) && !be_physfn(adapter)) {
265 status = be_cmd_mac_addr_query(adapter, current_mac,
266 false, adapter->if_handle, 0);
267 if (!status && !memcmp(current_mac, addr->sa_data, ETH_ALEN))
268 goto done;
269 else
270 goto err;
271 }
272
273 if (!memcmp(addr->sa_data, netdev->dev_addr, ETH_ALEN))
274 goto done;
275
276 /* For Lancer check if any MAC is active.
277 * If active, get its mac id.
278 */
279 if (lancer_chip(adapter) && !be_physfn(adapter))
280 be_cmd_get_mac_from_list(adapter, current_mac, &active_mac,
281 &pmac_id, 0);
282
283 status = be_cmd_pmac_add(adapter, (u8 *)addr->sa_data,
284 adapter->if_handle,
285 &adapter->pmac_id[0], 0);
286
Sathya Perlaa65027e2009-08-17 00:58:04 +0000287 if (status)
Somnath Koture3a7ae22011-10-27 07:14:05 +0000288 goto err;
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700289
Padmanabh Ratnakar704e4c82012-10-20 06:02:13 +0000290 if (active_mac)
291 be_cmd_pmac_del(adapter, adapter->if_handle,
292 pmac_id, 0);
293done:
Somnath Koture3a7ae22011-10-27 07:14:05 +0000294 memcpy(netdev->dev_addr, addr->sa_data, netdev->addr_len);
295 return 0;
296err:
297 dev_err(&adapter->pdev->dev, "MAC %pM set Failed\n", addr->sa_data);
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700298 return status;
299}
300
Sathya Perlaca34fe32012-11-06 17:48:56 +0000301/* BE2 supports only v0 cmd */
302static void *hw_stats_from_cmd(struct be_adapter *adapter)
303{
304 if (BE2_chip(adapter)) {
305 struct be_cmd_resp_get_stats_v0 *cmd = adapter->stats_cmd.va;
306
307 return &cmd->hw_stats;
308 } else {
309 struct be_cmd_resp_get_stats_v1 *cmd = adapter->stats_cmd.va;
310
311 return &cmd->hw_stats;
312 }
313}
314
315/* BE2 supports only v0 cmd */
316static void *be_erx_stats_from_cmd(struct be_adapter *adapter)
317{
318 if (BE2_chip(adapter)) {
319 struct be_hw_stats_v0 *hw_stats = hw_stats_from_cmd(adapter);
320
321 return &hw_stats->erx;
322 } else {
323 struct be_hw_stats_v1 *hw_stats = hw_stats_from_cmd(adapter);
324
325 return &hw_stats->erx;
326 }
327}
328
329static void populate_be_v0_stats(struct be_adapter *adapter)
Ajit Khaparde89a88ab2011-05-16 07:36:18 +0000330{
Sathya Perlaac124ff2011-07-25 19:10:14 +0000331 struct be_hw_stats_v0 *hw_stats = hw_stats_from_cmd(adapter);
332 struct be_pmem_stats *pmem_sts = &hw_stats->pmem;
333 struct be_rxf_stats_v0 *rxf_stats = &hw_stats->rxf;
Ajit Khaparde89a88ab2011-05-16 07:36:18 +0000334 struct be_port_rxf_stats_v0 *port_stats =
Sathya Perlaac124ff2011-07-25 19:10:14 +0000335 &rxf_stats->port[adapter->port_num];
336 struct be_drv_stats *drvs = &adapter->drv_stats;
Ajit Khaparde89a88ab2011-05-16 07:36:18 +0000337
Sathya Perlaac124ff2011-07-25 19:10:14 +0000338 be_dws_le_to_cpu(hw_stats, sizeof(*hw_stats));
Ajit Khaparde89a88ab2011-05-16 07:36:18 +0000339 drvs->rx_pause_frames = port_stats->rx_pause_frames;
340 drvs->rx_crc_errors = port_stats->rx_crc_errors;
341 drvs->rx_control_frames = port_stats->rx_control_frames;
342 drvs->rx_in_range_errors = port_stats->rx_in_range_errors;
343 drvs->rx_frame_too_long = port_stats->rx_frame_too_long;
344 drvs->rx_dropped_runt = port_stats->rx_dropped_runt;
345 drvs->rx_ip_checksum_errs = port_stats->rx_ip_checksum_errs;
346 drvs->rx_tcp_checksum_errs = port_stats->rx_tcp_checksum_errs;
347 drvs->rx_udp_checksum_errs = port_stats->rx_udp_checksum_errs;
348 drvs->rxpp_fifo_overflow_drop = port_stats->rx_fifo_overflow;
349 drvs->rx_dropped_tcp_length = port_stats->rx_dropped_tcp_length;
350 drvs->rx_dropped_too_small = port_stats->rx_dropped_too_small;
351 drvs->rx_dropped_too_short = port_stats->rx_dropped_too_short;
352 drvs->rx_out_range_errors = port_stats->rx_out_range_errors;
Sathya Perlaac124ff2011-07-25 19:10:14 +0000353 drvs->rx_input_fifo_overflow_drop = port_stats->rx_input_fifo_overflow;
Ajit Khaparde89a88ab2011-05-16 07:36:18 +0000354 drvs->rx_dropped_header_too_small =
355 port_stats->rx_dropped_header_too_small;
Suresh Reddy18fb06a2013-04-25 23:03:21 +0000356 drvs->rx_address_filtered =
357 port_stats->rx_address_filtered +
358 port_stats->rx_vlan_filtered;
Ajit Khaparde89a88ab2011-05-16 07:36:18 +0000359 drvs->rx_alignment_symbol_errors =
360 port_stats->rx_alignment_symbol_errors;
361
362 drvs->tx_pauseframes = port_stats->tx_pauseframes;
363 drvs->tx_controlframes = port_stats->tx_controlframes;
364
365 if (adapter->port_num)
Sathya Perlaac124ff2011-07-25 19:10:14 +0000366 drvs->jabber_events = rxf_stats->port1_jabber_events;
Ajit Khaparde89a88ab2011-05-16 07:36:18 +0000367 else
Sathya Perlaac124ff2011-07-25 19:10:14 +0000368 drvs->jabber_events = rxf_stats->port0_jabber_events;
Ajit Khaparde89a88ab2011-05-16 07:36:18 +0000369 drvs->rx_drops_no_pbuf = rxf_stats->rx_drops_no_pbuf;
Ajit Khaparde89a88ab2011-05-16 07:36:18 +0000370 drvs->rx_drops_no_erx_descr = rxf_stats->rx_drops_no_erx_descr;
Ajit Khaparde89a88ab2011-05-16 07:36:18 +0000371 drvs->forwarded_packets = rxf_stats->forwarded_packets;
372 drvs->rx_drops_mtu = rxf_stats->rx_drops_mtu;
Sathya Perlaac124ff2011-07-25 19:10:14 +0000373 drvs->rx_drops_no_tpre_descr = rxf_stats->rx_drops_no_tpre_descr;
374 drvs->rx_drops_too_many_frags = rxf_stats->rx_drops_too_many_frags;
Ajit Khaparde89a88ab2011-05-16 07:36:18 +0000375 adapter->drv_stats.eth_red_drops = pmem_sts->eth_red_drops;
376}
377
Sathya Perlaca34fe32012-11-06 17:48:56 +0000378static void populate_be_v1_stats(struct be_adapter *adapter)
Ajit Khaparde89a88ab2011-05-16 07:36:18 +0000379{
Sathya Perlaac124ff2011-07-25 19:10:14 +0000380 struct be_hw_stats_v1 *hw_stats = hw_stats_from_cmd(adapter);
381 struct be_pmem_stats *pmem_sts = &hw_stats->pmem;
382 struct be_rxf_stats_v1 *rxf_stats = &hw_stats->rxf;
Ajit Khaparde89a88ab2011-05-16 07:36:18 +0000383 struct be_port_rxf_stats_v1 *port_stats =
Sathya Perlaac124ff2011-07-25 19:10:14 +0000384 &rxf_stats->port[adapter->port_num];
385 struct be_drv_stats *drvs = &adapter->drv_stats;
Ajit Khaparde89a88ab2011-05-16 07:36:18 +0000386
Sathya Perlaac124ff2011-07-25 19:10:14 +0000387 be_dws_le_to_cpu(hw_stats, sizeof(*hw_stats));
Ajit Khaparde02fe7022011-12-09 13:53:09 +0000388 drvs->pmem_fifo_overflow_drop = port_stats->pmem_fifo_overflow_drop;
389 drvs->rx_priority_pause_frames = port_stats->rx_priority_pause_frames;
Ajit Khaparde89a88ab2011-05-16 07:36:18 +0000390 drvs->rx_pause_frames = port_stats->rx_pause_frames;
391 drvs->rx_crc_errors = port_stats->rx_crc_errors;
392 drvs->rx_control_frames = port_stats->rx_control_frames;
393 drvs->rx_in_range_errors = port_stats->rx_in_range_errors;
394 drvs->rx_frame_too_long = port_stats->rx_frame_too_long;
395 drvs->rx_dropped_runt = port_stats->rx_dropped_runt;
396 drvs->rx_ip_checksum_errs = port_stats->rx_ip_checksum_errs;
397 drvs->rx_tcp_checksum_errs = port_stats->rx_tcp_checksum_errs;
398 drvs->rx_udp_checksum_errs = port_stats->rx_udp_checksum_errs;
399 drvs->rx_dropped_tcp_length = port_stats->rx_dropped_tcp_length;
400 drvs->rx_dropped_too_small = port_stats->rx_dropped_too_small;
401 drvs->rx_dropped_too_short = port_stats->rx_dropped_too_short;
402 drvs->rx_out_range_errors = port_stats->rx_out_range_errors;
403 drvs->rx_dropped_header_too_small =
404 port_stats->rx_dropped_header_too_small;
405 drvs->rx_input_fifo_overflow_drop =
406 port_stats->rx_input_fifo_overflow_drop;
Suresh Reddy18fb06a2013-04-25 23:03:21 +0000407 drvs->rx_address_filtered = port_stats->rx_address_filtered;
Ajit Khaparde89a88ab2011-05-16 07:36:18 +0000408 drvs->rx_alignment_symbol_errors =
409 port_stats->rx_alignment_symbol_errors;
Sathya Perlaac124ff2011-07-25 19:10:14 +0000410 drvs->rxpp_fifo_overflow_drop = port_stats->rxpp_fifo_overflow_drop;
Ajit Khaparde89a88ab2011-05-16 07:36:18 +0000411 drvs->tx_pauseframes = port_stats->tx_pauseframes;
412 drvs->tx_controlframes = port_stats->tx_controlframes;
Ajit Khapardeb5adffc42013-05-01 09:38:00 +0000413 drvs->tx_priority_pauseframes = port_stats->tx_priority_pauseframes;
Ajit Khaparde89a88ab2011-05-16 07:36:18 +0000414 drvs->jabber_events = port_stats->jabber_events;
415 drvs->rx_drops_no_pbuf = rxf_stats->rx_drops_no_pbuf;
Ajit Khaparde89a88ab2011-05-16 07:36:18 +0000416 drvs->rx_drops_no_erx_descr = rxf_stats->rx_drops_no_erx_descr;
Ajit Khaparde89a88ab2011-05-16 07:36:18 +0000417 drvs->forwarded_packets = rxf_stats->forwarded_packets;
418 drvs->rx_drops_mtu = rxf_stats->rx_drops_mtu;
Sathya Perlaac124ff2011-07-25 19:10:14 +0000419 drvs->rx_drops_no_tpre_descr = rxf_stats->rx_drops_no_tpre_descr;
420 drvs->rx_drops_too_many_frags = rxf_stats->rx_drops_too_many_frags;
Ajit Khaparde89a88ab2011-05-16 07:36:18 +0000421 adapter->drv_stats.eth_red_drops = pmem_sts->eth_red_drops;
422}
423
Selvin Xavier005d5692011-05-16 07:36:35 +0000424static void populate_lancer_stats(struct be_adapter *adapter)
425{
Ajit Khaparde89a88ab2011-05-16 07:36:18 +0000426
Selvin Xavier005d5692011-05-16 07:36:35 +0000427 struct be_drv_stats *drvs = &adapter->drv_stats;
Sathya Perlaac124ff2011-07-25 19:10:14 +0000428 struct lancer_pport_stats *pport_stats =
429 pport_stats_from_cmd(adapter);
430
431 be_dws_le_to_cpu(pport_stats, sizeof(*pport_stats));
432 drvs->rx_pause_frames = pport_stats->rx_pause_frames_lo;
433 drvs->rx_crc_errors = pport_stats->rx_crc_errors_lo;
434 drvs->rx_control_frames = pport_stats->rx_control_frames_lo;
Selvin Xavier005d5692011-05-16 07:36:35 +0000435 drvs->rx_in_range_errors = pport_stats->rx_in_range_errors;
Sathya Perlaac124ff2011-07-25 19:10:14 +0000436 drvs->rx_frame_too_long = pport_stats->rx_frames_too_long_lo;
Selvin Xavier005d5692011-05-16 07:36:35 +0000437 drvs->rx_dropped_runt = pport_stats->rx_dropped_runt;
438 drvs->rx_ip_checksum_errs = pport_stats->rx_ip_checksum_errors;
439 drvs->rx_tcp_checksum_errs = pport_stats->rx_tcp_checksum_errors;
440 drvs->rx_udp_checksum_errs = pport_stats->rx_udp_checksum_errors;
441 drvs->rx_dropped_tcp_length =
442 pport_stats->rx_dropped_invalid_tcp_length;
443 drvs->rx_dropped_too_small = pport_stats->rx_dropped_too_small;
444 drvs->rx_dropped_too_short = pport_stats->rx_dropped_too_short;
445 drvs->rx_out_range_errors = pport_stats->rx_out_of_range_errors;
446 drvs->rx_dropped_header_too_small =
447 pport_stats->rx_dropped_header_too_small;
448 drvs->rx_input_fifo_overflow_drop = pport_stats->rx_fifo_overflow;
Suresh Reddy18fb06a2013-04-25 23:03:21 +0000449 drvs->rx_address_filtered =
450 pport_stats->rx_address_filtered +
451 pport_stats->rx_vlan_filtered;
Sathya Perlaac124ff2011-07-25 19:10:14 +0000452 drvs->rx_alignment_symbol_errors = pport_stats->rx_symbol_errors_lo;
Selvin Xavier005d5692011-05-16 07:36:35 +0000453 drvs->rxpp_fifo_overflow_drop = pport_stats->rx_fifo_overflow;
Sathya Perlaac124ff2011-07-25 19:10:14 +0000454 drvs->tx_pauseframes = pport_stats->tx_pause_frames_lo;
455 drvs->tx_controlframes = pport_stats->tx_control_frames_lo;
Selvin Xavier005d5692011-05-16 07:36:35 +0000456 drvs->jabber_events = pport_stats->rx_jabbers;
Sathya Perlaac124ff2011-07-25 19:10:14 +0000457 drvs->forwarded_packets = pport_stats->num_forwards_lo;
458 drvs->rx_drops_mtu = pport_stats->rx_drops_mtu_lo;
Selvin Xavier005d5692011-05-16 07:36:35 +0000459 drvs->rx_drops_too_many_frags =
Sathya Perlaac124ff2011-07-25 19:10:14 +0000460 pport_stats->rx_drops_too_many_frags_lo;
Selvin Xavier005d5692011-05-16 07:36:35 +0000461}
Ajit Khaparde89a88ab2011-05-16 07:36:18 +0000462
Sathya Perla09c1c682011-08-22 19:41:53 +0000463static void accumulate_16bit_val(u32 *acc, u16 val)
464{
465#define lo(x) (x & 0xFFFF)
466#define hi(x) (x & 0xFFFF0000)
467 bool wrapped = val < lo(*acc);
468 u32 newacc = hi(*acc) + val;
469
470 if (wrapped)
471 newacc += 65536;
472 ACCESS_ONCE(*acc) = newacc;
473}
474
Ajit Khapardea6c578e2013-05-01 09:37:27 +0000475void populate_erx_stats(struct be_adapter *adapter,
476 struct be_rx_obj *rxo,
477 u32 erx_stat)
478{
479 if (!BEx_chip(adapter))
480 rx_stats(rxo)->rx_drops_no_frags = erx_stat;
481 else
482 /* below erx HW counter can actually wrap around after
483 * 65535. Driver accumulates a 32-bit value
484 */
485 accumulate_16bit_val(&rx_stats(rxo)->rx_drops_no_frags,
486 (u16)erx_stat);
487}
488
Ajit Khaparde89a88ab2011-05-16 07:36:18 +0000489void be_parse_stats(struct be_adapter *adapter)
490{
Sathya Perlaac124ff2011-07-25 19:10:14 +0000491 struct be_erx_stats_v1 *erx = be_erx_stats_from_cmd(adapter);
492 struct be_rx_obj *rxo;
493 int i;
Ajit Khapardea6c578e2013-05-01 09:37:27 +0000494 u32 erx_stat;
Sathya Perlaac124ff2011-07-25 19:10:14 +0000495
Sathya Perlaca34fe32012-11-06 17:48:56 +0000496 if (lancer_chip(adapter)) {
497 populate_lancer_stats(adapter);
Selvin Xavier005d5692011-05-16 07:36:35 +0000498 } else {
Sathya Perlaca34fe32012-11-06 17:48:56 +0000499 if (BE2_chip(adapter))
500 populate_be_v0_stats(adapter);
501 else
502 /* for BE3 and Skyhawk */
503 populate_be_v1_stats(adapter);
Sathya Perlaac124ff2011-07-25 19:10:14 +0000504
Sathya Perlaca34fe32012-11-06 17:48:56 +0000505 /* as erx_v1 is longer than v0, ok to use v1 for v0 access */
506 for_all_rx_queues(adapter, rxo, i) {
Ajit Khapardea6c578e2013-05-01 09:37:27 +0000507 erx_stat = erx->rx_drops_no_fragments[rxo->q.id];
508 populate_erx_stats(adapter, rxo, erx_stat);
Sathya Perlaca34fe32012-11-06 17:48:56 +0000509 }
Sathya Perla09c1c682011-08-22 19:41:53 +0000510 }
Ajit Khaparde89a88ab2011-05-16 07:36:18 +0000511}
512
Sathya Perlaab1594e2011-07-25 19:10:15 +0000513static struct rtnl_link_stats64 *be_get_stats64(struct net_device *netdev,
514 struct rtnl_link_stats64 *stats)
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700515{
Sathya Perlaab1594e2011-07-25 19:10:15 +0000516 struct be_adapter *adapter = netdev_priv(netdev);
Ajit Khaparde89a88ab2011-05-16 07:36:18 +0000517 struct be_drv_stats *drvs = &adapter->drv_stats;
Sathya Perla3abcded2010-10-03 22:12:27 -0700518 struct be_rx_obj *rxo;
Sathya Perla3c8def92011-06-12 20:01:58 +0000519 struct be_tx_obj *txo;
Sathya Perlaab1594e2011-07-25 19:10:15 +0000520 u64 pkts, bytes;
521 unsigned int start;
Sathya Perla3abcded2010-10-03 22:12:27 -0700522 int i;
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700523
Sathya Perla3abcded2010-10-03 22:12:27 -0700524 for_all_rx_queues(adapter, rxo, i) {
Sathya Perlaab1594e2011-07-25 19:10:15 +0000525 const struct be_rx_stats *rx_stats = rx_stats(rxo);
526 do {
527 start = u64_stats_fetch_begin_bh(&rx_stats->sync);
528 pkts = rx_stats(rxo)->rx_pkts;
529 bytes = rx_stats(rxo)->rx_bytes;
530 } while (u64_stats_fetch_retry_bh(&rx_stats->sync, start));
531 stats->rx_packets += pkts;
532 stats->rx_bytes += bytes;
533 stats->multicast += rx_stats(rxo)->rx_mcast_pkts;
534 stats->rx_dropped += rx_stats(rxo)->rx_drops_no_skbs +
535 rx_stats(rxo)->rx_drops_no_frags;
Sathya Perla3abcded2010-10-03 22:12:27 -0700536 }
537
Sathya Perla3c8def92011-06-12 20:01:58 +0000538 for_all_tx_queues(adapter, txo, i) {
Sathya Perlaab1594e2011-07-25 19:10:15 +0000539 const struct be_tx_stats *tx_stats = tx_stats(txo);
540 do {
541 start = u64_stats_fetch_begin_bh(&tx_stats->sync);
542 pkts = tx_stats(txo)->tx_pkts;
543 bytes = tx_stats(txo)->tx_bytes;
544 } while (u64_stats_fetch_retry_bh(&tx_stats->sync, start));
545 stats->tx_packets += pkts;
546 stats->tx_bytes += bytes;
Sathya Perla3c8def92011-06-12 20:01:58 +0000547 }
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700548
549 /* bad pkts received */
Sathya Perlaab1594e2011-07-25 19:10:15 +0000550 stats->rx_errors = drvs->rx_crc_errors +
Ajit Khaparde89a88ab2011-05-16 07:36:18 +0000551 drvs->rx_alignment_symbol_errors +
552 drvs->rx_in_range_errors +
553 drvs->rx_out_range_errors +
554 drvs->rx_frame_too_long +
555 drvs->rx_dropped_too_small +
556 drvs->rx_dropped_too_short +
557 drvs->rx_dropped_header_too_small +
558 drvs->rx_dropped_tcp_length +
Sathya Perlaab1594e2011-07-25 19:10:15 +0000559 drvs->rx_dropped_runt;
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700560
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700561 /* detailed rx errors */
Sathya Perlaab1594e2011-07-25 19:10:15 +0000562 stats->rx_length_errors = drvs->rx_in_range_errors +
Ajit Khaparde89a88ab2011-05-16 07:36:18 +0000563 drvs->rx_out_range_errors +
564 drvs->rx_frame_too_long;
Sathya Perla68110862009-06-10 02:21:16 +0000565
Sathya Perlaab1594e2011-07-25 19:10:15 +0000566 stats->rx_crc_errors = drvs->rx_crc_errors;
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700567
568 /* frame alignment errors */
Sathya Perlaab1594e2011-07-25 19:10:15 +0000569 stats->rx_frame_errors = drvs->rx_alignment_symbol_errors;
Sathya Perla68110862009-06-10 02:21:16 +0000570
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700571 /* receiver fifo overrun */
572 /* drops_no_pbuf is no per i/f, it's per BE card */
Sathya Perlaab1594e2011-07-25 19:10:15 +0000573 stats->rx_fifo_errors = drvs->rxpp_fifo_overflow_drop +
Ajit Khaparde89a88ab2011-05-16 07:36:18 +0000574 drvs->rx_input_fifo_overflow_drop +
575 drvs->rx_drops_no_pbuf;
Sathya Perlaab1594e2011-07-25 19:10:15 +0000576 return stats;
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700577}
578
Ajit Khapardeb236916a2011-12-30 12:15:40 +0000579void be_link_status_update(struct be_adapter *adapter, u8 link_status)
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700580{
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700581 struct net_device *netdev = adapter->netdev;
582
Ajit Khapardeb236916a2011-12-30 12:15:40 +0000583 if (!(adapter->flags & BE_FLAGS_LINK_STATUS_INIT)) {
Sathya Perlaea172a02011-08-02 19:57:42 +0000584 netif_carrier_off(netdev);
Ajit Khapardeb236916a2011-12-30 12:15:40 +0000585 adapter->flags |= BE_FLAGS_LINK_STATUS_INIT;
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700586 }
Ajit Khapardeb236916a2011-12-30 12:15:40 +0000587
588 if ((link_status & LINK_STATUS_MASK) == LINK_UP)
589 netif_carrier_on(netdev);
590 else
591 netif_carrier_off(netdev);
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700592}
593
Sathya Perla3c8def92011-06-12 20:01:58 +0000594static void be_tx_stats_update(struct be_tx_obj *txo,
Ajit Khaparde91992e42010-02-19 13:57:12 +0000595 u32 wrb_cnt, u32 copied, u32 gso_segs, bool stopped)
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700596{
Sathya Perla3c8def92011-06-12 20:01:58 +0000597 struct be_tx_stats *stats = tx_stats(txo);
598
Sathya Perlaab1594e2011-07-25 19:10:15 +0000599 u64_stats_update_begin(&stats->sync);
Sathya Perlaac124ff2011-07-25 19:10:14 +0000600 stats->tx_reqs++;
601 stats->tx_wrbs += wrb_cnt;
602 stats->tx_bytes += copied;
603 stats->tx_pkts += (gso_segs ? gso_segs : 1);
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700604 if (stopped)
Sathya Perlaac124ff2011-07-25 19:10:14 +0000605 stats->tx_stops++;
Sathya Perlaab1594e2011-07-25 19:10:15 +0000606 u64_stats_update_end(&stats->sync);
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700607}
608
609/* Determine number of WRB entries needed to xmit data in an skb */
Sathya Perlafe6d2a32010-11-21 23:25:50 +0000610static u32 wrb_cnt_for_skb(struct be_adapter *adapter, struct sk_buff *skb,
611 bool *dummy)
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700612{
David S. Millerebc8d2a2009-06-09 01:01:31 -0700613 int cnt = (skb->len > skb->data_len);
614
615 cnt += skb_shinfo(skb)->nr_frags;
616
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700617 /* to account for hdr wrb */
618 cnt++;
Sathya Perlafe6d2a32010-11-21 23:25:50 +0000619 if (lancer_chip(adapter) || !(cnt & 1)) {
620 *dummy = false;
621 } else {
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700622 /* add a dummy to make it an even num */
623 cnt++;
624 *dummy = true;
Sathya Perlafe6d2a32010-11-21 23:25:50 +0000625 }
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700626 BUG_ON(cnt > BE_MAX_TX_FRAG_COUNT);
627 return cnt;
628}
629
630static inline void wrb_fill(struct be_eth_wrb *wrb, u64 addr, int len)
631{
632 wrb->frag_pa_hi = upper_32_bits(addr);
633 wrb->frag_pa_lo = addr & 0xFFFFFFFF;
634 wrb->frag_len = len & ETH_WRB_FRAG_LEN_MASK;
Somnath Kotur89b1f492012-06-24 19:40:55 +0000635 wrb->rsvd0 = 0;
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700636}
637
Ajit Khaparde1ded1322011-12-09 13:53:17 +0000638static inline u16 be_get_tx_vlan_tag(struct be_adapter *adapter,
639 struct sk_buff *skb)
640{
641 u8 vlan_prio;
642 u16 vlan_tag;
643
644 vlan_tag = vlan_tx_tag_get(skb);
645 vlan_prio = (vlan_tag & VLAN_PRIO_MASK) >> VLAN_PRIO_SHIFT;
646 /* If vlan priority provided by OS is NOT in available bmap */
647 if (!(adapter->vlan_prio_bmap & (1 << vlan_prio)))
648 vlan_tag = (vlan_tag & ~VLAN_PRIO_MASK) |
649 adapter->recommended_prio;
650
651 return vlan_tag;
652}
653
/* Fill the per-packet header WRB that precedes the fragment WRBs.
 * Encodes the requested offloads (LSO/LSOv6 or TCP/UDP checksum),
 * HW VLAN insertion, the number of WRBs that follow (@wrb_cnt) and the
 * total payload length (@len).
 * @skip_hw_vlan: when true, request an event but no completion, which
 * tells the HW to skip VLAN insertion (tag already inlined in the pkt).
 */
static void wrb_fill_hdr(struct be_adapter *adapter, struct be_eth_hdr_wrb *hdr,
		struct sk_buff *skb, u32 wrb_cnt, u32 len, bool skip_hw_vlan)
{
	u16 vlan_tag;

	memset(hdr, 0, sizeof(*hdr));

	AMAP_SET_BITS(struct amap_eth_hdr_wrb, crc, hdr, 1);

	if (skb_is_gso(skb)) {
		AMAP_SET_BITS(struct amap_eth_hdr_wrb, lso, hdr, 1);
		AMAP_SET_BITS(struct amap_eth_hdr_wrb, lso_mss,
			hdr, skb_shinfo(skb)->gso_size);
		/* lso6 bit is not used on Lancer chips */
		if (skb_is_gso_v6(skb) && !lancer_chip(adapter))
			AMAP_SET_BITS(struct amap_eth_hdr_wrb, lso6, hdr, 1);
	} else if (skb->ip_summed == CHECKSUM_PARTIAL) {
		/* Checksum offload: LSO and csum bits are mutually exclusive */
		if (is_tcp_pkt(skb))
			AMAP_SET_BITS(struct amap_eth_hdr_wrb, tcpcs, hdr, 1);
		else if (is_udp_pkt(skb))
			AMAP_SET_BITS(struct amap_eth_hdr_wrb, udpcs, hdr, 1);
	}

	if (vlan_tx_tag_present(skb)) {
		AMAP_SET_BITS(struct amap_eth_hdr_wrb, vlan, hdr, 1);
		vlan_tag = be_get_tx_vlan_tag(adapter, skb);
		AMAP_SET_BITS(struct amap_eth_hdr_wrb, vlan_tag, hdr, vlan_tag);
	}

	/* To skip HW VLAN tagging: evt = 1, compl = 0 */
	AMAP_SET_BITS(struct amap_eth_hdr_wrb, complete, hdr, !skip_hw_vlan);
	AMAP_SET_BITS(struct amap_eth_hdr_wrb, event, hdr, 1);
	AMAP_SET_BITS(struct amap_eth_hdr_wrb, num_wrb, hdr, wrb_cnt);
	AMAP_SET_BITS(struct amap_eth_hdr_wrb, len, hdr, len);
}
688
Ivan Vecera2b7bceb2011-02-02 08:05:12 +0000689static void unmap_tx_frag(struct device *dev, struct be_eth_wrb *wrb,
Sathya Perla7101e112010-03-22 20:41:12 +0000690 bool unmap_single)
691{
692 dma_addr_t dma;
693
694 be_dws_le_to_cpu(wrb, sizeof(*wrb));
695
696 dma = (u64)wrb->frag_pa_hi << 32 | (u64)wrb->frag_pa_lo;
FUJITA Tomonorib681ee72010-04-04 21:40:18 +0000697 if (wrb->frag_len) {
Sathya Perla7101e112010-03-22 20:41:12 +0000698 if (unmap_single)
Ivan Vecera2b7bceb2011-02-02 08:05:12 +0000699 dma_unmap_single(dev, dma, wrb->frag_len,
700 DMA_TO_DEVICE);
Sathya Perla7101e112010-03-22 20:41:12 +0000701 else
Ivan Vecera2b7bceb2011-02-02 08:05:12 +0000702 dma_unmap_page(dev, dma, wrb->frag_len, DMA_TO_DEVICE);
Sathya Perla7101e112010-03-22 20:41:12 +0000703 }
704}
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700705
/* Populate the TX queue with WRBs for @skb: one header WRB, one WRB per
 * DMA-mapped fragment (linear head first, then page frags), and an
 * optional dummy WRB so the total count is even (@dummy_wrb, required
 * on pre-Lancer chips). Returns the number of payload bytes mapped, or
 * 0 on a DMA mapping failure, in which case all mappings made so far
 * are unmapped and the queue head is rewound.
 */
static int make_tx_wrbs(struct be_adapter *adapter, struct be_queue_info *txq,
		struct sk_buff *skb, u32 wrb_cnt, bool dummy_wrb,
		bool skip_hw_vlan)
{
	dma_addr_t busaddr;
	int i, copied = 0;
	struct device *dev = &adapter->pdev->dev;
	struct sk_buff *first_skb = skb;
	struct be_eth_wrb *wrb;
	struct be_eth_hdr_wrb *hdr;
	bool map_single = false;
	u16 map_head;

	hdr = queue_head_node(txq);
	queue_head_inc(txq);
	map_head = txq->head;	/* first fragment slot; rollback point */

	/* Linear part of the skb, if any */
	if (skb->len > skb->data_len) {
		int len = skb_headlen(skb);
		busaddr = dma_map_single(dev, skb->data, len, DMA_TO_DEVICE);
		if (dma_mapping_error(dev, busaddr))
			goto dma_err;
		map_single = true;
		wrb = queue_head_node(txq);
		wrb_fill(wrb, busaddr, len);
		be_dws_cpu_to_le(wrb, sizeof(*wrb));
		queue_head_inc(txq);
		copied += len;
	}

	/* One WRB per paged fragment */
	for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
		const struct skb_frag_struct *frag =
			&skb_shinfo(skb)->frags[i];
		busaddr = skb_frag_dma_map(dev, frag, 0,
					skb_frag_size(frag), DMA_TO_DEVICE);
		if (dma_mapping_error(dev, busaddr))
			goto dma_err;
		wrb = queue_head_node(txq);
		wrb_fill(wrb, busaddr, skb_frag_size(frag));
		be_dws_cpu_to_le(wrb, sizeof(*wrb));
		queue_head_inc(txq);
		copied += skb_frag_size(frag);
	}

	/* Pad to an even WRB count where the HW requires it */
	if (dummy_wrb) {
		wrb = queue_head_node(txq);
		wrb_fill(wrb, 0, 0);
		be_dws_cpu_to_le(wrb, sizeof(*wrb));
		queue_head_inc(txq);
	}

	wrb_fill_hdr(adapter, hdr, first_skb, wrb_cnt, copied, skip_hw_vlan);
	be_dws_cpu_to_le(hdr, sizeof(*hdr));

	return copied;
dma_err:
	/* Rewind the queue head and unmap every fragment mapped so far.
	 * Only the first WRB (the linear head, if present) was mapped
	 * with dma_map_single; the rest were page mappings.
	 */
	txq->head = map_head;
	while (copied) {
		wrb = queue_head_node(txq);
		unmap_tx_frag(dev, wrb, map_single);
		map_single = false;
		copied -= wrb->frag_len;
		queue_head_inc(txq);
	}
	return 0;
}
772
/* Insert the VLAN tag (and the outer QnQ tag, if configured) into the
 * packet data itself instead of relying on HW tag insertion.
 * Returns the (possibly reallocated) skb, or NULL if skb_share_check()
 * or __vlan_put_tag() failed; those helpers free the skb on failure.
 * *skip_hw_vlan is set to true (when the pointer is non-NULL) whenever
 * a tag was placed in the pkt, so the caller can tell the HW not to
 * insert one as well.
 */
static struct sk_buff *be_insert_vlan_in_pkt(struct be_adapter *adapter,
		struct sk_buff *skb,
		bool *skip_hw_vlan)
{
	u16 vlan_tag = 0;

	skb = skb_share_check(skb, GFP_ATOMIC);
	if (unlikely(!skb))
		return skb;

	if (vlan_tx_tag_present(skb))
		vlan_tag = be_get_tx_vlan_tag(adapter, skb);
	else if (qnq_async_evt_rcvd(adapter) && adapter->pvid)
		vlan_tag = adapter->pvid;	/* fall back to the port vlan */

	if (vlan_tag) {
		skb = __vlan_put_tag(skb, htons(ETH_P_8021Q), vlan_tag);
		if (unlikely(!skb))
			return skb;
		/* tag now lives inside the frame; clear out-of-band tci */
		skb->vlan_tci = 0;
		if (skip_hw_vlan)
			*skip_hw_vlan = true;
	}

	/* Insert the outer VLAN, if any */
	if (adapter->qnq_vid) {
		vlan_tag = adapter->qnq_vid;
		skb = __vlan_put_tag(skb, htons(ETH_P_8021Q), vlan_tag);
		if (unlikely(!skb))
			return skb;
		if (skip_hw_vlan)
			*skip_hw_vlan = true;
	}

	return skb;
}
809
Ajit Khapardebc0c3402013-04-24 11:52:50 +0000810static bool be_ipv6_exthdr_check(struct sk_buff *skb)
811{
812 struct ethhdr *eh = (struct ethhdr *)skb->data;
813 u16 offset = ETH_HLEN;
814
815 if (eh->h_proto == htons(ETH_P_IPV6)) {
816 struct ipv6hdr *ip6h = (struct ipv6hdr *)(skb->data + offset);
817
818 offset += sizeof(struct ipv6hdr);
819 if (ip6h->nexthdr != NEXTHDR_TCP &&
820 ip6h->nexthdr != NEXTHDR_UDP) {
821 struct ipv6_opt_hdr *ehdr =
822 (struct ipv6_opt_hdr *) (skb->data + offset);
823
824 /* offending pkt: 2nd byte following IPv6 hdr is 0xff */
825 if (ehdr->hdrlen == 0xff)
826 return true;
827 }
828 }
829 return false;
830}
831
832static int be_vlan_tag_tx_chk(struct be_adapter *adapter, struct sk_buff *skb)
833{
834 return vlan_tx_tag_present(skb) || adapter->pvid || adapter->qnq_vid;
835}
836
837static int be_ipv6_tx_stall_chk(struct be_adapter *adapter, struct sk_buff *skb)
838{
839 return BE3_chip(adapter) &&
840 be_ipv6_exthdr_check(skb);
841}
842
/* ndo_start_xmit handler. Applies several HW workarounds (padded-pkt
 * IP length fixup, manual VLAN insertion for csum-disabled pkts and
 * for ipv6 pkts that can stall BE3), builds the WRBs for the skb and
 * rings the TX doorbell. The subqueue is stopped *before* the doorbell
 * when the ring is near full, so TX completions can wake it later.
 * Always returns NETDEV_TX_OK; undeliverable pkts are dropped.
 */
static netdev_tx_t be_xmit(struct sk_buff *skb,
			struct net_device *netdev)
{
	struct be_adapter *adapter = netdev_priv(netdev);
	struct be_tx_obj *txo = &adapter->tx_obj[skb_get_queue_mapping(skb)];
	struct be_queue_info *txq = &txo->q;
	struct iphdr *ip = NULL;
	u32 wrb_cnt = 0, copied = 0;
	u32 start = txq->head, eth_hdr_len;
	bool dummy_wrb, stopped = false;
	bool skip_hw_vlan = false;
	struct vlan_ethhdr *veh = (struct vlan_ethhdr *)skb->data;

	eth_hdr_len = ntohs(skb->protocol) == ETH_P_8021Q ?
		VLAN_ETH_HLEN : ETH_HLEN;

	/* For padded packets, BE HW modifies tot_len field in IP header
	 * incorrecly when VLAN tag is inserted by HW.
	 * Trim the pad away so the HW has nothing to corrupt.
	 */
	if (skb->len <= 60 && vlan_tx_tag_present(skb) && is_ipv4_pkt(skb)) {
		ip = (struct iphdr *)ip_hdr(skb);
		pskb_trim(skb, eth_hdr_len + ntohs(ip->tot_len));
	}

	/* If vlan tag is already inlined in the packet, skip HW VLAN
	 * tagging in UMC mode
	 */
	if ((adapter->function_mode & UMC_ENABLED) &&
	    veh->h_vlan_proto == htons(ETH_P_8021Q))
		skip_hw_vlan = true;

	/* HW has a bug wherein it will calculate CSUM for VLAN
	 * pkts even though it is disabled.
	 * Manually insert VLAN in pkt.
	 */
	if (skb->ip_summed != CHECKSUM_PARTIAL &&
			vlan_tx_tag_present(skb)) {
		skb = be_insert_vlan_in_pkt(adapter, skb, &skip_hw_vlan);
		if (unlikely(!skb))
			goto tx_drop;
	}

	/* HW may lockup when VLAN HW tagging is requested on
	 * certain ipv6 packets. Drop such pkts if the HW workaround to
	 * skip HW tagging is not enabled by FW.
	 */
	if (unlikely(be_ipv6_tx_stall_chk(adapter, skb) &&
	    (adapter->pvid || adapter->qnq_vid) &&
	    !qnq_async_evt_rcvd(adapter)))
		goto tx_drop;

	/* Manual VLAN tag insertion to prevent:
	 * ASIC lockup when the ASIC inserts VLAN tag into
	 * certain ipv6 packets. Insert VLAN tags in driver,
	 * and set event, completion, vlan bits accordingly
	 * in the Tx WRB.
	 */
	if (be_ipv6_tx_stall_chk(adapter, skb) &&
	    be_vlan_tag_tx_chk(adapter, skb)) {
		skb = be_insert_vlan_in_pkt(adapter, skb, &skip_hw_vlan);
		if (unlikely(!skb))
			goto tx_drop;
	}

	wrb_cnt = wrb_cnt_for_skb(adapter, skb, &dummy_wrb);

	copied = make_tx_wrbs(adapter, txq, skb, wrb_cnt, dummy_wrb,
			skip_hw_vlan);
	if (copied) {
		int gso_segs = skb_shinfo(skb)->gso_segs;

		/* record the sent skb in the sent_skb table */
		BUG_ON(txo->sent_skb_list[start]);
		txo->sent_skb_list[start] = skb;

		/* Ensure txq has space for the next skb; Else stop the queue
		 * *BEFORE* ringing the tx doorbell, so that we serialze the
		 * tx compls of the current transmit which'll wake up the queue
		 */
		atomic_add(wrb_cnt, &txq->used);
		if ((BE_MAX_TX_FRAG_COUNT + atomic_read(&txq->used)) >=
								txq->len) {
			netif_stop_subqueue(netdev, skb_get_queue_mapping(skb));
			stopped = true;
		}

		be_txq_notify(adapter, txo, wrb_cnt);

		be_tx_stats_update(txo, wrb_cnt, copied, gso_segs, stopped);
	} else {
		/* DMA mapping failed; make_tx_wrbs rolled back — drop pkt */
		txq->head = start;
		dev_kfree_skb_any(skb);
	}
tx_drop:
	return NETDEV_TX_OK;
}
939
940static int be_change_mtu(struct net_device *netdev, int new_mtu)
941{
942 struct be_adapter *adapter = netdev_priv(netdev);
943 if (new_mtu < BE_MIN_MTU ||
Ajit Khaparde34a89b82010-02-09 01:32:43 +0000944 new_mtu > (BE_MAX_JUMBO_FRAME_SIZE -
945 (ETH_HLEN + ETH_FCS_LEN))) {
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700946 dev_info(&adapter->pdev->dev,
947 "MTU must be between %d and %d bytes\n",
Ajit Khaparde34a89b82010-02-09 01:32:43 +0000948 BE_MIN_MTU,
949 (BE_MAX_JUMBO_FRAME_SIZE - (ETH_HLEN + ETH_FCS_LEN)));
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700950 return -EINVAL;
951 }
952 dev_info(&adapter->pdev->dev, "MTU changed from %d to %d bytes\n",
953 netdev->mtu, new_mtu);
954 netdev->mtu = new_mtu;
955 return 0;
956}
957
/*
 * A max of 64 (BE_NUM_VLANS_SUPPORTED) vlans can be configured in BE.
 * If the user configures more, place BE in vlan promiscuous mode.
 * Builds the current VLAN filter table from adapter->vlan_tag[] and
 * pushes it to the FW; falls back to VLAN promiscuous mode when the
 * count exceeds max_vlans or the FW rejects the filter list.
 */
static int be_vid_config(struct be_adapter *adapter)
{
	u16 vids[BE_NUM_VLANS_SUPPORTED];
	u16 num = 0, i;
	int status = 0;

	/* No need to further configure vids if in promiscuous mode */
	if (adapter->promiscuous)
		return 0;

	if (adapter->vlans_added > adapter->max_vlans)
		goto set_vlan_promisc;

	/* Construct VLAN Table to give to HW */
	for (i = 0; i < VLAN_N_VID; i++)
		if (adapter->vlan_tag[i])
			vids[num++] = cpu_to_le16(i);

	status = be_cmd_vlan_config(adapter, adapter->if_handle,
				vids, num, 1, 0);

	/* Set to VLAN promisc mode as setting VLAN filter failed */
	if (status) {
		dev_info(&adapter->pdev->dev, "Exhausted VLAN HW filters.\n");
		dev_info(&adapter->pdev->dev, "Disabling HW VLAN filtering.\n");
		goto set_vlan_promisc;
	}

	return status;

set_vlan_promisc:
	/* NULL/0 table with the promisc flag set enables vlan promisc */
	status = be_cmd_vlan_config(adapter, adapter->if_handle,
				NULL, 0, 1, 1);
	return status;
}
997
/* ndo_vlan_rx_add_vid handler: mark @vid in the driver's VLAN table
 * and push the updated filter list to the FW via be_vid_config().
 * The table entry is reverted if the FW update fails.
 */
static int be_vlan_add_vid(struct net_device *netdev, __be16 proto, u16 vid)
{
	struct be_adapter *adapter = netdev_priv(netdev);
	int status = 0;

	/* Only the PF (or any function on Lancer) may program filters */
	if (!lancer_chip(adapter) && !be_physfn(adapter)) {
		status = -EINVAL;
		goto ret;
	}

	/* Packets with VID 0 are always received by Lancer by default */
	if (lancer_chip(adapter) && vid == 0)
		goto ret;

	adapter->vlan_tag[vid] = 1;
	/* NOTE(review): compares against max_vlans + 1 here while
	 * be_vlan_rem_vid() uses max_vlans — confirm the asymmetry is
	 * intended (vlans_added is not yet incremented at this point).
	 */
	if (adapter->vlans_added <= (adapter->max_vlans + 1))
		status = be_vid_config(adapter);

	if (!status)
		adapter->vlans_added++;
	else
		adapter->vlan_tag[vid] = 0;	/* roll back on failure */
ret:
	return status;
}
1023
/* ndo_vlan_rx_kill_vid handler: clear @vid from the driver's VLAN
 * table and push the updated filter list to the FW. The table entry
 * is restored if the FW update fails.
 */
static int be_vlan_rem_vid(struct net_device *netdev, __be16 proto, u16 vid)
{
	struct be_adapter *adapter = netdev_priv(netdev);
	int status = 0;

	/* Only the PF (or any function on Lancer) may program filters */
	if (!lancer_chip(adapter) && !be_physfn(adapter)) {
		status = -EINVAL;
		goto ret;
	}

	/* Packets with VID 0 are always received by Lancer by default */
	if (lancer_chip(adapter) && vid == 0)
		goto ret;

	adapter->vlan_tag[vid] = 0;
	if (adapter->vlans_added <= adapter->max_vlans)
		status = be_vid_config(adapter);

	if (!status)
		adapter->vlans_added--;
	else
		adapter->vlan_tag[vid] = 1;	/* roll back on failure */
ret:
	return status;
}
1049
/* ndo_set_rx_mode handler: sync promiscuous, multicast and unicast
 * filter state with the FW. Falls back to all-multicast or full
 * promiscuous mode when the corresponding HW filter tables are
 * exhausted.
 */
static void be_set_rx_mode(struct net_device *netdev)
{
	struct be_adapter *adapter = netdev_priv(netdev);
	int status;

	if (netdev->flags & IFF_PROMISC) {
		be_cmd_rx_filter(adapter, IFF_PROMISC, ON);
		adapter->promiscuous = true;
		goto done;
	}

	/* BE was previously in promiscuous mode; disable it */
	if (adapter->promiscuous) {
		adapter->promiscuous = false;
		be_cmd_rx_filter(adapter, IFF_PROMISC, OFF);

		/* promisc had bypassed vlan filtering; reprogram the vids */
		if (adapter->vlans_added)
			be_vid_config(adapter);
	}

	/* Enable multicast promisc if num configured exceeds what we support */
	if (netdev->flags & IFF_ALLMULTI ||
	    netdev_mc_count(netdev) > adapter->max_mcast_mac) {
		be_cmd_rx_filter(adapter, IFF_ALLMULTI, ON);
		goto done;
	}

	/* Resync the secondary unicast MAC list if it changed */
	if (netdev_uc_count(netdev) != adapter->uc_macs) {
		struct netdev_hw_addr *ha;
		int i = 1; /* First slot is claimed by the Primary MAC */

		/* Delete all previously programmed secondary UC MACs */
		for (; adapter->uc_macs > 0; adapter->uc_macs--, i++) {
			be_cmd_pmac_del(adapter, adapter->if_handle,
					adapter->pmac_id[i], 0);
		}

		/* More UC addrs than PMAC slots: go fully promiscuous */
		if (netdev_uc_count(netdev) > adapter->max_pmac_cnt) {
			be_cmd_rx_filter(adapter, IFF_PROMISC, ON);
			adapter->promiscuous = true;
			goto done;
		}

		netdev_for_each_uc_addr(ha, adapter->netdev) {
			adapter->uc_macs++; /* First slot is for Primary MAC */
			be_cmd_pmac_add(adapter, (u8 *)ha->addr,
					adapter->if_handle,
					&adapter->pmac_id[adapter->uc_macs], 0);
		}
	}

	status = be_cmd_rx_filter(adapter, IFF_MULTICAST, ON);

	/* Set to MCAST promisc mode if setting MULTICAST address fails */
	if (status) {
		dev_info(&adapter->pdev->dev, "Exhausted multicast HW filters.\n");
		dev_info(&adapter->pdev->dev, "Disabling HW multicast filtering.\n");
		be_cmd_rx_filter(adapter, IFF_ALLMULTI, ON);
	}
done:
	return;
}
1111
/* ndo_set_vf_mac handler: replace the MAC programmed for VF @vf.
 * On Lancer the currently active MAC (if any) is deleted and the new
 * one pushed via the FW MAC list; on BEx the old PMAC entry is deleted
 * and a new one added. The new MAC is cached in vf_cfg on success.
 * NOTE(review): in the BEx path the status of be_cmd_pmac_del() is
 * immediately overwritten by be_cmd_pmac_add() — a failed delete goes
 * unreported; confirm this is intentional.
 */
static int be_set_vf_mac(struct net_device *netdev, int vf, u8 *mac)
{
	struct be_adapter *adapter = netdev_priv(netdev);
	struct be_vf_cfg *vf_cfg = &adapter->vf_cfg[vf];
	int status;
	bool active_mac = false;
	u32 pmac_id;
	u8 old_mac[ETH_ALEN];

	if (!sriov_enabled(adapter))
		return -EPERM;

	if (!is_valid_ether_addr(mac) || vf >= adapter->num_vfs)
		return -EINVAL;

	if (lancer_chip(adapter)) {
		status = be_cmd_get_mac_from_list(adapter, old_mac, &active_mac,
						  &pmac_id, vf + 1);
		if (!status && active_mac)
			be_cmd_pmac_del(adapter, vf_cfg->if_handle,
					pmac_id, vf + 1);

		status = be_cmd_set_mac_list(adapter, mac, 1, vf + 1);
	} else {
		status = be_cmd_pmac_del(adapter, vf_cfg->if_handle,
					 vf_cfg->pmac_id, vf + 1);

		status = be_cmd_pmac_add(adapter, mac, vf_cfg->if_handle,
					 &vf_cfg->pmac_id, vf + 1);
	}

	if (status)
		dev_err(&adapter->pdev->dev, "MAC %pM set on VF %d Failed\n",
				mac, vf);
	else
		memcpy(vf_cfg->mac_addr, mac, ETH_ALEN);

	return status;
}
1151
Ajit Khaparde64600ea2010-07-23 01:50:34 +00001152static int be_get_vf_config(struct net_device *netdev, int vf,
1153 struct ifla_vf_info *vi)
1154{
1155 struct be_adapter *adapter = netdev_priv(netdev);
Sathya Perla11ac75e2011-12-13 00:58:50 +00001156 struct be_vf_cfg *vf_cfg = &adapter->vf_cfg[vf];
Ajit Khaparde64600ea2010-07-23 01:50:34 +00001157
Sathya Perla11ac75e2011-12-13 00:58:50 +00001158 if (!sriov_enabled(adapter))
Ajit Khaparde64600ea2010-07-23 01:50:34 +00001159 return -EPERM;
1160
Sathya Perla11ac75e2011-12-13 00:58:50 +00001161 if (vf >= adapter->num_vfs)
Ajit Khaparde64600ea2010-07-23 01:50:34 +00001162 return -EINVAL;
1163
1164 vi->vf = vf;
Sathya Perla11ac75e2011-12-13 00:58:50 +00001165 vi->tx_rate = vf_cfg->tx_rate;
1166 vi->vlan = vf_cfg->vlan_tag;
Ajit Khaparde64600ea2010-07-23 01:50:34 +00001167 vi->qos = 0;
Sathya Perla11ac75e2011-12-13 00:58:50 +00001168 memcpy(&vi->mac, vf_cfg->mac_addr, ETH_ALEN);
Ajit Khaparde64600ea2010-07-23 01:50:34 +00001169
1170 return 0;
1171}
1172
/* ndo_set_vf_vlan handler: program transparent VLAN tagging for VF @vf
 * via the hardware switch config. A @vlan of 0 resets tagging to the
 * VF's default vid. The new tag is cached in vf_cfg.
 * NOTE(review): @qos is accepted but never read — confirm ignoring it
 * is intended.
 */
static int be_set_vf_vlan(struct net_device *netdev,
			int vf, u16 vlan, u8 qos)
{
	struct be_adapter *adapter = netdev_priv(netdev);
	int status = 0;

	if (!sriov_enabled(adapter))
		return -EPERM;

	if (vf >= adapter->num_vfs || vlan > 4095)
		return -EINVAL;

	if (vlan) {
		if (adapter->vf_cfg[vf].vlan_tag != vlan) {
			/* If this is new value, program it. Else skip. */
			adapter->vf_cfg[vf].vlan_tag = vlan;

			status = be_cmd_set_hsw_config(adapter, vlan,
				vf + 1, adapter->vf_cfg[vf].if_handle);
		}
	} else {
		/* Reset Transparent Vlan Tagging. */
		adapter->vf_cfg[vf].vlan_tag = 0;
		vlan = adapter->vf_cfg[vf].def_vid;
		status = be_cmd_set_hsw_config(adapter, vlan, vf + 1,
					adapter->vf_cfg[vf].if_handle);
	}


	if (status)
		dev_info(&adapter->pdev->dev,
				"VLAN %d config on VF %d failed\n", vlan, vf);
	return status;
}
1207
Ajit Khapardee1d18732010-07-23 01:52:13 +00001208static int be_set_vf_tx_rate(struct net_device *netdev,
1209 int vf, int rate)
1210{
1211 struct be_adapter *adapter = netdev_priv(netdev);
1212 int status = 0;
1213
Sathya Perla11ac75e2011-12-13 00:58:50 +00001214 if (!sriov_enabled(adapter))
Ajit Khapardee1d18732010-07-23 01:52:13 +00001215 return -EPERM;
1216
Ajit Khaparde94f434c2011-12-30 12:15:30 +00001217 if (vf >= adapter->num_vfs)
Ajit Khapardee1d18732010-07-23 01:52:13 +00001218 return -EINVAL;
1219
Ajit Khaparde94f434c2011-12-30 12:15:30 +00001220 if (rate < 100 || rate > 10000) {
1221 dev_err(&adapter->pdev->dev,
1222 "tx rate must be between 100 and 10000 Mbps\n");
1223 return -EINVAL;
1224 }
Ajit Khapardee1d18732010-07-23 01:52:13 +00001225
Padmanabh Ratnakard5c18472012-10-20 06:01:53 +00001226 if (lancer_chip(adapter))
1227 status = be_cmd_set_profile_config(adapter, rate / 10, vf + 1);
1228 else
1229 status = be_cmd_set_qos(adapter, rate / 10, vf + 1);
Ajit Khapardee1d18732010-07-23 01:52:13 +00001230
1231 if (status)
Ajit Khaparde94f434c2011-12-30 12:15:30 +00001232 dev_err(&adapter->pdev->dev,
Ajit Khapardee1d18732010-07-23 01:52:13 +00001233 "tx rate %d on VF %d failed\n", rate, vf);
Ajit Khaparde94f434c2011-12-30 12:15:30 +00001234 else
1235 adapter->vf_cfg[vf].tx_rate = rate;
Ajit Khapardee1d18732010-07-23 01:52:13 +00001236 return status;
1237}
1238
/* Walk the PCI bus and count this adapter's virtual functions.
 * @vf_state selects the result: ASSIGNED returns the number of VFs
 * currently assigned to guests, anything else the total VF count.
 * Returns 0 if the PF has no SR-IOV extended capability.
 * NOTE(review): offset/stride are read from SR-IOV config space but
 * never used — candidates for removal.
 */
static int be_find_vfs(struct be_adapter *adapter, int vf_state)
{
	struct pci_dev *dev, *pdev = adapter->pdev;
	int vfs = 0, assigned_vfs = 0, pos;
	u16 offset, stride;

	pos = pci_find_ext_capability(pdev, PCI_EXT_CAP_ID_SRIOV);
	if (!pos)
		return 0;
	pci_read_config_word(pdev, pos + PCI_SRIOV_VF_OFFSET, &offset);
	pci_read_config_word(pdev, pos + PCI_SRIOV_VF_STRIDE, &stride);

	/* pci_get_device drops the previous ref and takes a new one,
	 * so the loop itself manages the device refcounts.
	 */
	dev = pci_get_device(pdev->vendor, PCI_ANY_ID, NULL);
	while (dev) {
		if (dev->is_virtfn && pci_physfn(dev) == pdev) {
			vfs++;
			if (dev->dev_flags & PCI_DEV_FLAGS_ASSIGNED)
				assigned_vfs++;
		}
		dev = pci_get_device(pdev->vendor, PCI_ANY_ID, dev);
	}
	return (vf_state == ASSIGNED) ? assigned_vfs : vfs;
}
1262
/* Adaptive interrupt coalescing: recompute the EQ delay for @eqo from
 * the RX packet rate (sampled at most once per second) and push the
 * new value to the FW if it changed. When AIC is disabled the static
 * configured eqd is applied instead.
 */
static void be_eqd_update(struct be_adapter *adapter, struct be_eq_obj *eqo)
{
	struct be_rx_stats *stats = rx_stats(&adapter->rx_obj[eqo->idx]);
	ulong now = jiffies;
	ulong delta = now - stats->rx_jiffies;
	u64 pkts;
	unsigned int start, eqd;

	if (!eqo->enable_aic) {
		eqd = eqo->eqd;
		goto modify_eqd;
	}

	/* EQs beyond the RX queues have no RX stats to adapt from */
	if (eqo->idx >= adapter->num_rx_qs)
		return;

	stats = rx_stats(&adapter->rx_obj[eqo->idx]);

	/* Wrapped around */
	if (time_before(now, stats->rx_jiffies)) {
		stats->rx_jiffies = now;
		return;
	}

	/* Update once a second */
	if (delta < HZ)
		return;

	/* Consistent snapshot of the 64-bit packet counter (32-bit safe) */
	do {
		start = u64_stats_fetch_begin_bh(&stats->sync);
		pkts = stats->rx_pkts;
	} while (u64_stats_fetch_retry_bh(&stats->sync, start));

	stats->rx_pps = (unsigned long)(pkts - stats->rx_pkts_prev) / (delta / HZ);
	stats->rx_pkts_prev = pkts;
	stats->rx_jiffies = now;
	/* Map pkt rate to a delay, clamped to the EQ's [min, max] range;
	 * very low rates get zero delay (no coalescing).
	 */
	eqd = (stats->rx_pps / 110000) << 3;
	eqd = min(eqd, eqo->max_eqd);
	eqd = max(eqd, eqo->min_eqd);
	if (eqd < 10)
		eqd = 0;

modify_eqd:
	/* Only issue the FW cmd when the value actually changed */
	if (eqd != eqo->cur_eqd) {
		be_cmd_modify_eqd(adapter, eqo->q.id, eqd);
		eqo->cur_eqd = eqd;
	}
}
1311
/* Account one RX completion in the per-queue stats. The updates are
 * bracketed by the u64 stats sync so 32-bit readers see a consistent
 * snapshot of the 64-bit counters.
 */
static void be_rx_stats_update(struct be_rx_obj *rxo,
		struct be_rx_compl_info *rxcp)
{
	struct be_rx_stats *stats = rx_stats(rxo);

	u64_stats_update_begin(&stats->sync);
	stats->rx_compl++;
	stats->rx_bytes += rxcp->pkt_size;
	stats->rx_pkts++;
	if (rxcp->pkt_type == BE_MULTICAST_PACKET)
		stats->rx_mcast_pkts++;
	if (rxcp->err)
		stats->rx_compl_err++;
	u64_stats_update_end(&stats->sync);
}
1327
Sathya Perla2e588f82011-03-11 02:49:26 +00001328static inline bool csum_passed(struct be_rx_compl_info *rxcp)
Ajit Khaparde728a9972009-04-13 15:41:22 -07001329{
Padmanabh Ratnakar19fad862011-03-07 03:08:16 +00001330 /* L4 checksum is not reliable for non TCP/UDP packets.
1331 * Also ignore ipcksm for ipv6 pkts */
Sathya Perla2e588f82011-03-11 02:49:26 +00001332 return (rxcp->tcpf || rxcp->udpf) && rxcp->l4_csum &&
1333 (rxcp->ip_csum || rxcp->ipv6);
Ajit Khaparde728a9972009-04-13 15:41:22 -07001334}
1335
/* Return the page-info entry for the RX frag at @frag_idx, DMA-unmapping the
 * underlying big page if this frag is its last user, and account for one
 * posted buffer being consumed (rxq->used is decremented).
 */
static struct be_rx_page_info *get_rx_page_info(struct be_rx_obj *rxo,
						u16 frag_idx)
{
	struct be_adapter *adapter = rxo->adapter;
	struct be_rx_page_info *rx_page_info;
	struct be_queue_info *rxq = &rxo->q;

	rx_page_info = &rxo->page_info_tbl[frag_idx];
	BUG_ON(!rx_page_info->page);

	/* One big_page_size mapping is shared by several frags; only the
	 * entry flagged last_page_user unmaps it, exactly once */
	if (rx_page_info->last_page_user) {
		dma_unmap_page(&adapter->pdev->dev,
			       dma_unmap_addr(rx_page_info, bus),
			       adapter->big_page_size, DMA_FROM_DEVICE);
		rx_page_info->last_page_user = false;
	}

	atomic_dec(&rxq->used);
	return rx_page_info;
}
1356
1357/* Throwaway the data in the Rx completion */
Sathya Perla10ef9ab2012-02-09 18:05:27 +00001358static void be_rx_compl_discard(struct be_rx_obj *rxo,
1359 struct be_rx_compl_info *rxcp)
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001360{
Sathya Perla3abcded2010-10-03 22:12:27 -07001361 struct be_queue_info *rxq = &rxo->q;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001362 struct be_rx_page_info *page_info;
Sathya Perla2e588f82011-03-11 02:49:26 +00001363 u16 i, num_rcvd = rxcp->num_rcvd;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001364
Padmanabh Ratnakare80d9da2011-03-07 03:07:58 +00001365 for (i = 0; i < num_rcvd; i++) {
Sathya Perla10ef9ab2012-02-09 18:05:27 +00001366 page_info = get_rx_page_info(rxo, rxcp->rxq_idx);
Padmanabh Ratnakare80d9da2011-03-07 03:07:58 +00001367 put_page(page_info->page);
1368 memset(page_info, 0, sizeof(*page_info));
Sathya Perla2e588f82011-03-11 02:49:26 +00001369 index_inc(&rxcp->rxq_idx, rxq->len);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001370 }
1371}
1372
/*
 * skb_fill_rx_data forms a complete skb for an ether frame
 * indicated by rxcp.
 * The first frag is copied (fully for tiny packets, header-only otherwise)
 * into the skb's linear area; remaining frags are attached as page frags,
 * coalescing consecutive frags that live in the same physical page.
 */
static void skb_fill_rx_data(struct be_rx_obj *rxo, struct sk_buff *skb,
			     struct be_rx_compl_info *rxcp)
{
	struct be_queue_info *rxq = &rxo->q;
	struct be_rx_page_info *page_info;
	u16 i, j;
	u16 hdr_len, curr_frag_len, remaining;
	u8 *start;

	page_info = get_rx_page_info(rxo, rxcp->rxq_idx);
	start = page_address(page_info->page) + page_info->page_offset;
	prefetch(start);

	/* Copy data in the first descriptor of this completion */
	curr_frag_len = min(rxcp->pkt_size, rx_frag_size);

	skb->len = curr_frag_len;
	if (curr_frag_len <= BE_HDR_LEN) { /* tiny packet */
		memcpy(skb->data, start, curr_frag_len);
		/* Complete packet has now been moved to data */
		put_page(page_info->page);
		skb->data_len = 0;
		skb->tail += curr_frag_len;
	} else {
		/* Copy only the Ethernet header inline; the rest of the
		 * first frag stays in the page and becomes frags[0] */
		hdr_len = ETH_HLEN;
		memcpy(skb->data, start, hdr_len);
		skb_shinfo(skb)->nr_frags = 1;
		skb_frag_set_page(skb, 0, page_info->page);
		skb_shinfo(skb)->frags[0].page_offset =
					page_info->page_offset + hdr_len;
		skb_frag_size_set(&skb_shinfo(skb)->frags[0], curr_frag_len - hdr_len);
		skb->data_len = curr_frag_len - hdr_len;
		skb->truesize += rx_frag_size;
		skb->tail += hdr_len;
	}
	page_info->page = NULL;

	if (rxcp->pkt_size <= rx_frag_size) {
		/* single-frag packet: nothing more to attach */
		BUG_ON(rxcp->num_rcvd != 1);
		return;
	}

	/* More frags present for this completion */
	index_inc(&rxcp->rxq_idx, rxq->len);
	remaining = rxcp->pkt_size - curr_frag_len;
	for (i = 1, j = 0; i < rxcp->num_rcvd; i++) {
		page_info = get_rx_page_info(rxo, rxcp->rxq_idx);
		curr_frag_len = min(remaining, rx_frag_size);

		/* Coalesce all frags from the same physical page in one slot */
		if (page_info->page_offset == 0) {
			/* Fresh page */
			j++;
			skb_frag_set_page(skb, j, page_info->page);
			skb_shinfo(skb)->frags[j].page_offset =
							page_info->page_offset;
			skb_frag_size_set(&skb_shinfo(skb)->frags[j], 0);
			skb_shinfo(skb)->nr_frags++;
		} else {
			/* same page as frags[j]: drop the extra page ref
			 * taken when the frag was posted */
			put_page(page_info->page);
		}

		skb_frag_size_add(&skb_shinfo(skb)->frags[j], curr_frag_len);
		skb->len += curr_frag_len;
		skb->data_len += curr_frag_len;
		skb->truesize += rx_frag_size;
		remaining -= curr_frag_len;
		index_inc(&rxcp->rxq_idx, rxq->len);
		page_info->page = NULL;
	}
	BUG_ON(j > MAX_SKB_FRAGS);
}
1449
/* Process the RX completion indicated by rxcp when GRO is disabled.
 * Allocates an skb, attaches the received frags, sets checksum/rxhash/vlan
 * metadata and hands the packet to the stack via netif_receive_skb().
 */
static void be_rx_compl_process(struct be_rx_obj *rxo,
				struct be_rx_compl_info *rxcp)
{
	struct be_adapter *adapter = rxo->adapter;
	struct net_device *netdev = adapter->netdev;
	struct sk_buff *skb;

	skb = netdev_alloc_skb_ip_align(netdev, BE_RX_SKB_ALLOC_SIZE);
	if (unlikely(!skb)) {
		/* no skb memory: count the drop and release the frags */
		rx_stats(rxo)->rx_drops_no_skbs++;
		be_rx_compl_discard(rxo, rxcp);
		return;
	}

	skb_fill_rx_data(rxo, skb, rxcp);

	/* Trust the HW checksum only when the device advertises RXCSUM and
	 * the completion bits say it is reliable (see csum_passed()) */
	if (likely((netdev->features & NETIF_F_RXCSUM) && csum_passed(rxcp)))
		skb->ip_summed = CHECKSUM_UNNECESSARY;
	else
		skb_checksum_none_assert(skb);

	skb->protocol = eth_type_trans(skb, netdev);
	skb_record_rx_queue(skb, rxo - &adapter->rx_obj[0]);
	if (netdev->features & NETIF_F_RXHASH)
		skb->rxhash = rxcp->rss_hash;

	if (rxcp->vlanf)
		__vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), rxcp->vlan_tag);

	netif_receive_skb(skb);
}
1483
/* Process the RX completion indicated by rxcp when GRO is enabled.
 * Attaches the received frags directly to the napi GRO skb (no linear copy),
 * coalescing frags from the same physical page, and submits the packet via
 * napi_gro_frags().
 */
void be_rx_compl_process_gro(struct be_rx_obj *rxo, struct napi_struct *napi,
			     struct be_rx_compl_info *rxcp)
{
	struct be_adapter *adapter = rxo->adapter;
	struct be_rx_page_info *page_info;
	struct sk_buff *skb = NULL;
	struct be_queue_info *rxq = &rxo->q;
	u16 remaining, curr_frag_len;
	u16 i, j;

	skb = napi_get_frags(napi);
	if (!skb) {
		be_rx_compl_discard(rxo, rxcp);
		return;
	}

	remaining = rxcp->pkt_size;
	/* j is u16: "-1" wraps to 0xffff, and the i == 0 branch below bumps
	 * it to 0 on the first iteration */
	for (i = 0, j = -1; i < rxcp->num_rcvd; i++) {
		page_info = get_rx_page_info(rxo, rxcp->rxq_idx);

		curr_frag_len = min(remaining, rx_frag_size);

		/* Coalesce all frags from the same physical page in one slot */
		if (i == 0 || page_info->page_offset == 0) {
			/* First frag or Fresh page */
			j++;
			skb_frag_set_page(skb, j, page_info->page);
			skb_shinfo(skb)->frags[j].page_offset =
							page_info->page_offset;
			skb_frag_size_set(&skb_shinfo(skb)->frags[j], 0);
		} else {
			/* same page as frags[j]: drop the extra page ref */
			put_page(page_info->page);
		}
		skb_frag_size_add(&skb_shinfo(skb)->frags[j], curr_frag_len);
		skb->truesize += rx_frag_size;
		remaining -= curr_frag_len;
		index_inc(&rxcp->rxq_idx, rxq->len);
		memset(page_info, 0, sizeof(*page_info));
	}
	BUG_ON(j > MAX_SKB_FRAGS);

	skb_shinfo(skb)->nr_frags = j + 1;
	skb->len = rxcp->pkt_size;
	skb->data_len = rxcp->pkt_size;
	/* GRO path is only taken for frames whose csum passed */
	skb->ip_summed = CHECKSUM_UNNECESSARY;
	skb_record_rx_queue(skb, rxo - &adapter->rx_obj[0]);
	if (adapter->netdev->features & NETIF_F_RXHASH)
		skb->rxhash = rxcp->rss_hash;

	if (rxcp->vlanf)
		__vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), rxcp->vlan_tag);

	napi_gro_frags(napi);
}
1539
Sathya Perla10ef9ab2012-02-09 18:05:27 +00001540static void be_parse_rx_compl_v1(struct be_eth_rx_compl *compl,
1541 struct be_rx_compl_info *rxcp)
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001542{
Sathya Perla2e588f82011-03-11 02:49:26 +00001543 rxcp->pkt_size =
1544 AMAP_GET_BITS(struct amap_eth_rx_compl_v1, pktsize, compl);
1545 rxcp->vlanf = AMAP_GET_BITS(struct amap_eth_rx_compl_v1, vtp, compl);
1546 rxcp->err = AMAP_GET_BITS(struct amap_eth_rx_compl_v1, err, compl);
1547 rxcp->tcpf = AMAP_GET_BITS(struct amap_eth_rx_compl_v1, tcpf, compl);
Padmanabh Ratnakar9ecb42f2011-03-15 14:57:09 -07001548 rxcp->udpf = AMAP_GET_BITS(struct amap_eth_rx_compl_v1, udpf, compl);
Sathya Perla2e588f82011-03-11 02:49:26 +00001549 rxcp->ip_csum =
1550 AMAP_GET_BITS(struct amap_eth_rx_compl_v1, ipcksm, compl);
1551 rxcp->l4_csum =
1552 AMAP_GET_BITS(struct amap_eth_rx_compl_v1, l4_cksm, compl);
1553 rxcp->ipv6 =
1554 AMAP_GET_BITS(struct amap_eth_rx_compl_v1, ip_version, compl);
1555 rxcp->rxq_idx =
1556 AMAP_GET_BITS(struct amap_eth_rx_compl_v1, fragndx, compl);
1557 rxcp->num_rcvd =
1558 AMAP_GET_BITS(struct amap_eth_rx_compl_v1, numfrags, compl);
1559 rxcp->pkt_type =
1560 AMAP_GET_BITS(struct amap_eth_rx_compl_v1, cast_enc, compl);
Ajit Khaparde4b972912011-04-06 18:07:43 +00001561 rxcp->rss_hash =
Sarveshwar Bandic2979772012-07-25 21:29:50 +00001562 AMAP_GET_BITS(struct amap_eth_rx_compl_v1, rsshash, compl);
Sathya Perla15d72182011-03-21 20:49:26 +00001563 if (rxcp->vlanf) {
1564 rxcp->vtm = AMAP_GET_BITS(struct amap_eth_rx_compl_v1, vtm,
David S. Miller3c709f82011-05-11 14:26:15 -04001565 compl);
1566 rxcp->vlan_tag = AMAP_GET_BITS(struct amap_eth_rx_compl_v1, vlan_tag,
1567 compl);
Sathya Perla15d72182011-03-21 20:49:26 +00001568 }
Sathya Perla12004ae2011-08-02 19:57:46 +00001569 rxcp->port = AMAP_GET_BITS(struct amap_eth_rx_compl_v1, port, compl);
Sathya Perla2e588f82011-03-11 02:49:26 +00001570}
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001571
Sathya Perla10ef9ab2012-02-09 18:05:27 +00001572static void be_parse_rx_compl_v0(struct be_eth_rx_compl *compl,
1573 struct be_rx_compl_info *rxcp)
Sathya Perla2e588f82011-03-11 02:49:26 +00001574{
1575 rxcp->pkt_size =
1576 AMAP_GET_BITS(struct amap_eth_rx_compl_v0, pktsize, compl);
1577 rxcp->vlanf = AMAP_GET_BITS(struct amap_eth_rx_compl_v0, vtp, compl);
1578 rxcp->err = AMAP_GET_BITS(struct amap_eth_rx_compl_v0, err, compl);
1579 rxcp->tcpf = AMAP_GET_BITS(struct amap_eth_rx_compl_v0, tcpf, compl);
Padmanabh Ratnakar9ecb42f2011-03-15 14:57:09 -07001580 rxcp->udpf = AMAP_GET_BITS(struct amap_eth_rx_compl_v0, udpf, compl);
Sathya Perla2e588f82011-03-11 02:49:26 +00001581 rxcp->ip_csum =
1582 AMAP_GET_BITS(struct amap_eth_rx_compl_v0, ipcksm, compl);
1583 rxcp->l4_csum =
1584 AMAP_GET_BITS(struct amap_eth_rx_compl_v0, l4_cksm, compl);
1585 rxcp->ipv6 =
1586 AMAP_GET_BITS(struct amap_eth_rx_compl_v0, ip_version, compl);
1587 rxcp->rxq_idx =
1588 AMAP_GET_BITS(struct amap_eth_rx_compl_v0, fragndx, compl);
1589 rxcp->num_rcvd =
1590 AMAP_GET_BITS(struct amap_eth_rx_compl_v0, numfrags, compl);
1591 rxcp->pkt_type =
1592 AMAP_GET_BITS(struct amap_eth_rx_compl_v0, cast_enc, compl);
Ajit Khaparde4b972912011-04-06 18:07:43 +00001593 rxcp->rss_hash =
Sarveshwar Bandic2979772012-07-25 21:29:50 +00001594 AMAP_GET_BITS(struct amap_eth_rx_compl_v0, rsshash, compl);
Sathya Perla15d72182011-03-21 20:49:26 +00001595 if (rxcp->vlanf) {
1596 rxcp->vtm = AMAP_GET_BITS(struct amap_eth_rx_compl_v0, vtm,
David S. Miller3c709f82011-05-11 14:26:15 -04001597 compl);
1598 rxcp->vlan_tag = AMAP_GET_BITS(struct amap_eth_rx_compl_v0, vlan_tag,
1599 compl);
Sathya Perla15d72182011-03-21 20:49:26 +00001600 }
Sathya Perla12004ae2011-08-02 19:57:46 +00001601 rxcp->port = AMAP_GET_BITS(struct amap_eth_rx_compl_v0, port, compl);
Sathya Perla2e588f82011-03-11 02:49:26 +00001602}
1603
/* Fetch the next valid RX completion from @rxo's CQ, or NULL if none.
 * The entry is parsed into rxo->rxcp (one per rxo, reused each call), its
 * valid bit cleared and the CQ tail advanced, so each completion is
 * returned exactly once.
 */
static struct be_rx_compl_info *be_rx_compl_get(struct be_rx_obj *rxo)
{
	struct be_eth_rx_compl *compl = queue_tail_node(&rxo->cq);
	struct be_rx_compl_info *rxcp = &rxo->rxcp;
	struct be_adapter *adapter = rxo->adapter;

	/* For checking the valid bit it is Ok to use either definition as the
	 * valid bit is at the same position in both v0 and v1 Rx compl */
	if (compl->dw[offsetof(struct amap_eth_rx_compl_v1, valid) / 32] == 0)
		return NULL;

	/* read the rest of the entry only after seeing the valid bit set */
	rmb();
	be_dws_le_to_cpu(compl, sizeof(*compl));

	if (adapter->be3_native)
		be_parse_rx_compl_v1(compl, rxcp);
	else
		be_parse_rx_compl_v0(compl, rxcp);

	if (rxcp->vlanf) {
		/* vlanf could be wrongly set in some cards.
		 * ignore if vtm is not set */
		if ((adapter->function_mode & FLEX10_MODE) && !rxcp->vtm)
			rxcp->vlanf = 0;

		if (!lancer_chip(adapter))
			rxcp->vlan_tag = swab16(rxcp->vlan_tag);

		/* suppress the vlan tag when it matches the port pvid and is
		 * not present in the adapter's vlan_tag[] table */
		if (adapter->pvid == (rxcp->vlan_tag & VLAN_VID_MASK) &&
		    !adapter->vlan_tag[rxcp->vlan_tag])
			rxcp->vlanf = 0;
	}

	/* As the compl has been parsed, reset it; we wont touch it again */
	compl->dw[offsetof(struct amap_eth_rx_compl_v1, valid) / 32] = 0;

	queue_tail_inc(&rxo->cq);
	return rxcp;
}
1643
Eric Dumazet1829b082011-03-01 05:48:12 +00001644static inline struct page *be_alloc_pages(u32 size, gfp_t gfp)
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001645{
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001646 u32 order = get_order(size);
Eric Dumazet1829b082011-03-01 05:48:12 +00001647
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001648 if (order > 0)
Eric Dumazet1829b082011-03-01 05:48:12 +00001649 gfp |= __GFP_COMP;
1650 return alloc_pages(gfp, order);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001651}
1652
/*
 * Allocate a page, split it to fragments of size rx_frag_size and post as
 * receive buffers to BE
 */
static void be_post_rx_frags(struct be_rx_obj *rxo, gfp_t gfp)
{
	struct be_adapter *adapter = rxo->adapter;
	struct be_rx_page_info *page_info = NULL, *prev_page_info = NULL;
	struct be_queue_info *rxq = &rxo->q;
	struct page *pagep = NULL;
	struct be_eth_rx_d *rxd;
	u64 page_dmaaddr = 0, frag_dmaaddr;
	u32 posted, page_offset = 0;

	page_info = &rxo->page_info_tbl[rxq->head];
	/* Post up to MAX_RX_POST frags; stop early if the slot at rxq->head
	 * is still occupied (ring full) */
	for (posted = 0; posted < MAX_RX_POST && !page_info->page; posted++) {
		if (!pagep) {
			/* start a fresh big page and DMA-map it once for all
			 * the frags that will be carved out of it.
			 * NOTE(review): dma_map_page() result is not checked
			 * with dma_mapping_error() — confirm against newer
			 * versions of this driver.
			 */
			pagep = be_alloc_pages(adapter->big_page_size, gfp);
			if (unlikely(!pagep)) {
				rx_stats(rxo)->rx_post_fail++;
				break;
			}
			page_dmaaddr = dma_map_page(&adapter->pdev->dev, pagep,
						    0, adapter->big_page_size,
						    DMA_FROM_DEVICE);
			page_info->page_offset = 0;
		} else {
			/* next frag lives in the same page: take a ref */
			get_page(pagep);
			page_info->page_offset = page_offset + rx_frag_size;
		}
		page_offset = page_info->page_offset;
		page_info->page = pagep;
		dma_unmap_addr_set(page_info, bus, page_dmaaddr);
		frag_dmaaddr = page_dmaaddr + page_info->page_offset;

		/* write the frag's DMA address into the RX descriptor */
		rxd = queue_head_node(rxq);
		rxd->fragpa_lo = cpu_to_le32(frag_dmaaddr & 0xFFFFFFFF);
		rxd->fragpa_hi = cpu_to_le32(upper_32_bits(frag_dmaaddr));

		/* Any space left in the current big page for another frag? */
		if ((page_offset + rx_frag_size + rx_frag_size) >
					adapter->big_page_size) {
			pagep = NULL;
			page_info->last_page_user = true;
		}

		prev_page_info = page_info;
		queue_head_inc(rxq);
		page_info = &rxo->page_info_tbl[rxq->head];
	}
	/* loop ended mid-page: the last posted frag owns the unmap */
	if (pagep)
		prev_page_info->last_page_user = true;

	if (posted) {
		atomic_add(posted, &rxq->used);
		be_rxq_notify(adapter, rxq->id, posted);
	} else if (atomic_read(&rxq->used) == 0) {
		/* Let be_worker replenish when memory is available */
		rxo->rx_post_starved = true;
	}
}
1714
/* Return the next valid TX completion from @tx_cq, or NULL if none.
 * The entry is byte-swapped in place, its valid bit cleared and the CQ tail
 * advanced, so each completion is consumed exactly once.
 */
static struct be_eth_tx_compl *be_tx_compl_get(struct be_queue_info *tx_cq)
{
	struct be_eth_tx_compl *txcp = queue_tail_node(tx_cq);

	if (txcp->dw[offsetof(struct amap_eth_tx_compl, valid) / 32] == 0)
		return NULL;

	/* read the rest of the entry only after seeing the valid bit set */
	rmb();
	be_dws_le_to_cpu(txcp, sizeof(*txcp));

	txcp->dw[offsetof(struct amap_eth_tx_compl, valid) / 32] = 0;

	queue_tail_inc(tx_cq);
	return txcp;
}
1730
/* Unmap and free the skb whose wrbs end at @last_index on @txo's TX queue.
 * Walks from txq->tail (the skb's hdr wrb) through @last_index, unmapping
 * each data wrb's DMA buffer.  Returns the number of wrbs consumed
 * (including the hdr wrb); the caller adjusts txq->used.
 */
static u16 be_tx_compl_process(struct be_adapter *adapter,
		struct be_tx_obj *txo, u16 last_index)
{
	struct be_queue_info *txq = &txo->q;
	struct be_eth_wrb *wrb;
	struct sk_buff **sent_skbs = txo->sent_skb_list;
	struct sk_buff *sent_skb;
	u16 cur_index, num_wrbs = 1; /* account for hdr wrb */
	bool unmap_skb_hdr = true;

	sent_skb = sent_skbs[txq->tail];
	BUG_ON(!sent_skb);
	sent_skbs[txq->tail] = NULL;

	/* skip header wrb */
	queue_tail_inc(txq);

	do {
		cur_index = txq->tail;
		wrb = queue_tail_node(txq);
		/* the skb head mapping is unmapped only with the first data
		 * wrb (and only if the skb has linear data) */
		unmap_tx_frag(&adapter->pdev->dev, wrb,
			      (unmap_skb_hdr && skb_headlen(sent_skb)));
		unmap_skb_hdr = false;

		num_wrbs++;
		queue_tail_inc(txq);
	} while (cur_index != last_index);

	kfree_skb(sent_skb);
	return num_wrbs;
}
1762
Sathya Perla10ef9ab2012-02-09 18:05:27 +00001763/* Return the number of events in the event queue */
1764static inline int events_get(struct be_eq_obj *eqo)
Sathya Perla859b1e42009-08-10 03:43:51 +00001765{
1766 struct be_eq_entry *eqe;
Sathya Perla10ef9ab2012-02-09 18:05:27 +00001767 int num = 0;
Sathya Perla859b1e42009-08-10 03:43:51 +00001768
Sathya Perla10ef9ab2012-02-09 18:05:27 +00001769 do {
1770 eqe = queue_tail_node(&eqo->q);
1771 if (eqe->evt == 0)
1772 break;
1773
1774 rmb();
Sathya Perla859b1e42009-08-10 03:43:51 +00001775 eqe->evt = 0;
1776 num++;
Sathya Perla10ef9ab2012-02-09 18:05:27 +00001777 queue_tail_inc(&eqo->q);
1778 } while (true);
Sathya Perla859b1e42009-08-10 03:43:51 +00001779
1780 return num;
1781}
1782
Sathya Perla10ef9ab2012-02-09 18:05:27 +00001783/* Leaves the EQ is disarmed state */
1784static void be_eq_clean(struct be_eq_obj *eqo)
1785{
1786 int num = events_get(eqo);
1787
1788 be_eq_notify(eqo->adapter, eqo->q.id, false, true, num);
1789}
1790
/* Drain @rxo's completion queue and free all posted-but-unconsumed RX
 * buffers.  Called during queue teardown.
 */
static void be_rx_cq_clean(struct be_rx_obj *rxo)
{
	struct be_rx_page_info *page_info;
	struct be_queue_info *rxq = &rxo->q;
	struct be_queue_info *rx_cq = &rxo->cq;
	struct be_rx_compl_info *rxcp;
	struct be_adapter *adapter = rxo->adapter;
	int flush_wait = 0;
	u16 tail;

	/* Consume pending rx completions.
	 * Wait for the flush completion (identified by zero num_rcvd)
	 * to arrive. Notify CQ even when there are no more CQ entries
	 * for HW to flush partially coalesced CQ entries.
	 * In Lancer, there is no need to wait for flush compl.
	 */
	for (;;) {
		rxcp = be_rx_compl_get(rxo);
		if (rxcp == NULL) {
			if (lancer_chip(adapter))
				break;

			/* give up after ~10ms or on a detected HW error */
			if (flush_wait++ > 10 || be_hw_error(adapter)) {
				dev_warn(&adapter->pdev->dev,
					 "did not receive flush compl\n");
				break;
			}
			be_cq_notify(adapter, rx_cq->id, true, 0);
			mdelay(1);
		} else {
			be_rx_compl_discard(rxo, rxcp);
			be_cq_notify(adapter, rx_cq->id, false, 1);
			if (rxcp->num_rcvd == 0)
				break;
		}
	}

	/* After cleanup, leave the CQ in unarmed state */
	be_cq_notify(adapter, rx_cq->id, false, 0);

	/* Then free posted rx buffers that were not used */
	tail = (rxq->head + rxq->len - atomic_read(&rxq->used)) % rxq->len;
	for (; atomic_read(&rxq->used) > 0; index_inc(&tail, rxq->len)) {
		page_info = get_rx_page_info(rxo, tail);
		put_page(page_info->page);
		memset(page_info, 0, sizeof(*page_info));
	}
	BUG_ON(atomic_read(&rxq->used));
	rxq->tail = rxq->head = 0;
}
1841
/* Reap outstanding TX completions on all TX queues, waiting up to ~200ms
 * for HW to complete the posted wrbs, then forcibly free any skbs whose
 * completions never arrived.  Used during interface close/cleanup.
 */
static void be_tx_compl_clean(struct be_adapter *adapter)
{
	struct be_tx_obj *txo;
	struct be_queue_info *txq;
	struct be_eth_tx_compl *txcp;
	u16 end_idx, cmpl = 0, timeo = 0, num_wrbs = 0;
	struct sk_buff *sent_skb;
	bool dummy_wrb;
	int i, pending_txqs;

	/* Wait for a max of 200ms for all the tx-completions to arrive. */
	do {
		pending_txqs = adapter->num_tx_qs;

		for_all_tx_queues(adapter, txo, i) {
			txq = &txo->q;
			while ((txcp = be_tx_compl_get(&txo->cq))) {
				end_idx =
					AMAP_GET_BITS(struct amap_eth_tx_compl,
						      wrb_index, txcp);
				num_wrbs += be_tx_compl_process(adapter, txo,
								end_idx);
				cmpl++;
			}
			if (cmpl) {
				/* ack the reaped completions and release the
				 * corresponding wrb slots */
				be_cq_notify(adapter, txo->cq.id, false, cmpl);
				atomic_sub(num_wrbs, &txq->used);
				cmpl = 0;
				num_wrbs = 0;
			}
			if (atomic_read(&txq->used) == 0)
				pending_txqs--;
		}

		if (pending_txqs == 0 || ++timeo > 200)
			break;

		mdelay(1);
	} while (true);

	for_all_tx_queues(adapter, txo, i) {
		txq = &txo->q;
		if (atomic_read(&txq->used))
			dev_err(&adapter->pdev->dev, "%d pending tx-compls\n",
				atomic_read(&txq->used));

		/* free posted tx for which compls will never arrive */
		while (atomic_read(&txq->used)) {
			sent_skb = txo->sent_skb_list[txq->tail];
			end_idx = txq->tail;
			/* compute the last wrb index of this skb so the
			 * normal reap path can free it */
			num_wrbs = wrb_cnt_for_skb(adapter, sent_skb,
						   &dummy_wrb);
			index_adv(&end_idx, num_wrbs - 1, txq->len);
			num_wrbs = be_tx_compl_process(adapter, txo, end_idx);
			atomic_sub(num_wrbs, &txq->used);
		}
	}
}
1900
Sathya Perla10ef9ab2012-02-09 18:05:27 +00001901static void be_evt_queues_destroy(struct be_adapter *adapter)
1902{
1903 struct be_eq_obj *eqo;
1904 int i;
1905
1906 for_all_evt_queues(adapter, eqo, i) {
Padmanabh Ratnakar19d59aa2012-07-12 03:57:21 +00001907 if (eqo->q.created) {
1908 be_eq_clean(eqo);
Sathya Perla10ef9ab2012-02-09 18:05:27 +00001909 be_cmd_q_destroy(adapter, &eqo->q, QTYPE_EQ);
Padmanabh Ratnakar19d59aa2012-07-12 03:57:21 +00001910 }
Sathya Perla10ef9ab2012-02-09 18:05:27 +00001911 be_queue_free(adapter, &eqo->q);
1912 }
1913}
1914
1915static int be_evt_queues_create(struct be_adapter *adapter)
1916{
1917 struct be_queue_info *eq;
1918 struct be_eq_obj *eqo;
1919 int i, rc;
1920
1921 adapter->num_evt_qs = num_irqs(adapter);
1922
1923 for_all_evt_queues(adapter, eqo, i) {
1924 eqo->adapter = adapter;
1925 eqo->tx_budget = BE_TX_BUDGET;
1926 eqo->idx = i;
1927 eqo->max_eqd = BE_MAX_EQD;
1928 eqo->enable_aic = true;
1929
1930 eq = &eqo->q;
1931 rc = be_queue_alloc(adapter, eq, EVNT_Q_LEN,
1932 sizeof(struct be_eq_entry));
1933 if (rc)
1934 return rc;
1935
1936 rc = be_cmd_eq_create(adapter, eq, eqo->cur_eqd);
1937 if (rc)
1938 return rc;
1939 }
Sathya Perla1cfafab2012-02-23 18:50:15 +00001940 return 0;
Sathya Perla10ef9ab2012-02-09 18:05:27 +00001941}
1942
Sathya Perla5fb379e2009-06-18 00:02:59 +00001943static void be_mcc_queues_destroy(struct be_adapter *adapter)
1944{
1945 struct be_queue_info *q;
Sathya Perla5fb379e2009-06-18 00:02:59 +00001946
Sathya Perla8788fdc2009-07-27 22:52:03 +00001947 q = &adapter->mcc_obj.q;
Sathya Perla5fb379e2009-06-18 00:02:59 +00001948 if (q->created)
Sathya Perla8788fdc2009-07-27 22:52:03 +00001949 be_cmd_q_destroy(adapter, q, QTYPE_MCCQ);
Sathya Perla5fb379e2009-06-18 00:02:59 +00001950 be_queue_free(adapter, q);
1951
Sathya Perla8788fdc2009-07-27 22:52:03 +00001952 q = &adapter->mcc_obj.cq;
Sathya Perla5fb379e2009-06-18 00:02:59 +00001953 if (q->created)
Sathya Perla8788fdc2009-07-27 22:52:03 +00001954 be_cmd_q_destroy(adapter, q, QTYPE_CQ);
Sathya Perla5fb379e2009-06-18 00:02:59 +00001955 be_queue_free(adapter, q);
1956}
1957
/* Must be called only after TX qs are created as MCC shares TX EQ */
static int be_mcc_queues_create(struct be_adapter *adapter)
{
	struct be_queue_info *q, *cq;

	/* Allocate and create the MCC completion queue first; the MCC
	 * queue creation below needs the CQ to already exist.
	 */
	cq = &adapter->mcc_obj.cq;
	if (be_queue_alloc(adapter, cq, MCC_CQ_LEN,
			sizeof(struct be_mcc_compl)))
		goto err;

	/* Use the default EQ for MCC completions */
	if (be_cmd_cq_create(adapter, cq, &mcc_eqo(adapter)->q, true, 0))
		goto mcc_cq_free;

	q = &adapter->mcc_obj.q;
	if (be_queue_alloc(adapter, q, MCC_Q_LEN, sizeof(struct be_mcc_wrb)))
		goto mcc_cq_destroy;

	if (be_cmd_mccq_create(adapter, q, cq))
		goto mcc_q_free;

	return 0;

	/* Unwind in reverse order of creation on failure */
mcc_q_free:
	be_queue_free(adapter, q);
mcc_cq_destroy:
	be_cmd_q_destroy(adapter, cq, QTYPE_CQ);
mcc_cq_free:
	be_queue_free(adapter, cq);
err:
	return -1;
}
1990
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001991static void be_tx_queues_destroy(struct be_adapter *adapter)
1992{
1993 struct be_queue_info *q;
Sathya Perla3c8def92011-06-12 20:01:58 +00001994 struct be_tx_obj *txo;
1995 u8 i;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001996
Sathya Perla3c8def92011-06-12 20:01:58 +00001997 for_all_tx_queues(adapter, txo, i) {
1998 q = &txo->q;
1999 if (q->created)
2000 be_cmd_q_destroy(adapter, q, QTYPE_TXQ);
2001 be_queue_free(adapter, q);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002002
Sathya Perla3c8def92011-06-12 20:01:58 +00002003 q = &txo->cq;
2004 if (q->created)
2005 be_cmd_q_destroy(adapter, q, QTYPE_CQ);
2006 be_queue_free(adapter, q);
2007 }
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002008}
2009
Sathya Perladafc0fe2011-10-24 02:45:02 +00002010static int be_num_txqs_want(struct be_adapter *adapter)
2011{
Padmanabh Ratnakarabb93952012-10-20 06:01:41 +00002012 if ((!lancer_chip(adapter) && sriov_want(adapter)) ||
2013 be_is_mc(adapter) ||
2014 (!lancer_chip(adapter) && !be_physfn(adapter)) ||
Sathya Perlaca34fe32012-11-06 17:48:56 +00002015 BE2_chip(adapter))
Sathya Perladafc0fe2011-10-24 02:45:02 +00002016 return 1;
2017 else
Padmanabh Ratnakarabb93952012-10-20 06:01:41 +00002018 return adapter->max_tx_queues;
Sathya Perladafc0fe2011-10-24 02:45:02 +00002019}
2020
/* Allocate and create one TX completion queue per TX queue.
 * Also fixes up the netdev's real TX queue count (under rtnl_lock) when
 * fewer than MAX_TX_QS queues are used.
 * Returns 0 or the first failing status; partial creations are left for
 * the caller's cleanup path.
 */
static int be_tx_cqs_create(struct be_adapter *adapter)
{
	struct be_queue_info *cq, *eq;
	int status;
	struct be_tx_obj *txo;
	u8 i;

	adapter->num_tx_qs = be_num_txqs_want(adapter);
	if (adapter->num_tx_qs != MAX_TX_QS) {
		rtnl_lock();
		netif_set_real_num_tx_queues(adapter->netdev,
			adapter->num_tx_qs);
		rtnl_unlock();
	}

	for_all_tx_queues(adapter, txo, i) {
		cq = &txo->cq;
		status = be_queue_alloc(adapter, cq, TX_CQ_LEN,
					sizeof(struct be_eth_tx_compl));
		if (status)
			return status;

		/* If num_evt_qs is less than num_tx_qs, then more than
		 * one txq share an eq
		 */
		eq = &adapter->eq_obj[i % adapter->num_evt_qs].q;
		status = be_cmd_cq_create(adapter, cq, eq, false, 3);
		if (status)
			return status;
	}
	return 0;
}
2053
Sathya Perla10ef9ab2012-02-09 18:05:27 +00002054static int be_tx_qs_create(struct be_adapter *adapter)
2055{
2056 struct be_tx_obj *txo;
2057 int i, status;
2058
2059 for_all_tx_queues(adapter, txo, i) {
2060 status = be_queue_alloc(adapter, &txo->q, TX_Q_LEN,
2061 sizeof(struct be_eth_wrb));
2062 if (status)
2063 return status;
2064
Vasundhara Volam94d73aa2013-04-21 23:28:14 +00002065 status = be_cmd_txq_create(adapter, txo);
Sathya Perla10ef9ab2012-02-09 18:05:27 +00002066 if (status)
2067 return status;
2068 }
2069
Sathya Perlad3791422012-09-28 04:39:44 +00002070 dev_info(&adapter->pdev->dev, "created %d TX queue(s)\n",
2071 adapter->num_tx_qs);
Sathya Perla10ef9ab2012-02-09 18:05:27 +00002072 return 0;
2073}
2074
2075static void be_rx_cqs_destroy(struct be_adapter *adapter)
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002076{
2077 struct be_queue_info *q;
Sathya Perla3abcded2010-10-03 22:12:27 -07002078 struct be_rx_obj *rxo;
2079 int i;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002080
Sathya Perla3abcded2010-10-03 22:12:27 -07002081 for_all_rx_queues(adapter, rxo, i) {
Sathya Perla3abcded2010-10-03 22:12:27 -07002082 q = &rxo->cq;
2083 if (q->created)
2084 be_cmd_q_destroy(adapter, q, QTYPE_CQ);
2085 be_queue_free(adapter, q);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002086 }
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002087}
2088
/* Size the RX queue set and create one completion queue per RX queue.
 * With more than one IRQ, num_irqs RSS rings plus one default ring are
 * used; with a single IRQ only the default ring is used.  Also updates
 * the netdev's real RX queue count (under rtnl_lock) when not at
 * MAX_RX_QS, and computes big_page_size from rx_frag_size.
 * Returns 0 or the first failing status.
 */
static int be_rx_cqs_create(struct be_adapter *adapter)
{
	struct be_queue_info *eq, *cq;
	struct be_rx_obj *rxo;
	int rc, i;

	/* We'll create as many RSS rings as there are irqs.
	 * But when there's only one irq there's no use creating RSS rings
	 */
	adapter->num_rx_qs = (num_irqs(adapter) > 1) ?
				num_irqs(adapter) + 1 : 1;
	if (adapter->num_rx_qs != MAX_RX_QS) {
		rtnl_lock();
		netif_set_real_num_rx_queues(adapter->netdev,
					     adapter->num_rx_qs);
		rtnl_unlock();
	}

	adapter->big_page_size = (1 << get_order(rx_frag_size)) * PAGE_SIZE;
	for_all_rx_queues(adapter, rxo, i) {
		rxo->adapter = adapter;
		cq = &rxo->cq;
		rc = be_queue_alloc(adapter, cq, RX_CQ_LEN,
				sizeof(struct be_eth_rx_compl));
		if (rc)
			return rc;

		/* RX CQs are distributed round-robin over the event queues */
		eq = &adapter->eq_obj[i % adapter->num_evt_qs].q;
		rc = be_cmd_cq_create(adapter, cq, eq, false, 3);
		if (rc)
			return rc;
	}

	dev_info(&adapter->pdev->dev,
		 "created %d RSS queue(s) and 1 default RX queue\n",
		 adapter->num_rx_qs - 1);
	return 0;
}
2127
/* INTx interrupt handler: schedules NAPI on the first (only) EQ and
 * tracks spurious interrupts so the kernel does not disable the line.
 */
static irqreturn_t be_intx(int irq, void *dev)
{
	struct be_eq_obj *eqo = dev;
	struct be_adapter *adapter = eqo->adapter;
	int num_evts = 0;

	/* IRQ is not expected when NAPI is scheduled as the EQ
	 * will not be armed.
	 * But, this can happen on Lancer INTx where it takes
	 * a while to de-assert INTx or in BE2 where occasionally
	 * an interrupt may be raised even when EQ is unarmed.
	 * If NAPI is already scheduled, then counting & notifying
	 * events will orphan them.
	 */
	if (napi_schedule_prep(&eqo->napi)) {
		num_evts = events_get(eqo);
		__napi_schedule(&eqo->napi);
		/* A real event arrived: reset the spurious-intr counter */
		if (num_evts)
			eqo->spurious_intr = 0;
	}
	/* Acknowledge (clear) the counted events without re-arming the EQ */
	be_eq_notify(adapter, eqo->q.id, false, true, num_evts);

	/* Return IRQ_HANDLED only for the first spurious intr
	 * after a valid intr to stop the kernel from branding
	 * this irq as a bad one!
	 */
	if (num_evts || eqo->spurious_intr++ == 0)
		return IRQ_HANDLED;
	else
		return IRQ_NONE;
}
2159
/* MSI-x interrupt handler: one vector per EQ.  Notifies the EQ (without
 * re-arming it) and hands processing off to NAPI.
 */
static irqreturn_t be_msix(int irq, void *dev)
{
	struct be_eq_obj *eqo = dev;

	be_eq_notify(eqo->adapter, eqo->q.id, false, true, 0);
	napi_schedule(&eqo->napi);
	return IRQ_HANDLED;
}
2168
Sathya Perla2e588f82011-03-11 02:49:26 +00002169static inline bool do_gro(struct be_rx_compl_info *rxcp)
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002170{
Sathya Perla2e588f82011-03-11 02:49:26 +00002171 return (rxcp->tcpf && !rxcp->err) ? true : false;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002172}
2173
/* Reap up to @budget RX completions from @rxo's completion queue.
 * Flush completions (num_rcvd == 0) and defective completions are
 * skipped/discarded; valid frames go to GRO or the regular receive
 * path.  Notifies the CQ and replenishes RX buffers if processing
 * consumed any completions.  Returns the number of completions reaped.
 */
static int be_process_rx(struct be_rx_obj *rxo, struct napi_struct *napi,
			int budget)
{
	struct be_adapter *adapter = rxo->adapter;
	struct be_queue_info *rx_cq = &rxo->cq;
	struct be_rx_compl_info *rxcp;
	u32 work_done;

	for (work_done = 0; work_done < budget; work_done++) {
		rxcp = be_rx_compl_get(rxo);
		if (!rxcp)
			break;

		/* Is it a flush compl that has no data */
		if (unlikely(rxcp->num_rcvd == 0))
			goto loop_continue;

		/* Discard compl with partial DMA Lancer B0 */
		if (unlikely(!rxcp->pkt_size)) {
			be_rx_compl_discard(rxo, rxcp);
			goto loop_continue;
		}

		/* On BE drop pkts that arrive due to imperfect filtering in
		 * promiscuous mode on some skews
		 */
		if (unlikely(rxcp->port != adapter->port_num &&
				!lancer_chip(adapter))) {
			be_rx_compl_discard(rxo, rxcp);
			goto loop_continue;
		}

		if (do_gro(rxcp))
			be_rx_compl_process_gro(rxo, napi, rxcp);
		else
			be_rx_compl_process(rxo, rxcp);
loop_continue:
		/* Stats are updated for every completion, including the
		 * discarded ones */
		be_rx_stats_update(rxo, rxcp);
	}

	if (work_done) {
		be_cq_notify(adapter, rx_cq->id, true, work_done);

		/* Refill the RX queue when it drops below the watermark */
		if (atomic_read(&rxo->q.used) < RX_FRAGS_REFILL_WM)
			be_post_rx_frags(rxo, GFP_ATOMIC);
	}

	return work_done;
}
2223
/* Reap up to @budget TX completions for @txo (TX queue index @idx),
 * freeing the completed wrbs and waking the netdev subqueue if it was
 * stopped and enough wrb space is now available.
 * Returns true when the budget was not exhausted (i.e. TX work done).
 */
static bool be_process_tx(struct be_adapter *adapter, struct be_tx_obj *txo,
			int budget, int idx)
{
	struct be_eth_tx_compl *txcp;
	int num_wrbs = 0, work_done;

	for (work_done = 0; work_done < budget; work_done++) {
		txcp = be_tx_compl_get(&txo->cq);
		if (!txcp)
			break;
		/* The completion carries the wrb index of the last wrb
		 * of the completed packet */
		num_wrbs += be_tx_compl_process(adapter, txo,
				AMAP_GET_BITS(struct amap_eth_tx_compl,
					wrb_index, txcp));
	}

	if (work_done) {
		be_cq_notify(adapter, txo->cq.id, true, work_done);
		atomic_sub(num_wrbs, &txo->q.used);

		/* As Tx wrbs have been freed up, wake up netdev queue
		 * if it was stopped due to lack of tx wrbs. */
		if (__netif_subqueue_stopped(adapter->netdev, idx) &&
			atomic_read(&txo->q.used) < txo->q.len / 2) {
			netif_wake_subqueue(adapter->netdev, idx);
		}

		u64_stats_update_begin(&tx_stats(txo)->sync_compl);
		tx_stats(txo)->tx_compl += work_done;
		u64_stats_update_end(&tx_stats(txo)->sync_compl);
	}
	return (work_done < budget); /* Done */
}
Sathya Perla3c8def92011-06-12 20:01:58 +00002256
/* NAPI poll handler for an EQ: services all TX and RX queues mapped to
 * this EQ, plus MCC completions on the MCC EQ.  Events are counted up
 * front and acknowledged at the end — with re-arm when polling is
 * complete, without re-arm when polling will continue.
 * Returns the amount of work done (budget => stay in polling mode).
 */
int be_poll(struct napi_struct *napi, int budget)
{
	struct be_eq_obj *eqo = container_of(napi, struct be_eq_obj, napi);
	struct be_adapter *adapter = eqo->adapter;
	int max_work = 0, work, i, num_evts;
	bool tx_done;

	num_evts = events_get(eqo);

	/* Process all TXQs serviced by this EQ */
	for (i = eqo->idx; i < adapter->num_tx_qs; i += adapter->num_evt_qs) {
		tx_done = be_process_tx(adapter, &adapter->tx_obj[i],
					eqo->tx_budget, i);
		/* TX budget exhausted: force another poll round */
		if (!tx_done)
			max_work = budget;
	}

	/* This loop will iterate twice for EQ0 in which
	 * completions of the last RXQ (default one) are also processed
	 * For other EQs the loop iterates only once
	 */
	for (i = eqo->idx; i < adapter->num_rx_qs; i += adapter->num_evt_qs) {
		work = be_process_rx(&adapter->rx_obj[i], napi, budget);
		max_work = max(work, max_work);
	}

	if (is_mcc_eqo(eqo))
		be_process_mcc(adapter);

	if (max_work < budget) {
		napi_complete(napi);
		be_eq_notify(adapter, eqo->q.id, true, false, num_evts);
	} else {
		/* As we'll continue in polling mode, count and clear events */
		be_eq_notify(adapter, eqo->q.id, false, false, num_evts);
	}
	return max_work;
}
2295
Padmanabh Ratnakarf67ef7b2012-07-12 03:57:09 +00002296void be_detect_error(struct be_adapter *adapter)
Ajit Khaparde7c185272010-07-29 06:16:33 +00002297{
Padmanabh Ratnakare1cfb672011-11-03 01:50:08 +00002298 u32 ue_lo = 0, ue_hi = 0, ue_lo_mask = 0, ue_hi_mask = 0;
2299 u32 sliport_status = 0, sliport_err1 = 0, sliport_err2 = 0;
Ajit Khaparde7c185272010-07-29 06:16:33 +00002300 u32 i;
2301
Sathya Perlad23e9462012-12-17 19:38:51 +00002302 if (be_hw_error(adapter))
Sathya Perla72f02482011-11-10 19:17:58 +00002303 return;
2304
Padmanabh Ratnakare1cfb672011-11-03 01:50:08 +00002305 if (lancer_chip(adapter)) {
2306 sliport_status = ioread32(adapter->db + SLIPORT_STATUS_OFFSET);
2307 if (sliport_status & SLIPORT_STATUS_ERR_MASK) {
2308 sliport_err1 = ioread32(adapter->db +
2309 SLIPORT_ERROR1_OFFSET);
2310 sliport_err2 = ioread32(adapter->db +
2311 SLIPORT_ERROR2_OFFSET);
2312 }
2313 } else {
2314 pci_read_config_dword(adapter->pdev,
2315 PCICFG_UE_STATUS_LOW, &ue_lo);
2316 pci_read_config_dword(adapter->pdev,
2317 PCICFG_UE_STATUS_HIGH, &ue_hi);
2318 pci_read_config_dword(adapter->pdev,
2319 PCICFG_UE_STATUS_LOW_MASK, &ue_lo_mask);
2320 pci_read_config_dword(adapter->pdev,
2321 PCICFG_UE_STATUS_HI_MASK, &ue_hi_mask);
Ajit Khaparde7c185272010-07-29 06:16:33 +00002322
Padmanabh Ratnakarf67ef7b2012-07-12 03:57:09 +00002323 ue_lo = (ue_lo & ~ue_lo_mask);
2324 ue_hi = (ue_hi & ~ue_hi_mask);
Padmanabh Ratnakare1cfb672011-11-03 01:50:08 +00002325 }
Ajit Khaparde7c185272010-07-29 06:16:33 +00002326
Ajit Khaparde1451ae62012-10-08 18:18:21 +00002327 /* On certain platforms BE hardware can indicate spurious UEs.
2328 * Allow the h/w to stop working completely in case of a real UE.
2329 * Hence not setting the hw_error for UE detection.
2330 */
2331 if (sliport_status & SLIPORT_STATUS_ERR_MASK) {
Padmanabh Ratnakarf67ef7b2012-07-12 03:57:09 +00002332 adapter->hw_error = true;
Sathya Perla434b3642011-11-10 19:17:59 +00002333 dev_err(&adapter->pdev->dev,
Padmanabh Ratnakarf67ef7b2012-07-12 03:57:09 +00002334 "Error detected in the card\n");
2335 }
2336
2337 if (sliport_status & SLIPORT_STATUS_ERR_MASK) {
2338 dev_err(&adapter->pdev->dev,
2339 "ERR: sliport status 0x%x\n", sliport_status);
2340 dev_err(&adapter->pdev->dev,
2341 "ERR: sliport error1 0x%x\n", sliport_err1);
2342 dev_err(&adapter->pdev->dev,
2343 "ERR: sliport error2 0x%x\n", sliport_err2);
Ajit Khaparded053de92010-09-03 06:23:30 +00002344 }
2345
Padmanabh Ratnakare1cfb672011-11-03 01:50:08 +00002346 if (ue_lo) {
2347 for (i = 0; ue_lo; ue_lo >>= 1, i++) {
2348 if (ue_lo & 1)
Ajit Khaparde7c185272010-07-29 06:16:33 +00002349 dev_err(&adapter->pdev->dev,
2350 "UE: %s bit set\n", ue_status_low_desc[i]);
2351 }
2352 }
Padmanabh Ratnakarf67ef7b2012-07-12 03:57:09 +00002353
Padmanabh Ratnakare1cfb672011-11-03 01:50:08 +00002354 if (ue_hi) {
2355 for (i = 0; ue_hi; ue_hi >>= 1, i++) {
2356 if (ue_hi & 1)
Ajit Khaparde7c185272010-07-29 06:16:33 +00002357 dev_err(&adapter->pdev->dev,
2358 "UE: %s bit set\n", ue_status_hi_desc[i]);
2359 }
2360 }
2361
2362}
2363
Sathya Perla8d56ff12009-11-22 22:02:26 +00002364static void be_msix_disable(struct be_adapter *adapter)
2365{
Sathya Perlaac6a0c42011-03-21 20:49:25 +00002366 if (msix_enabled(adapter)) {
Sathya Perla8d56ff12009-11-22 22:02:26 +00002367 pci_disable_msix(adapter->pdev);
Sathya Perlaac6a0c42011-03-21 20:49:25 +00002368 adapter->num_msix_vec = 0;
Sathya Perla3abcded2010-10-03 22:12:27 -07002369 }
2370}
2371
Sathya Perla10ef9ab2012-02-09 18:05:27 +00002372static uint be_num_rss_want(struct be_adapter *adapter)
2373{
Yuval Mintz30e80b52012-07-01 03:19:00 +00002374 u32 num = 0;
Padmanabh Ratnakarabb93952012-10-20 06:01:41 +00002375
Sathya Perla10ef9ab2012-02-09 18:05:27 +00002376 if ((adapter->function_caps & BE_FUNCTION_CAPS_RSS) &&
Padmanabh Ratnakarabb93952012-10-20 06:01:41 +00002377 (lancer_chip(adapter) ||
2378 (!sriov_want(adapter) && be_physfn(adapter)))) {
2379 num = adapter->max_rss_queues;
Yuval Mintz30e80b52012-07-01 03:19:00 +00002380 num = min_t(u32, num, (u32)netif_get_num_default_rss_queues());
2381 }
2382 return num;
Sathya Perla10ef9ab2012-02-09 18:05:27 +00002383}
2384
/* Enable MSI-x with as many vectors as the RSS/RoCE configuration
 * wants, retrying once with the smaller count pci_enable_msix() reports
 * when the full request cannot be met.  On success, splits the vectors
 * between NIC and RoCE.  Falling back to INTx is allowed only for
 * physical functions; for VFs a failure here fails the probe.
 * Returns 0 on success (or on PF fallback), else the pci error.
 */
static int be_msix_enable(struct be_adapter *adapter)
{
#define BE_MIN_MSIX_VECTORS		1
	int i, status, num_vec, num_roce_vec = 0;
	struct device *dev = &adapter->pdev->dev;

	/* If RSS queues are not used, need a vec for default RX Q */
	num_vec = min(be_num_rss_want(adapter), num_online_cpus());
	if (be_roce_supported(adapter)) {
		num_roce_vec = min_t(u32, MAX_ROCE_MSIX_VECTORS,
					(num_online_cpus() + 1));
		num_roce_vec = min(num_roce_vec, MAX_ROCE_EQS);
		num_vec += num_roce_vec;
		num_vec = min(num_vec, MAX_MSIX_VECTORS);
	}
	num_vec = max(num_vec, BE_MIN_MSIX_VECTORS);

	for (i = 0; i < num_vec; i++)
		adapter->msix_entries[i].entry = i;

	status = pci_enable_msix(adapter->pdev, adapter->msix_entries, num_vec);
	if (status == 0) {
		goto done;
	} else if (status >= BE_MIN_MSIX_VECTORS) {
		/* A positive return is the number of vectors that could
		 * be allocated: retry with that count */
		num_vec = status;
		status = pci_enable_msix(adapter->pdev, adapter->msix_entries,
					 num_vec);
		if (!status)
			goto done;
	}

	dev_warn(dev, "MSIx enable failed\n");
	/* INTx is not supported in VFs, so fail probe if enable_msix fails */
	if (!be_physfn(adapter))
		return status;
	return 0;
done:
	/* Split the vectors between NIC and RoCE usage */
	if (be_roce_supported(adapter)) {
		if (num_vec > num_roce_vec) {
			adapter->num_msix_vec = num_vec - num_roce_vec;
			adapter->num_msix_roce_vec =
				num_vec - adapter->num_msix_vec;
		} else {
			adapter->num_msix_vec = num_vec;
			adapter->num_msix_roce_vec = 0;
		}
	} else
		adapter->num_msix_vec = num_vec;
	dev_info(dev, "enabled %d MSI-x vector(s)\n", adapter->num_msix_vec);
	return 0;
}
2436
/* Return the MSI-x vector number assigned to the given EQ object */
static inline int be_msix_vec_get(struct be_adapter *adapter,
				struct be_eq_obj *eqo)
{
	return adapter->msix_entries[eqo->idx].vector;
}
2442
/* Request one MSI-x IRQ per event queue, naming each "<netdev>-q<i>".
 * On failure, frees the IRQs registered so far (in reverse order) and
 * disables MSI-x.  Returns 0 or the failing request_irq() status.
 */
static int be_msix_register(struct be_adapter *adapter)
{
	struct net_device *netdev = adapter->netdev;
	struct be_eq_obj *eqo;
	int status, i, vec;

	for_all_evt_queues(adapter, eqo, i) {
		sprintf(eqo->desc, "%s-q%d", netdev->name, i);
		vec = be_msix_vec_get(adapter, eqo);
		status = request_irq(vec, be_msix, 0, eqo->desc, eqo);
		if (status)
			goto err_msix;
	}

	return 0;
err_msix:
	/* Unwind: free only the IRQs that were successfully requested */
	for (i--, eqo = &adapter->eq_obj[i]; i >= 0; i--, eqo--)
		free_irq(be_msix_vec_get(adapter, eqo), eqo);
	dev_warn(&adapter->pdev->dev, "MSIX Request IRQ failed - err %d\n",
		status);
	be_msix_disable(adapter);
	return status;
}
2466
2467static int be_irq_register(struct be_adapter *adapter)
2468{
2469 struct net_device *netdev = adapter->netdev;
2470 int status;
2471
Sathya Perlaac6a0c42011-03-21 20:49:25 +00002472 if (msix_enabled(adapter)) {
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002473 status = be_msix_register(adapter);
2474 if (status == 0)
2475 goto done;
Sarveshwar Bandiba343c72010-03-31 02:56:12 +00002476 /* INTx is not supported for VF */
2477 if (!be_physfn(adapter))
2478 return status;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002479 }
2480
Sathya Perlae49cc342012-11-27 19:50:02 +00002481 /* INTx: only the first EQ is used */
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002482 netdev->irq = adapter->pdev->irq;
2483 status = request_irq(netdev->irq, be_intx, IRQF_SHARED, netdev->name,
Sathya Perlae49cc342012-11-27 19:50:02 +00002484 &adapter->eq_obj[0]);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002485 if (status) {
2486 dev_err(&adapter->pdev->dev,
2487 "INTx request IRQ failed - err %d\n", status);
2488 return status;
2489 }
2490done:
2491 adapter->isr_registered = true;
2492 return 0;
2493}
2494
2495static void be_irq_unregister(struct be_adapter *adapter)
2496{
2497 struct net_device *netdev = adapter->netdev;
Sathya Perla10ef9ab2012-02-09 18:05:27 +00002498 struct be_eq_obj *eqo;
Sathya Perla3abcded2010-10-03 22:12:27 -07002499 int i;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002500
2501 if (!adapter->isr_registered)
2502 return;
2503
2504 /* INTx */
Sathya Perlaac6a0c42011-03-21 20:49:25 +00002505 if (!msix_enabled(adapter)) {
Sathya Perlae49cc342012-11-27 19:50:02 +00002506 free_irq(netdev->irq, &adapter->eq_obj[0]);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002507 goto done;
2508 }
2509
2510 /* MSIx */
Sathya Perla10ef9ab2012-02-09 18:05:27 +00002511 for_all_evt_queues(adapter, eqo, i)
2512 free_irq(be_msix_vec_get(adapter, eqo), eqo);
Sathya Perla3abcded2010-10-03 22:12:27 -07002513
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002514done:
2515 adapter->isr_registered = false;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002516}
2517
Sathya Perla10ef9ab2012-02-09 18:05:27 +00002518static void be_rx_qs_destroy(struct be_adapter *adapter)
Sathya Perla482c9e72011-06-29 23:33:17 +00002519{
2520 struct be_queue_info *q;
2521 struct be_rx_obj *rxo;
2522 int i;
2523
2524 for_all_rx_queues(adapter, rxo, i) {
2525 q = &rxo->q;
2526 if (q->created) {
2527 be_cmd_rxq_destroy(adapter, q);
Sathya Perla10ef9ab2012-02-09 18:05:27 +00002528 be_rx_cq_clean(rxo);
Sathya Perla482c9e72011-06-29 23:33:17 +00002529 }
Sathya Perla10ef9ab2012-02-09 18:05:27 +00002530 be_queue_free(adapter, q);
Sathya Perla482c9e72011-06-29 23:33:17 +00002531 }
2532}
2533
/* ndo_stop handler: quiesce the adapter in strict order — RoCE close,
 * NAPI disable, async MCC off, drain TX completions, stop the TX path,
 * destroy RX queues, then quiesce/drain each EQ and release IRQs.
 */
static int be_close(struct net_device *netdev)
{
	struct be_adapter *adapter = netdev_priv(netdev);
	struct be_eq_obj *eqo;
	int i;

	be_roce_dev_close(adapter);

	/* NAPI may not have been enabled if open failed part-way */
	if (adapter->flags & BE_FLAGS_NAPI_ENABLED) {
		for_all_evt_queues(adapter, eqo, i)
			napi_disable(&eqo->napi);
		adapter->flags &= ~BE_FLAGS_NAPI_ENABLED;
	}

	be_async_mcc_disable(adapter);

	/* Wait for all pending tx completions to arrive so that
	 * all tx skbs are freed.
	 */
	be_tx_compl_clean(adapter);
	netif_tx_disable(netdev);

	be_rx_qs_destroy(adapter);

	for_all_evt_queues(adapter, eqo, i) {
		/* Make sure no handler is still running for this EQ */
		if (msix_enabled(adapter))
			synchronize_irq(be_msix_vec_get(adapter, eqo));
		else
			synchronize_irq(netdev->irq);
		be_eq_clean(eqo);
	}

	be_irq_unregister(adapter);

	return 0;
}
2570
/* Allocate and create all RX queues: the default RXQ first (FW
 * requirement), then the RSS rings.  When multiple RX queues exist,
 * fills the 128-entry RSS indirection table round-robin with the RSS
 * ring ids and programs the RSS hash configuration, then posts the
 * initial RX buffers.  Returns 0 or the first failing status.
 */
static int be_rx_qs_create(struct be_adapter *adapter)
{
	struct be_rx_obj *rxo;
	int rc, i, j;
	u8 rsstable[128];

	for_all_rx_queues(adapter, rxo, i) {
		rc = be_queue_alloc(adapter, &rxo->q, RX_Q_LEN,
				sizeof(struct be_eth_rx_d));
		if (rc)
			return rc;
	}

	/* The FW would like the default RXQ to be created first */
	rxo = default_rxo(adapter);
	rc = be_cmd_rxq_create(adapter, &rxo->q, rxo->cq.id, rx_frag_size,
			adapter->if_handle, false, &rxo->rss_id);
	if (rc)
		return rc;

	for_all_rss_queues(adapter, rxo, i) {
		rc = be_cmd_rxq_create(adapter, &rxo->q, rxo->cq.id,
				rx_frag_size, adapter->if_handle,
				true, &rxo->rss_id);
		if (rc)
			return rc;
	}

	if (be_multi_rxq(adapter)) {
		/* Spread the RSS ring ids across the 128 table slots */
		for (j = 0; j < 128; j += adapter->num_rx_qs - 1) {
			for_all_rss_queues(adapter, rxo, i) {
				if ((j + i) >= 128)
					break;
				rsstable[j + i] = rxo->rss_id;
			}
		}
		adapter->rss_flags = RSS_ENABLE_TCP_IPV4 | RSS_ENABLE_IPV4 |
					RSS_ENABLE_TCP_IPV6 | RSS_ENABLE_IPV6;

		/* UDP RSS is enabled only on non-BEx chips */
		if (!BEx_chip(adapter))
			adapter->rss_flags |= RSS_ENABLE_UDP_IPV4 |
						RSS_ENABLE_UDP_IPV6;

		rc = be_cmd_rss_config(adapter, rsstable, adapter->rss_flags,
				       128);
		if (rc) {
			adapter->rss_flags = 0;
			return rc;
		}
	}

	/* First time posting */
	for_all_rx_queues(adapter, rxo, i)
		be_post_rx_frags(rxo, GFP_KERNEL);
	return 0;
}
2627
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002628static int be_open(struct net_device *netdev)
2629{
2630 struct be_adapter *adapter = netdev_priv(netdev);
Sathya Perla10ef9ab2012-02-09 18:05:27 +00002631 struct be_eq_obj *eqo;
Sathya Perla3abcded2010-10-03 22:12:27 -07002632 struct be_rx_obj *rxo;
Sathya Perla10ef9ab2012-02-09 18:05:27 +00002633 struct be_tx_obj *txo;
Ajit Khapardeb236916a2011-12-30 12:15:40 +00002634 u8 link_status;
Sathya Perla3abcded2010-10-03 22:12:27 -07002635 int status, i;
Sathya Perla5fb379e2009-06-18 00:02:59 +00002636
Sathya Perla10ef9ab2012-02-09 18:05:27 +00002637 status = be_rx_qs_create(adapter);
Sathya Perla482c9e72011-06-29 23:33:17 +00002638 if (status)
2639 goto err;
2640
Somnath Koturc2bba3d2013-05-02 03:37:08 +00002641 status = be_irq_register(adapter);
2642 if (status)
2643 goto err;
Sathya Perla5fb379e2009-06-18 00:02:59 +00002644
Sathya Perla10ef9ab2012-02-09 18:05:27 +00002645 for_all_rx_queues(adapter, rxo, i)
Sathya Perla3abcded2010-10-03 22:12:27 -07002646 be_cq_notify(adapter, rxo->cq.id, true, 0);
Sathya Perla5fb379e2009-06-18 00:02:59 +00002647
Sathya Perla10ef9ab2012-02-09 18:05:27 +00002648 for_all_tx_queues(adapter, txo, i)
2649 be_cq_notify(adapter, txo->cq.id, true, 0);
2650
Sathya Perla7a1e9b22010-02-17 01:35:11 +00002651 be_async_mcc_enable(adapter);
2652
Sathya Perla10ef9ab2012-02-09 18:05:27 +00002653 for_all_evt_queues(adapter, eqo, i) {
2654 napi_enable(&eqo->napi);
2655 be_eq_notify(adapter, eqo->q.id, true, false, 0);
2656 }
Somnath Kotur04d3d622013-05-02 03:36:55 +00002657 adapter->flags |= BE_FLAGS_NAPI_ENABLED;
Sathya Perla10ef9ab2012-02-09 18:05:27 +00002658
Sathya Perla323ff712012-09-28 04:39:43 +00002659 status = be_cmd_link_status_query(adapter, NULL, &link_status, 0);
Ajit Khapardeb236916a2011-12-30 12:15:40 +00002660 if (!status)
2661 be_link_status_update(adapter, link_status);
2662
Sathya Perlafba87552013-05-08 02:05:50 +00002663 netif_tx_start_all_queues(netdev);
Parav Pandit045508a2012-03-26 14:27:13 +00002664 be_roce_dev_open(adapter);
Sathya Perla889cd4b2010-05-30 23:33:45 +00002665 return 0;
2666err:
2667 be_close(adapter->netdev);
2668 return -EIO;
Sathya Perla5fb379e2009-06-18 00:02:59 +00002669}
2670
Ajit Khaparde71d8d1b2009-12-03 06:16:59 +00002671static int be_setup_wol(struct be_adapter *adapter, bool enable)
2672{
2673 struct be_dma_mem cmd;
2674 int status = 0;
2675 u8 mac[ETH_ALEN];
2676
2677 memset(mac, 0, ETH_ALEN);
2678
2679 cmd.size = sizeof(struct be_cmd_req_acpi_wol_magic_config);
Ivan Vecera2b7bceb2011-02-02 08:05:12 +00002680 cmd.va = dma_alloc_coherent(&adapter->pdev->dev, cmd.size, &cmd.dma,
Joe Perches1f9061d22013-03-15 07:23:58 +00002681 GFP_KERNEL | __GFP_ZERO);
Ajit Khaparde71d8d1b2009-12-03 06:16:59 +00002682 if (cmd.va == NULL)
2683 return -1;
Ajit Khaparde71d8d1b2009-12-03 06:16:59 +00002684
2685 if (enable) {
2686 status = pci_write_config_dword(adapter->pdev,
2687 PCICFG_PM_CONTROL_OFFSET, PCICFG_PM_CONTROL_MASK);
2688 if (status) {
2689 dev_err(&adapter->pdev->dev,
Frans Pop2381a552010-03-24 07:57:36 +00002690 "Could not enable Wake-on-lan\n");
Ivan Vecera2b7bceb2011-02-02 08:05:12 +00002691 dma_free_coherent(&adapter->pdev->dev, cmd.size, cmd.va,
2692 cmd.dma);
Ajit Khaparde71d8d1b2009-12-03 06:16:59 +00002693 return status;
2694 }
2695 status = be_cmd_enable_magic_wol(adapter,
2696 adapter->netdev->dev_addr, &cmd);
2697 pci_enable_wake(adapter->pdev, PCI_D3hot, 1);
2698 pci_enable_wake(adapter->pdev, PCI_D3cold, 1);
2699 } else {
2700 status = be_cmd_enable_magic_wol(adapter, mac, &cmd);
2701 pci_enable_wake(adapter->pdev, PCI_D3hot, 0);
2702 pci_enable_wake(adapter->pdev, PCI_D3cold, 0);
2703 }
2704
Ivan Vecera2b7bceb2011-02-02 08:05:12 +00002705 dma_free_coherent(&adapter->pdev->dev, cmd.size, cmd.va, cmd.dma);
Ajit Khaparde71d8d1b2009-12-03 06:16:59 +00002706 return status;
2707}
2708
Ajit Khaparde6d87f5c2010-08-25 00:32:33 +00002709/*
2710 * Generate a seed MAC address from the PF MAC Address using jhash.
2711 * MAC Address for VFs are assigned incrementally starting from the seed.
2712 * These addresses are programmed in the ASIC by the PF and the VF driver
2713 * queries for the MAC address during its probe.
2714 */
Sathya Perla4c876612013-02-03 20:30:11 +00002715static int be_vf_eth_addr_config(struct be_adapter *adapter)
Ajit Khaparde6d87f5c2010-08-25 00:32:33 +00002716{
Sathya Perlaf9449ab2011-10-24 02:45:01 +00002717 u32 vf;
Sathya Perla3abcded2010-10-03 22:12:27 -07002718 int status = 0;
Ajit Khaparde6d87f5c2010-08-25 00:32:33 +00002719 u8 mac[ETH_ALEN];
Sathya Perla11ac75e2011-12-13 00:58:50 +00002720 struct be_vf_cfg *vf_cfg;
Ajit Khaparde6d87f5c2010-08-25 00:32:33 +00002721
2722 be_vf_eth_addr_generate(adapter, mac);
2723
Sathya Perla11ac75e2011-12-13 00:58:50 +00002724 for_all_vfs(adapter, vf_cfg, vf) {
Padmanabh Ratnakar590c3912011-11-25 05:47:26 +00002725 if (lancer_chip(adapter)) {
2726 status = be_cmd_set_mac_list(adapter, mac, 1, vf + 1);
2727 } else {
2728 status = be_cmd_pmac_add(adapter, mac,
Sathya Perla11ac75e2011-12-13 00:58:50 +00002729 vf_cfg->if_handle,
2730 &vf_cfg->pmac_id, vf + 1);
Padmanabh Ratnakar590c3912011-11-25 05:47:26 +00002731 }
2732
Ajit Khaparde6d87f5c2010-08-25 00:32:33 +00002733 if (status)
2734 dev_err(&adapter->pdev->dev,
Padmanabh Ratnakar590c3912011-11-25 05:47:26 +00002735 "Mac address assignment failed for VF %d\n", vf);
Ajit Khaparde6d87f5c2010-08-25 00:32:33 +00002736 else
Sathya Perla11ac75e2011-12-13 00:58:50 +00002737 memcpy(vf_cfg->mac_addr, mac, ETH_ALEN);
Ajit Khaparde6d87f5c2010-08-25 00:32:33 +00002738
2739 mac[5] += 1;
2740 }
2741 return status;
2742}
2743
Sathya Perla4c876612013-02-03 20:30:11 +00002744static int be_vfs_mac_query(struct be_adapter *adapter)
2745{
2746 int status, vf;
2747 u8 mac[ETH_ALEN];
2748 struct be_vf_cfg *vf_cfg;
2749 bool active;
2750
2751 for_all_vfs(adapter, vf_cfg, vf) {
2752 be_cmd_get_mac_from_list(adapter, mac, &active,
2753 &vf_cfg->pmac_id, 0);
2754
2755 status = be_cmd_mac_addr_query(adapter, mac, false,
2756 vf_cfg->if_handle, 0);
2757 if (status)
2758 return status;
2759 memcpy(vf_cfg->mac_addr, mac, ETH_ALEN);
2760 }
2761 return 0;
2762}
2763
Sathya Perlaf9449ab2011-10-24 02:45:01 +00002764static void be_vf_clear(struct be_adapter *adapter)
Ajit Khaparde6d87f5c2010-08-25 00:32:33 +00002765{
Sathya Perla11ac75e2011-12-13 00:58:50 +00002766 struct be_vf_cfg *vf_cfg;
Ajit Khaparde6d87f5c2010-08-25 00:32:33 +00002767 u32 vf;
2768
Sathya Perla39f1d942012-05-08 19:41:24 +00002769 if (be_find_vfs(adapter, ASSIGNED)) {
Sathya Perla4c876612013-02-03 20:30:11 +00002770 dev_warn(&adapter->pdev->dev,
2771 "VFs are assigned to VMs: not disabling VFs\n");
Sathya Perla39f1d942012-05-08 19:41:24 +00002772 goto done;
2773 }
2774
Sathya Perlab4c1df92013-05-08 02:05:47 +00002775 pci_disable_sriov(adapter->pdev);
2776
Sathya Perla11ac75e2011-12-13 00:58:50 +00002777 for_all_vfs(adapter, vf_cfg, vf) {
Padmanabh Ratnakar590c3912011-11-25 05:47:26 +00002778 if (lancer_chip(adapter))
2779 be_cmd_set_mac_list(adapter, NULL, 0, vf + 1);
2780 else
Sathya Perla11ac75e2011-12-13 00:58:50 +00002781 be_cmd_pmac_del(adapter, vf_cfg->if_handle,
2782 vf_cfg->pmac_id, vf + 1);
Sathya Perlaf9449ab2011-10-24 02:45:01 +00002783
Sathya Perla11ac75e2011-12-13 00:58:50 +00002784 be_cmd_if_destroy(adapter, vf_cfg->if_handle, vf + 1);
2785 }
Sathya Perla39f1d942012-05-08 19:41:24 +00002786done:
2787 kfree(adapter->vf_cfg);
2788 adapter->num_vfs = 0;
Ajit Khaparde6d87f5c2010-08-25 00:32:33 +00002789}
2790
Sathya Perlaa54769f2011-10-24 02:45:00 +00002791static int be_clear(struct be_adapter *adapter)
2792{
Ajit Khapardefbc13f02012-03-18 06:23:21 +00002793 int i = 1;
2794
Sathya Perla191eb752012-02-23 18:50:13 +00002795 if (adapter->flags & BE_FLAGS_WORKER_SCHEDULED) {
2796 cancel_delayed_work_sync(&adapter->work);
2797 adapter->flags &= ~BE_FLAGS_WORKER_SCHEDULED;
2798 }
2799
Sathya Perla11ac75e2011-12-13 00:58:50 +00002800 if (sriov_enabled(adapter))
Sathya Perlaf9449ab2011-10-24 02:45:01 +00002801 be_vf_clear(adapter);
2802
Ajit Khapardefbc13f02012-03-18 06:23:21 +00002803 for (; adapter->uc_macs > 0; adapter->uc_macs--, i++)
2804 be_cmd_pmac_del(adapter, adapter->if_handle,
2805 adapter->pmac_id[i], 0);
2806
Sathya Perlaf9449ab2011-10-24 02:45:01 +00002807 be_cmd_if_destroy(adapter, adapter->if_handle, 0);
Sathya Perlaa54769f2011-10-24 02:45:00 +00002808
2809 be_mcc_queues_destroy(adapter);
Sathya Perla10ef9ab2012-02-09 18:05:27 +00002810 be_rx_cqs_destroy(adapter);
Sathya Perlaa54769f2011-10-24 02:45:00 +00002811 be_tx_queues_destroy(adapter);
Sathya Perla10ef9ab2012-02-09 18:05:27 +00002812 be_evt_queues_destroy(adapter);
Sathya Perlaa54769f2011-10-24 02:45:00 +00002813
Padmanabh Ratnakarabb93952012-10-20 06:01:41 +00002814 kfree(adapter->pmac_id);
2815 adapter->pmac_id = NULL;
2816
Sathya Perla10ef9ab2012-02-09 18:05:27 +00002817 be_msix_disable(adapter);
Sathya Perlaa54769f2011-10-24 02:45:00 +00002818 return 0;
2819}
2820
Sathya Perla4c876612013-02-03 20:30:11 +00002821static int be_vfs_if_create(struct be_adapter *adapter)
Padmanabh Ratnakarabb93952012-10-20 06:01:41 +00002822{
Sathya Perla4c876612013-02-03 20:30:11 +00002823 struct be_vf_cfg *vf_cfg;
2824 u32 cap_flags, en_flags, vf;
Padmanabh Ratnakarabb93952012-10-20 06:01:41 +00002825 int status;
2826
Sathya Perla4c876612013-02-03 20:30:11 +00002827 cap_flags = BE_IF_FLAGS_UNTAGGED | BE_IF_FLAGS_BROADCAST |
2828 BE_IF_FLAGS_MULTICAST;
Padmanabh Ratnakarabb93952012-10-20 06:01:41 +00002829
Sathya Perla4c876612013-02-03 20:30:11 +00002830 for_all_vfs(adapter, vf_cfg, vf) {
2831 if (!BE3_chip(adapter))
Vasundhara Volama05f99d2013-04-21 23:28:17 +00002832 be_cmd_get_profile_config(adapter, &cap_flags,
2833 NULL, vf + 1);
Sathya Perla4c876612013-02-03 20:30:11 +00002834
2835 /* If a FW profile exists, then cap_flags are updated */
2836 en_flags = cap_flags & (BE_IF_FLAGS_UNTAGGED |
2837 BE_IF_FLAGS_BROADCAST | BE_IF_FLAGS_MULTICAST);
2838 status = be_cmd_if_create(adapter, cap_flags, en_flags,
2839 &vf_cfg->if_handle, vf + 1);
2840 if (status)
2841 goto err;
2842 }
2843err:
2844 return status;
Padmanabh Ratnakarabb93952012-10-20 06:01:41 +00002845}
2846
Sathya Perla39f1d942012-05-08 19:41:24 +00002847static int be_vf_setup_init(struct be_adapter *adapter)
Sathya Perla30128032011-11-10 19:17:57 +00002848{
Sathya Perla11ac75e2011-12-13 00:58:50 +00002849 struct be_vf_cfg *vf_cfg;
Sathya Perla30128032011-11-10 19:17:57 +00002850 int vf;
2851
Sathya Perla39f1d942012-05-08 19:41:24 +00002852 adapter->vf_cfg = kcalloc(adapter->num_vfs, sizeof(*vf_cfg),
2853 GFP_KERNEL);
2854 if (!adapter->vf_cfg)
2855 return -ENOMEM;
2856
Sathya Perla11ac75e2011-12-13 00:58:50 +00002857 for_all_vfs(adapter, vf_cfg, vf) {
2858 vf_cfg->if_handle = -1;
2859 vf_cfg->pmac_id = -1;
Sathya Perla30128032011-11-10 19:17:57 +00002860 }
Sathya Perla39f1d942012-05-08 19:41:24 +00002861 return 0;
Sathya Perla30128032011-11-10 19:17:57 +00002862}
2863
Sathya Perlaf9449ab2011-10-24 02:45:01 +00002864static int be_vf_setup(struct be_adapter *adapter)
2865{
Sathya Perla11ac75e2011-12-13 00:58:50 +00002866 struct be_vf_cfg *vf_cfg;
Ajit Khapardef1f3ee12012-03-18 06:23:41 +00002867 u16 def_vlan, lnk_speed;
Sathya Perla4c876612013-02-03 20:30:11 +00002868 int status, old_vfs, vf;
2869 struct device *dev = &adapter->pdev->dev;
Sathya Perlaf9449ab2011-10-24 02:45:01 +00002870
Sathya Perla4c876612013-02-03 20:30:11 +00002871 old_vfs = be_find_vfs(adapter, ENABLED);
2872 if (old_vfs) {
2873 dev_info(dev, "%d VFs are already enabled\n", old_vfs);
2874 if (old_vfs != num_vfs)
2875 dev_warn(dev, "Ignoring num_vfs=%d setting\n", num_vfs);
2876 adapter->num_vfs = old_vfs;
Sathya Perla39f1d942012-05-08 19:41:24 +00002877 } else {
Sathya Perla4c876612013-02-03 20:30:11 +00002878 if (num_vfs > adapter->dev_num_vfs)
2879 dev_info(dev, "Device supports %d VFs and not %d\n",
2880 adapter->dev_num_vfs, num_vfs);
2881 adapter->num_vfs = min_t(u16, num_vfs, adapter->dev_num_vfs);
Sathya Perlab4c1df92013-05-08 02:05:47 +00002882 if (!adapter->num_vfs)
Sathya Perla4c876612013-02-03 20:30:11 +00002883 return 0;
Sathya Perla39f1d942012-05-08 19:41:24 +00002884 }
2885
2886 status = be_vf_setup_init(adapter);
2887 if (status)
2888 goto err;
Sathya Perla30128032011-11-10 19:17:57 +00002889
Sathya Perla4c876612013-02-03 20:30:11 +00002890 if (old_vfs) {
2891 for_all_vfs(adapter, vf_cfg, vf) {
2892 status = be_cmd_get_if_id(adapter, vf_cfg, vf);
2893 if (status)
2894 goto err;
2895 }
2896 } else {
2897 status = be_vfs_if_create(adapter);
Sathya Perlaf9449ab2011-10-24 02:45:01 +00002898 if (status)
2899 goto err;
Sathya Perlaf9449ab2011-10-24 02:45:01 +00002900 }
2901
Sathya Perla4c876612013-02-03 20:30:11 +00002902 if (old_vfs) {
2903 status = be_vfs_mac_query(adapter);
2904 if (status)
2905 goto err;
2906 } else {
Sathya Perla39f1d942012-05-08 19:41:24 +00002907 status = be_vf_eth_addr_config(adapter);
2908 if (status)
2909 goto err;
2910 }
Sathya Perlaf9449ab2011-10-24 02:45:01 +00002911
Sathya Perla11ac75e2011-12-13 00:58:50 +00002912 for_all_vfs(adapter, vf_cfg, vf) {
Sathya Perla4c876612013-02-03 20:30:11 +00002913 /* BE3 FW, by default, caps VF TX-rate to 100mbps.
2914 * Allow full available bandwidth
2915 */
2916 if (BE3_chip(adapter) && !old_vfs)
2917 be_cmd_set_qos(adapter, 1000, vf+1);
2918
2919 status = be_cmd_link_status_query(adapter, &lnk_speed,
2920 NULL, vf + 1);
2921 if (!status)
2922 vf_cfg->tx_rate = lnk_speed;
Ajit Khapardef1f3ee12012-03-18 06:23:41 +00002923
2924 status = be_cmd_get_hsw_config(adapter, &def_vlan,
Sathya Perla4c876612013-02-03 20:30:11 +00002925 vf + 1, vf_cfg->if_handle);
Ajit Khapardef1f3ee12012-03-18 06:23:41 +00002926 if (status)
2927 goto err;
2928 vf_cfg->def_vid = def_vlan;
Padmanabh Ratnakardcf7ebb2012-10-20 06:03:49 +00002929
2930 be_cmd_enable_vf(adapter, vf + 1);
Sathya Perlaf9449ab2011-10-24 02:45:01 +00002931 }
Sathya Perlab4c1df92013-05-08 02:05:47 +00002932
2933 if (!old_vfs) {
2934 status = pci_enable_sriov(adapter->pdev, adapter->num_vfs);
2935 if (status) {
2936 dev_err(dev, "SRIOV enable failed\n");
2937 adapter->num_vfs = 0;
2938 goto err;
2939 }
2940 }
Sathya Perlaf9449ab2011-10-24 02:45:01 +00002941 return 0;
2942err:
Sathya Perla4c876612013-02-03 20:30:11 +00002943 dev_err(dev, "VF setup failed\n");
2944 be_vf_clear(adapter);
Sathya Perlaf9449ab2011-10-24 02:45:01 +00002945 return status;
2946}
2947
Sathya Perla30128032011-11-10 19:17:57 +00002948static void be_setup_init(struct be_adapter *adapter)
2949{
2950 adapter->vlan_prio_bmap = 0xff;
Ajit Khaparde42f11cf2012-04-21 18:53:22 +00002951 adapter->phy.link_speed = -1;
Sathya Perla30128032011-11-10 19:17:57 +00002952 adapter->if_handle = -1;
2953 adapter->be3_native = false;
2954 adapter->promiscuous = false;
Padmanabh Ratnakarf25b1192012-10-20 06:02:52 +00002955 if (be_physfn(adapter))
2956 adapter->cmd_privileges = MAX_PRIVILEGES;
2957 else
2958 adapter->cmd_privileges = MIN_PRIVILEGES;
Sathya Perla30128032011-11-10 19:17:57 +00002959}
2960
Padmanabh Ratnakar1578e772012-06-07 04:37:08 +00002961static int be_get_mac_addr(struct be_adapter *adapter, u8 *mac, u32 if_handle,
2962 bool *active_mac, u32 *pmac_id)
Padmanabh Ratnakar590c3912011-11-25 05:47:26 +00002963{
Padmanabh Ratnakar1578e772012-06-07 04:37:08 +00002964 int status = 0;
Padmanabh Ratnakare5e1ee82012-02-03 09:50:17 +00002965
Padmanabh Ratnakar1578e772012-06-07 04:37:08 +00002966 if (!is_zero_ether_addr(adapter->netdev->perm_addr)) {
2967 memcpy(mac, adapter->netdev->dev_addr, ETH_ALEN);
2968 if (!lancer_chip(adapter) && !be_physfn(adapter))
2969 *active_mac = true;
2970 else
2971 *active_mac = false;
Padmanabh Ratnakare5e1ee82012-02-03 09:50:17 +00002972
Padmanabh Ratnakar1578e772012-06-07 04:37:08 +00002973 return status;
Padmanabh Ratnakare5e1ee82012-02-03 09:50:17 +00002974 }
Padmanabh Ratnakar1578e772012-06-07 04:37:08 +00002975
2976 if (lancer_chip(adapter)) {
2977 status = be_cmd_get_mac_from_list(adapter, mac,
2978 active_mac, pmac_id, 0);
2979 if (*active_mac) {
Sathya Perla5ee49792012-09-28 04:39:41 +00002980 status = be_cmd_mac_addr_query(adapter, mac, false,
2981 if_handle, *pmac_id);
Padmanabh Ratnakar1578e772012-06-07 04:37:08 +00002982 }
2983 } else if (be_physfn(adapter)) {
2984 /* For BE3, for PF get permanent MAC */
Sathya Perla5ee49792012-09-28 04:39:41 +00002985 status = be_cmd_mac_addr_query(adapter, mac, true, 0, 0);
Padmanabh Ratnakar1578e772012-06-07 04:37:08 +00002986 *active_mac = false;
2987 } else {
2988 /* For BE3, for VF get soft MAC assigned by PF*/
Sathya Perla5ee49792012-09-28 04:39:41 +00002989 status = be_cmd_mac_addr_query(adapter, mac, false,
Padmanabh Ratnakar1578e772012-06-07 04:37:08 +00002990 if_handle, 0);
2991 *active_mac = true;
2992 }
Padmanabh Ratnakar590c3912011-11-25 05:47:26 +00002993 return status;
2994}
2995
Padmanabh Ratnakarabb93952012-10-20 06:01:41 +00002996static void be_get_resources(struct be_adapter *adapter)
2997{
Sathya Perla4c876612013-02-03 20:30:11 +00002998 u16 dev_num_vfs;
2999 int pos, status;
Padmanabh Ratnakarabb93952012-10-20 06:01:41 +00003000 bool profile_present = false;
Vasundhara Volama05f99d2013-04-21 23:28:17 +00003001 u16 txq_count = 0;
Padmanabh Ratnakarabb93952012-10-20 06:01:41 +00003002
Sathya Perla4c876612013-02-03 20:30:11 +00003003 if (!BEx_chip(adapter)) {
Padmanabh Ratnakarabb93952012-10-20 06:01:41 +00003004 status = be_cmd_get_func_config(adapter);
Padmanabh Ratnakarabb93952012-10-20 06:01:41 +00003005 if (!status)
3006 profile_present = true;
Vasundhara Volama05f99d2013-04-21 23:28:17 +00003007 } else if (BE3_chip(adapter) && be_physfn(adapter)) {
3008 be_cmd_get_profile_config(adapter, NULL, &txq_count, 0);
Padmanabh Ratnakarabb93952012-10-20 06:01:41 +00003009 }
3010
3011 if (profile_present) {
3012 /* Sanity fixes for Lancer */
3013 adapter->max_pmac_cnt = min_t(u16, adapter->max_pmac_cnt,
3014 BE_UC_PMAC_COUNT);
3015 adapter->max_vlans = min_t(u16, adapter->max_vlans,
3016 BE_NUM_VLANS_SUPPORTED);
3017 adapter->max_mcast_mac = min_t(u16, adapter->max_mcast_mac,
3018 BE_MAX_MC);
3019 adapter->max_tx_queues = min_t(u16, adapter->max_tx_queues,
3020 MAX_TX_QS);
3021 adapter->max_rss_queues = min_t(u16, adapter->max_rss_queues,
3022 BE3_MAX_RSS_QS);
3023 adapter->max_event_queues = min_t(u16,
3024 adapter->max_event_queues,
3025 BE3_MAX_RSS_QS);
3026
3027 if (adapter->max_rss_queues &&
3028 adapter->max_rss_queues == adapter->max_rx_queues)
3029 adapter->max_rss_queues -= 1;
3030
3031 if (adapter->max_event_queues < adapter->max_rss_queues)
3032 adapter->max_rss_queues = adapter->max_event_queues;
3033
3034 } else {
3035 if (be_physfn(adapter))
3036 adapter->max_pmac_cnt = BE_UC_PMAC_COUNT;
3037 else
3038 adapter->max_pmac_cnt = BE_VF_UC_PMAC_COUNT;
3039
3040 if (adapter->function_mode & FLEX10_MODE)
3041 adapter->max_vlans = BE_NUM_VLANS_SUPPORTED/8;
3042 else
3043 adapter->max_vlans = BE_NUM_VLANS_SUPPORTED;
3044
3045 adapter->max_mcast_mac = BE_MAX_MC;
Vasundhara Volama05f99d2013-04-21 23:28:17 +00003046 adapter->max_tx_queues = txq_count ? txq_count : MAX_TX_QS;
3047 adapter->max_tx_queues = min_t(u16, adapter->max_tx_queues,
3048 MAX_TX_QS);
Padmanabh Ratnakarabb93952012-10-20 06:01:41 +00003049 adapter->max_rss_queues = (adapter->be3_native) ?
3050 BE3_MAX_RSS_QS : BE2_MAX_RSS_QS;
3051 adapter->max_event_queues = BE3_MAX_RSS_QS;
3052
3053 adapter->if_cap_flags = BE_IF_FLAGS_UNTAGGED |
3054 BE_IF_FLAGS_BROADCAST |
3055 BE_IF_FLAGS_MULTICAST |
3056 BE_IF_FLAGS_PASS_L3L4_ERRORS |
3057 BE_IF_FLAGS_MCAST_PROMISCUOUS |
3058 BE_IF_FLAGS_VLAN_PROMISCUOUS |
3059 BE_IF_FLAGS_PROMISCUOUS;
3060
3061 if (adapter->function_caps & BE_FUNCTION_CAPS_RSS)
3062 adapter->if_cap_flags |= BE_IF_FLAGS_RSS;
3063 }
Sathya Perla4c876612013-02-03 20:30:11 +00003064
3065 pos = pci_find_ext_capability(adapter->pdev, PCI_EXT_CAP_ID_SRIOV);
3066 if (pos) {
3067 pci_read_config_word(adapter->pdev, pos + PCI_SRIOV_TOTAL_VF,
3068 &dev_num_vfs);
3069 if (BE3_chip(adapter))
3070 dev_num_vfs = min_t(u16, dev_num_vfs, MAX_VFS);
3071 adapter->dev_num_vfs = dev_num_vfs;
3072 }
Padmanabh Ratnakarabb93952012-10-20 06:01:41 +00003073}
3074
Sathya Perla39f1d942012-05-08 19:41:24 +00003075/* Routine to query per function resource limits */
3076static int be_get_config(struct be_adapter *adapter)
3077{
Sathya Perla4c876612013-02-03 20:30:11 +00003078 int status;
Sathya Perla39f1d942012-05-08 19:41:24 +00003079
Padmanabh Ratnakarabb93952012-10-20 06:01:41 +00003080 status = be_cmd_query_fw_cfg(adapter, &adapter->port_num,
3081 &adapter->function_mode,
Vasundhara Volam0ad31572013-04-21 23:28:16 +00003082 &adapter->function_caps,
3083 &adapter->asic_rev);
Padmanabh Ratnakarabb93952012-10-20 06:01:41 +00003084 if (status)
3085 goto err;
3086
3087 be_get_resources(adapter);
3088
3089 /* primary mac needs 1 pmac entry */
3090 adapter->pmac_id = kcalloc(adapter->max_pmac_cnt + 1,
3091 sizeof(u32), GFP_KERNEL);
3092 if (!adapter->pmac_id) {
3093 status = -ENOMEM;
3094 goto err;
3095 }
3096
Padmanabh Ratnakarabb93952012-10-20 06:01:41 +00003097err:
3098 return status;
Sathya Perla39f1d942012-05-08 19:41:24 +00003099}
3100
Sathya Perla5fb379e2009-06-18 00:02:59 +00003101static int be_setup(struct be_adapter *adapter)
3102{
Sathya Perla39f1d942012-05-08 19:41:24 +00003103 struct device *dev = &adapter->pdev->dev;
Padmanabh Ratnakarabb93952012-10-20 06:01:41 +00003104 u32 en_flags;
Sathya Perlaa54769f2011-10-24 02:45:00 +00003105 u32 tx_fc, rx_fc;
Sathya Perla10ef9ab2012-02-09 18:05:27 +00003106 int status;
Sarveshwar Bandiba343c72010-03-31 02:56:12 +00003107 u8 mac[ETH_ALEN];
Padmanabh Ratnakar1578e772012-06-07 04:37:08 +00003108 bool active_mac;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07003109
Sathya Perla30128032011-11-10 19:17:57 +00003110 be_setup_init(adapter);
Sathya Perlaf9449ab2011-10-24 02:45:01 +00003111
Padmanabh Ratnakarabb93952012-10-20 06:01:41 +00003112 if (!lancer_chip(adapter))
3113 be_cmd_req_native_mode(adapter);
Sathya Perla39f1d942012-05-08 19:41:24 +00003114
Padmanabh Ratnakarabb93952012-10-20 06:01:41 +00003115 status = be_get_config(adapter);
3116 if (status)
3117 goto err;
Sathya Perla2dc1deb2011-07-19 19:52:33 +00003118
Somnath Koturc2bba3d2013-05-02 03:37:08 +00003119 status = be_msix_enable(adapter);
3120 if (status)
3121 goto err;
Sathya Perla10ef9ab2012-02-09 18:05:27 +00003122
3123 status = be_evt_queues_create(adapter);
3124 if (status)
Sathya Perlaa54769f2011-10-24 02:45:00 +00003125 goto err;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07003126
Sathya Perla10ef9ab2012-02-09 18:05:27 +00003127 status = be_tx_cqs_create(adapter);
3128 if (status)
3129 goto err;
3130
3131 status = be_rx_cqs_create(adapter);
3132 if (status)
Sathya Perlaa54769f2011-10-24 02:45:00 +00003133 goto err;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07003134
Sathya Perla5fb379e2009-06-18 00:02:59 +00003135 status = be_mcc_queues_create(adapter);
Sathya Perla10ef9ab2012-02-09 18:05:27 +00003136 if (status)
Sathya Perlaa54769f2011-10-24 02:45:00 +00003137 goto err;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07003138
Padmanabh Ratnakarf25b1192012-10-20 06:02:52 +00003139 be_cmd_get_fn_privileges(adapter, &adapter->cmd_privileges, 0);
3140 /* In UMC mode FW does not return right privileges.
3141 * Override with correct privilege equivalent to PF.
3142 */
3143 if (be_is_mc(adapter))
3144 adapter->cmd_privileges = MAX_PRIVILEGES;
3145
Sathya Perlaf9449ab2011-10-24 02:45:01 +00003146 en_flags = BE_IF_FLAGS_UNTAGGED | BE_IF_FLAGS_BROADCAST |
3147 BE_IF_FLAGS_MULTICAST | BE_IF_FLAGS_PASS_L3L4_ERRORS;
Padmanabh Ratnakar5d5adb92011-11-16 02:03:32 +00003148
Padmanabh Ratnakarabb93952012-10-20 06:01:41 +00003149 if (adapter->function_caps & BE_FUNCTION_CAPS_RSS)
Sathya Perlaf9449ab2011-10-24 02:45:01 +00003150 en_flags |= BE_IF_FLAGS_RSS;
Padmanabh Ratnakar1578e772012-06-07 04:37:08 +00003151
Padmanabh Ratnakarabb93952012-10-20 06:01:41 +00003152 en_flags = en_flags & adapter->if_cap_flags;
Padmanabh Ratnakar0b13fb42012-07-18 02:51:58 +00003153
Padmanabh Ratnakarabb93952012-10-20 06:01:41 +00003154 status = be_cmd_if_create(adapter, adapter->if_cap_flags, en_flags,
Padmanabh Ratnakar1578e772012-06-07 04:37:08 +00003155 &adapter->if_handle, 0);
Sathya Perlaf9449ab2011-10-24 02:45:01 +00003156 if (status != 0)
3157 goto err;
3158
Padmanabh Ratnakar1578e772012-06-07 04:37:08 +00003159 memset(mac, 0, ETH_ALEN);
3160 active_mac = false;
3161 status = be_get_mac_addr(adapter, mac, adapter->if_handle,
3162 &active_mac, &adapter->pmac_id[0]);
3163 if (status != 0)
3164 goto err;
3165
3166 if (!active_mac) {
3167 status = be_cmd_pmac_add(adapter, mac, adapter->if_handle,
3168 &adapter->pmac_id[0], 0);
3169 if (status != 0)
3170 goto err;
3171 }
3172
3173 if (is_zero_ether_addr(adapter->netdev->dev_addr)) {
3174 memcpy(adapter->netdev->dev_addr, mac, ETH_ALEN);
3175 memcpy(adapter->netdev->perm_addr, mac, ETH_ALEN);
Sathya Perlaf9449ab2011-10-24 02:45:01 +00003176 }
Ajit Khaparde0dffc832009-11-29 17:57:46 +00003177
Sathya Perla10ef9ab2012-02-09 18:05:27 +00003178 status = be_tx_qs_create(adapter);
3179 if (status)
3180 goto err;
3181
Sathya Perla04b71172011-09-27 13:30:27 -04003182 be_cmd_get_fw_ver(adapter, adapter->fw_ver, NULL);
Somnath Kotur5a56eb12011-09-30 07:24:28 +00003183
Sathya Perla1d1e9a42012-06-05 19:37:17 +00003184 if (adapter->vlans_added)
Sathya Perla10329df2012-06-05 19:37:18 +00003185 be_vid_config(adapter);
Sathya Perlaa54769f2011-10-24 02:45:00 +00003186
3187 be_set_rx_mode(adapter->netdev);
3188
Ajit Khapardeddc3f5c2012-04-26 15:42:31 +00003189 be_cmd_get_flow_control(adapter, &tx_fc, &rx_fc);
Padmanabh Ratnakar590c3912011-11-25 05:47:26 +00003190
Ajit Khapardeddc3f5c2012-04-26 15:42:31 +00003191 if (rx_fc != adapter->rx_fc || tx_fc != adapter->tx_fc)
3192 be_cmd_set_flow_control(adapter, adapter->tx_fc,
Sathya Perlaa54769f2011-10-24 02:45:00 +00003193 adapter->rx_fc);
Sathya Perlaa54769f2011-10-24 02:45:00 +00003194
Sathya Perlab4c1df92013-05-08 02:05:47 +00003195 if (be_physfn(adapter)) {
Sathya Perla39f1d942012-05-08 19:41:24 +00003196 if (adapter->dev_num_vfs)
3197 be_vf_setup(adapter);
3198 else
3199 dev_warn(dev, "device doesn't support SRIOV\n");
Sathya Perlaf9449ab2011-10-24 02:45:01 +00003200 }
3201
Padmanabh Ratnakarf25b1192012-10-20 06:02:52 +00003202 status = be_cmd_get_phy_info(adapter);
3203 if (!status && be_pause_supported(adapter))
Ajit Khaparde42f11cf2012-04-21 18:53:22 +00003204 adapter->phy.fc_autoneg = 1;
3205
Sathya Perla191eb752012-02-23 18:50:13 +00003206 schedule_delayed_work(&adapter->work, msecs_to_jiffies(1000));
3207 adapter->flags |= BE_FLAGS_WORKER_SCHEDULED;
Sathya Perlaf9449ab2011-10-24 02:45:01 +00003208 return 0;
Sathya Perlaa54769f2011-10-24 02:45:00 +00003209err:
3210 be_clear(adapter);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07003211 return status;
3212}
3213
#ifdef CONFIG_NET_POLL_CONTROLLER
/* netpoll handler: with interrupts unusable, manually trigger an event
 * on every EQ and schedule its NAPI context to drain completions.
 */
static void be_netpoll(struct net_device *netdev)
{
	struct be_adapter *adapter = netdev_priv(netdev);
	struct be_eq_obj *eqo;
	int i;

	for_all_evt_queues(adapter, eqo, i) {
		be_eq_notify(eqo->adapter, eqo->q.id, false, true, 0);
		napi_schedule(&eqo->napi);
	}
	/* dropped a redundant trailing "return;" in this void function */
}
#endif
3229
Ajit Khaparde84517482009-09-04 03:12:16 +00003230#define FW_FILE_HDR_SIGN "ServerEngines Corp. "
/* Cookie that marks a flash section directory in a UFI image; compared
 * (via memcmp in get_fsec_info()) against each candidate header's cookie.
 */
Padmanabh Ratnakarc165541e2012-04-25 01:47:15 +00003231char flash_cookie[2][16] = {"*** SE FLAS", "H DIRECTORY *** "};
3232
Sarveshwar Bandifa9a6fe2009-11-20 14:23:47 -08003233static bool be_flash_redboot(struct be_adapter *adapter,
Ajit Khaparde3f0d4562010-02-09 01:30:35 +00003234 const u8 *p, u32 img_start, int image_size,
3235 int hdr_size)
Sarveshwar Bandifa9a6fe2009-11-20 14:23:47 -08003236{
3237 u32 crc_offset;
3238 u8 flashed_crc[4];
3239 int status;
Ajit Khaparde3f0d4562010-02-09 01:30:35 +00003240
3241 crc_offset = hdr_size + img_start + image_size - 4;
3242
Sarveshwar Bandifa9a6fe2009-11-20 14:23:47 -08003243 p += crc_offset;
Ajit Khaparde3f0d4562010-02-09 01:30:35 +00003244
3245 status = be_cmd_get_flash_crc(adapter, flashed_crc,
Ajit Khapardef510fc62010-03-31 01:47:45 +00003246 (image_size - 4));
Sarveshwar Bandifa9a6fe2009-11-20 14:23:47 -08003247 if (status) {
3248 dev_err(&adapter->pdev->dev,
3249 "could not get crc from flash, not flashing redboot\n");
3250 return false;
3251 }
3252
3253 /*update redboot only if crc does not match*/
3254 if (!memcmp(flashed_crc, p, 4))
3255 return false;
3256 else
3257 return true;
Sarveshwar Bandifa9a6fe2009-11-20 14:23:47 -08003258}
3259
Sathya Perla306f1342011-08-02 19:57:45 +00003260static bool phy_flashing_required(struct be_adapter *adapter)
3261{
Ajit Khaparde42f11cf2012-04-21 18:53:22 +00003262 return (adapter->phy.phy_type == TN_8022 &&
3263 adapter->phy.interface_type == PHY_TYPE_BASET_10GB);
Sathya Perla306f1342011-08-02 19:57:45 +00003264}
3265
Padmanabh Ratnakarc165541e2012-04-25 01:47:15 +00003266static bool is_comp_in_ufi(struct be_adapter *adapter,
3267 struct flash_section_info *fsec, int type)
3268{
3269 int i = 0, img_type = 0;
3270 struct flash_section_info_g2 *fsec_g2 = NULL;
3271
Sathya Perlaca34fe32012-11-06 17:48:56 +00003272 if (BE2_chip(adapter))
Padmanabh Ratnakarc165541e2012-04-25 01:47:15 +00003273 fsec_g2 = (struct flash_section_info_g2 *)fsec;
3274
3275 for (i = 0; i < MAX_FLASH_COMP; i++) {
3276 if (fsec_g2)
3277 img_type = le32_to_cpu(fsec_g2->fsec_entry[i].type);
3278 else
3279 img_type = le32_to_cpu(fsec->fsec_entry[i].type);
3280
3281 if (img_type == type)
3282 return true;
3283 }
3284 return false;
3285
3286}
3287
3288struct flash_section_info *get_fsec_info(struct be_adapter *adapter,
3289 int header_size,
3290 const struct firmware *fw)
3291{
3292 struct flash_section_info *fsec = NULL;
3293 const u8 *p = fw->data;
3294
3295 p += header_size;
3296 while (p < (fw->data + fw->size)) {
3297 fsec = (struct flash_section_info *)p;
3298 if (!memcmp(flash_cookie, fsec->cookie, sizeof(flash_cookie)))
3299 return fsec;
3300 p += 32;
3301 }
3302 return NULL;
3303}
3304
/* Write one image (@img, @img_size bytes) to the adapter's flash rom in
 * 32KB chunks.  Intermediate chunks are sent with a SAVE opcode; the final
 * chunk uses a FLASH opcode, which tells firmware to commit the staged data.
 * PHY firmware uses its own opcode pair.  Returns 0 on success or the
 * firmware command status on failure.
 */
static int be_flash(struct be_adapter *adapter, const u8 *img,
		struct be_dma_mem *flash_cmd, int optype, int img_size)
{
	u32 total_bytes = 0, flash_op, num_bytes = 0;
	int status = 0;
	struct be_cmd_write_flashrom *req = flash_cmd->va;

	total_bytes = img_size;
	while (total_bytes) {
		/* At most 32KB per firmware command */
		num_bytes = min_t(u32, 32*1024, total_bytes);

		total_bytes -= num_bytes;

		if (!total_bytes) {
			/* Last chunk: commit to flash */
			if (optype == OPTYPE_PHY_FW)
				flash_op = FLASHROM_OPER_PHY_FLASH;
			else
				flash_op = FLASHROM_OPER_FLASH;
		} else {
			/* More chunks follow: just stage the data */
			if (optype == OPTYPE_PHY_FW)
				flash_op = FLASHROM_OPER_PHY_SAVE;
			else
				flash_op = FLASHROM_OPER_SAVE;
		}

		/* Copy the chunk into the DMA-able command buffer */
		memcpy(req->data_buf, img, num_bytes);
		img += num_bytes;
		status = be_cmd_write_flashrom(adapter, flash_cmd, optype,
				flash_op, num_bytes);
		if (status) {
			/* Firmware that lacks PHY-flash support rejects the
			 * opcode; treat that as a benign skip, not an error.
			 */
			if (status == ILLEGAL_IOCTL_REQ &&
			    optype == OPTYPE_PHY_FW)
				break;
			dev_err(&adapter->pdev->dev,
				"cmd to write to flash rom failed.\n");
			return status;
		}
	}
	return 0;
}
3345
/* For BE2, BE3 and BE3-R */
/* Flash each known component type found in the UFI onto the adapter.
 * A static table (gen2 for BE2, gen3 for BE3) maps each component to its
 * flash offset, op-type and maximum size; components absent from the UFI's
 * section table are skipped.  Returns 0 on success, negative on failure.
 */
static int be_flash_BEx(struct be_adapter *adapter,
			 const struct firmware *fw,
			 struct be_dma_mem *flash_cmd,
			 int num_of_images)

{
	int status = 0, i, filehdr_size = 0;
	int img_hdrs_size = (num_of_images * sizeof(struct image_hdr));
	const u8 *p = fw->data;
	const struct flash_comp *pflashcomp;
	int num_comp, redboot;
	struct flash_section_info *fsec = NULL;

	/* Component table for BE3 (gen3) UFI layout */
	struct flash_comp gen3_flash_types[] = {
		{ FLASH_iSCSI_PRIMARY_IMAGE_START_g3, OPTYPE_ISCSI_ACTIVE,
			FLASH_IMAGE_MAX_SIZE_g3, IMAGE_FIRMWARE_iSCSI},
		{ FLASH_REDBOOT_START_g3, OPTYPE_REDBOOT,
			FLASH_REDBOOT_IMAGE_MAX_SIZE_g3, IMAGE_BOOT_CODE},
		{ FLASH_iSCSI_BIOS_START_g3, OPTYPE_BIOS,
			FLASH_BIOS_IMAGE_MAX_SIZE_g3, IMAGE_OPTION_ROM_ISCSI},
		{ FLASH_PXE_BIOS_START_g3, OPTYPE_PXE_BIOS,
			FLASH_BIOS_IMAGE_MAX_SIZE_g3, IMAGE_OPTION_ROM_PXE},
		{ FLASH_FCoE_BIOS_START_g3, OPTYPE_FCOE_BIOS,
			FLASH_BIOS_IMAGE_MAX_SIZE_g3, IMAGE_OPTION_ROM_FCoE},
		{ FLASH_iSCSI_BACKUP_IMAGE_START_g3, OPTYPE_ISCSI_BACKUP,
			FLASH_IMAGE_MAX_SIZE_g3, IMAGE_FIRMWARE_BACKUP_iSCSI},
		{ FLASH_FCoE_PRIMARY_IMAGE_START_g3, OPTYPE_FCOE_FW_ACTIVE,
			FLASH_IMAGE_MAX_SIZE_g3, IMAGE_FIRMWARE_FCoE},
		{ FLASH_FCoE_BACKUP_IMAGE_START_g3, OPTYPE_FCOE_FW_BACKUP,
			FLASH_IMAGE_MAX_SIZE_g3, IMAGE_FIRMWARE_BACKUP_FCoE},
		{ FLASH_NCSI_START_g3, OPTYPE_NCSI_FW,
			FLASH_NCSI_IMAGE_MAX_SIZE_g3, IMAGE_NCSI},
		{ FLASH_PHY_FW_START_g3, OPTYPE_PHY_FW,
			FLASH_PHY_FW_IMAGE_MAX_SIZE_g3, IMAGE_FIRMWARE_PHY}
	};

	/* Component table for BE2 (gen2) UFI layout */
	struct flash_comp gen2_flash_types[] = {
		{ FLASH_iSCSI_PRIMARY_IMAGE_START_g2, OPTYPE_ISCSI_ACTIVE,
			FLASH_IMAGE_MAX_SIZE_g2, IMAGE_FIRMWARE_iSCSI},
		{ FLASH_REDBOOT_START_g2, OPTYPE_REDBOOT,
			FLASH_REDBOOT_IMAGE_MAX_SIZE_g2, IMAGE_BOOT_CODE},
		{ FLASH_iSCSI_BIOS_START_g2, OPTYPE_BIOS,
			FLASH_BIOS_IMAGE_MAX_SIZE_g2, IMAGE_OPTION_ROM_ISCSI},
		{ FLASH_PXE_BIOS_START_g2, OPTYPE_PXE_BIOS,
			FLASH_BIOS_IMAGE_MAX_SIZE_g2, IMAGE_OPTION_ROM_PXE},
		{ FLASH_FCoE_BIOS_START_g2, OPTYPE_FCOE_BIOS,
			FLASH_BIOS_IMAGE_MAX_SIZE_g2, IMAGE_OPTION_ROM_FCoE},
		{ FLASH_iSCSI_BACKUP_IMAGE_START_g2, OPTYPE_ISCSI_BACKUP,
			FLASH_IMAGE_MAX_SIZE_g2, IMAGE_FIRMWARE_BACKUP_iSCSI},
		{ FLASH_FCoE_PRIMARY_IMAGE_START_g2, OPTYPE_FCOE_FW_ACTIVE,
			FLASH_IMAGE_MAX_SIZE_g2, IMAGE_FIRMWARE_FCoE},
		{ FLASH_FCoE_BACKUP_IMAGE_START_g2, OPTYPE_FCOE_FW_BACKUP,
			FLASH_IMAGE_MAX_SIZE_g2, IMAGE_FIRMWARE_BACKUP_FCoE}
	};

	if (BE3_chip(adapter)) {
		pflashcomp = gen3_flash_types;
		filehdr_size = sizeof(struct flash_file_hdr_g3);
		num_comp = ARRAY_SIZE(gen3_flash_types);
	} else {
		pflashcomp = gen2_flash_types;
		filehdr_size = sizeof(struct flash_file_hdr_g2);
		num_comp = ARRAY_SIZE(gen2_flash_types);
	}

	/* Get flash section info*/
	fsec = get_fsec_info(adapter, filehdr_size + img_hdrs_size, fw);
	if (!fsec) {
		dev_err(&adapter->pdev->dev,
			"Invalid Cookie. UFI corrupted ?\n");
		return -1;
	}
	for (i = 0; i < num_comp; i++) {
		/* Skip components the UFI does not actually carry */
		if (!is_comp_in_ufi(adapter, fsec, pflashcomp[i].img_type))
			continue;

		/* NCSI firmware needs FW version 3.102.148.0 or newer */
		if ((pflashcomp[i].optype == OPTYPE_NCSI_FW) &&
		    memcmp(adapter->fw_ver, "3.102.148.0", 11) < 0)
			continue;

		if (pflashcomp[i].optype == OPTYPE_PHY_FW &&
		    !phy_flashing_required(adapter))
			continue;

		/* Only flash the boot loader if it actually differs from
		 * what is on the adapter (be_flash_redboot compares them).
		 */
		if (pflashcomp[i].optype == OPTYPE_REDBOOT) {
			redboot = be_flash_redboot(adapter, fw->data,
				pflashcomp[i].offset, pflashcomp[i].size,
				filehdr_size + img_hdrs_size);
			if (!redboot)
				continue;
		}

		p = fw->data;
		p += filehdr_size + pflashcomp[i].offset + img_hdrs_size;
		/* Bounds-check the component against the firmware blob */
		if (p + pflashcomp[i].size > fw->data + fw->size)
			return -1;

		status = be_flash(adapter, p, flash_cmd, pflashcomp[i].optype,
					pflashcomp[i].size);
		if (status) {
			dev_err(&adapter->pdev->dev,
				"Flashing section type %d failed.\n",
				pflashcomp[i].img_type);
			return status;
		}
	}
	return 0;
}
3455
Padmanabh Ratnakar773a2d72012-10-20 06:04:16 +00003456static int be_flash_skyhawk(struct be_adapter *adapter,
3457 const struct firmware *fw,
3458 struct be_dma_mem *flash_cmd, int num_of_images)
Ajit Khaparde3f0d4562010-02-09 01:30:35 +00003459{
Padmanabh Ratnakar773a2d72012-10-20 06:04:16 +00003460 int status = 0, i, filehdr_size = 0;
3461 int img_offset, img_size, img_optype, redboot;
3462 int img_hdrs_size = num_of_images * sizeof(struct image_hdr);
3463 const u8 *p = fw->data;
3464 struct flash_section_info *fsec = NULL;
3465
3466 filehdr_size = sizeof(struct flash_file_hdr_g3);
3467 fsec = get_fsec_info(adapter, filehdr_size + img_hdrs_size, fw);
3468 if (!fsec) {
3469 dev_err(&adapter->pdev->dev,
3470 "Invalid Cookie. UFI corrupted ?\n");
3471 return -1;
3472 }
3473
3474 for (i = 0; i < le32_to_cpu(fsec->fsec_hdr.num_images); i++) {
3475 img_offset = le32_to_cpu(fsec->fsec_entry[i].offset);
3476 img_size = le32_to_cpu(fsec->fsec_entry[i].pad_size);
3477
3478 switch (le32_to_cpu(fsec->fsec_entry[i].type)) {
3479 case IMAGE_FIRMWARE_iSCSI:
3480 img_optype = OPTYPE_ISCSI_ACTIVE;
3481 break;
3482 case IMAGE_BOOT_CODE:
3483 img_optype = OPTYPE_REDBOOT;
3484 break;
3485 case IMAGE_OPTION_ROM_ISCSI:
3486 img_optype = OPTYPE_BIOS;
3487 break;
3488 case IMAGE_OPTION_ROM_PXE:
3489 img_optype = OPTYPE_PXE_BIOS;
3490 break;
3491 case IMAGE_OPTION_ROM_FCoE:
3492 img_optype = OPTYPE_FCOE_BIOS;
3493 break;
3494 case IMAGE_FIRMWARE_BACKUP_iSCSI:
3495 img_optype = OPTYPE_ISCSI_BACKUP;
3496 break;
3497 case IMAGE_NCSI:
3498 img_optype = OPTYPE_NCSI_FW;
3499 break;
3500 default:
3501 continue;
3502 }
3503
3504 if (img_optype == OPTYPE_REDBOOT) {
3505 redboot = be_flash_redboot(adapter, fw->data,
3506 img_offset, img_size,
3507 filehdr_size + img_hdrs_size);
3508 if (!redboot)
3509 continue;
3510 }
3511
3512 p = fw->data;
3513 p += filehdr_size + img_offset + img_hdrs_size;
3514 if (p + img_size > fw->data + fw->size)
3515 return -1;
3516
3517 status = be_flash(adapter, p, flash_cmd, img_optype, img_size);
3518 if (status) {
3519 dev_err(&adapter->pdev->dev,
3520 "Flashing section type %d failed.\n",
3521 fsec->fsec_entry[i].type);
3522 return status;
3523 }
3524 }
3525 return 0;
Ajit Khaparde3f0d4562010-02-09 01:30:35 +00003526}
3527
Padmanabh Ratnakarf67ef7b2012-07-12 03:57:09 +00003528static int lancer_wait_idle(struct be_adapter *adapter)
3529{
3530#define SLIPORT_IDLE_TIMEOUT 30
3531 u32 reg_val;
3532 int status = 0, i;
3533
3534 for (i = 0; i < SLIPORT_IDLE_TIMEOUT; i++) {
3535 reg_val = ioread32(adapter->db + PHYSDEV_CONTROL_OFFSET);
3536 if ((reg_val & PHYSDEV_CONTROL_INP_MASK) == 0)
3537 break;
3538
3539 ssleep(1);
3540 }
3541
3542 if (i == SLIPORT_IDLE_TIMEOUT)
3543 status = -1;
3544
3545 return status;
3546}
3547
/* Trigger a firmware reset on a Lancer adapter: wait for the SLI port to
 * go idle, then set the FW-reset bit in the physical-device control
 * register.  Returns 0 on success, non-zero if the port never went idle.
 */
static int lancer_fw_reset(struct be_adapter *adapter)
{
	int status = 0;

	status = lancer_wait_idle(adapter);
	if (status)
		return status;

	/* Writing this bit asks firmware to reset itself */
	iowrite32(PHYSDEV_CONTROL_FW_RESET_MASK, adapter->db +
		  PHYSDEV_CONTROL_OFFSET);

	return status;
}
3561
/* Download a firmware image to a Lancer adapter via the WRITE_OBJECT
 * command: the image is streamed in 32KB chunks to the "/prg" object,
 * then committed with a zero-length write.  Depending on the reported
 * change_status, the function may also trigger an in-band FW reset so
 * the new image becomes active.  Returns 0 on success, negative errno
 * or firmware status on failure.
 */
static int lancer_fw_download(struct be_adapter *adapter,
				const struct firmware *fw)
{
#define LANCER_FW_DOWNLOAD_CHUNK      (32 * 1024)
#define LANCER_FW_DOWNLOAD_LOCATION   "/prg"
	struct be_dma_mem flash_cmd;
	const u8 *data_ptr = NULL;
	u8 *dest_image_ptr = NULL;
	size_t image_size = 0;
	u32 chunk_size = 0;
	u32 data_written = 0;
	u32 offset = 0;
	int status = 0;
	u8 add_status = 0;
	u8 change_status;

	/* Firmware requires the image length to be a multiple of 4 */
	if (!IS_ALIGNED(fw->size, sizeof(u32))) {
		dev_err(&adapter->pdev->dev,
			"FW Image not properly aligned. "
			"Length must be 4 byte aligned.\n");
		status = -EINVAL;
		goto lancer_fw_exit;
	}

	/* One DMA buffer holds the command header plus one chunk */
	flash_cmd.size = sizeof(struct lancer_cmd_req_write_object)
				+ LANCER_FW_DOWNLOAD_CHUNK;
	flash_cmd.va = dma_alloc_coherent(&adapter->pdev->dev, flash_cmd.size,
					  &flash_cmd.dma, GFP_KERNEL);
	if (!flash_cmd.va) {
		status = -ENOMEM;
		goto lancer_fw_exit;
	}

	dest_image_ptr = flash_cmd.va +
				sizeof(struct lancer_cmd_req_write_object);
	image_size = fw->size;
	data_ptr = fw->data;

	while (image_size) {
		chunk_size = min_t(u32, image_size, LANCER_FW_DOWNLOAD_CHUNK);

		/* Copy the image chunk content. */
		memcpy(dest_image_ptr, data_ptr, chunk_size);

		status = lancer_cmd_write_object(adapter, &flash_cmd,
						 chunk_size, offset,
						 LANCER_FW_DOWNLOAD_LOCATION,
						 &data_written, &change_status,
						 &add_status);
		if (status)
			break;

		/* Advance by what firmware actually accepted, which may be
		 * less than chunk_size.
		 */
		offset += data_written;
		data_ptr += data_written;
		image_size -= data_written;
	}

	if (!status) {
		/* Commit the FW written */
		status = lancer_cmd_write_object(adapter, &flash_cmd,
						 0, offset,
						 LANCER_FW_DOWNLOAD_LOCATION,
						 &data_written, &change_status,
						 &add_status);
	}

	dma_free_coherent(&adapter->pdev->dev, flash_cmd.size, flash_cmd.va,
				flash_cmd.dma);
	if (status) {
		dev_err(&adapter->pdev->dev,
			"Firmware load error. "
			"Status code: 0x%x Additional Status: 0x%x\n",
			status, add_status);
		goto lancer_fw_exit;
	}

	if (change_status == LANCER_FW_RESET_NEEDED) {
		/* New image only activates after an in-band FW reset */
		status = lancer_fw_reset(adapter);
		if (status) {
			dev_err(&adapter->pdev->dev,
				"Adapter busy for FW reset.\n"
				"New FW will not be active.\n");
			goto lancer_fw_exit;
		}
	} else if (change_status != LANCER_NO_RESET_NEEDED) {
		dev_err(&adapter->pdev->dev,
			"System reboot required for new FW"
			" to be active\n");
	}

	dev_info(&adapter->pdev->dev, "Firmware flashed successfully\n");
lancer_fw_exit:
	return status;
}
3656
/* UFI image-file type codes, matched against the adapter generation */
#define UFI_TYPE2		2
#define UFI_TYPE3		3
#define UFI_TYPE3R		10
#define UFI_TYPE4		4
/* Classify the UFI file by its header against the adapter it is being
 * flashed onto.  fhdr->build[0] encodes the UFI generation ('2'/'3'/'4');
 * for BE3 UFIs, asic_type_rev 0x10 distinguishes the BE3-R variant.
 * Returns one of the UFI_TYPE* codes, or -1 if the UFI does not match
 * this adapter.
 */
static int be_get_ufi_type(struct be_adapter *adapter,
			   struct flash_file_hdr_g3 *fhdr)
{
	if (fhdr == NULL)
		goto be_get_ufi_exit;

	if (skyhawk_chip(adapter) && fhdr->build[0] == '4')
		return UFI_TYPE4;
	else if (BE3_chip(adapter) && fhdr->build[0] == '3') {
		if (fhdr->asic_type_rev == 0x10)
			return UFI_TYPE3R;
		else
			return UFI_TYPE3;
	} else if (BE2_chip(adapter) && fhdr->build[0] == '2')
		return UFI_TYPE2;

be_get_ufi_exit:
	dev_err(&adapter->pdev->dev,
		"UFI and Interface are not compatible for flashing\n");
	return -1;
}
3682
/* Flash a (non-Lancer) firmware UFI file: classify the UFI, then for each
 * image header with imageid == 1 dispatch to the generation-specific
 * flashing routine (Skyhawk, BE3-R, or BE3 — the latter only on pre-R
 * silicon).  Gen2 (BE2) UFIs carry no image headers and are handled after
 * the loop.  Returns 0 on success, negative on failure.
 */
static int be_fw_download(struct be_adapter *adapter, const struct firmware* fw)
{
	struct flash_file_hdr_g3 *fhdr3;
	struct image_hdr *img_hdr_ptr = NULL;
	struct be_dma_mem flash_cmd;
	const u8 *p;
	int status = 0, i = 0, num_imgs = 0, ufi_type = 0;

	/* DMA buffer reused by be_flash() for every write-flashrom cmd */
	flash_cmd.size = sizeof(struct be_cmd_write_flashrom);
	flash_cmd.va = dma_alloc_coherent(&adapter->pdev->dev, flash_cmd.size,
					  &flash_cmd.dma, GFP_KERNEL);
	if (!flash_cmd.va) {
		status = -ENOMEM;
		goto be_fw_exit;
	}

	p = fw->data;
	fhdr3 = (struct flash_file_hdr_g3 *)p;

	ufi_type = be_get_ufi_type(adapter, fhdr3);

	num_imgs = le32_to_cpu(fhdr3->num_imgs);
	for (i = 0; i < num_imgs; i++) {
		img_hdr_ptr = (struct image_hdr *)(fw->data +
				(sizeof(struct flash_file_hdr_g3) +
				 i * sizeof(struct image_hdr)));
		if (le32_to_cpu(img_hdr_ptr->imageid) == 1) {
			switch (ufi_type) {
			case UFI_TYPE4:
				status = be_flash_skyhawk(adapter, fw,
							&flash_cmd, num_imgs);
				break;
			case UFI_TYPE3R:
				status = be_flash_BEx(adapter, fw, &flash_cmd,
						      num_imgs);
				break;
			case UFI_TYPE3:
				/* Do not flash this ufi on BE3-R cards */
				if (adapter->asic_rev < 0x10)
					status = be_flash_BEx(adapter, fw,
							      &flash_cmd,
							      num_imgs);
				else {
					status = -1;
					dev_err(&adapter->pdev->dev,
						"Can't load BE3 UFI on BE3R\n");
				}
			}
		}
	}

	/* Gen2 UFIs have no image headers, so the loop above did nothing */
	if (ufi_type == UFI_TYPE2)
		status = be_flash_BEx(adapter, fw, &flash_cmd, 0);
	else if (ufi_type == -1)
		status = -1;

	dma_free_coherent(&adapter->pdev->dev, flash_cmd.size, flash_cmd.va,
			  flash_cmd.dma);
	if (status) {
		dev_err(&adapter->pdev->dev, "Firmware load error\n");
		goto be_fw_exit;
	}

	dev_info(&adapter->pdev->dev, "Firmware flashed successfully\n");

be_fw_exit:
	return status;
}
3751
3752int be_load_fw(struct be_adapter *adapter, u8 *fw_file)
3753{
3754 const struct firmware *fw;
3755 int status;
3756
3757 if (!netif_running(adapter->netdev)) {
3758 dev_err(&adapter->pdev->dev,
3759 "Firmware load not allowed (interface is down)\n");
3760 return -1;
3761 }
3762
3763 status = request_firmware(&fw, fw_file, &adapter->pdev->dev);
3764 if (status)
3765 goto fw_exit;
3766
3767 dev_info(&adapter->pdev->dev, "Flashing firmware file %s\n", fw_file);
3768
3769 if (lancer_chip(adapter))
3770 status = lancer_fw_download(adapter, fw);
3771 else
3772 status = be_fw_download(adapter, fw);
3773
Ajit Khaparde84517482009-09-04 03:12:16 +00003774fw_exit:
3775 release_firmware(fw);
3776 return status;
3777}
3778
/* net_device_ops for the be2net driver: standard open/stop/xmit plus
 * VLAN filtering and SR-IOV VF configuration callbacks.
 */
static const struct net_device_ops be_netdev_ops = {
	.ndo_open		= be_open,
	.ndo_stop		= be_close,
	.ndo_start_xmit		= be_xmit,
	.ndo_set_rx_mode	= be_set_rx_mode,
	.ndo_set_mac_address	= be_mac_addr_set,
	.ndo_change_mtu		= be_change_mtu,
	.ndo_get_stats64	= be_get_stats64,
	.ndo_validate_addr	= eth_validate_addr,
	.ndo_vlan_rx_add_vid	= be_vlan_add_vid,
	.ndo_vlan_rx_kill_vid	= be_vlan_rem_vid,
	.ndo_set_vf_mac		= be_set_vf_mac,
	.ndo_set_vf_vlan	= be_set_vf_vlan,
	.ndo_set_vf_tx_rate	= be_set_vf_tx_rate,
	.ndo_get_vf_config	= be_get_vf_config,
#ifdef CONFIG_NET_POLL_CONTROLLER
	.ndo_poll_controller	= be_netpoll,
#endif
};
3798
/* One-time net_device setup: advertise offload features (checksum, TSO,
 * VLAN tag insert/strip/filter, RX hashing on multi-queue adapters),
 * install the netdev/ethtool ops and register one NAPI context per event
 * queue.
 */
static void be_netdev_init(struct net_device *netdev)
{
	struct be_adapter *adapter = netdev_priv(netdev);
	struct be_eq_obj *eqo;
	int i;

	netdev->hw_features |= NETIF_F_SG | NETIF_F_TSO | NETIF_F_TSO6 |
		NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM | NETIF_F_RXCSUM |
		NETIF_F_HW_VLAN_CTAG_TX;
	if (be_multi_rxq(adapter))
		netdev->hw_features |= NETIF_F_RXHASH;

	/* VLAN RX strip/filter are always-on, hence in features only */
	netdev->features |= netdev->hw_features |
		NETIF_F_HW_VLAN_CTAG_RX | NETIF_F_HW_VLAN_CTAG_FILTER;

	netdev->vlan_features |= NETIF_F_SG | NETIF_F_TSO | NETIF_F_TSO6 |
		NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM;

	netdev->priv_flags |= IFF_UNICAST_FLT;

	netdev->flags |= IFF_MULTICAST;

	netif_set_gso_max_size(netdev, 65535 - ETH_HLEN);

	netdev->netdev_ops = &be_netdev_ops;

	SET_ETHTOOL_OPS(netdev, &be_ethtool_ops);

	for_all_evt_queues(adapter, eqo, i)
		netif_napi_add(netdev, &eqo->napi, be_poll, BE_NAPI_WEIGHT);
}
3830
/* Undo be_map_pci_bars(): unmap the CSR BAR (only mapped on BEx PFs)
 * and the doorbell BAR, if they were mapped.
 */
static void be_unmap_pci_bars(struct be_adapter *adapter)
{
	if (adapter->csr)
		pci_iounmap(adapter->pdev, adapter->csr);
	if (adapter->db)
		pci_iounmap(adapter->pdev, adapter->db);
}
3838
/* BAR number holding the doorbell region: BAR 0 on Lancer chips and on
 * virtual functions, BAR 4 on BE physical functions.
 */
static int db_bar(struct be_adapter *adapter)
{
	return (lancer_chip(adapter) || !be_physfn(adapter)) ? 0 : 4;
}
3846
/* Record the RoCE doorbell window (Skyhawk only) for the RoCE driver:
 * its PCI address and total length come from the doorbell BAR; only the
 * first 4096 bytes are used by this function's consumers.  No mapping is
 * actually performed here.  Always returns 0.
 */
static int be_roce_map_pci_bars(struct be_adapter *adapter)
{
	if (skyhawk_chip(adapter)) {
		adapter->roce_db.size = 4096;
		adapter->roce_db.io_addr = pci_resource_start(adapter->pdev,
							      db_bar(adapter));
		adapter->roce_db.total_size = pci_resource_len(adapter->pdev,
							       db_bar(adapter));
	}
	return 0;
}
3858
/* Map the PCI BARs the driver needs: derive the interface type from the
 * SLI_INTF config register, map the CSR BAR (BAR 2) on BEx physical
 * functions, map the doorbell BAR, and record the RoCE doorbell window.
 * Returns 0 on success, -ENOMEM if a mapping fails (unmapping anything
 * already mapped).
 */
static int be_map_pci_bars(struct be_adapter *adapter)
{
	u8 __iomem *addr;
	u32 sli_intf;

	pci_read_config_dword(adapter->pdev, SLI_INTF_REG_OFFSET, &sli_intf);
	adapter->if_type = (sli_intf & SLI_INTF_IF_TYPE_MASK) >>
				SLI_INTF_IF_TYPE_SHIFT;

	if (BEx_chip(adapter) && be_physfn(adapter)) {
		adapter->csr = pci_iomap(adapter->pdev, 2, 0);
		if (adapter->csr == NULL)
			return -ENOMEM;
	}

	addr = pci_iomap(adapter->pdev, db_bar(adapter), 0);
	if (addr == NULL)
		goto pci_map_err;
	adapter->db = addr;

	be_roce_map_pci_bars(adapter);
	return 0;

pci_map_err:
	be_unmap_pci_bars(adapter);
	return -ENOMEM;
}
3886
/* Undo be_ctrl_init(): unmap the PCI BARs and free the mailbox and
 * rx-filter DMA buffers if they were allocated.
 */
static void be_ctrl_cleanup(struct be_adapter *adapter)
{
	struct be_dma_mem *mem = &adapter->mbox_mem_alloced;

	be_unmap_pci_bars(adapter);

	if (mem->va)
		dma_free_coherent(&adapter->pdev->dev, mem->size, mem->va,
				  mem->dma);

	mem = &adapter->rx_filter;
	if (mem->va)
		dma_free_coherent(&adapter->pdev->dev, mem->size, mem->va,
				  mem->dma);
}
3902
/* Initialize the control path: read SLI family/VF info from config space,
 * map the PCI BARs, allocate the 16-byte-aligned mailbox and the rx-filter
 * DMA buffers, and set up the mailbox/MCC locks.  On failure, resources
 * acquired so far are released via the goto-cleanup chain.  Returns 0 on
 * success, negative errno on failure.
 */
static int be_ctrl_init(struct be_adapter *adapter)
{
	struct be_dma_mem *mbox_mem_alloc = &adapter->mbox_mem_alloced;
	struct be_dma_mem *mbox_mem_align = &adapter->mbox_mem;
	struct be_dma_mem *rx_filter = &adapter->rx_filter;
	u32 sli_intf;
	int status;

	pci_read_config_dword(adapter->pdev, SLI_INTF_REG_OFFSET, &sli_intf);
	adapter->sli_family = (sli_intf & SLI_INTF_FAMILY_MASK) >>
				SLI_INTF_FAMILY_SHIFT;
	adapter->virtfn = (sli_intf & SLI_INTF_FT_MASK) ? 1 : 0;

	status = be_map_pci_bars(adapter);
	if (status)
		goto done;

	/* Over-allocate by 16 so the mailbox can be aligned to 16 bytes,
	 * as the hardware requires.
	 */
	mbox_mem_alloc->size = sizeof(struct be_mcc_mailbox) + 16;
	mbox_mem_alloc->va = dma_alloc_coherent(&adapter->pdev->dev,
						mbox_mem_alloc->size,
						&mbox_mem_alloc->dma,
						GFP_KERNEL);
	if (!mbox_mem_alloc->va) {
		status = -ENOMEM;
		goto unmap_pci_bars;
	}
	mbox_mem_align->size = sizeof(struct be_mcc_mailbox);
	mbox_mem_align->va = PTR_ALIGN(mbox_mem_alloc->va, 16);
	mbox_mem_align->dma = PTR_ALIGN(mbox_mem_alloc->dma, 16);
	memset(mbox_mem_align->va, 0, sizeof(struct be_mcc_mailbox));

	rx_filter->size = sizeof(struct be_cmd_req_rx_filter);
	rx_filter->va = dma_alloc_coherent(&adapter->pdev->dev, rx_filter->size,
					   &rx_filter->dma,
					   GFP_KERNEL | __GFP_ZERO);
	if (rx_filter->va == NULL) {
		status = -ENOMEM;
		goto free_mbox;
	}

	mutex_init(&adapter->mbox_lock);
	spin_lock_init(&adapter->mcc_lock);
	spin_lock_init(&adapter->mcc_cq_lock);

	init_completion(&adapter->flash_compl);
	/* Saved state is restored on error-recovery / resume paths */
	pci_save_state(adapter->pdev);
	return 0;

free_mbox:
	dma_free_coherent(&adapter->pdev->dev, mbox_mem_alloc->size,
			  mbox_mem_alloc->va, mbox_mem_alloc->dma);

unmap_pci_bars:
	be_unmap_pci_bars(adapter);

done:
	return status;
}
3961
/* Free the DMA buffer used for firmware statistics commands, if it was
 * allocated by be_stats_init().
 */
static void be_stats_cleanup(struct be_adapter *adapter)
{
	struct be_dma_mem *cmd = &adapter->stats_cmd;

	if (cmd->va)
		dma_free_coherent(&adapter->pdev->dev, cmd->size,
				  cmd->va, cmd->dma);
}
3970
/* Allocate the DMA buffer used for firmware statistics commands.  The
 * command layout (and hence the size) differs per chip generation:
 * Lancer uses pport stats, BE2 the v0 command, BE3/Skyhawk the v1 one.
 * Returns 0 on success, -1 if the allocation fails.
 */
static int be_stats_init(struct be_adapter *adapter)
{
	struct be_dma_mem *cmd = &adapter->stats_cmd;

	if (lancer_chip(adapter))
		cmd->size = sizeof(struct lancer_cmd_req_pport_stats);
	else if (BE2_chip(adapter))
		cmd->size = sizeof(struct be_cmd_req_get_stats_v0);
	else
		/* BE3 and Skyhawk */
		cmd->size = sizeof(struct be_cmd_req_get_stats_v1);

	cmd->va = dma_alloc_coherent(&adapter->pdev->dev, cmd->size, &cmd->dma,
				     GFP_KERNEL | __GFP_ZERO);
	if (cmd->va == NULL)
		return -1;
	return 0;
}
3989
/* PCI remove callback: tear down the adapter in the reverse order of
 * probe — RoCE first, then interrupts, the recovery worker, netdev
 * unregistration, queue/interface cleanup, a final "fw clean" command,
 * stats and control-path buffers, and finally the PCI device itself.
 */
static void be_remove(struct pci_dev *pdev)
{
	struct be_adapter *adapter = pci_get_drvdata(pdev);

	if (!adapter)
		return;

	be_roce_dev_remove(adapter);
	be_intr_set(adapter, false);

	/* Must be stopped before unregister_netdev: the worker may
	 * reopen/close the netdev during error recovery.
	 */
	cancel_delayed_work_sync(&adapter->func_recovery_work);

	unregister_netdev(adapter->netdev);

	be_clear(adapter);

	/* tell fw we're done with firing cmds */
	be_cmd_fw_clean(adapter);

	be_stats_cleanup(adapter);

	be_ctrl_cleanup(adapter);

	pci_disable_pcie_error_reporting(pdev);

	pci_set_drvdata(pdev, NULL);
	pci_release_regions(pdev);
	pci_disable_device(pdev);

	free_netdev(adapter->netdev);
}
4021
Ajit Khaparde4762f6c2012-03-18 06:23:11 +00004022bool be_is_wol_supported(struct be_adapter *adapter)
4023{
4024 return ((adapter->wol_cap & BE_WOL_CAP) &&
4025 !be_is_wol_excluded(adapter)) ? true : false;
4026}
4027
/* Query the firmware's extended-FAT capabilities and return the UART
 * trace level configured for module 0.  Lancer chips do not support this
 * query and always report level 0.  Returns 0 also when the query or the
 * buffer allocation fails (best-effort).
 */
u32 be_get_fw_log_level(struct be_adapter *adapter)
{
	struct be_dma_mem extfat_cmd;
	struct be_fat_conf_params *cfgs;
	int status;
	u32 level = 0;
	int j;

	if (lancer_chip(adapter))
		return 0;

	memset(&extfat_cmd, 0, sizeof(struct be_dma_mem));
	extfat_cmd.size = sizeof(struct be_cmd_resp_get_ext_fat_caps);
	extfat_cmd.va = pci_alloc_consistent(adapter->pdev, extfat_cmd.size,
					     &extfat_cmd.dma);

	if (!extfat_cmd.va) {
		dev_err(&adapter->pdev->dev, "%s: Memory allocation failure\n",
			__func__);
		goto err;
	}

	status = be_cmd_get_ext_fat_capabilites(adapter, &extfat_cmd);
	if (!status) {
		/* Config params follow the generic response header */
		cfgs = (struct be_fat_conf_params *)(extfat_cmd.va +
						sizeof(struct be_cmd_resp_hdr));
		/* Pick the debug level of the UART trace mode, if present */
		for (j = 0; j < le32_to_cpu(cfgs->module[0].num_modes); j++) {
			if (cfgs->module[0].trace_lvl[j].mode == MODE_UART)
				level = cfgs->module[0].trace_lvl[j].dbg_lvl;
		}
	}
	pci_free_consistent(adapter->pdev, extfat_cmd.size, extfat_cmd.va,
			    extfat_cmd.dma);
err:
	return level;
}
Padmanabh Ratnakarabb93952012-10-20 06:01:41 +00004064
Sathya Perla39f1d942012-05-08 19:41:24 +00004065static int be_get_initial_config(struct be_adapter *adapter)
Sathya Perla6b7c5b92009-03-11 23:32:03 -07004066{
Sathya Perla6b7c5b92009-03-11 23:32:03 -07004067 int status;
Somnath Kotur941a77d2012-05-17 22:59:03 +00004068 u32 level;
Sathya Perla43a04fdc2009-10-14 20:21:17 +00004069
Ajit Khaparde9e1453c2011-02-20 11:42:22 +00004070 status = be_cmd_get_cntl_attributes(adapter);
4071 if (status)
4072 return status;
4073
Ajit Khaparde4762f6c2012-03-18 06:23:11 +00004074 status = be_cmd_get_acpi_wol_cap(adapter);
4075 if (status) {
4076 /* in case of a failure to get wol capabillities
4077 * check the exclusion list to determine WOL capability */
4078 if (!be_is_wol_excluded(adapter))
4079 adapter->wol_cap |= BE_WOL_CAP;
4080 }
4081
4082 if (be_is_wol_supported(adapter))
4083 adapter->wol = true;
4084
Padmanabh Ratnakar7aeb2152012-07-12 03:56:46 +00004085 /* Must be a power of 2 or else MODULO will BUG_ON */
4086 adapter->be_get_temp_freq = 64;
4087
Somnath Kotur941a77d2012-05-17 22:59:03 +00004088 level = be_get_fw_log_level(adapter);
4089 adapter->msg_enable = level <= FW_LOG_LEVEL_DEFAULT ? NETIF_MSG_HW : 0;
4090
Sathya Perla2243e2e2009-11-22 22:02:03 +00004091 return 0;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07004092}
4093
/* Attempt to recover a Lancer adapter after a SLIPORT error: wait for the
 * chip to report ready, tear down and rebuild the whole function, and
 * re-open the interface if it was running.  Returns 0 on success or a
 * negative error status.  Called from be_func_recovery_task().
 */
static int lancer_recover_func(struct be_adapter *adapter)
{
	int status;

	/* FW must reach the ready state before any teardown/re-init */
	status = lancer_test_and_set_rdy_state(adapter);
	if (status)
		goto err;

	if (netif_running(adapter->netdev))
		be_close(adapter->netdev);

	be_clear(adapter);

	/* Clear error flags before re-init so FW cmds are not short-circuited */
	adapter->hw_error = false;
	adapter->fw_timeout = false;

	status = be_setup(adapter);
	if (status)
		goto err;

	if (netif_running(adapter->netdev)) {
		status = be_open(adapter->netdev);
		if (status)
			goto err;
	}

	dev_err(&adapter->pdev->dev,
		"Adapter SLIPORT recovery succeeded\n");
	return 0;
err:
	/* NOTE(review): failure is logged only when an EEH error is also
	 * pending — confirm this condition is not inverted (logging on
	 * !eeh_error would seem more natural). */
	if (adapter->eeh_error)
		dev_err(&adapter->pdev->dev,
			"Adapter SLIPORT recovery failed\n");

	return status;
}
4130
4131static void be_func_recovery_task(struct work_struct *work)
4132{
4133 struct be_adapter *adapter =
4134 container_of(work, struct be_adapter, func_recovery_work.work);
4135 int status;
4136
4137 be_detect_error(adapter);
4138
4139 if (adapter->hw_error && lancer_chip(adapter)) {
4140
4141 if (adapter->eeh_error)
4142 goto out;
4143
4144 rtnl_lock();
4145 netif_device_detach(adapter->netdev);
4146 rtnl_unlock();
4147
4148 status = lancer_recover_func(adapter);
4149
4150 if (!status)
4151 netif_device_attach(adapter->netdev);
4152 }
4153
4154out:
4155 schedule_delayed_work(&adapter->func_recovery_work,
4156 msecs_to_jiffies(1000));
Padmanabh Ratnakard8110f62011-11-25 05:48:23 +00004157}
4158
/* Periodic (1s) housekeeping worker: fires stats requests, polls die
 * temperature, replenishes starved RX rings and adapts EQ delay.
 * Always re-arms itself.
 */
static void be_worker(struct work_struct *work)
{
	struct be_adapter *adapter =
		container_of(work, struct be_adapter, work.work);
	struct be_rx_obj *rxo;
	struct be_eq_obj *eqo;
	int i;

	/* when interrupts are not yet enabled, just reap any pending
	 * mcc completions */
	if (!netif_running(adapter->netdev)) {
		local_bh_disable();
		be_process_mcc(adapter);
		local_bh_enable();
		goto reschedule;
	}

	/* Issue a new stats request only after the previous one completed */
	if (!adapter->stats_cmd_sent) {
		if (lancer_chip(adapter))
			lancer_cmd_get_pport_stats(adapter,
						&adapter->stats_cmd);
		else
			be_cmd_get_stats(adapter, &adapter->stats_cmd);
	}

	/* Poll die temperature every be_get_temp_freq (64) ticks */
	if (MODULO(adapter->work_counter, adapter->be_get_temp_freq) == 0)
		be_cmd_get_die_temperature(adapter);

	/* Refill RX rings that ran out of posted buffers */
	for_all_rx_queues(adapter, rxo, i) {
		if (rxo->rx_post_starved) {
			rxo->rx_post_starved = false;
			be_post_rx_frags(rxo, GFP_KERNEL);
		}
	}

	/* Adaptive interrupt moderation: update each EQ's delay */
	for_all_evt_queues(adapter, eqo, i)
		be_eqd_update(adapter, eqo);

reschedule:
	adapter->work_counter++;
	schedule_delayed_work(&adapter->work, msecs_to_jiffies(1000));
}
4201
Sathya Perla39f1d942012-05-08 19:41:24 +00004202static bool be_reset_required(struct be_adapter *adapter)
4203{
Sathya Perlad79c0a22012-06-05 19:37:22 +00004204 return be_find_vfs(adapter, ENABLED) > 0 ? false : true;
Sathya Perla39f1d942012-05-08 19:41:24 +00004205}
4206
Sathya Perlad3791422012-09-28 04:39:44 +00004207static char *mc_name(struct be_adapter *adapter)
4208{
4209 if (adapter->function_mode & FLEX10_MODE)
4210 return "FLEX10";
4211 else if (adapter->function_mode & VNIC_MODE)
4212 return "vNIC";
4213 else if (adapter->function_mode & UMC_ENABLED)
4214 return "UMC";
4215 else
4216 return "";
4217}
4218
/* "PF" for the physical function, "VF" for a virtual function (probe
 * banner only).
 */
static inline char *func_name(struct be_adapter *adapter)
{
	if (be_physfn(adapter))
		return "PF";

	return "VF";
}
4223
/* PCI probe: bring up one NIC function — enable the device, set DMA
 * masks, initialize the mailbox/control path, sync with FW, allocate
 * queues via be_setup() and register the netdev.  On any failure the
 * goto-chain below unwinds exactly what was set up.  Returns 0 or a
 * negative errno.
 */
static int be_probe(struct pci_dev *pdev, const struct pci_device_id *pdev_id)
{
	int status = 0;
	struct be_adapter *adapter;
	struct net_device *netdev;
	char port_name;

	status = pci_enable_device(pdev);
	if (status)
		goto do_none;

	status = pci_request_regions(pdev, DRV_NAME);
	if (status)
		goto disable_dev;
	pci_set_master(pdev);

	netdev = alloc_etherdev_mqs(sizeof(*adapter), MAX_TX_QS, MAX_RX_QS);
	if (netdev == NULL) {
		status = -ENOMEM;
		goto rel_reg;
	}
	adapter = netdev_priv(netdev);
	adapter->pdev = pdev;
	pci_set_drvdata(pdev, adapter);
	adapter->netdev = netdev;
	SET_NETDEV_DEV(netdev, &pdev->dev);

	/* Prefer 64-bit DMA; fall back to a 32-bit mask if unsupported */
	status = dma_set_mask(&pdev->dev, DMA_BIT_MASK(64));
	if (!status) {
		status = dma_set_coherent_mask(&pdev->dev, DMA_BIT_MASK(64));
		if (status < 0) {
			dev_err(&pdev->dev, "dma_set_coherent_mask failed\n");
			goto free_netdev;
		}
		netdev->features |= NETIF_F_HIGHDMA;
	} else {
		status = dma_set_mask(&pdev->dev, DMA_BIT_MASK(32));
		if (status) {
			dev_err(&pdev->dev, "Could not set PCI DMA Mask\n");
			goto free_netdev;
		}
	}

	/* AER is optional: failure to enable it is only logged */
	status = pci_enable_pcie_error_reporting(pdev);
	if (status)
		dev_err(&pdev->dev, "Could not use PCIe error reporting\n");

	status = be_ctrl_init(adapter);
	if (status)
		goto free_netdev;

	/* sync up with fw's ready state */
	if (be_physfn(adapter)) {
		status = be_fw_wait_ready(adapter);
		if (status)
			goto ctrl_clean;
	}

	/* FLR the function unless VFs are already enabled (see
	 * be_reset_required()) */
	if (be_reset_required(adapter)) {
		status = be_cmd_reset_function(adapter);
		if (status)
			goto ctrl_clean;

		/* Wait for interrupts to quiesce after an FLR */
		msleep(100);
	}

	/* Allow interrupts for other ULPs running on NIC function */
	be_intr_set(adapter, true);

	/* tell fw we're ready to fire cmds */
	status = be_cmd_fw_init(adapter);
	if (status)
		goto ctrl_clean;

	status = be_stats_init(adapter);
	if (status)
		goto ctrl_clean;

	status = be_get_initial_config(adapter);
	if (status)
		goto stats_clean;

	INIT_DELAYED_WORK(&adapter->work, be_worker);
	INIT_DELAYED_WORK(&adapter->func_recovery_work, be_func_recovery_task);
	adapter->rx_fc = adapter->tx_fc = true;

	status = be_setup(adapter);
	if (status)
		goto stats_clean;

	be_netdev_init(netdev);
	status = register_netdev(netdev);
	if (status != 0)
		goto unsetup;

	be_roce_dev_add(adapter);

	/* Kick off the periodic error-detection/recovery worker */
	schedule_delayed_work(&adapter->func_recovery_work,
			      msecs_to_jiffies(1000));

	be_cmd_query_port_name(adapter, &port_name);

	dev_info(&pdev->dev, "%s: %s %s port %c\n", nic_name(pdev),
		 func_name(adapter), mc_name(adapter), port_name);

	return 0;

	/* Error unwind: each label undoes everything set up before its
	 * corresponding failure point, in reverse order */
unsetup:
	be_clear(adapter);
stats_clean:
	be_stats_cleanup(adapter);
ctrl_clean:
	be_ctrl_cleanup(adapter);
free_netdev:
	free_netdev(netdev);
	pci_set_drvdata(pdev, NULL);
rel_reg:
	pci_release_regions(pdev);
disable_dev:
	pci_disable_device(pdev);
do_none:
	dev_err(&pdev->dev, "%s initialization failed\n", nic_name(pdev));
	return status;
}
4349
/* Legacy PM suspend: arm WOL if enabled, stop the recovery worker, close
 * and tear down the interface, then power the device down.  Always
 * returns 0.
 */
static int be_suspend(struct pci_dev *pdev, pm_message_t state)
{
	struct be_adapter *adapter = pci_get_drvdata(pdev);
	struct net_device *netdev = adapter->netdev;

	if (adapter->wol)
		be_setup_wol(adapter, true);

	/* Stop the recovery worker before tearing anything down */
	cancel_delayed_work_sync(&adapter->func_recovery_work);

	netif_device_detach(netdev);
	if (netif_running(netdev)) {
		rtnl_lock();
		be_close(netdev);
		rtnl_unlock();
	}
	be_clear(adapter);

	pci_save_state(pdev);
	pci_disable_device(pdev);
	pci_set_power_state(pdev, pci_choose_state(pdev, state));
	return 0;
}
4373
/* Legacy PM resume: re-enable the device, re-init FW cmds, rebuild the
 * function with be_setup(), re-open the interface if it was running and
 * restart the recovery worker.  Returns 0 or a negative error status.
 */
static int be_resume(struct pci_dev *pdev)
{
	int status = 0;
	struct be_adapter *adapter = pci_get_drvdata(pdev);
	struct net_device *netdev = adapter->netdev;

	netif_device_detach(netdev);

	status = pci_enable_device(pdev);
	if (status)
		return status;

	pci_set_power_state(pdev, 0);
	pci_restore_state(pdev);

	/* tell fw we're ready to fire cmds */
	status = be_cmd_fw_init(adapter);
	if (status)
		return status;

	/* NOTE(review): be_setup()'s return value is not checked here,
	 * unlike every other caller — confirm whether a failure should
	 * abort the resume. */
	be_setup(adapter);
	if (netif_running(netdev)) {
		rtnl_lock();
		be_open(netdev);
		rtnl_unlock();
	}

	schedule_delayed_work(&adapter->func_recovery_work,
			      msecs_to_jiffies(1000));
	netif_device_attach(netdev);

	if (adapter->wol)
		be_setup_wol(adapter, false);

	return 0;
}
4410
/*
 * An FLR will stop BE from DMAing any data.
 */
/* Shutdown hook: stop both workers, detach the netdev and FLR the
 * function so the HW stops all DMA before the system goes down.
 */
static void be_shutdown(struct pci_dev *pdev)
{
	struct be_adapter *adapter = pci_get_drvdata(pdev);

	/* Probe may have failed before drvdata was set */
	if (!adapter)
		return;

	cancel_delayed_work_sync(&adapter->work);
	cancel_delayed_work_sync(&adapter->func_recovery_work);

	netif_device_detach(adapter->netdev);

	be_cmd_reset_function(adapter);

	pci_disable_device(pdev);
}
4430
/* EEH/AER error_detected callback: mark the EEH error (which also makes
 * the recovery worker stand down), detach and close the interface, tear
 * down the function and disable the device.  Returns DISCONNECT on
 * permanent failure, otherwise NEED_RESET to request a slot reset.
 */
static pci_ers_result_t be_eeh_err_detected(struct pci_dev *pdev,
				pci_channel_state_t state)
{
	struct be_adapter *adapter = pci_get_drvdata(pdev);
	struct net_device *netdev = adapter->netdev;

	dev_err(&adapter->pdev->dev, "EEH error detected\n");

	adapter->eeh_error = true;

	/* Keep be_func_recovery_task() from racing with EEH handling */
	cancel_delayed_work_sync(&adapter->func_recovery_work);

	rtnl_lock();
	netif_device_detach(netdev);
	rtnl_unlock();

	if (netif_running(netdev)) {
		rtnl_lock();
		be_close(netdev);
		rtnl_unlock();
	}
	be_clear(adapter);

	if (state == pci_channel_io_perm_failure)
		return PCI_ERS_RESULT_DISCONNECT;

	pci_disable_device(pdev);

	/* The error could cause the FW to trigger a flash debug dump.
	 * Resetting the card while flash dump is in progress
	 * can cause it not to recover; wait for it to finish.
	 * Wait only for first function as it is needed only once per
	 * adapter.
	 */
	if (pdev->devfn == 0)
		ssleep(30);

	return PCI_ERS_RESULT_NEED_RESET;
}
4470
/* EEH/AER slot_reset callback: clear driver error state, re-enable the
 * device, restore PCI state and wait for FW readiness.  Returns
 * RECOVERED when the card comes back, DISCONNECT otherwise.
 */
static pci_ers_result_t be_eeh_reset(struct pci_dev *pdev)
{
	struct be_adapter *adapter = pci_get_drvdata(pdev);
	int status;

	dev_info(&adapter->pdev->dev, "EEH reset\n");
	/* Clear hw_error/eeh_error so FW cmds are accepted again */
	be_clear_all_error(adapter);

	status = pci_enable_device(pdev);
	if (status)
		return PCI_ERS_RESULT_DISCONNECT;

	pci_set_master(pdev);
	pci_set_power_state(pdev, 0);
	pci_restore_state(pdev);

	/* Check if card is ok and fw is ready */
	dev_info(&adapter->pdev->dev,
		 "Waiting for FW to be ready after EEH reset\n");
	status = be_fw_wait_ready(adapter);
	if (status)
		return PCI_ERS_RESULT_DISCONNECT;

	pci_cleanup_aer_uncorrect_error_status(pdev);
	return PCI_ERS_RESULT_RECOVERED;
}
4497
/* EEH/AER resume callback: after a successful slot reset, FLR the
 * function, re-init FW cmds, rebuild the function, re-open the interface
 * and restart the recovery worker.  Failures are only logged — the EEH
 * framework provides no error return from resume.
 */
static void be_eeh_resume(struct pci_dev *pdev)
{
	int status = 0;
	struct be_adapter *adapter = pci_get_drvdata(pdev);
	struct net_device *netdev = adapter->netdev;

	dev_info(&adapter->pdev->dev, "EEH resume\n");

	pci_save_state(pdev);

	status = be_cmd_reset_function(adapter);
	if (status)
		goto err;

	/* tell fw we're ready to fire cmds */
	status = be_cmd_fw_init(adapter);
	if (status)
		goto err;

	status = be_setup(adapter);
	if (status)
		goto err;

	if (netif_running(netdev)) {
		status = be_open(netdev);
		if (status)
			goto err;
	}

	schedule_delayed_work(&adapter->func_recovery_work,
			      msecs_to_jiffies(1000));
	netif_device_attach(netdev);
	return;
err:
	dev_err(&adapter->pdev->dev, "EEH resume failed\n");
}
4534
/* PCI error-recovery (EEH/AER) callbacks wired into be_driver below */
static const struct pci_error_handlers be_eeh_handlers = {
	.error_detected = be_eeh_err_detected,
	.slot_reset = be_eeh_reset,
	.resume = be_eeh_resume,
};
4540
/* PCI driver descriptor: probe/remove, legacy PM suspend/resume,
 * shutdown and EEH error handlers for all devices in be_dev_ids */
static struct pci_driver be_driver = {
	.name = DRV_NAME,
	.id_table = be_dev_ids,
	.probe = be_probe,
	.remove = be_remove,
	.suspend = be_suspend,
	.resume = be_resume,
	.shutdown = be_shutdown,
	.err_handler = &be_eeh_handlers
};
4551
4552static int __init be_init_module(void)
4553{
Joe Perches8e95a202009-12-03 07:58:21 +00004554 if (rx_frag_size != 8192 && rx_frag_size != 4096 &&
4555 rx_frag_size != 2048) {
Sathya Perla6b7c5b92009-03-11 23:32:03 -07004556 printk(KERN_WARNING DRV_NAME
4557 " : Module param rx_frag_size must be 2048/4096/8192."
4558 " Using 2048\n");
4559 rx_frag_size = 2048;
4560 }
Sathya Perla6b7c5b92009-03-11 23:32:03 -07004561
Sathya Perla6b7c5b92009-03-11 23:32:03 -07004562 return pci_register_driver(&be_driver);
4563}
4564module_init(be_init_module);
/* Module unload: unregister the PCI driver, which triggers be_remove()
 * for every bound device.
 */
static void __exit be_exit_module(void)
{
	pci_unregister_driver(&be_driver);
}
module_exit(be_exit_module);