blob: 26e222f8143371ea7bb2f99e7197c81a071fc772 [file] [log] [blame]
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001/*
Vasundhara Volamc7bb15a2013-03-06 20:05:05 +00002 * Copyright (C) 2005 - 2013 Emulex
Sathya Perla6b7c5b92009-03-11 23:32:03 -07003 * All rights reserved.
4 *
5 * This program is free software; you can redistribute it and/or
6 * modify it under the terms of the GNU General Public License version 2
7 * as published by the Free Software Foundation. The full GNU General
8 * Public License is included in this distribution in the file called COPYING.
9 *
10 * Contact Information:
Ajit Khaparded2145cd2011-03-16 08:20:46 +000011 * linux-drivers@emulex.com
Sathya Perla6b7c5b92009-03-11 23:32:03 -070012 *
Ajit Khaparded2145cd2011-03-16 08:20:46 +000013 * Emulex
14 * 3333 Susan Street
15 * Costa Mesa, CA 92626
Sathya Perla6b7c5b92009-03-11 23:32:03 -070016 */
17
Paul Gortmaker70c71602011-05-22 16:47:17 -040018#include <linux/prefetch.h>
Paul Gortmaker9d9779e2011-07-03 15:21:01 -040019#include <linux/module.h>
Sathya Perla6b7c5b92009-03-11 23:32:03 -070020#include "be.h"
Sathya Perla8788fdc2009-07-27 22:52:03 +000021#include "be_cmds.h"
Stephen Hemminger65f71b82009-03-27 00:25:24 -070022#include <asm/div64.h>
Sathya Perlad6b6d982012-09-05 01:56:48 +000023#include <linux/aer.h>
Sathya Perla6b7c5b92009-03-11 23:32:03 -070024
25MODULE_VERSION(DRV_VER);
26MODULE_DEVICE_TABLE(pci, be_dev_ids);
27MODULE_DESCRIPTION(DRV_DESC " " DRV_VER);
Sarveshwar Bandi00d3d512013-01-28 04:17:01 +000028MODULE_AUTHOR("Emulex Corporation");
Sathya Perla6b7c5b92009-03-11 23:32:03 -070029MODULE_LICENSE("GPL");
30
/* Number of SR-IOV virtual functions to enable (read-only via sysfs) */
static unsigned int num_vfs;
module_param(num_vfs, uint, S_IRUGO);
MODULE_PARM_DESC(num_vfs, "Number of PCI VFs to initialize");

/* Size of each RX fragment buffer posted to the hardware */
static ushort rx_frag_size = 2048;
module_param(rx_frag_size, ushort, S_IRUGO);
MODULE_PARM_DESC(rx_frag_size, "Size of a fragment that holds rcvd data.");
38
/* PCI IDs (BladeEngine and OneConnect families) claimed by this driver */
static DEFINE_PCI_DEVICE_TABLE(be_dev_ids) = {
	{ PCI_DEVICE(BE_VENDOR_ID, BE_DEVICE_ID1) },
	{ PCI_DEVICE(BE_VENDOR_ID, BE_DEVICE_ID2) },
	{ PCI_DEVICE(BE_VENDOR_ID, OC_DEVICE_ID1) },
	{ PCI_DEVICE(BE_VENDOR_ID, OC_DEVICE_ID2) },
	{ PCI_DEVICE(EMULEX_VENDOR_ID, OC_DEVICE_ID3)},
	{ PCI_DEVICE(EMULEX_VENDOR_ID, OC_DEVICE_ID4)},
	{ PCI_DEVICE(EMULEX_VENDOR_ID, OC_DEVICE_ID5)},
	{ PCI_DEVICE(EMULEX_VENDOR_ID, OC_DEVICE_ID6)},
	{ 0 }
};
MODULE_DEVICE_TABLE(pci, be_dev_ids);
/* UE Status Low CSR: one description string per bit position, used when
 * decoding unrecoverable-error status for log messages.
 */
static const char * const ue_status_low_desc[] = {
	"CEV",
	"CTX",
	"DBUF",
	"ERX",
	"Host",
	"MPU",
	"NDMA",
	"PTC ",
	"RDMA ",
	"RXF ",
	"RXIPS ",
	"RXULP0 ",
	"RXULP1 ",
	"RXULP2 ",
	"TIM ",
	"TPOST ",
	"TPRE ",
	"TXIPS ",
	"TXULP0 ",
	"TXULP1 ",
	"UC ",
	"WDMA ",
	"TXULP2 ",
	"HOST1 ",
	"P0_OB_LINK ",
	"P1_OB_LINK ",
	"HOST_GPIO ",
	"MBOX ",
	"AXGMAC0",
	"AXGMAC1",
	"JTAG",
	"MPU_INTPEND"
};
/* UE Status High CSR: one description string per bit position */
static const char * const ue_status_hi_desc[] = {
	"LPCMEMHOST",
	"MGMT_MAC",
	"PCS0ONLINE",
	"MPU_IRAM",
	"PCS1ONLINE",
	"PCTL0",
	"PCTL1",
	"PMEM",
	"RR",
	"TXPB",
	"RXPP",
	"XAUI",
	"TXP",
	"ARM",
	"IPC",
	"HOST2",
	"HOST3",
	"HOST4",
	"HOST5",
	"HOST6",
	"HOST7",
	"HOST8",
	"HOST9",
	"NETC",
	"Unknown",
	"Unknown",
	"Unknown",
	"Unknown",
	"Unknown",
	"Unknown",
	"Unknown",
	"Unknown"
};
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700121
Sathya Perla752961a2011-10-24 02:45:03 +0000122/* Is BE in a multi-channel mode */
/* Returns true when the adapter is in any multi-channel mode
 * (Flex10, vNIC or UMC).
 */
static inline bool be_is_mc(struct be_adapter *adapter) {
	return (adapter->function_mode &
		(FLEX10_MODE | VNIC_MODE | UMC_ENABLED)) != 0;
}
128
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700129static void be_queue_free(struct be_adapter *adapter, struct be_queue_info *q)
130{
131 struct be_dma_mem *mem = &q->dma_mem;
Sathya Perla1cfafab2012-02-23 18:50:15 +0000132 if (mem->va) {
Ivan Vecera2b7bceb2011-02-02 08:05:12 +0000133 dma_free_coherent(&adapter->pdev->dev, mem->size, mem->va,
134 mem->dma);
Sathya Perla1cfafab2012-02-23 18:50:15 +0000135 mem->va = NULL;
136 }
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700137}
138
139static int be_queue_alloc(struct be_adapter *adapter, struct be_queue_info *q,
140 u16 len, u16 entry_size)
141{
142 struct be_dma_mem *mem = &q->dma_mem;
143
144 memset(q, 0, sizeof(*q));
145 q->len = len;
146 q->entry_size = entry_size;
147 mem->size = len * entry_size;
Ivan Vecera2b7bceb2011-02-02 08:05:12 +0000148 mem->va = dma_alloc_coherent(&adapter->pdev->dev, mem->size, &mem->dma,
Joe Perches1f9061d22013-03-15 07:23:58 +0000149 GFP_KERNEL | __GFP_ZERO);
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700150 if (!mem->va)
Sathya Perla10ef9ab2012-02-09 18:05:27 +0000151 return -ENOMEM;
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700152 return 0;
153}
154
/* Enable/disable host interrupts by toggling the HOSTINTR bit in the
 * membar control register via PCI config space. Writes only when the
 * requested state differs from the current one.
 */
static void be_reg_intr_set(struct be_adapter *adapter, bool enable)
{
	u32 reg, enabled;

	pci_read_config_dword(adapter->pdev, PCICFG_MEMBAR_CTRL_INT_CTRL_OFFSET,
			      &reg);
	enabled = reg & MEMBAR_CTRL_INT_CTRL_HOSTINTR_MASK;

	if (!enabled && enable)
		reg |= MEMBAR_CTRL_INT_CTRL_HOSTINTR_MASK;
	else if (enabled && !enable)
		reg &= ~MEMBAR_CTRL_INT_CTRL_HOSTINTR_MASK;
	else
		return;		/* already in the requested state */

	pci_write_config_dword(adapter->pdev,
			       PCICFG_MEMBAR_CTRL_INT_CTRL_OFFSET, reg);
}
173
/* Enable/disable host interrupts. Tries the FW INTR_SET command first
 * and falls back to direct register access if the command fails.
 */
static void be_intr_set(struct be_adapter *adapter, bool enable)
{
	int status = 0;

	/* On lancer interrupts can't be controlled via this register */
	if (lancer_chip(adapter))
		return;

	/* Avoid all HW access once an EEH error has been detected */
	if (adapter->eeh_error)
		return;

	status = be_cmd_intr_set(adapter, enable);
	if (status)
		be_reg_intr_set(adapter, enable);
}
189
/* Ring the RX doorbell: tell HW that 'posted' buffers were added to RX
 * queue 'qid'.
 */
static void be_rxq_notify(struct be_adapter *adapter, u16 qid, u16 posted)
{
	u32 val = 0;
	val |= qid & DB_RQ_RING_ID_MASK;
	val |= posted << DB_RQ_NUM_POSTED_SHIFT;

	/* Ensure ring-entry writes are visible before the doorbell write */
	wmb();
	iowrite32(val, adapter->db + DB_RQ_OFFSET);
}
199
/* Ring the TX doorbell: tell HW that 'posted' WRBs were added to the TX
 * queue of 'txo'. The doorbell offset is per-TX-object.
 */
static void be_txq_notify(struct be_adapter *adapter, struct be_tx_obj *txo,
			  u16 posted)
{
	u32 val = 0;
	val |= txo->q.id & DB_TXULP_RING_ID_MASK;
	val |= (posted & DB_TXULP_NUM_POSTED_MASK) << DB_TXULP_NUM_POSTED_SHIFT;

	/* Ensure WRB writes are visible before the doorbell write */
	wmb();
	iowrite32(val, adapter->db + txo->db_offset);
}
210
/* Notify HW of 'num_popped' processed EQ entries on EQ 'qid'; optionally
 * re-arm the EQ and/or clear the interrupt. No-op after an EEH error.
 */
static void be_eq_notify(struct be_adapter *adapter, u16 qid,
			 bool arm, bool clear_int, u16 num_popped)
{
	u32 val = 0;
	val |= qid & DB_EQ_RING_ID_MASK;
	/* High-order ring-id bits go into a separate field */
	val |= ((qid & DB_EQ_RING_ID_EXT_MASK) <<
		DB_EQ_RING_ID_EXT_MASK_SHIFT);

	if (adapter->eeh_error)
		return;

	if (arm)
		val |= 1 << DB_EQ_REARM_SHIFT;
	if (clear_int)
		val |= 1 << DB_EQ_CLR_SHIFT;
	val |= 1 << DB_EQ_EVNT_SHIFT;
	val |= num_popped << DB_EQ_NUM_POPPED_SHIFT;
	iowrite32(val, adapter->db + DB_EQ_OFFSET);
}
230
/* Notify HW of 'num_popped' processed CQ entries on CQ 'qid'; optionally
 * re-arm the CQ. No-op after an EEH error.
 */
void be_cq_notify(struct be_adapter *adapter, u16 qid, bool arm, u16 num_popped)
{
	u32 val = 0;
	val |= qid & DB_CQ_RING_ID_MASK;
	/* High-order ring-id bits go into a separate field */
	val |= ((qid & DB_CQ_RING_ID_EXT_MASK) <<
		DB_CQ_RING_ID_EXT_MASK_SHIFT);

	if (adapter->eeh_error)
		return;

	if (arm)
		val |= 1 << DB_CQ_REARM_SHIFT;
	val |= num_popped << DB_CQ_NUM_POPPED_SHIFT;
	iowrite32(val, adapter->db + DB_CQ_OFFSET);
}
246
/* ndo_set_mac_address handler: program a new MAC address on the
 * interface. Behavior differs by function type:
 *  - BE VF: the MAC is already set by the PF; only netdev->dev_addr is
 *    updated, and only if the requested MAC matches the one the PF gave.
 *  - Lancer VF: the currently-active provisioned MAC (if any) is looked
 *    up so it can be deleted after the new one is added.
 * Returns 0 on success or a negative errno / FW status on failure.
 */
static int be_mac_addr_set(struct net_device *netdev, void *p)
{
	struct be_adapter *adapter = netdev_priv(netdev);
	struct sockaddr *addr = p;
	int status = 0;
	u8 current_mac[ETH_ALEN];
	u32 pmac_id = adapter->pmac_id[0];
	bool active_mac = true;

	if (!is_valid_ether_addr(addr->sa_data))
		return -EADDRNOTAVAIL;

	/* For BE VF, MAC address is already activated by PF.
	 * Hence only operation left is updating netdev->devaddr.
	 * Update it if user is passing the same MAC which was used
	 * during configuring VF MAC from PF(Hypervisor).
	 */
	if (!lancer_chip(adapter) && !be_physfn(adapter)) {
		status = be_cmd_mac_addr_query(adapter, current_mac,
					       false, adapter->if_handle, 0);
		if (!status && !memcmp(current_mac, addr->sa_data, ETH_ALEN))
			goto done;
		else
			goto err;
	}

	/* Nothing to do if the requested MAC is already in use */
	if (!memcmp(addr->sa_data, netdev->dev_addr, ETH_ALEN))
		goto done;

	/* For Lancer check if any MAC is active.
	 * If active, get its mac id.
	 */
	if (lancer_chip(adapter) && !be_physfn(adapter))
		be_cmd_get_mac_from_list(adapter, current_mac, &active_mac,
					 &pmac_id, 0);

	/* Add the new MAC before deleting the old one (make-before-break) */
	status = be_cmd_pmac_add(adapter, (u8 *)addr->sa_data,
				 adapter->if_handle,
				 &adapter->pmac_id[0], 0);

	if (status)
		goto err;

	if (active_mac)
		be_cmd_pmac_del(adapter, adapter->if_handle,
				pmac_id, 0);
done:
	memcpy(netdev->dev_addr, addr->sa_data, netdev->addr_len);
	return 0;
err:
	dev_err(&adapter->pdev->dev, "MAC %pM set Failed\n", addr->sa_data);
	return status;
}
300
Sathya Perlaca34fe32012-11-06 17:48:56 +0000301/* BE2 supports only v0 cmd */
302static void *hw_stats_from_cmd(struct be_adapter *adapter)
303{
304 if (BE2_chip(adapter)) {
305 struct be_cmd_resp_get_stats_v0 *cmd = adapter->stats_cmd.va;
306
307 return &cmd->hw_stats;
308 } else {
309 struct be_cmd_resp_get_stats_v1 *cmd = adapter->stats_cmd.va;
310
311 return &cmd->hw_stats;
312 }
313}
314
315/* BE2 supports only v0 cmd */
316static void *be_erx_stats_from_cmd(struct be_adapter *adapter)
317{
318 if (BE2_chip(adapter)) {
319 struct be_hw_stats_v0 *hw_stats = hw_stats_from_cmd(adapter);
320
321 return &hw_stats->erx;
322 } else {
323 struct be_hw_stats_v1 *hw_stats = hw_stats_from_cmd(adapter);
324
325 return &hw_stats->erx;
326 }
327}
328
/* Copy v0-layout (BE2) HW stats from the FW response buffer into the
 * driver's unified drv_stats structure, byte-swapping from LE first.
 */
static void populate_be_v0_stats(struct be_adapter *adapter)
{
	struct be_hw_stats_v0 *hw_stats = hw_stats_from_cmd(adapter);
	struct be_pmem_stats *pmem_sts = &hw_stats->pmem;
	struct be_rxf_stats_v0 *rxf_stats = &hw_stats->rxf;
	/* per-port counters for this function's port */
	struct be_port_rxf_stats_v0 *port_stats =
		&rxf_stats->port[adapter->port_num];
	struct be_drv_stats *drvs = &adapter->drv_stats;

	be_dws_le_to_cpu(hw_stats, sizeof(*hw_stats));
	drvs->rx_pause_frames = port_stats->rx_pause_frames;
	drvs->rx_crc_errors = port_stats->rx_crc_errors;
	drvs->rx_control_frames = port_stats->rx_control_frames;
	drvs->rx_in_range_errors = port_stats->rx_in_range_errors;
	drvs->rx_frame_too_long = port_stats->rx_frame_too_long;
	drvs->rx_dropped_runt = port_stats->rx_dropped_runt;
	drvs->rx_ip_checksum_errs = port_stats->rx_ip_checksum_errs;
	drvs->rx_tcp_checksum_errs = port_stats->rx_tcp_checksum_errs;
	drvs->rx_udp_checksum_errs = port_stats->rx_udp_checksum_errs;
	drvs->rxpp_fifo_overflow_drop = port_stats->rx_fifo_overflow;
	drvs->rx_dropped_tcp_length = port_stats->rx_dropped_tcp_length;
	drvs->rx_dropped_too_small = port_stats->rx_dropped_too_small;
	drvs->rx_dropped_too_short = port_stats->rx_dropped_too_short;
	drvs->rx_out_range_errors = port_stats->rx_out_range_errors;
	drvs->rx_input_fifo_overflow_drop = port_stats->rx_input_fifo_overflow;
	drvs->rx_dropped_header_too_small =
		port_stats->rx_dropped_header_too_small;
	/* v0 reports address- and vlan-filter drops separately; combine */
	drvs->rx_address_filtered =
		port_stats->rx_address_filtered +
		port_stats->rx_vlan_filtered;
	drvs->rx_alignment_symbol_errors =
		port_stats->rx_alignment_symbol_errors;

	drvs->tx_pauseframes = port_stats->tx_pauseframes;
	drvs->tx_controlframes = port_stats->tx_controlframes;

	/* jabber counters are per-port in the rxf block on v0 */
	if (adapter->port_num)
		drvs->jabber_events = rxf_stats->port1_jabber_events;
	else
		drvs->jabber_events = rxf_stats->port0_jabber_events;
	drvs->rx_drops_no_pbuf = rxf_stats->rx_drops_no_pbuf;
	drvs->rx_drops_no_erx_descr = rxf_stats->rx_drops_no_erx_descr;
	drvs->forwarded_packets = rxf_stats->forwarded_packets;
	drvs->rx_drops_mtu = rxf_stats->rx_drops_mtu;
	drvs->rx_drops_no_tpre_descr = rxf_stats->rx_drops_no_tpre_descr;
	drvs->rx_drops_too_many_frags = rxf_stats->rx_drops_too_many_frags;
	adapter->drv_stats.eth_red_drops = pmem_sts->eth_red_drops;
}
377
/* Copy v1-layout (BE3/Skyhawk) HW stats from the FW response buffer into
 * the driver's unified drv_stats structure, byte-swapping from LE first.
 */
static void populate_be_v1_stats(struct be_adapter *adapter)
{
	struct be_hw_stats_v1 *hw_stats = hw_stats_from_cmd(adapter);
	struct be_pmem_stats *pmem_sts = &hw_stats->pmem;
	struct be_rxf_stats_v1 *rxf_stats = &hw_stats->rxf;
	/* per-port counters for this function's port */
	struct be_port_rxf_stats_v1 *port_stats =
		&rxf_stats->port[adapter->port_num];
	struct be_drv_stats *drvs = &adapter->drv_stats;

	be_dws_le_to_cpu(hw_stats, sizeof(*hw_stats));
	drvs->pmem_fifo_overflow_drop = port_stats->pmem_fifo_overflow_drop;
	drvs->rx_priority_pause_frames = port_stats->rx_priority_pause_frames;
	drvs->rx_pause_frames = port_stats->rx_pause_frames;
	drvs->rx_crc_errors = port_stats->rx_crc_errors;
	drvs->rx_control_frames = port_stats->rx_control_frames;
	drvs->rx_in_range_errors = port_stats->rx_in_range_errors;
	drvs->rx_frame_too_long = port_stats->rx_frame_too_long;
	drvs->rx_dropped_runt = port_stats->rx_dropped_runt;
	drvs->rx_ip_checksum_errs = port_stats->rx_ip_checksum_errs;
	drvs->rx_tcp_checksum_errs = port_stats->rx_tcp_checksum_errs;
	drvs->rx_udp_checksum_errs = port_stats->rx_udp_checksum_errs;
	drvs->rx_dropped_tcp_length = port_stats->rx_dropped_tcp_length;
	drvs->rx_dropped_too_small = port_stats->rx_dropped_too_small;
	drvs->rx_dropped_too_short = port_stats->rx_dropped_too_short;
	drvs->rx_out_range_errors = port_stats->rx_out_range_errors;
	drvs->rx_dropped_header_too_small =
		port_stats->rx_dropped_header_too_small;
	drvs->rx_input_fifo_overflow_drop =
		port_stats->rx_input_fifo_overflow_drop;
	drvs->rx_address_filtered = port_stats->rx_address_filtered;
	drvs->rx_alignment_symbol_errors =
		port_stats->rx_alignment_symbol_errors;
	drvs->rxpp_fifo_overflow_drop = port_stats->rxpp_fifo_overflow_drop;
	drvs->tx_pauseframes = port_stats->tx_pauseframes;
	drvs->tx_controlframes = port_stats->tx_controlframes;
	drvs->tx_priority_pauseframes = port_stats->tx_priority_pauseframes;
	/* jabber counter is per-port in v1 (unlike v0) */
	drvs->jabber_events = port_stats->jabber_events;
	drvs->rx_drops_no_pbuf = rxf_stats->rx_drops_no_pbuf;
	drvs->rx_drops_no_erx_descr = rxf_stats->rx_drops_no_erx_descr;
	drvs->forwarded_packets = rxf_stats->forwarded_packets;
	drvs->rx_drops_mtu = rxf_stats->rx_drops_mtu;
	drvs->rx_drops_no_tpre_descr = rxf_stats->rx_drops_no_tpre_descr;
	drvs->rx_drops_too_many_frags = rxf_stats->rx_drops_too_many_frags;
	adapter->drv_stats.eth_red_drops = pmem_sts->eth_red_drops;
}
423
/* Copy Lancer per-physical-port (pport) stats from the FW response
 * buffer into the driver's unified drv_stats structure, byte-swapping
 * from LE first. Lancer splits 64-bit counters into _lo/_hi halves; only
 * the low words are consumed here.
 */
static void populate_lancer_stats(struct be_adapter *adapter)
{

	struct be_drv_stats *drvs = &adapter->drv_stats;
	struct lancer_pport_stats *pport_stats =
					pport_stats_from_cmd(adapter);

	be_dws_le_to_cpu(pport_stats, sizeof(*pport_stats));
	drvs->rx_pause_frames = pport_stats->rx_pause_frames_lo;
	drvs->rx_crc_errors = pport_stats->rx_crc_errors_lo;
	drvs->rx_control_frames = pport_stats->rx_control_frames_lo;
	drvs->rx_in_range_errors = pport_stats->rx_in_range_errors;
	drvs->rx_frame_too_long = pport_stats->rx_frames_too_long_lo;
	drvs->rx_dropped_runt = pport_stats->rx_dropped_runt;
	drvs->rx_ip_checksum_errs = pport_stats->rx_ip_checksum_errors;
	drvs->rx_tcp_checksum_errs = pport_stats->rx_tcp_checksum_errors;
	drvs->rx_udp_checksum_errs = pport_stats->rx_udp_checksum_errors;
	drvs->rx_dropped_tcp_length =
				pport_stats->rx_dropped_invalid_tcp_length;
	drvs->rx_dropped_too_small = pport_stats->rx_dropped_too_small;
	drvs->rx_dropped_too_short = pport_stats->rx_dropped_too_short;
	drvs->rx_out_range_errors = pport_stats->rx_out_of_range_errors;
	drvs->rx_dropped_header_too_small =
				pport_stats->rx_dropped_header_too_small;
	/* Lancer has a single fifo-overflow counter; it feeds both
	 * input_fifo and rxpp_fifo driver counters below.
	 */
	drvs->rx_input_fifo_overflow_drop = pport_stats->rx_fifo_overflow;
	drvs->rx_address_filtered =
					pport_stats->rx_address_filtered +
					pport_stats->rx_vlan_filtered;
	drvs->rx_alignment_symbol_errors = pport_stats->rx_symbol_errors_lo;
	drvs->rxpp_fifo_overflow_drop = pport_stats->rx_fifo_overflow;
	drvs->tx_pauseframes = pport_stats->tx_pause_frames_lo;
	drvs->tx_controlframes = pport_stats->tx_control_frames_lo;
	drvs->jabber_events = pport_stats->rx_jabbers;
	drvs->forwarded_packets = pport_stats->num_forwards_lo;
	drvs->rx_drops_mtu = pport_stats->rx_drops_mtu_lo;
	drvs->rx_drops_too_many_frags =
				pport_stats->rx_drops_too_many_frags_lo;
}
Ajit Khaparde89a88ab2011-05-16 07:36:18 +0000462
/* Fold a 16-bit HW counter reading 'val' into the 32-bit accumulator
 * *acc. The low 16 bits of *acc track the raw HW value; each wrap-around
 * of the HW counter (val dropping below the last seen value) carries
 * 65536 into the high half, so *acc grows monotonically.
 *
 * Fix: macro parameters are now parenthesized so lo()/hi() are safe for
 * any argument expression (standard macro hygiene).
 */
static void accumulate_16bit_val(u32 *acc, u16 val)
{
#define lo(x) ((x) & 0xFFFF)
#define hi(x) ((x) & 0xFFFF0000)
	bool wrapped = val < lo(*acc);
	u32 newacc = hi(*acc) + val;

	if (wrapped)
		newacc += 65536;
	/* Single store so concurrent readers never see a torn value */
	ACCESS_ONCE(*acc) = newacc;
}
474
Ajit Khapardea6c578e2013-05-01 09:37:27 +0000475void populate_erx_stats(struct be_adapter *adapter,
476 struct be_rx_obj *rxo,
477 u32 erx_stat)
478{
479 if (!BEx_chip(adapter))
480 rx_stats(rxo)->rx_drops_no_frags = erx_stat;
481 else
482 /* below erx HW counter can actually wrap around after
483 * 65535. Driver accumulates a 32-bit value
484 */
485 accumulate_16bit_val(&rx_stats(rxo)->rx_drops_no_frags,
486 (u16)erx_stat);
487}
488
Ajit Khaparde89a88ab2011-05-16 07:36:18 +0000489void be_parse_stats(struct be_adapter *adapter)
490{
Sathya Perlaac124ff2011-07-25 19:10:14 +0000491 struct be_erx_stats_v1 *erx = be_erx_stats_from_cmd(adapter);
492 struct be_rx_obj *rxo;
493 int i;
Ajit Khapardea6c578e2013-05-01 09:37:27 +0000494 u32 erx_stat;
Sathya Perlaac124ff2011-07-25 19:10:14 +0000495
Sathya Perlaca34fe32012-11-06 17:48:56 +0000496 if (lancer_chip(adapter)) {
497 populate_lancer_stats(adapter);
Selvin Xavier005d5692011-05-16 07:36:35 +0000498 } else {
Sathya Perlaca34fe32012-11-06 17:48:56 +0000499 if (BE2_chip(adapter))
500 populate_be_v0_stats(adapter);
501 else
502 /* for BE3 and Skyhawk */
503 populate_be_v1_stats(adapter);
Sathya Perlaac124ff2011-07-25 19:10:14 +0000504
Sathya Perlaca34fe32012-11-06 17:48:56 +0000505 /* as erx_v1 is longer than v0, ok to use v1 for v0 access */
506 for_all_rx_queues(adapter, rxo, i) {
Ajit Khapardea6c578e2013-05-01 09:37:27 +0000507 erx_stat = erx->rx_drops_no_fragments[rxo->q.id];
508 populate_erx_stats(adapter, rxo, erx_stat);
Sathya Perlaca34fe32012-11-06 17:48:56 +0000509 }
Sathya Perla09c1c682011-08-22 19:41:53 +0000510 }
Ajit Khaparde89a88ab2011-05-16 07:36:18 +0000511}
512
/* ndo_get_stats64 handler: aggregate per-queue packet/byte counters and
 * driver error counters into 'stats'. Per-queue 64-bit counters are read
 * under a u64_stats seqcount retry loop so a concurrent writer on a
 * 32-bit host cannot produce a torn read.
 */
static struct rtnl_link_stats64 *be_get_stats64(struct net_device *netdev,
						struct rtnl_link_stats64 *stats)
{
	struct be_adapter *adapter = netdev_priv(netdev);
	struct be_drv_stats *drvs = &adapter->drv_stats;
	struct be_rx_obj *rxo;
	struct be_tx_obj *txo;
	u64 pkts, bytes;
	unsigned int start;
	int i;

	for_all_rx_queues(adapter, rxo, i) {
		const struct be_rx_stats *rx_stats = rx_stats(rxo);
		/* retry until a consistent snapshot of pkts/bytes is read */
		do {
			start = u64_stats_fetch_begin_bh(&rx_stats->sync);
			pkts = rx_stats(rxo)->rx_pkts;
			bytes = rx_stats(rxo)->rx_bytes;
		} while (u64_stats_fetch_retry_bh(&rx_stats->sync, start));
		stats->rx_packets += pkts;
		stats->rx_bytes += bytes;
		stats->multicast += rx_stats(rxo)->rx_mcast_pkts;
		stats->rx_dropped += rx_stats(rxo)->rx_drops_no_skbs +
					rx_stats(rxo)->rx_drops_no_frags;
	}

	for_all_tx_queues(adapter, txo, i) {
		const struct be_tx_stats *tx_stats = tx_stats(txo);
		/* retry until a consistent snapshot of pkts/bytes is read */
		do {
			start = u64_stats_fetch_begin_bh(&tx_stats->sync);
			pkts = tx_stats(txo)->tx_pkts;
			bytes = tx_stats(txo)->tx_bytes;
		} while (u64_stats_fetch_retry_bh(&tx_stats->sync, start));
		stats->tx_packets += pkts;
		stats->tx_bytes += bytes;
	}

	/* bad pkts received */
	stats->rx_errors = drvs->rx_crc_errors +
		drvs->rx_alignment_symbol_errors +
		drvs->rx_in_range_errors +
		drvs->rx_out_range_errors +
		drvs->rx_frame_too_long +
		drvs->rx_dropped_too_small +
		drvs->rx_dropped_too_short +
		drvs->rx_dropped_header_too_small +
		drvs->rx_dropped_tcp_length +
		drvs->rx_dropped_runt;

	/* detailed rx errors */
	stats->rx_length_errors = drvs->rx_in_range_errors +
		drvs->rx_out_range_errors +
		drvs->rx_frame_too_long;

	stats->rx_crc_errors = drvs->rx_crc_errors;

	/* frame alignment errors */
	stats->rx_frame_errors = drvs->rx_alignment_symbol_errors;

	/* receiver fifo overrun */
	/* drops_no_pbuf is no per i/f, it's per BE card */
	stats->rx_fifo_errors = drvs->rxpp_fifo_overflow_drop +
				drvs->rx_input_fifo_overflow_drop +
				drvs->rx_drops_no_pbuf;
	return stats;
}
578
Ajit Khapardeb236916a2011-12-30 12:15:40 +0000579void be_link_status_update(struct be_adapter *adapter, u8 link_status)
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700580{
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700581 struct net_device *netdev = adapter->netdev;
582
Ajit Khapardeb236916a2011-12-30 12:15:40 +0000583 if (!(adapter->flags & BE_FLAGS_LINK_STATUS_INIT)) {
Sathya Perlaea172a02011-08-02 19:57:42 +0000584 netif_carrier_off(netdev);
Ajit Khapardeb236916a2011-12-30 12:15:40 +0000585 adapter->flags |= BE_FLAGS_LINK_STATUS_INIT;
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700586 }
Ajit Khapardeb236916a2011-12-30 12:15:40 +0000587
588 if ((link_status & LINK_STATUS_MASK) == LINK_UP)
589 netif_carrier_on(netdev);
590 else
591 netif_carrier_off(netdev);
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700592}
593
/* Update per-TX-queue counters after queueing a TX request. Writes are
 * bracketed by u64_stats_update_begin/end so 64-bit readers on 32-bit
 * hosts see consistent values.
 */
static void be_tx_stats_update(struct be_tx_obj *txo,
			u32 wrb_cnt, u32 copied, u32 gso_segs, bool stopped)
{
	struct be_tx_stats *stats = tx_stats(txo);

	u64_stats_update_begin(&stats->sync);
	stats->tx_reqs++;
	stats->tx_wrbs += wrb_cnt;
	stats->tx_bytes += copied;
	/* a GSO skb counts as gso_segs wire packets; others count as one */
	stats->tx_pkts += (gso_segs ? gso_segs : 1);
	if (stopped)
		stats->tx_stops++;
	u64_stats_update_end(&stats->sync);
}
608
609/* Determine number of WRB entries needed to xmit data in an skb */
Sathya Perlafe6d2a32010-11-21 23:25:50 +0000610static u32 wrb_cnt_for_skb(struct be_adapter *adapter, struct sk_buff *skb,
611 bool *dummy)
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700612{
David S. Millerebc8d2a2009-06-09 01:01:31 -0700613 int cnt = (skb->len > skb->data_len);
614
615 cnt += skb_shinfo(skb)->nr_frags;
616
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700617 /* to account for hdr wrb */
618 cnt++;
Sathya Perlafe6d2a32010-11-21 23:25:50 +0000619 if (lancer_chip(adapter) || !(cnt & 1)) {
620 *dummy = false;
621 } else {
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700622 /* add a dummy to make it an even num */
623 cnt++;
624 *dummy = true;
Sathya Perlafe6d2a32010-11-21 23:25:50 +0000625 }
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700626 BUG_ON(cnt > BE_MAX_TX_FRAG_COUNT);
627 return cnt;
628}
629
/* Fill one WRB with a fragment's DMA address (split hi/lo) and length.
 * The length is masked to the width of the WRB frag_len field.
 */
static inline void wrb_fill(struct be_eth_wrb *wrb, u64 addr, int len)
{
	wrb->frag_pa_hi = upper_32_bits(addr);
	wrb->frag_pa_lo = addr & 0xFFFFFFFF;
	wrb->frag_len = len & ETH_WRB_FRAG_LEN_MASK;
	wrb->rsvd0 = 0;
}
637
Ajit Khaparde1ded1322011-12-09 13:53:17 +0000638static inline u16 be_get_tx_vlan_tag(struct be_adapter *adapter,
639 struct sk_buff *skb)
640{
641 u8 vlan_prio;
642 u16 vlan_tag;
643
644 vlan_tag = vlan_tx_tag_get(skb);
645 vlan_prio = (vlan_tag & VLAN_PRIO_MASK) >> VLAN_PRIO_SHIFT;
646 /* If vlan priority provided by OS is NOT in available bmap */
647 if (!(adapter->vlan_prio_bmap & (1 << vlan_prio)))
648 vlan_tag = (vlan_tag & ~VLAN_PRIO_MASK) |
649 adapter->recommended_prio;
650
651 return vlan_tag;
652}
653
/* Build the TX header WRB for 'skb': offload flags (LSO/checksum), VLAN
 * insertion, total WRB count and payload length, encoded via AMAP bit
 * fields.
 */
static void wrb_fill_hdr(struct be_adapter *adapter, struct be_eth_hdr_wrb *hdr,
		struct sk_buff *skb, u32 wrb_cnt, u32 len, bool skip_hw_vlan)
{
	u16 vlan_tag;

	memset(hdr, 0, sizeof(*hdr));

	AMAP_SET_BITS(struct amap_eth_hdr_wrb, crc, hdr, 1);

	if (skb_is_gso(skb)) {
		AMAP_SET_BITS(struct amap_eth_hdr_wrb, lso, hdr, 1);
		AMAP_SET_BITS(struct amap_eth_hdr_wrb, lso_mss,
			hdr, skb_shinfo(skb)->gso_size);
		/* lso6 flag is needed for IPv6 TSO, except on Lancer */
		if (skb_is_gso_v6(skb) && !lancer_chip(adapter))
			AMAP_SET_BITS(struct amap_eth_hdr_wrb, lso6, hdr, 1);
	} else if (skb->ip_summed == CHECKSUM_PARTIAL) {
		if (is_tcp_pkt(skb))
			AMAP_SET_BITS(struct amap_eth_hdr_wrb, tcpcs, hdr, 1);
		else if (is_udp_pkt(skb))
			AMAP_SET_BITS(struct amap_eth_hdr_wrb, udpcs, hdr, 1);
	}

	if (vlan_tx_tag_present(skb)) {
		AMAP_SET_BITS(struct amap_eth_hdr_wrb, vlan, hdr, 1);
		vlan_tag = be_get_tx_vlan_tag(adapter, skb);
		AMAP_SET_BITS(struct amap_eth_hdr_wrb, vlan_tag, hdr, vlan_tag);
	}

	/* To skip HW VLAN tagging: evt = 1, compl = 0 */
	AMAP_SET_BITS(struct amap_eth_hdr_wrb, complete, hdr, !skip_hw_vlan);
	AMAP_SET_BITS(struct amap_eth_hdr_wrb, event, hdr, 1);
	AMAP_SET_BITS(struct amap_eth_hdr_wrb, num_wrb, hdr, wrb_cnt);
	AMAP_SET_BITS(struct amap_eth_hdr_wrb, len, hdr, len);
}
688
Ivan Vecera2b7bceb2011-02-02 08:05:12 +0000689static void unmap_tx_frag(struct device *dev, struct be_eth_wrb *wrb,
Sathya Perla7101e112010-03-22 20:41:12 +0000690 bool unmap_single)
691{
692 dma_addr_t dma;
693
694 be_dws_le_to_cpu(wrb, sizeof(*wrb));
695
696 dma = (u64)wrb->frag_pa_hi << 32 | (u64)wrb->frag_pa_lo;
FUJITA Tomonorib681ee72010-04-04 21:40:18 +0000697 if (wrb->frag_len) {
Sathya Perla7101e112010-03-22 20:41:12 +0000698 if (unmap_single)
Ivan Vecera2b7bceb2011-02-02 08:05:12 +0000699 dma_unmap_single(dev, dma, wrb->frag_len,
700 DMA_TO_DEVICE);
Sathya Perla7101e112010-03-22 20:41:12 +0000701 else
Ivan Vecera2b7bceb2011-02-02 08:05:12 +0000702 dma_unmap_page(dev, dma, wrb->frag_len, DMA_TO_DEVICE);
Sathya Perla7101e112010-03-22 20:41:12 +0000703 }
704}
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700705
Sathya Perla3c8def92011-06-12 20:01:58 +0000706static int make_tx_wrbs(struct be_adapter *adapter, struct be_queue_info *txq,
Ajit Khapardebc0c3402013-04-24 11:52:50 +0000707 struct sk_buff *skb, u32 wrb_cnt, bool dummy_wrb,
708 bool skip_hw_vlan)
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700709{
Sathya Perla7101e112010-03-22 20:41:12 +0000710 dma_addr_t busaddr;
711 int i, copied = 0;
Ivan Vecera2b7bceb2011-02-02 08:05:12 +0000712 struct device *dev = &adapter->pdev->dev;
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700713 struct sk_buff *first_skb = skb;
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700714 struct be_eth_wrb *wrb;
715 struct be_eth_hdr_wrb *hdr;
Sathya Perla7101e112010-03-22 20:41:12 +0000716 bool map_single = false;
717 u16 map_head;
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700718
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700719 hdr = queue_head_node(txq);
720 queue_head_inc(txq);
Sathya Perla7101e112010-03-22 20:41:12 +0000721 map_head = txq->head;
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700722
David S. Millerebc8d2a2009-06-09 01:01:31 -0700723 if (skb->len > skb->data_len) {
Eric Dumazete743d312010-04-14 15:59:40 -0700724 int len = skb_headlen(skb);
Ivan Vecera2b7bceb2011-02-02 08:05:12 +0000725 busaddr = dma_map_single(dev, skb->data, len, DMA_TO_DEVICE);
726 if (dma_mapping_error(dev, busaddr))
Sathya Perla7101e112010-03-22 20:41:12 +0000727 goto dma_err;
728 map_single = true;
David S. Millerebc8d2a2009-06-09 01:01:31 -0700729 wrb = queue_head_node(txq);
730 wrb_fill(wrb, busaddr, len);
731 be_dws_cpu_to_le(wrb, sizeof(*wrb));
732 queue_head_inc(txq);
733 copied += len;
734 }
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700735
David S. Millerebc8d2a2009-06-09 01:01:31 -0700736 for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
Eric Dumazet9e903e02011-10-18 21:00:24 +0000737 const struct skb_frag_struct *frag =
David S. Millerebc8d2a2009-06-09 01:01:31 -0700738 &skb_shinfo(skb)->frags[i];
Ian Campbellb061b392011-08-29 23:18:23 +0000739 busaddr = skb_frag_dma_map(dev, frag, 0,
Eric Dumazet9e903e02011-10-18 21:00:24 +0000740 skb_frag_size(frag), DMA_TO_DEVICE);
Ivan Vecera2b7bceb2011-02-02 08:05:12 +0000741 if (dma_mapping_error(dev, busaddr))
Sathya Perla7101e112010-03-22 20:41:12 +0000742 goto dma_err;
David S. Millerebc8d2a2009-06-09 01:01:31 -0700743 wrb = queue_head_node(txq);
Eric Dumazet9e903e02011-10-18 21:00:24 +0000744 wrb_fill(wrb, busaddr, skb_frag_size(frag));
David S. Millerebc8d2a2009-06-09 01:01:31 -0700745 be_dws_cpu_to_le(wrb, sizeof(*wrb));
746 queue_head_inc(txq);
Eric Dumazet9e903e02011-10-18 21:00:24 +0000747 copied += skb_frag_size(frag);
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700748 }
749
750 if (dummy_wrb) {
751 wrb = queue_head_node(txq);
752 wrb_fill(wrb, 0, 0);
753 be_dws_cpu_to_le(wrb, sizeof(*wrb));
754 queue_head_inc(txq);
755 }
756
Ajit Khapardebc0c3402013-04-24 11:52:50 +0000757 wrb_fill_hdr(adapter, hdr, first_skb, wrb_cnt, copied, skip_hw_vlan);
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700758 be_dws_cpu_to_le(hdr, sizeof(*hdr));
759
760 return copied;
Sathya Perla7101e112010-03-22 20:41:12 +0000761dma_err:
762 txq->head = map_head;
763 while (copied) {
764 wrb = queue_head_node(txq);
Ivan Vecera2b7bceb2011-02-02 08:05:12 +0000765 unmap_tx_frag(dev, wrb, map_single);
Sathya Perla7101e112010-03-22 20:41:12 +0000766 map_single = false;
767 copied -= wrb->frag_len;
768 queue_head_inc(txq);
769 }
770 return 0;
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700771}
772
Somnath Kotur93040ae2012-06-26 22:32:10 +0000773static struct sk_buff *be_insert_vlan_in_pkt(struct be_adapter *adapter,
Ajit Khapardebc0c3402013-04-24 11:52:50 +0000774 struct sk_buff *skb,
775 bool *skip_hw_vlan)
Somnath Kotur93040ae2012-06-26 22:32:10 +0000776{
777 u16 vlan_tag = 0;
778
779 skb = skb_share_check(skb, GFP_ATOMIC);
780 if (unlikely(!skb))
781 return skb;
782
Sarveshwar Bandiefee8e82013-05-13 20:28:20 +0000783 if (vlan_tx_tag_present(skb))
Somnath Kotur93040ae2012-06-26 22:32:10 +0000784 vlan_tag = be_get_tx_vlan_tag(adapter, skb);
Sarveshwar Bandiefee8e82013-05-13 20:28:20 +0000785 else if (qnq_async_evt_rcvd(adapter) && adapter->pvid)
786 vlan_tag = adapter->pvid;
Ajit Khapardebc0c3402013-04-24 11:52:50 +0000787
788 if (vlan_tag) {
David S. Miller58717682013-04-30 03:50:54 -0400789 skb = __vlan_put_tag(skb, htons(ETH_P_8021Q), vlan_tag);
Ajit Khapardebc0c3402013-04-24 11:52:50 +0000790 if (unlikely(!skb))
791 return skb;
Ajit Khapardebc0c3402013-04-24 11:52:50 +0000792 skb->vlan_tci = 0;
Sarveshwar Bandiefee8e82013-05-13 20:28:20 +0000793 if (skip_hw_vlan)
794 *skip_hw_vlan = true;
Ajit Khapardebc0c3402013-04-24 11:52:50 +0000795 }
796
797 /* Insert the outer VLAN, if any */
798 if (adapter->qnq_vid) {
799 vlan_tag = adapter->qnq_vid;
David S. Miller58717682013-04-30 03:50:54 -0400800 skb = __vlan_put_tag(skb, htons(ETH_P_8021Q), vlan_tag);
Ajit Khapardebc0c3402013-04-24 11:52:50 +0000801 if (unlikely(!skb))
802 return skb;
803 if (skip_hw_vlan)
804 *skip_hw_vlan = true;
805 }
806
Somnath Kotur93040ae2012-06-26 22:32:10 +0000807 return skb;
808}
809
Ajit Khapardebc0c3402013-04-24 11:52:50 +0000810static bool be_ipv6_exthdr_check(struct sk_buff *skb)
811{
812 struct ethhdr *eh = (struct ethhdr *)skb->data;
813 u16 offset = ETH_HLEN;
814
815 if (eh->h_proto == htons(ETH_P_IPV6)) {
816 struct ipv6hdr *ip6h = (struct ipv6hdr *)(skb->data + offset);
817
818 offset += sizeof(struct ipv6hdr);
819 if (ip6h->nexthdr != NEXTHDR_TCP &&
820 ip6h->nexthdr != NEXTHDR_UDP) {
821 struct ipv6_opt_hdr *ehdr =
822 (struct ipv6_opt_hdr *) (skb->data + offset);
823
824 /* offending pkt: 2nd byte following IPv6 hdr is 0xff */
825 if (ehdr->hdrlen == 0xff)
826 return true;
827 }
828 }
829 return false;
830}
831
832static int be_vlan_tag_tx_chk(struct be_adapter *adapter, struct sk_buff *skb)
833{
834 return vlan_tx_tag_present(skb) || adapter->pvid || adapter->qnq_vid;
835}
836
Sathya Perlaee9c7992013-05-22 23:04:55 +0000837static int be_ipv6_tx_stall_chk(struct be_adapter *adapter,
838 struct sk_buff *skb)
Ajit Khapardebc0c3402013-04-24 11:52:50 +0000839{
Sathya Perlaee9c7992013-05-22 23:04:55 +0000840 return BE3_chip(adapter) && be_ipv6_exthdr_check(skb);
Ajit Khapardebc0c3402013-04-24 11:52:50 +0000841}
842
Sathya Perlaee9c7992013-05-22 23:04:55 +0000843static struct sk_buff *be_xmit_workarounds(struct be_adapter *adapter,
844 struct sk_buff *skb,
845 bool *skip_hw_vlan)
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700846{
Ajit Khaparded2cb6ce2013-04-24 11:53:08 +0000847 struct vlan_ethhdr *veh = (struct vlan_ethhdr *)skb->data;
Sathya Perlaee9c7992013-05-22 23:04:55 +0000848 unsigned int eth_hdr_len;
849 struct iphdr *ip;
Somnath Kotur93040ae2012-06-26 22:32:10 +0000850
Somnath Kotur48265662013-05-26 21:08:47 +0000851 /* Lancer ASIC has a bug wherein packets that are 32 bytes or less
852 * may cause a transmit stall on that port. So the work-around is to
853 * pad such packets to a 36-byte length.
854 */
855 if (unlikely(lancer_chip(adapter) && skb->len <= 32)) {
856 if (skb_padto(skb, 36))
857 goto tx_drop;
858 skb->len = 36;
859 }
860
Ajit Khaparde1297f9d2013-04-24 11:52:28 +0000861 /* For padded packets, BE HW modifies tot_len field in IP header
862 * incorrecly when VLAN tag is inserted by HW.
Ajit Khaparde1ded1322011-12-09 13:53:17 +0000863 */
Sathya Perlaee9c7992013-05-22 23:04:55 +0000864 eth_hdr_len = ntohs(skb->protocol) == ETH_P_8021Q ?
865 VLAN_ETH_HLEN : ETH_HLEN;
866 if (skb->len <= 60 && vlan_tx_tag_present(skb) &&
867 is_ipv4_pkt(skb)) {
Somnath Kotur93040ae2012-06-26 22:32:10 +0000868 ip = (struct iphdr *)ip_hdr(skb);
869 pskb_trim(skb, eth_hdr_len + ntohs(ip->tot_len));
870 }
871
Ajit Khaparded2cb6ce2013-04-24 11:53:08 +0000872 /* If vlan tag is already inlined in the packet, skip HW VLAN
873 * tagging in UMC mode
874 */
875 if ((adapter->function_mode & UMC_ENABLED) &&
876 veh->h_vlan_proto == htons(ETH_P_8021Q))
Sathya Perlaee9c7992013-05-22 23:04:55 +0000877 *skip_hw_vlan = true;
Ajit Khaparded2cb6ce2013-04-24 11:53:08 +0000878
Somnath Kotur93040ae2012-06-26 22:32:10 +0000879 /* HW has a bug wherein it will calculate CSUM for VLAN
880 * pkts even though it is disabled.
881 * Manually insert VLAN in pkt.
882 */
883 if (skb->ip_summed != CHECKSUM_PARTIAL &&
Sathya Perlaee9c7992013-05-22 23:04:55 +0000884 vlan_tx_tag_present(skb)) {
885 skb = be_insert_vlan_in_pkt(adapter, skb, skip_hw_vlan);
Ajit Khapardebc0c3402013-04-24 11:52:50 +0000886 if (unlikely(!skb))
887 goto tx_drop;
888 }
889
890 /* HW may lockup when VLAN HW tagging is requested on
891 * certain ipv6 packets. Drop such pkts if the HW workaround to
892 * skip HW tagging is not enabled by FW.
893 */
894 if (unlikely(be_ipv6_tx_stall_chk(adapter, skb) &&
Sathya Perlaee9c7992013-05-22 23:04:55 +0000895 (adapter->pvid || adapter->qnq_vid) &&
896 !qnq_async_evt_rcvd(adapter)))
Ajit Khapardebc0c3402013-04-24 11:52:50 +0000897 goto tx_drop;
898
899 /* Manual VLAN tag insertion to prevent:
900 * ASIC lockup when the ASIC inserts VLAN tag into
901 * certain ipv6 packets. Insert VLAN tags in driver,
902 * and set event, completion, vlan bits accordingly
903 * in the Tx WRB.
904 */
905 if (be_ipv6_tx_stall_chk(adapter, skb) &&
906 be_vlan_tag_tx_chk(adapter, skb)) {
Sathya Perlaee9c7992013-05-22 23:04:55 +0000907 skb = be_insert_vlan_in_pkt(adapter, skb, skip_hw_vlan);
Ajit Khaparde1ded1322011-12-09 13:53:17 +0000908 if (unlikely(!skb))
909 goto tx_drop;
Ajit Khaparde1ded1322011-12-09 13:53:17 +0000910 }
911
Sathya Perlaee9c7992013-05-22 23:04:55 +0000912 return skb;
913tx_drop:
914 dev_kfree_skb_any(skb);
915 return NULL;
916}
917
/* ndo_start_xmit handler: apply Tx workarounds, build the wrb chain,
 * stop the subqueue if it is about to fill, and ring the doorbell.
 * Always returns NETDEV_TX_OK; on any failure the skb is freed here.
 */
static netdev_tx_t be_xmit(struct sk_buff *skb, struct net_device *netdev)
{
	struct be_adapter *adapter = netdev_priv(netdev);
	struct be_tx_obj *txo = &adapter->tx_obj[skb_get_queue_mapping(skb)];
	struct be_queue_info *txq = &txo->q;
	bool dummy_wrb, stopped = false;
	u32 wrb_cnt = 0, copied = 0;
	bool skip_hw_vlan = false;
	u32 start = txq->head;	/* rewind point if wrb creation fails */

	skb = be_xmit_workarounds(adapter, skb, &skip_hw_vlan);
	if (!skb)
		return NETDEV_TX_OK;	/* pkt dropped by a workaround */

	wrb_cnt = wrb_cnt_for_skb(adapter, skb, &dummy_wrb);

	copied = make_tx_wrbs(adapter, txq, skb, wrb_cnt, dummy_wrb,
			skip_hw_vlan);
	if (copied) {
		int gso_segs = skb_shinfo(skb)->gso_segs;

		/* record the sent skb in the sent_skb table */
		BUG_ON(txo->sent_skb_list[start]);
		txo->sent_skb_list[start] = skb;

		/* Ensure txq has space for the next skb; Else stop the queue
		 * *BEFORE* ringing the tx doorbell, so that we serialze the
		 * tx compls of the current transmit which'll wake up the queue
		 */
		atomic_add(wrb_cnt, &txq->used);
		if ((BE_MAX_TX_FRAG_COUNT + atomic_read(&txq->used)) >=
								txq->len) {
			netif_stop_subqueue(netdev, skb_get_queue_mapping(skb));
			stopped = true;
		}

		be_txq_notify(adapter, txo, wrb_cnt);

		be_tx_stats_update(txo, wrb_cnt, copied, gso_segs, stopped);
	} else {
		/* DMA mapping failed: rewind the queue and drop the pkt */
		txq->head = start;
		dev_kfree_skb_any(skb);
	}
	return NETDEV_TX_OK;
}
963
964static int be_change_mtu(struct net_device *netdev, int new_mtu)
965{
966 struct be_adapter *adapter = netdev_priv(netdev);
967 if (new_mtu < BE_MIN_MTU ||
Ajit Khaparde34a89b82010-02-09 01:32:43 +0000968 new_mtu > (BE_MAX_JUMBO_FRAME_SIZE -
969 (ETH_HLEN + ETH_FCS_LEN))) {
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700970 dev_info(&adapter->pdev->dev,
971 "MTU must be between %d and %d bytes\n",
Ajit Khaparde34a89b82010-02-09 01:32:43 +0000972 BE_MIN_MTU,
973 (BE_MAX_JUMBO_FRAME_SIZE - (ETH_HLEN + ETH_FCS_LEN)));
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700974 return -EINVAL;
975 }
976 dev_info(&adapter->pdev->dev, "MTU changed from %d to %d bytes\n",
977 netdev->mtu, new_mtu);
978 netdev->mtu = new_mtu;
979 return 0;
980}
981
/*
 * A max of 64 (BE_NUM_VLANS_SUPPORTED) vlans can be configured in BE.
 * If the user configures more, place BE in vlan promiscuous mode.
 */
static int be_vid_config(struct be_adapter *adapter)
{
	u16 vids[BE_NUM_VLANS_SUPPORTED];
	u16 num = 0, i;
	int status = 0;

	/* No need to further configure vids if in promiscuous mode */
	if (adapter->promiscuous)
		return 0;

	if (adapter->vlans_added > adapter->max_vlans)
		goto set_vlan_promisc;

	/* Construct VLAN Table to give to HW */
	for (i = 0; i < VLAN_N_VID; i++)
		if (adapter->vlan_tag[i])
			vids[num++] = cpu_to_le16(i);

	status = be_cmd_vlan_config(adapter, adapter->if_handle,
			vids, num, 1, 0);

	/* Set to VLAN promisc mode as setting VLAN filter failed */
	if (status) {
		dev_info(&adapter->pdev->dev, "Exhausted VLAN HW filters.\n");
		dev_info(&adapter->pdev->dev, "Disabling HW VLAN filtering.\n");
		goto set_vlan_promisc;
	}

	return status;

set_vlan_promisc:
	/* Accept all VLANs in HW instead of per-vid filtering */
	status = be_cmd_vlan_config(adapter, adapter->if_handle,
			NULL, 0, 1, 1);
	return status;
}
1021
Patrick McHardy80d5c362013-04-19 02:04:28 +00001022static int be_vlan_add_vid(struct net_device *netdev, __be16 proto, u16 vid)
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001023{
1024 struct be_adapter *adapter = netdev_priv(netdev);
Ajit Khaparde80817cb2011-12-30 12:15:12 +00001025 int status = 0;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001026
Padmanabh Ratnakara85e9982012-10-20 06:02:40 +00001027 if (!lancer_chip(adapter) && !be_physfn(adapter)) {
Ajit Khaparde80817cb2011-12-30 12:15:12 +00001028 status = -EINVAL;
1029 goto ret;
1030 }
Sarveshwar Bandiba343c72010-03-31 02:56:12 +00001031
Padmanabh Ratnakara85e9982012-10-20 06:02:40 +00001032 /* Packets with VID 0 are always received by Lancer by default */
1033 if (lancer_chip(adapter) && vid == 0)
1034 goto ret;
1035
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001036 adapter->vlan_tag[vid] = 1;
Ajit Khaparde82903e42010-02-09 01:34:57 +00001037 if (adapter->vlans_added <= (adapter->max_vlans + 1))
Sathya Perla10329df2012-06-05 19:37:18 +00001038 status = be_vid_config(adapter);
Jiri Pirko8e586132011-12-08 19:52:37 -05001039
Ajit Khaparde80817cb2011-12-30 12:15:12 +00001040 if (!status)
1041 adapter->vlans_added++;
1042 else
1043 adapter->vlan_tag[vid] = 0;
1044ret:
1045 return status;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001046}
1047
Patrick McHardy80d5c362013-04-19 02:04:28 +00001048static int be_vlan_rem_vid(struct net_device *netdev, __be16 proto, u16 vid)
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001049{
1050 struct be_adapter *adapter = netdev_priv(netdev);
Ajit Khaparde80817cb2011-12-30 12:15:12 +00001051 int status = 0;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001052
Padmanabh Ratnakara85e9982012-10-20 06:02:40 +00001053 if (!lancer_chip(adapter) && !be_physfn(adapter)) {
Ajit Khaparde80817cb2011-12-30 12:15:12 +00001054 status = -EINVAL;
1055 goto ret;
1056 }
Sarveshwar Bandiba343c72010-03-31 02:56:12 +00001057
Padmanabh Ratnakara85e9982012-10-20 06:02:40 +00001058 /* Packets with VID 0 are always received by Lancer by default */
1059 if (lancer_chip(adapter) && vid == 0)
1060 goto ret;
1061
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001062 adapter->vlan_tag[vid] = 0;
Ajit Khaparde82903e42010-02-09 01:34:57 +00001063 if (adapter->vlans_added <= adapter->max_vlans)
Sathya Perla10329df2012-06-05 19:37:18 +00001064 status = be_vid_config(adapter);
Jiri Pirko8e586132011-12-08 19:52:37 -05001065
Ajit Khaparde80817cb2011-12-30 12:15:12 +00001066 if (!status)
1067 adapter->vlans_added--;
1068 else
1069 adapter->vlan_tag[vid] = 1;
1070ret:
1071 return status;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001072}
1073
Sathya Perlaa54769f2011-10-24 02:45:00 +00001074static void be_set_rx_mode(struct net_device *netdev)
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001075{
1076 struct be_adapter *adapter = netdev_priv(netdev);
Padmanabh Ratnakar0fc16eb2012-04-25 01:46:06 +00001077 int status;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001078
1079 if (netdev->flags & IFF_PROMISC) {
Sathya Perla5b8821b2011-08-02 19:57:44 +00001080 be_cmd_rx_filter(adapter, IFF_PROMISC, ON);
Sathya Perla24307ee2009-06-18 00:09:25 +00001081 adapter->promiscuous = true;
1082 goto done;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001083 }
Sathya Perla24307ee2009-06-18 00:09:25 +00001084
Lucas De Marchi25985ed2011-03-30 22:57:33 -03001085 /* BE was previously in promiscuous mode; disable it */
Sathya Perla24307ee2009-06-18 00:09:25 +00001086 if (adapter->promiscuous) {
1087 adapter->promiscuous = false;
Sathya Perla5b8821b2011-08-02 19:57:44 +00001088 be_cmd_rx_filter(adapter, IFF_PROMISC, OFF);
Sathya Perlac0e64ef2011-08-02 19:57:43 +00001089
1090 if (adapter->vlans_added)
Sathya Perla10329df2012-06-05 19:37:18 +00001091 be_vid_config(adapter);
Sathya Perla24307ee2009-06-18 00:09:25 +00001092 }
1093
Sathya Perlae7b909a2009-11-22 22:01:10 +00001094 /* Enable multicast promisc if num configured exceeds what we support */
Jiri Pirko4cd24ea2010-02-08 04:30:35 +00001095 if (netdev->flags & IFF_ALLMULTI ||
Padmanabh Ratnakarabb93952012-10-20 06:01:41 +00001096 netdev_mc_count(netdev) > adapter->max_mcast_mac) {
Sathya Perla5b8821b2011-08-02 19:57:44 +00001097 be_cmd_rx_filter(adapter, IFF_ALLMULTI, ON);
Sathya Perla24307ee2009-06-18 00:09:25 +00001098 goto done;
1099 }
1100
Ajit Khapardefbc13f02012-03-18 06:23:21 +00001101 if (netdev_uc_count(netdev) != adapter->uc_macs) {
1102 struct netdev_hw_addr *ha;
1103 int i = 1; /* First slot is claimed by the Primary MAC */
1104
1105 for (; adapter->uc_macs > 0; adapter->uc_macs--, i++) {
1106 be_cmd_pmac_del(adapter, adapter->if_handle,
1107 adapter->pmac_id[i], 0);
1108 }
1109
1110 if (netdev_uc_count(netdev) > adapter->max_pmac_cnt) {
1111 be_cmd_rx_filter(adapter, IFF_PROMISC, ON);
1112 adapter->promiscuous = true;
1113 goto done;
1114 }
1115
1116 netdev_for_each_uc_addr(ha, adapter->netdev) {
1117 adapter->uc_macs++; /* First slot is for Primary MAC */
1118 be_cmd_pmac_add(adapter, (u8 *)ha->addr,
1119 adapter->if_handle,
1120 &adapter->pmac_id[adapter->uc_macs], 0);
1121 }
1122 }
1123
Padmanabh Ratnakar0fc16eb2012-04-25 01:46:06 +00001124 status = be_cmd_rx_filter(adapter, IFF_MULTICAST, ON);
1125
1126 /* Set to MCAST promisc mode if setting MULTICAST address fails */
1127 if (status) {
1128 dev_info(&adapter->pdev->dev, "Exhausted multicast HW filters.\n");
1129 dev_info(&adapter->pdev->dev, "Disabling HW multicast filtering.\n");
1130 be_cmd_rx_filter(adapter, IFF_ALLMULTI, ON);
1131 }
Sathya Perla24307ee2009-06-18 00:09:25 +00001132done:
1133 return;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001134}
1135
Sarveshwar Bandiba343c72010-03-31 02:56:12 +00001136static int be_set_vf_mac(struct net_device *netdev, int vf, u8 *mac)
1137{
1138 struct be_adapter *adapter = netdev_priv(netdev);
Sathya Perla11ac75e2011-12-13 00:58:50 +00001139 struct be_vf_cfg *vf_cfg = &adapter->vf_cfg[vf];
Sarveshwar Bandiba343c72010-03-31 02:56:12 +00001140 int status;
Padmanabh Ratnakar704e4c82012-10-20 06:02:13 +00001141 bool active_mac = false;
1142 u32 pmac_id;
1143 u8 old_mac[ETH_ALEN];
Sarveshwar Bandiba343c72010-03-31 02:56:12 +00001144
Sathya Perla11ac75e2011-12-13 00:58:50 +00001145 if (!sriov_enabled(adapter))
Sarveshwar Bandiba343c72010-03-31 02:56:12 +00001146 return -EPERM;
1147
Sathya Perla11ac75e2011-12-13 00:58:50 +00001148 if (!is_valid_ether_addr(mac) || vf >= adapter->num_vfs)
Sarveshwar Bandiba343c72010-03-31 02:56:12 +00001149 return -EINVAL;
1150
Padmanabh Ratnakar590c3912011-11-25 05:47:26 +00001151 if (lancer_chip(adapter)) {
Padmanabh Ratnakar704e4c82012-10-20 06:02:13 +00001152 status = be_cmd_get_mac_from_list(adapter, old_mac, &active_mac,
1153 &pmac_id, vf + 1);
1154 if (!status && active_mac)
1155 be_cmd_pmac_del(adapter, vf_cfg->if_handle,
1156 pmac_id, vf + 1);
1157
Padmanabh Ratnakar590c3912011-11-25 05:47:26 +00001158 status = be_cmd_set_mac_list(adapter, mac, 1, vf + 1);
1159 } else {
Sathya Perla11ac75e2011-12-13 00:58:50 +00001160 status = be_cmd_pmac_del(adapter, vf_cfg->if_handle,
1161 vf_cfg->pmac_id, vf + 1);
Sarveshwar Bandiba343c72010-03-31 02:56:12 +00001162
Sathya Perla11ac75e2011-12-13 00:58:50 +00001163 status = be_cmd_pmac_add(adapter, mac, vf_cfg->if_handle,
1164 &vf_cfg->pmac_id, vf + 1);
Padmanabh Ratnakar590c3912011-11-25 05:47:26 +00001165 }
1166
Ajit Khaparde64600ea2010-07-23 01:50:34 +00001167 if (status)
Sarveshwar Bandiba343c72010-03-31 02:56:12 +00001168 dev_err(&adapter->pdev->dev, "MAC %pM set on VF %d Failed\n",
1169 mac, vf);
Ajit Khaparde64600ea2010-07-23 01:50:34 +00001170 else
Sathya Perla11ac75e2011-12-13 00:58:50 +00001171 memcpy(vf_cfg->mac_addr, mac, ETH_ALEN);
Ajit Khaparde64600ea2010-07-23 01:50:34 +00001172
Sarveshwar Bandiba343c72010-03-31 02:56:12 +00001173 return status;
1174}
1175
Ajit Khaparde64600ea2010-07-23 01:50:34 +00001176static int be_get_vf_config(struct net_device *netdev, int vf,
1177 struct ifla_vf_info *vi)
1178{
1179 struct be_adapter *adapter = netdev_priv(netdev);
Sathya Perla11ac75e2011-12-13 00:58:50 +00001180 struct be_vf_cfg *vf_cfg = &adapter->vf_cfg[vf];
Ajit Khaparde64600ea2010-07-23 01:50:34 +00001181
Sathya Perla11ac75e2011-12-13 00:58:50 +00001182 if (!sriov_enabled(adapter))
Ajit Khaparde64600ea2010-07-23 01:50:34 +00001183 return -EPERM;
1184
Sathya Perla11ac75e2011-12-13 00:58:50 +00001185 if (vf >= adapter->num_vfs)
Ajit Khaparde64600ea2010-07-23 01:50:34 +00001186 return -EINVAL;
1187
1188 vi->vf = vf;
Sathya Perla11ac75e2011-12-13 00:58:50 +00001189 vi->tx_rate = vf_cfg->tx_rate;
1190 vi->vlan = vf_cfg->vlan_tag;
Ajit Khaparde64600ea2010-07-23 01:50:34 +00001191 vi->qos = 0;
Sathya Perla11ac75e2011-12-13 00:58:50 +00001192 memcpy(&vi->mac, vf_cfg->mac_addr, ETH_ALEN);
Ajit Khaparde64600ea2010-07-23 01:50:34 +00001193
1194 return 0;
1195}
1196
Ajit Khaparde1da87b72010-07-23 01:51:22 +00001197static int be_set_vf_vlan(struct net_device *netdev,
1198 int vf, u16 vlan, u8 qos)
1199{
1200 struct be_adapter *adapter = netdev_priv(netdev);
1201 int status = 0;
1202
Sathya Perla11ac75e2011-12-13 00:58:50 +00001203 if (!sriov_enabled(adapter))
Ajit Khaparde1da87b72010-07-23 01:51:22 +00001204 return -EPERM;
1205
Sathya Perla11ac75e2011-12-13 00:58:50 +00001206 if (vf >= adapter->num_vfs || vlan > 4095)
Ajit Khaparde1da87b72010-07-23 01:51:22 +00001207 return -EINVAL;
1208
1209 if (vlan) {
Ajit Khapardef1f3ee12012-03-18 06:23:41 +00001210 if (adapter->vf_cfg[vf].vlan_tag != vlan) {
1211 /* If this is new value, program it. Else skip. */
1212 adapter->vf_cfg[vf].vlan_tag = vlan;
1213
1214 status = be_cmd_set_hsw_config(adapter, vlan,
1215 vf + 1, adapter->vf_cfg[vf].if_handle);
1216 }
Ajit Khaparde1da87b72010-07-23 01:51:22 +00001217 } else {
Ajit Khapardef1f3ee12012-03-18 06:23:41 +00001218 /* Reset Transparent Vlan Tagging. */
Sathya Perla11ac75e2011-12-13 00:58:50 +00001219 adapter->vf_cfg[vf].vlan_tag = 0;
Ajit Khapardef1f3ee12012-03-18 06:23:41 +00001220 vlan = adapter->vf_cfg[vf].def_vid;
1221 status = be_cmd_set_hsw_config(adapter, vlan, vf + 1,
1222 adapter->vf_cfg[vf].if_handle);
Ajit Khaparde1da87b72010-07-23 01:51:22 +00001223 }
1224
Ajit Khaparde1da87b72010-07-23 01:51:22 +00001225
1226 if (status)
1227 dev_info(&adapter->pdev->dev,
1228 "VLAN %d config on VF %d failed\n", vlan, vf);
1229 return status;
1230}
1231
Ajit Khapardee1d18732010-07-23 01:52:13 +00001232static int be_set_vf_tx_rate(struct net_device *netdev,
1233 int vf, int rate)
1234{
1235 struct be_adapter *adapter = netdev_priv(netdev);
1236 int status = 0;
1237
Sathya Perla11ac75e2011-12-13 00:58:50 +00001238 if (!sriov_enabled(adapter))
Ajit Khapardee1d18732010-07-23 01:52:13 +00001239 return -EPERM;
1240
Ajit Khaparde94f434c2011-12-30 12:15:30 +00001241 if (vf >= adapter->num_vfs)
Ajit Khapardee1d18732010-07-23 01:52:13 +00001242 return -EINVAL;
1243
Ajit Khaparde94f434c2011-12-30 12:15:30 +00001244 if (rate < 100 || rate > 10000) {
1245 dev_err(&adapter->pdev->dev,
1246 "tx rate must be between 100 and 10000 Mbps\n");
1247 return -EINVAL;
1248 }
Ajit Khapardee1d18732010-07-23 01:52:13 +00001249
Padmanabh Ratnakard5c18472012-10-20 06:01:53 +00001250 if (lancer_chip(adapter))
1251 status = be_cmd_set_profile_config(adapter, rate / 10, vf + 1);
1252 else
1253 status = be_cmd_set_qos(adapter, rate / 10, vf + 1);
Ajit Khapardee1d18732010-07-23 01:52:13 +00001254
1255 if (status)
Ajit Khaparde94f434c2011-12-30 12:15:30 +00001256 dev_err(&adapter->pdev->dev,
Ajit Khapardee1d18732010-07-23 01:52:13 +00001257 "tx rate %d on VF %d failed\n", rate, vf);
Ajit Khaparde94f434c2011-12-30 12:15:30 +00001258 else
1259 adapter->vf_cfg[vf].tx_rate = rate;
Ajit Khapardee1d18732010-07-23 01:52:13 +00001260 return status;
1261}
1262
Sathya Perla39f1d942012-05-08 19:41:24 +00001263static int be_find_vfs(struct be_adapter *adapter, int vf_state)
1264{
1265 struct pci_dev *dev, *pdev = adapter->pdev;
Ivan Vecera2f6a0262012-10-01 01:56:55 +00001266 int vfs = 0, assigned_vfs = 0, pos;
Sathya Perla39f1d942012-05-08 19:41:24 +00001267 u16 offset, stride;
1268
1269 pos = pci_find_ext_capability(pdev, PCI_EXT_CAP_ID_SRIOV);
Sathya Perlad79c0a22012-06-05 19:37:22 +00001270 if (!pos)
1271 return 0;
Sathya Perla39f1d942012-05-08 19:41:24 +00001272 pci_read_config_word(pdev, pos + PCI_SRIOV_VF_OFFSET, &offset);
1273 pci_read_config_word(pdev, pos + PCI_SRIOV_VF_STRIDE, &stride);
1274
1275 dev = pci_get_device(pdev->vendor, PCI_ANY_ID, NULL);
1276 while (dev) {
Ivan Vecera2f6a0262012-10-01 01:56:55 +00001277 if (dev->is_virtfn && pci_physfn(dev) == pdev) {
Sathya Perla39f1d942012-05-08 19:41:24 +00001278 vfs++;
1279 if (dev->dev_flags & PCI_DEV_FLAGS_ASSIGNED)
1280 assigned_vfs++;
1281 }
1282 dev = pci_get_device(pdev->vendor, PCI_ANY_ID, dev);
1283 }
1284 return (vf_state == ASSIGNED) ? assigned_vfs : vfs;
1285}
1286
Sathya Perla10ef9ab2012-02-09 18:05:27 +00001287static void be_eqd_update(struct be_adapter *adapter, struct be_eq_obj *eqo)
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001288{
Sathya Perla10ef9ab2012-02-09 18:05:27 +00001289 struct be_rx_stats *stats = rx_stats(&adapter->rx_obj[eqo->idx]);
Sathya Perla4097f662009-03-24 16:40:13 -07001290 ulong now = jiffies;
Sathya Perlaac124ff2011-07-25 19:10:14 +00001291 ulong delta = now - stats->rx_jiffies;
Sathya Perlaab1594e2011-07-25 19:10:15 +00001292 u64 pkts;
1293 unsigned int start, eqd;
Sathya Perlaac124ff2011-07-25 19:10:14 +00001294
Sathya Perla10ef9ab2012-02-09 18:05:27 +00001295 if (!eqo->enable_aic) {
1296 eqd = eqo->eqd;
1297 goto modify_eqd;
1298 }
1299
1300 if (eqo->idx >= adapter->num_rx_qs)
Sathya Perlaac124ff2011-07-25 19:10:14 +00001301 return;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001302
Sathya Perla10ef9ab2012-02-09 18:05:27 +00001303 stats = rx_stats(&adapter->rx_obj[eqo->idx]);
1304
Sathya Perla4097f662009-03-24 16:40:13 -07001305 /* Wrapped around */
Sathya Perla3abcded2010-10-03 22:12:27 -07001306 if (time_before(now, stats->rx_jiffies)) {
1307 stats->rx_jiffies = now;
Sathya Perla4097f662009-03-24 16:40:13 -07001308 return;
1309 }
1310
Sathya Perlaac124ff2011-07-25 19:10:14 +00001311 /* Update once a second */
1312 if (delta < HZ)
Sathya Perla4097f662009-03-24 16:40:13 -07001313 return;
1314
Sathya Perlaab1594e2011-07-25 19:10:15 +00001315 do {
1316 start = u64_stats_fetch_begin_bh(&stats->sync);
1317 pkts = stats->rx_pkts;
1318 } while (u64_stats_fetch_retry_bh(&stats->sync, start));
1319
Eric Dumazet68c3e5a2011-08-09 06:23:07 +00001320 stats->rx_pps = (unsigned long)(pkts - stats->rx_pkts_prev) / (delta / HZ);
Sathya Perlaab1594e2011-07-25 19:10:15 +00001321 stats->rx_pkts_prev = pkts;
Sathya Perla3abcded2010-10-03 22:12:27 -07001322 stats->rx_jiffies = now;
Sathya Perla10ef9ab2012-02-09 18:05:27 +00001323 eqd = (stats->rx_pps / 110000) << 3;
1324 eqd = min(eqd, eqo->max_eqd);
1325 eqd = max(eqd, eqo->min_eqd);
Sathya Perlaac124ff2011-07-25 19:10:14 +00001326 if (eqd < 10)
1327 eqd = 0;
Sathya Perla10ef9ab2012-02-09 18:05:27 +00001328
1329modify_eqd:
1330 if (eqd != eqo->cur_eqd) {
1331 be_cmd_modify_eqd(adapter, eqo->q.id, eqd);
1332 eqo->cur_eqd = eqd;
Sathya Perlaac124ff2011-07-25 19:10:14 +00001333 }
Sathya Perla4097f662009-03-24 16:40:13 -07001334}
1335
Sathya Perla3abcded2010-10-03 22:12:27 -07001336static void be_rx_stats_update(struct be_rx_obj *rxo,
Sathya Perla2e588f82011-03-11 02:49:26 +00001337 struct be_rx_compl_info *rxcp)
Sathya Perla4097f662009-03-24 16:40:13 -07001338{
Sathya Perlaac124ff2011-07-25 19:10:14 +00001339 struct be_rx_stats *stats = rx_stats(rxo);
Sathya Perla4097f662009-03-24 16:40:13 -07001340
Sathya Perlaab1594e2011-07-25 19:10:15 +00001341 u64_stats_update_begin(&stats->sync);
Sathya Perla3abcded2010-10-03 22:12:27 -07001342 stats->rx_compl++;
Sathya Perla2e588f82011-03-11 02:49:26 +00001343 stats->rx_bytes += rxcp->pkt_size;
Sathya Perla3abcded2010-10-03 22:12:27 -07001344 stats->rx_pkts++;
Sathya Perla2e588f82011-03-11 02:49:26 +00001345 if (rxcp->pkt_type == BE_MULTICAST_PACKET)
Sathya Perla3abcded2010-10-03 22:12:27 -07001346 stats->rx_mcast_pkts++;
Sathya Perla2e588f82011-03-11 02:49:26 +00001347 if (rxcp->err)
Sathya Perlaac124ff2011-07-25 19:10:14 +00001348 stats->rx_compl_err++;
Sathya Perlaab1594e2011-07-25 19:10:15 +00001349 u64_stats_update_end(&stats->sync);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001350}
1351
Sathya Perla2e588f82011-03-11 02:49:26 +00001352static inline bool csum_passed(struct be_rx_compl_info *rxcp)
Ajit Khaparde728a9972009-04-13 15:41:22 -07001353{
Padmanabh Ratnakar19fad862011-03-07 03:08:16 +00001354 /* L4 checksum is not reliable for non TCP/UDP packets.
1355 * Also ignore ipcksm for ipv6 pkts */
Sathya Perla2e588f82011-03-11 02:49:26 +00001356 return (rxcp->tcpf || rxcp->udpf) && rxcp->l4_csum &&
1357 (rxcp->ip_csum || rxcp->ipv6);
Ajit Khaparde728a9972009-04-13 15:41:22 -07001358}
1359
/* Fetch the page_info entry backing Rx frag @frag_idx and retire that
 * buffer from the rx queue (decrements rxq->used).
 * A big page is split into several rx frags; the frag flagged as
 * last_page_user also DMA-unmaps the whole page, earlier frags leave the
 * mapping alive for the remaining users.
 */
static struct be_rx_page_info *get_rx_page_info(struct be_rx_obj *rxo,
						u16 frag_idx)
{
	struct be_adapter *adapter = rxo->adapter;
	struct be_rx_page_info *rx_page_info;
	struct be_queue_info *rxq = &rxo->q;

	rx_page_info = &rxo->page_info_tbl[frag_idx];
	/* Posting logic guarantees a page is attached to every used entry */
	BUG_ON(!rx_page_info->page);

	if (rx_page_info->last_page_user) {
		/* Unmap the compound page exactly once, when its last
		 * fragment is consumed */
		dma_unmap_page(&adapter->pdev->dev,
			dma_unmap_addr(rx_page_info, bus),
			adapter->big_page_size, DMA_FROM_DEVICE);
		rx_page_info->last_page_user = false;
	}

	atomic_dec(&rxq->used);
	return rx_page_info;
}
1380
1381/* Throwaway the data in the Rx completion */
Sathya Perla10ef9ab2012-02-09 18:05:27 +00001382static void be_rx_compl_discard(struct be_rx_obj *rxo,
1383 struct be_rx_compl_info *rxcp)
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001384{
Sathya Perla3abcded2010-10-03 22:12:27 -07001385 struct be_queue_info *rxq = &rxo->q;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001386 struct be_rx_page_info *page_info;
Sathya Perla2e588f82011-03-11 02:49:26 +00001387 u16 i, num_rcvd = rxcp->num_rcvd;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001388
Padmanabh Ratnakare80d9da2011-03-07 03:07:58 +00001389 for (i = 0; i < num_rcvd; i++) {
Sathya Perla10ef9ab2012-02-09 18:05:27 +00001390 page_info = get_rx_page_info(rxo, rxcp->rxq_idx);
Padmanabh Ratnakare80d9da2011-03-07 03:07:58 +00001391 put_page(page_info->page);
1392 memset(page_info, 0, sizeof(*page_info));
Sathya Perla2e588f82011-03-11 02:49:26 +00001393 index_inc(&rxcp->rxq_idx, rxq->len);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001394 }
1395}
1396
/*
 * skb_fill_rx_data forms a complete skb for an ether frame
 * indicated by rxcp.
 *
 * The first frag is either copied entirely into the skb linear area (tiny
 * packets) or split: the Ethernet header goes to the linear area and the
 * payload stays in the page as frag[0]. Remaining frags are attached as
 * page fragments; consecutive frags from the same physical page are
 * coalesced into a single skb frag slot. Consumes rxcp->num_rcvd rx
 * buffers and advances rxcp->rxq_idx past them.
 */
static void skb_fill_rx_data(struct be_rx_obj *rxo, struct sk_buff *skb,
			struct be_rx_compl_info *rxcp)
{
	struct be_queue_info *rxq = &rxo->q;
	struct be_rx_page_info *page_info;
	u16 i, j;
	u16 hdr_len, curr_frag_len, remaining;
	u8 *start;

	page_info = get_rx_page_info(rxo, rxcp->rxq_idx);
	start = page_address(page_info->page) + page_info->page_offset;
	prefetch(start);

	/* Copy data in the first descriptor of this completion */
	curr_frag_len = min(rxcp->pkt_size, rx_frag_size);

	skb->len = curr_frag_len;
	if (curr_frag_len <= BE_HDR_LEN) { /* tiny packet */
		memcpy(skb->data, start, curr_frag_len);
		/* Complete packet has now been moved to data */
		put_page(page_info->page);
		skb->data_len = 0;
		skb->tail += curr_frag_len;
	} else {
		/* Pull only the Ethernet header into the linear area;
		 * leave the payload in the page as frag[0] */
		hdr_len = ETH_HLEN;
		memcpy(skb->data, start, hdr_len);
		skb_shinfo(skb)->nr_frags = 1;
		skb_frag_set_page(skb, 0, page_info->page);
		skb_shinfo(skb)->frags[0].page_offset =
					page_info->page_offset + hdr_len;
		skb_frag_size_set(&skb_shinfo(skb)->frags[0], curr_frag_len - hdr_len);
		skb->data_len = curr_frag_len - hdr_len;
		skb->truesize += rx_frag_size;
		skb->tail += hdr_len;
	}
	page_info->page = NULL;

	if (rxcp->pkt_size <= rx_frag_size) {
		/* Single-frag packet: HW must have reported exactly one */
		BUG_ON(rxcp->num_rcvd != 1);
		return;
	}

	/* More frags present for this completion */
	index_inc(&rxcp->rxq_idx, rxq->len);
	remaining = rxcp->pkt_size - curr_frag_len;
	for (i = 1, j = 0; i < rxcp->num_rcvd; i++) {
		page_info = get_rx_page_info(rxo, rxcp->rxq_idx);
		curr_frag_len = min(remaining, rx_frag_size);

		/* Coalesce all frags from the same physical page in one slot */
		if (page_info->page_offset == 0) {
			/* Fresh page */
			j++;
			skb_frag_set_page(skb, j, page_info->page);
			skb_shinfo(skb)->frags[j].page_offset =
							page_info->page_offset;
			skb_frag_size_set(&skb_shinfo(skb)->frags[j], 0);
			skb_shinfo(skb)->nr_frags++;
		} else {
			/* Same page as previous frag: the page ref taken at
			 * posting time is redundant for a coalesced slot */
			put_page(page_info->page);
		}

		skb_frag_size_add(&skb_shinfo(skb)->frags[j], curr_frag_len);
		skb->len += curr_frag_len;
		skb->data_len += curr_frag_len;
		skb->truesize += rx_frag_size;
		remaining -= curr_frag_len;
		index_inc(&rxcp->rxq_idx, rxq->len);
		page_info->page = NULL;
	}
	BUG_ON(j > MAX_SKB_FRAGS);
}
1473
/* Process the RX completion indicated by rxcp when GRO is disabled.
 * Allocates an skb, attaches the received frags to it, fills in checksum/
 * vlan/rss metadata and hands the frame to the stack. On skb allocation
 * failure the frame is dropped but its rx buffers are still reclaimed.
 */
static void be_rx_compl_process(struct be_rx_obj *rxo,
				struct be_rx_compl_info *rxcp)
{
	struct be_adapter *adapter = rxo->adapter;
	struct net_device *netdev = adapter->netdev;
	struct sk_buff *skb;

	skb = netdev_alloc_skb_ip_align(netdev, BE_RX_SKB_ALLOC_SIZE);
	if (unlikely(!skb)) {
		/* Must still consume the completion's buffers on drop */
		rx_stats(rxo)->rx_drops_no_skbs++;
		be_rx_compl_discard(rxo, rxcp);
		return;
	}

	skb_fill_rx_data(rxo, skb, rxcp);

	/* Trust the HW checksum only when csum_passed() says it is valid */
	if (likely((netdev->features & NETIF_F_RXCSUM) && csum_passed(rxcp)))
		skb->ip_summed = CHECKSUM_UNNECESSARY;
	else
		skb_checksum_none_assert(skb);

	skb->protocol = eth_type_trans(skb, netdev);
	skb_record_rx_queue(skb, rxo - &adapter->rx_obj[0]);
	if (netdev->features & NETIF_F_RXHASH)
		skb->rxhash = rxcp->rss_hash;

	if (rxcp->vlanf)
		__vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), rxcp->vlan_tag);

	netif_receive_skb(skb);
}
1507
/* Process the RX completion indicated by rxcp when GRO is enabled.
 * Attaches all received page frags to the napi GRO skb (coalescing frags
 * that share a physical page into one slot), fills in metadata and passes
 * the frame to napi_gro_frags(). Falls back to discarding the frame when
 * no GRO skb is available.
 */
void be_rx_compl_process_gro(struct be_rx_obj *rxo, struct napi_struct *napi,
			     struct be_rx_compl_info *rxcp)
{
	struct be_adapter *adapter = rxo->adapter;
	struct be_rx_page_info *page_info;
	struct sk_buff *skb = NULL;
	struct be_queue_info *rxq = &rxo->q;
	u16 remaining, curr_frag_len;
	u16 i, j;

	skb = napi_get_frags(napi);
	if (!skb) {
		/* Still reclaim the posted rx buffers on drop */
		be_rx_compl_discard(rxo, rxcp);
		return;
	}

	remaining = rxcp->pkt_size;
	/* j starts at -1 (u16 wrap) and is bumped to 0 on the first frag */
	for (i = 0, j = -1; i < rxcp->num_rcvd; i++) {
		page_info = get_rx_page_info(rxo, rxcp->rxq_idx);

		curr_frag_len = min(remaining, rx_frag_size);

		/* Coalesce all frags from the same physical page in one slot */
		if (i == 0 || page_info->page_offset == 0) {
			/* First frag or Fresh page */
			j++;
			skb_frag_set_page(skb, j, page_info->page);
			skb_shinfo(skb)->frags[j].page_offset =
							page_info->page_offset;
			skb_frag_size_set(&skb_shinfo(skb)->frags[j], 0);
		} else {
			/* Coalesced into previous slot: drop the extra ref */
			put_page(page_info->page);
		}
		skb_frag_size_add(&skb_shinfo(skb)->frags[j], curr_frag_len);
		skb->truesize += rx_frag_size;
		remaining -= curr_frag_len;
		index_inc(&rxcp->rxq_idx, rxq->len);
		memset(page_info, 0, sizeof(*page_info));
	}
	BUG_ON(j > MAX_SKB_FRAGS);

	skb_shinfo(skb)->nr_frags = j + 1;
	skb->len = rxcp->pkt_size;
	skb->data_len = rxcp->pkt_size;
	/* GRO path is only taken for frames whose checksum passed in HW */
	skb->ip_summed = CHECKSUM_UNNECESSARY;
	skb_record_rx_queue(skb, rxo - &adapter->rx_obj[0]);
	if (adapter->netdev->features & NETIF_F_RXHASH)
		skb->rxhash = rxcp->rss_hash;

	if (rxcp->vlanf)
		__vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), rxcp->vlan_tag);

	napi_gro_frags(napi);
}
1563
Sathya Perla10ef9ab2012-02-09 18:05:27 +00001564static void be_parse_rx_compl_v1(struct be_eth_rx_compl *compl,
1565 struct be_rx_compl_info *rxcp)
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001566{
Sathya Perla2e588f82011-03-11 02:49:26 +00001567 rxcp->pkt_size =
1568 AMAP_GET_BITS(struct amap_eth_rx_compl_v1, pktsize, compl);
1569 rxcp->vlanf = AMAP_GET_BITS(struct amap_eth_rx_compl_v1, vtp, compl);
1570 rxcp->err = AMAP_GET_BITS(struct amap_eth_rx_compl_v1, err, compl);
1571 rxcp->tcpf = AMAP_GET_BITS(struct amap_eth_rx_compl_v1, tcpf, compl);
Padmanabh Ratnakar9ecb42f2011-03-15 14:57:09 -07001572 rxcp->udpf = AMAP_GET_BITS(struct amap_eth_rx_compl_v1, udpf, compl);
Sathya Perla2e588f82011-03-11 02:49:26 +00001573 rxcp->ip_csum =
1574 AMAP_GET_BITS(struct amap_eth_rx_compl_v1, ipcksm, compl);
1575 rxcp->l4_csum =
1576 AMAP_GET_BITS(struct amap_eth_rx_compl_v1, l4_cksm, compl);
1577 rxcp->ipv6 =
1578 AMAP_GET_BITS(struct amap_eth_rx_compl_v1, ip_version, compl);
1579 rxcp->rxq_idx =
1580 AMAP_GET_BITS(struct amap_eth_rx_compl_v1, fragndx, compl);
1581 rxcp->num_rcvd =
1582 AMAP_GET_BITS(struct amap_eth_rx_compl_v1, numfrags, compl);
1583 rxcp->pkt_type =
1584 AMAP_GET_BITS(struct amap_eth_rx_compl_v1, cast_enc, compl);
Ajit Khaparde4b972912011-04-06 18:07:43 +00001585 rxcp->rss_hash =
Sarveshwar Bandic2979772012-07-25 21:29:50 +00001586 AMAP_GET_BITS(struct amap_eth_rx_compl_v1, rsshash, compl);
Sathya Perla15d72182011-03-21 20:49:26 +00001587 if (rxcp->vlanf) {
1588 rxcp->vtm = AMAP_GET_BITS(struct amap_eth_rx_compl_v1, vtm,
David S. Miller3c709f82011-05-11 14:26:15 -04001589 compl);
1590 rxcp->vlan_tag = AMAP_GET_BITS(struct amap_eth_rx_compl_v1, vlan_tag,
1591 compl);
Sathya Perla15d72182011-03-21 20:49:26 +00001592 }
Sathya Perla12004ae2011-08-02 19:57:46 +00001593 rxcp->port = AMAP_GET_BITS(struct amap_eth_rx_compl_v1, port, compl);
Sathya Perla2e588f82011-03-11 02:49:26 +00001594}
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001595
Sathya Perla10ef9ab2012-02-09 18:05:27 +00001596static void be_parse_rx_compl_v0(struct be_eth_rx_compl *compl,
1597 struct be_rx_compl_info *rxcp)
Sathya Perla2e588f82011-03-11 02:49:26 +00001598{
1599 rxcp->pkt_size =
1600 AMAP_GET_BITS(struct amap_eth_rx_compl_v0, pktsize, compl);
1601 rxcp->vlanf = AMAP_GET_BITS(struct amap_eth_rx_compl_v0, vtp, compl);
1602 rxcp->err = AMAP_GET_BITS(struct amap_eth_rx_compl_v0, err, compl);
1603 rxcp->tcpf = AMAP_GET_BITS(struct amap_eth_rx_compl_v0, tcpf, compl);
Padmanabh Ratnakar9ecb42f2011-03-15 14:57:09 -07001604 rxcp->udpf = AMAP_GET_BITS(struct amap_eth_rx_compl_v0, udpf, compl);
Sathya Perla2e588f82011-03-11 02:49:26 +00001605 rxcp->ip_csum =
1606 AMAP_GET_BITS(struct amap_eth_rx_compl_v0, ipcksm, compl);
1607 rxcp->l4_csum =
1608 AMAP_GET_BITS(struct amap_eth_rx_compl_v0, l4_cksm, compl);
1609 rxcp->ipv6 =
1610 AMAP_GET_BITS(struct amap_eth_rx_compl_v0, ip_version, compl);
1611 rxcp->rxq_idx =
1612 AMAP_GET_BITS(struct amap_eth_rx_compl_v0, fragndx, compl);
1613 rxcp->num_rcvd =
1614 AMAP_GET_BITS(struct amap_eth_rx_compl_v0, numfrags, compl);
1615 rxcp->pkt_type =
1616 AMAP_GET_BITS(struct amap_eth_rx_compl_v0, cast_enc, compl);
Ajit Khaparde4b972912011-04-06 18:07:43 +00001617 rxcp->rss_hash =
Sarveshwar Bandic2979772012-07-25 21:29:50 +00001618 AMAP_GET_BITS(struct amap_eth_rx_compl_v0, rsshash, compl);
Sathya Perla15d72182011-03-21 20:49:26 +00001619 if (rxcp->vlanf) {
1620 rxcp->vtm = AMAP_GET_BITS(struct amap_eth_rx_compl_v0, vtm,
David S. Miller3c709f82011-05-11 14:26:15 -04001621 compl);
1622 rxcp->vlan_tag = AMAP_GET_BITS(struct amap_eth_rx_compl_v0, vlan_tag,
1623 compl);
Sathya Perla15d72182011-03-21 20:49:26 +00001624 }
Sathya Perla12004ae2011-08-02 19:57:46 +00001625 rxcp->port = AMAP_GET_BITS(struct amap_eth_rx_compl_v0, port, compl);
Sathya Perla2e588f82011-03-11 02:49:26 +00001626}
1627
/* Fetch and parse the next valid Rx completion from the rx CQ, or return
 * NULL if none is pending. On success the entry's valid bit is cleared and
 * the CQ tail is advanced; the returned rxcp is the per-rxo scratch struct
 * and stays valid until the next call for this rxo.
 */
static struct be_rx_compl_info *be_rx_compl_get(struct be_rx_obj *rxo)
{
	struct be_eth_rx_compl *compl = queue_tail_node(&rxo->cq);
	struct be_rx_compl_info *rxcp = &rxo->rxcp;
	struct be_adapter *adapter = rxo->adapter;

	/* For checking the valid bit it is Ok to use either definition as the
	 * valid bit is at the same position in both v0 and v1 Rx compl */
	if (compl->dw[offsetof(struct amap_eth_rx_compl_v1, valid) / 32] == 0)
		return NULL;

	/* Order the valid-bit read before reading the rest of the entry */
	rmb();
	be_dws_le_to_cpu(compl, sizeof(*compl));

	if (adapter->be3_native)
		be_parse_rx_compl_v1(compl, rxcp);
	else
		be_parse_rx_compl_v0(compl, rxcp);

	if (rxcp->vlanf) {
		/* vlanf could be wrongly set in some cards.
		 * ignore if vtm is not set */
		if ((adapter->function_mode & FLEX10_MODE) && !rxcp->vtm)
			rxcp->vlanf = 0;

		if (!lancer_chip(adapter))
			rxcp->vlan_tag = swab16(rxcp->vlan_tag);

		/* Strip the vlan flag for frames tagged with the port's pvid
		 * unless that vid was explicitly configured by the user */
		if (adapter->pvid == (rxcp->vlan_tag & VLAN_VID_MASK) &&
		    !adapter->vlan_tag[rxcp->vlan_tag])
			rxcp->vlanf = 0;
	}

	/* As the compl has been parsed, reset it; we wont touch it again */
	compl->dw[offsetof(struct amap_eth_rx_compl_v1, valid) / 32] = 0;

	queue_tail_inc(&rxo->cq);
	return rxcp;
}
1667
Eric Dumazet1829b082011-03-01 05:48:12 +00001668static inline struct page *be_alloc_pages(u32 size, gfp_t gfp)
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001669{
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001670 u32 order = get_order(size);
Eric Dumazet1829b082011-03-01 05:48:12 +00001671
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001672 if (order > 0)
Eric Dumazet1829b082011-03-01 05:48:12 +00001673 gfp |= __GFP_COMP;
1674 return alloc_pages(gfp, order);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001675}
1676
1677/*
1678 * Allocate a page, split it to fragments of size rx_frag_size and post as
1679 * receive buffers to BE
1680 */
Eric Dumazet1829b082011-03-01 05:48:12 +00001681static void be_post_rx_frags(struct be_rx_obj *rxo, gfp_t gfp)
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001682{
Sathya Perla3abcded2010-10-03 22:12:27 -07001683 struct be_adapter *adapter = rxo->adapter;
Sathya Perla26d92f92010-01-21 22:52:08 -08001684 struct be_rx_page_info *page_info = NULL, *prev_page_info = NULL;
Sathya Perla3abcded2010-10-03 22:12:27 -07001685 struct be_queue_info *rxq = &rxo->q;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001686 struct page *pagep = NULL;
1687 struct be_eth_rx_d *rxd;
1688 u64 page_dmaaddr = 0, frag_dmaaddr;
1689 u32 posted, page_offset = 0;
1690
Sathya Perla3abcded2010-10-03 22:12:27 -07001691 page_info = &rxo->page_info_tbl[rxq->head];
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001692 for (posted = 0; posted < MAX_RX_POST && !page_info->page; posted++) {
1693 if (!pagep) {
Eric Dumazet1829b082011-03-01 05:48:12 +00001694 pagep = be_alloc_pages(adapter->big_page_size, gfp);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001695 if (unlikely(!pagep)) {
Sathya Perlaac124ff2011-07-25 19:10:14 +00001696 rx_stats(rxo)->rx_post_fail++;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001697 break;
1698 }
Ivan Vecera2b7bceb2011-02-02 08:05:12 +00001699 page_dmaaddr = dma_map_page(&adapter->pdev->dev, pagep,
1700 0, adapter->big_page_size,
1701 DMA_FROM_DEVICE);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001702 page_info->page_offset = 0;
1703 } else {
1704 get_page(pagep);
1705 page_info->page_offset = page_offset + rx_frag_size;
1706 }
1707 page_offset = page_info->page_offset;
1708 page_info->page = pagep;
FUJITA Tomonorifac6da52010-04-01 16:53:22 +00001709 dma_unmap_addr_set(page_info, bus, page_dmaaddr);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001710 frag_dmaaddr = page_dmaaddr + page_info->page_offset;
1711
1712 rxd = queue_head_node(rxq);
1713 rxd->fragpa_lo = cpu_to_le32(frag_dmaaddr & 0xFFFFFFFF);
1714 rxd->fragpa_hi = cpu_to_le32(upper_32_bits(frag_dmaaddr));
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001715
1716 /* Any space left in the current big page for another frag? */
1717 if ((page_offset + rx_frag_size + rx_frag_size) >
1718 adapter->big_page_size) {
1719 pagep = NULL;
1720 page_info->last_page_user = true;
1721 }
Sathya Perla26d92f92010-01-21 22:52:08 -08001722
1723 prev_page_info = page_info;
1724 queue_head_inc(rxq);
Sathya Perla10ef9ab2012-02-09 18:05:27 +00001725 page_info = &rxo->page_info_tbl[rxq->head];
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001726 }
1727 if (pagep)
Sathya Perla26d92f92010-01-21 22:52:08 -08001728 prev_page_info->last_page_user = true;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001729
1730 if (posted) {
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001731 atomic_add(posted, &rxq->used);
Sathya Perla8788fdc2009-07-27 22:52:03 +00001732 be_rxq_notify(adapter, rxq->id, posted);
Sathya Perlaea1dae12009-03-19 23:56:20 -07001733 } else if (atomic_read(&rxq->used) == 0) {
1734 /* Let be_worker replenish when memory is available */
Sathya Perla3abcded2010-10-03 22:12:27 -07001735 rxo->rx_post_starved = true;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001736 }
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001737}
1738
/* Fetch the next valid Tx completion from @tx_cq, or NULL if none is
 * pending. The entry is byte-swapped in place, its valid bit cleared for
 * reuse, and the CQ tail advanced.
 */
static struct be_eth_tx_compl *be_tx_compl_get(struct be_queue_info *tx_cq)
{
	struct be_eth_tx_compl *txcp = queue_tail_node(tx_cq);

	if (txcp->dw[offsetof(struct amap_eth_tx_compl, valid) / 32] == 0)
		return NULL;

	/* Order the valid-bit read before reading the rest of the entry */
	rmb();
	be_dws_le_to_cpu(txcp, sizeof(*txcp));

	txcp->dw[offsetof(struct amap_eth_tx_compl, valid) / 32] = 0;

	queue_tail_inc(tx_cq);
	return txcp;
}
1754
/* Reclaim all wrbs (header + frag wrbs) of one transmitted skb from the tx
 * queue, DMA-unmapping each frag. @last_index is the index of the skb's
 * final wrb as reported by the tx completion. Frees the skb and returns the
 * number of wrbs consumed so the caller can decrement txq->used.
 */
static u16 be_tx_compl_process(struct be_adapter *adapter,
		struct be_tx_obj *txo, u16 last_index)
{
	struct be_queue_info *txq = &txo->q;
	struct be_eth_wrb *wrb;
	struct sk_buff **sent_skbs = txo->sent_skb_list;
	struct sk_buff *sent_skb;
	u16 cur_index, num_wrbs = 1; /* account for hdr wrb */
	bool unmap_skb_hdr = true;

	sent_skb = sent_skbs[txq->tail];
	BUG_ON(!sent_skb);
	sent_skbs[txq->tail] = NULL;

	/* skip header wrb */
	queue_tail_inc(txq);

	do {
		cur_index = txq->tail;
		wrb = queue_tail_node(txq);
		/* The skb linear area is unmapped with the first frag wrb
		 * only (unmap_skb_hdr is cleared after that) */
		unmap_tx_frag(&adapter->pdev->dev, wrb,
			      (unmap_skb_hdr && skb_headlen(sent_skb)));
		unmap_skb_hdr = false;

		num_wrbs++;
		queue_tail_inc(txq);
	} while (cur_index != last_index);

	kfree_skb(sent_skb);
	return num_wrbs;
}
1786
/* Return the number of events in the event queue.
 * Consumes (zeroes) each valid EQ entry and advances the queue tail;
 * the returned count is later handed to be_eq_notify() to re-arm/ack.
 */
static inline int events_get(struct be_eq_obj *eqo)
{
	struct be_eq_entry *eqe;
	int num = 0;

	do {
		eqe = queue_tail_node(&eqo->q);
		if (eqe->evt == 0)
			break;

		/* Order the evt read before clearing the entry for reuse */
		rmb();
		eqe->evt = 0;
		num++;
		queue_tail_inc(&eqo->q);
	} while (true);

	return num;
}
1806
Sathya Perla10ef9ab2012-02-09 18:05:27 +00001807/* Leaves the EQ is disarmed state */
1808static void be_eq_clean(struct be_eq_obj *eqo)
1809{
1810 int num = events_get(eqo);
1811
1812 be_eq_notify(eqo->adapter, eqo->q.id, false, true, num);
1813}
1814
/* Drain the rx CQ during teardown and free all still-posted rx buffers.
 * Leaves the CQ unarmed and the rx ring head/tail reset to 0.
 */
static void be_rx_cq_clean(struct be_rx_obj *rxo)
{
	struct be_rx_page_info *page_info;
	struct be_queue_info *rxq = &rxo->q;
	struct be_queue_info *rx_cq = &rxo->cq;
	struct be_rx_compl_info *rxcp;
	struct be_adapter *adapter = rxo->adapter;
	int flush_wait = 0;
	u16 tail;

	/* Consume pending rx completions.
	 * Wait for the flush completion (identified by zero num_rcvd)
	 * to arrive. Notify CQ even when there are no more CQ entries
	 * for HW to flush partially coalesced CQ entries.
	 * In Lancer, there is no need to wait for flush compl.
	 */
	for (;;) {
		rxcp = be_rx_compl_get(rxo);
		if (rxcp == NULL) {
			if (lancer_chip(adapter))
				break;

			/* Give up after ~10ms or when the HW is in error */
			if (flush_wait++ > 10 || be_hw_error(adapter)) {
				dev_warn(&adapter->pdev->dev,
					 "did not receive flush compl\n");
				break;
			}
			be_cq_notify(adapter, rx_cq->id, true, 0);
			mdelay(1);
		} else {
			be_rx_compl_discard(rxo, rxcp);
			be_cq_notify(adapter, rx_cq->id, false, 1);
			if (rxcp->num_rcvd == 0)
				break;
		}
	}

	/* After cleanup, leave the CQ in unarmed state */
	be_cq_notify(adapter, rx_cq->id, false, 0);

	/* Then free posted rx buffers that were not used */
	tail = (rxq->head + rxq->len - atomic_read(&rxq->used)) % rxq->len;
	for (; atomic_read(&rxq->used) > 0; index_inc(&tail, rxq->len)) {
		page_info = get_rx_page_info(rxo, tail);
		put_page(page_info->page);
		memset(page_info, 0, sizeof(*page_info));
	}
	BUG_ON(atomic_read(&rxq->used));
	rxq->tail = rxq->head = 0;
}
1865
/* Drain all tx queues during teardown: process arriving tx completions for
 * up to ~200ms, then forcibly unmap and free any posted skbs whose
 * completions never arrived so no DMA mappings or skbs are leaked.
 */
static void be_tx_compl_clean(struct be_adapter *adapter)
{
	struct be_tx_obj *txo;
	struct be_queue_info *txq;
	struct be_eth_tx_compl *txcp;
	u16 end_idx, cmpl = 0, timeo = 0, num_wrbs = 0;
	struct sk_buff *sent_skb;
	bool dummy_wrb;
	int i, pending_txqs;

	/* Wait for a max of 200ms for all the tx-completions to arrive. */
	do {
		pending_txqs = adapter->num_tx_qs;

		for_all_tx_queues(adapter, txo, i) {
			txq = &txo->q;
			while ((txcp = be_tx_compl_get(&txo->cq))) {
				end_idx =
					AMAP_GET_BITS(struct amap_eth_tx_compl,
						      wrb_index, txcp);
				num_wrbs += be_tx_compl_process(adapter, txo,
								end_idx);
				cmpl++;
			}
			if (cmpl) {
				/* Ack the compls without re-arming the CQ */
				be_cq_notify(adapter, txo->cq.id, false, cmpl);
				atomic_sub(num_wrbs, &txq->used);
				cmpl = 0;
				num_wrbs = 0;
			}
			if (atomic_read(&txq->used) == 0)
				pending_txqs--;
		}

		if (pending_txqs == 0 || ++timeo > 200)
			break;

		mdelay(1);
	} while (true);

	for_all_tx_queues(adapter, txo, i) {
		txq = &txo->q;
		if (atomic_read(&txq->used))
			dev_err(&adapter->pdev->dev, "%d pending tx-compls\n",
				atomic_read(&txq->used));

		/* free posted tx for which compls will never arrive */
		while (atomic_read(&txq->used)) {
			sent_skb = txo->sent_skb_list[txq->tail];
			end_idx = txq->tail;
			/* Recompute this skb's wrb span since no completion
			 * told us where it ends */
			num_wrbs = wrb_cnt_for_skb(adapter, sent_skb,
						   &dummy_wrb);
			index_adv(&end_idx, num_wrbs - 1, txq->len);
			num_wrbs = be_tx_compl_process(adapter, txo, end_idx);
			atomic_sub(num_wrbs, &txq->used);
		}
	}
}
1924
Sathya Perla10ef9ab2012-02-09 18:05:27 +00001925static void be_evt_queues_destroy(struct be_adapter *adapter)
1926{
1927 struct be_eq_obj *eqo;
1928 int i;
1929
1930 for_all_evt_queues(adapter, eqo, i) {
Padmanabh Ratnakar19d59aa2012-07-12 03:57:21 +00001931 if (eqo->q.created) {
1932 be_eq_clean(eqo);
Sathya Perla10ef9ab2012-02-09 18:05:27 +00001933 be_cmd_q_destroy(adapter, &eqo->q, QTYPE_EQ);
Padmanabh Ratnakar19d59aa2012-07-12 03:57:21 +00001934 }
Sathya Perla10ef9ab2012-02-09 18:05:27 +00001935 be_queue_free(adapter, &eqo->q);
1936 }
1937}
1938
1939static int be_evt_queues_create(struct be_adapter *adapter)
1940{
1941 struct be_queue_info *eq;
1942 struct be_eq_obj *eqo;
1943 int i, rc;
1944
1945 adapter->num_evt_qs = num_irqs(adapter);
1946
1947 for_all_evt_queues(adapter, eqo, i) {
1948 eqo->adapter = adapter;
1949 eqo->tx_budget = BE_TX_BUDGET;
1950 eqo->idx = i;
1951 eqo->max_eqd = BE_MAX_EQD;
1952 eqo->enable_aic = true;
1953
1954 eq = &eqo->q;
1955 rc = be_queue_alloc(adapter, eq, EVNT_Q_LEN,
1956 sizeof(struct be_eq_entry));
1957 if (rc)
1958 return rc;
1959
1960 rc = be_cmd_eq_create(adapter, eq, eqo->cur_eqd);
1961 if (rc)
1962 return rc;
1963 }
Sathya Perla1cfafab2012-02-23 18:50:15 +00001964 return 0;
Sathya Perla10ef9ab2012-02-09 18:05:27 +00001965}
1966
Sathya Perla5fb379e2009-06-18 00:02:59 +00001967static void be_mcc_queues_destroy(struct be_adapter *adapter)
1968{
1969 struct be_queue_info *q;
Sathya Perla5fb379e2009-06-18 00:02:59 +00001970
Sathya Perla8788fdc2009-07-27 22:52:03 +00001971 q = &adapter->mcc_obj.q;
Sathya Perla5fb379e2009-06-18 00:02:59 +00001972 if (q->created)
Sathya Perla8788fdc2009-07-27 22:52:03 +00001973 be_cmd_q_destroy(adapter, q, QTYPE_MCCQ);
Sathya Perla5fb379e2009-06-18 00:02:59 +00001974 be_queue_free(adapter, q);
1975
Sathya Perla8788fdc2009-07-27 22:52:03 +00001976 q = &adapter->mcc_obj.cq;
Sathya Perla5fb379e2009-06-18 00:02:59 +00001977 if (q->created)
Sathya Perla8788fdc2009-07-27 22:52:03 +00001978 be_cmd_q_destroy(adapter, q, QTYPE_CQ);
Sathya Perla5fb379e2009-06-18 00:02:59 +00001979 be_queue_free(adapter, q);
1980}
1981
/* Must be called only after TX qs are created as MCC shares TX EQ.
 *
 * Creates the MCC completion queue and the MCC WRB queue, unwinding in
 * reverse order via the goto chain if any step fails.
 * Returns 0 on success, -1 on any failure.
 */
static int be_mcc_queues_create(struct be_adapter *adapter)
{
	struct be_queue_info *q, *cq;

	cq = &adapter->mcc_obj.cq;
	if (be_queue_alloc(adapter, cq, MCC_CQ_LEN,
			sizeof(struct be_mcc_compl)))
		goto err;

	/* Use the default EQ for MCC completions */
	if (be_cmd_cq_create(adapter, cq, &mcc_eqo(adapter)->q, true, 0))
		goto mcc_cq_free;

	q = &adapter->mcc_obj.q;
	if (be_queue_alloc(adapter, q, MCC_Q_LEN, sizeof(struct be_mcc_wrb)))
		goto mcc_cq_destroy;

	if (be_cmd_mccq_create(adapter, q, cq))
		goto mcc_q_free;

	return 0;

	/* Unwind: each label undoes the step acquired just before the
	 * failing one, in reverse order of creation.
	 */
mcc_q_free:
	be_queue_free(adapter, q);
mcc_cq_destroy:
	be_cmd_q_destroy(adapter, cq, QTYPE_CQ);
mcc_cq_free:
	be_queue_free(adapter, cq);
err:
	return -1;
}
2014
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002015static void be_tx_queues_destroy(struct be_adapter *adapter)
2016{
2017 struct be_queue_info *q;
Sathya Perla3c8def92011-06-12 20:01:58 +00002018 struct be_tx_obj *txo;
2019 u8 i;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002020
Sathya Perla3c8def92011-06-12 20:01:58 +00002021 for_all_tx_queues(adapter, txo, i) {
2022 q = &txo->q;
2023 if (q->created)
2024 be_cmd_q_destroy(adapter, q, QTYPE_TXQ);
2025 be_queue_free(adapter, q);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002026
Sathya Perla3c8def92011-06-12 20:01:58 +00002027 q = &txo->cq;
2028 if (q->created)
2029 be_cmd_q_destroy(adapter, q, QTYPE_CQ);
2030 be_queue_free(adapter, q);
2031 }
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002032}
2033
Sathya Perladafc0fe2011-10-24 02:45:02 +00002034static int be_num_txqs_want(struct be_adapter *adapter)
2035{
Padmanabh Ratnakarabb93952012-10-20 06:01:41 +00002036 if ((!lancer_chip(adapter) && sriov_want(adapter)) ||
2037 be_is_mc(adapter) ||
2038 (!lancer_chip(adapter) && !be_physfn(adapter)) ||
Sathya Perlaca34fe32012-11-06 17:48:56 +00002039 BE2_chip(adapter))
Sathya Perladafc0fe2011-10-24 02:45:02 +00002040 return 1;
2041 else
Padmanabh Ratnakarabb93952012-10-20 06:01:41 +00002042 return adapter->max_tx_queues;
Sathya Perladafc0fe2011-10-24 02:45:02 +00002043}
2044
/* Create the TX completion queues.
 * Decides how many TX queues to use, publishes that count to the stack
 * (under rtnl), then allocates and creates one CQ per TX queue.
 * Returns 0 on success or the first failing status.
 */
static int be_tx_cqs_create(struct be_adapter *adapter)
{
	struct be_queue_info *cq, *eq;
	int status;
	struct be_tx_obj *txo;
	u8 i;

	adapter->num_tx_qs = be_num_txqs_want(adapter);
	if (adapter->num_tx_qs != MAX_TX_QS) {
		/* netif_set_real_num_tx_queues() must run under rtnl */
		rtnl_lock();
		netif_set_real_num_tx_queues(adapter->netdev,
			adapter->num_tx_qs);
		rtnl_unlock();
	}

	for_all_tx_queues(adapter, txo, i) {
		cq = &txo->cq;
		status = be_queue_alloc(adapter, cq, TX_CQ_LEN,
					sizeof(struct be_eth_tx_compl));
		if (status)
			return status;

		/* If num_evt_qs is less than num_tx_qs, then more than
		 * one txq share an eq
		 */
		eq = &adapter->eq_obj[i % adapter->num_evt_qs].q;
		status = be_cmd_cq_create(adapter, cq, eq, false, 3);
		if (status)
			return status;
	}
	return 0;
}
2077
Sathya Perla10ef9ab2012-02-09 18:05:27 +00002078static int be_tx_qs_create(struct be_adapter *adapter)
2079{
2080 struct be_tx_obj *txo;
2081 int i, status;
2082
2083 for_all_tx_queues(adapter, txo, i) {
2084 status = be_queue_alloc(adapter, &txo->q, TX_Q_LEN,
2085 sizeof(struct be_eth_wrb));
2086 if (status)
2087 return status;
2088
Vasundhara Volam94d73aa2013-04-21 23:28:14 +00002089 status = be_cmd_txq_create(adapter, txo);
Sathya Perla10ef9ab2012-02-09 18:05:27 +00002090 if (status)
2091 return status;
2092 }
2093
Sathya Perlad3791422012-09-28 04:39:44 +00002094 dev_info(&adapter->pdev->dev, "created %d TX queue(s)\n",
2095 adapter->num_tx_qs);
Sathya Perla10ef9ab2012-02-09 18:05:27 +00002096 return 0;
2097}
2098
2099static void be_rx_cqs_destroy(struct be_adapter *adapter)
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002100{
2101 struct be_queue_info *q;
Sathya Perla3abcded2010-10-03 22:12:27 -07002102 struct be_rx_obj *rxo;
2103 int i;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002104
Sathya Perla3abcded2010-10-03 22:12:27 -07002105 for_all_rx_queues(adapter, rxo, i) {
Sathya Perla3abcded2010-10-03 22:12:27 -07002106 q = &rxo->cq;
2107 if (q->created)
2108 be_cmd_q_destroy(adapter, q, QTYPE_CQ);
2109 be_queue_free(adapter, q);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002110 }
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002111}
2112
/* Create the RX completion queues.
 * With multiple irqs, one RSS ring per irq plus one default (non-RSS) RX
 * queue is used; with a single irq only the default queue is created.
 * Returns 0 on success or the first failing status.
 */
static int be_rx_cqs_create(struct be_adapter *adapter)
{
	struct be_queue_info *eq, *cq;
	struct be_rx_obj *rxo;
	int rc, i;

	/* We'll create as many RSS rings as there are irqs.
	 * But when there's only one irq there's no use creating RSS rings
	 */
	adapter->num_rx_qs = (num_irqs(adapter) > 1) ?
				num_irqs(adapter) + 1 : 1;
	if (adapter->num_rx_qs != MAX_RX_QS) {
		/* netif_set_real_num_rx_queues() must run under rtnl */
		rtnl_lock();
		netif_set_real_num_rx_queues(adapter->netdev,
					     adapter->num_rx_qs);
		rtnl_unlock();
	}

	adapter->big_page_size = (1 << get_order(rx_frag_size)) * PAGE_SIZE;
	for_all_rx_queues(adapter, rxo, i) {
		rxo->adapter = adapter;
		cq = &rxo->cq;
		rc = be_queue_alloc(adapter, cq, RX_CQ_LEN,
				sizeof(struct be_eth_rx_compl));
		if (rc)
			return rc;

		/* RX CQs are distributed round-robin over the EQs */
		eq = &adapter->eq_obj[i % adapter->num_evt_qs].q;
		rc = be_cmd_cq_create(adapter, cq, eq, false, 3);
		if (rc)
			return rc;
	}

	dev_info(&adapter->pdev->dev,
		 "created %d RSS queue(s) and 1 default RX queue\n",
		 adapter->num_rx_qs - 1);
	return 0;
}
2151
/* INTx interrupt handler (shared line); used only when MSI-x is not
 * enabled, in which case a single handler services the first EQ
 * (see be_irq_register()).
 */
static irqreturn_t be_intx(int irq, void *dev)
{
	struct be_eq_obj *eqo = dev;
	struct be_adapter *adapter = eqo->adapter;
	int num_evts = 0;

	/* IRQ is not expected when NAPI is scheduled as the EQ
	 * will not be armed.
	 * But, this can happen on Lancer INTx where it takes
	 * a while to de-assert INTx or in BE2 where occasionally
	 * an interrupt may be raised even when EQ is unarmed.
	 * If NAPI is already scheduled, then counting & notifying
	 * events will orphan them.
	 */
	if (napi_schedule_prep(&eqo->napi)) {
		num_evts = events_get(eqo);
		__napi_schedule(&eqo->napi);
		if (num_evts)
			eqo->spurious_intr = 0;
	}
	be_eq_notify(adapter, eqo->q.id, false, true, num_evts);

	/* Return IRQ_HANDLED only for the first spurious intr
	 * after a valid intr to stop the kernel from branding
	 * this irq as a bad one!
	 */
	if (num_evts || eqo->spurious_intr++ == 0)
		return IRQ_HANDLED;
	else
		return IRQ_NONE;
}
2183
/* MSI-x interrupt handler; one instance registered per event queue */
static irqreturn_t be_msix(int irq, void *dev)
{
	struct be_eq_obj *eqo = dev;

	/* arm=false: the EQ stays unarmed while NAPI polls; be_poll()
	 * re-arms it once all work is done.
	 */
	be_eq_notify(eqo->adapter, eqo->q.id, false, true, 0);
	napi_schedule(&eqo->napi);
	return IRQ_HANDLED;
}
2192
Sathya Perla2e588f82011-03-11 02:49:26 +00002193static inline bool do_gro(struct be_rx_compl_info *rxcp)
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002194{
Sathya Perla2e588f82011-03-11 02:49:26 +00002195 return (rxcp->tcpf && !rxcp->err) ? true : false;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002196}
2197
/* NAPI RX work: consume up to @budget completions from @rxo's CQ.
 * Flush/partial/misdirected completions are discarded; valid frames go
 * through GRO or the regular receive path. Consumed entries are acked and
 * the RX ring is replenished when it drops below the refill watermark.
 * Returns the number of completions processed (<= budget).
 */
static int be_process_rx(struct be_rx_obj *rxo, struct napi_struct *napi,
			int budget)
{
	struct be_adapter *adapter = rxo->adapter;
	struct be_queue_info *rx_cq = &rxo->cq;
	struct be_rx_compl_info *rxcp;
	u32 work_done;

	for (work_done = 0; work_done < budget; work_done++) {
		rxcp = be_rx_compl_get(rxo);
		if (!rxcp)
			break;

		/* Is it a flush compl that has no data */
		if (unlikely(rxcp->num_rcvd == 0))
			goto loop_continue;

		/* Discard compl with partial DMA Lancer B0 */
		if (unlikely(!rxcp->pkt_size)) {
			be_rx_compl_discard(rxo, rxcp);
			goto loop_continue;
		}

		/* On BE drop pkts that arrive due to imperfect filtering in
		 * promiscuous mode on some skews
		 */
		if (unlikely(rxcp->port != adapter->port_num &&
				!lancer_chip(adapter))) {
			be_rx_compl_discard(rxo, rxcp);
			goto loop_continue;
		}

		if (do_gro(rxcp))
			be_rx_compl_process_gro(rxo, napi, rxcp);
		else
			be_rx_compl_process(rxo, rxcp);
loop_continue:
		/* Stats are updated for every compl, even discarded ones */
		be_rx_stats_update(rxo, rxcp);
	}

	if (work_done) {
		be_cq_notify(adapter, rx_cq->id, true, work_done);

		/* Refill the RX ring if it has fallen below the watermark */
		if (atomic_read(&rxo->q.used) < RX_FRAGS_REFILL_WM)
			be_post_rx_frags(rxo, GFP_ATOMIC);
	}

	return work_done;
}
2247
/* NAPI TX work: reclaim up to @budget completions from @txo's CQ.
 * Frees completed WRBs, wakes the netdev subqueue @idx if it was stopped
 * and the ring has drained below half, and updates TX-completion stats.
 * Returns true when the CQ was fully drained within the budget.
 */
static bool be_process_tx(struct be_adapter *adapter, struct be_tx_obj *txo,
			  int budget, int idx)
{
	struct be_eth_tx_compl *txcp;
	int num_wrbs = 0, work_done;

	for (work_done = 0; work_done < budget; work_done++) {
		txcp = be_tx_compl_get(&txo->cq);
		if (!txcp)
			break;
		num_wrbs += be_tx_compl_process(adapter, txo,
				AMAP_GET_BITS(struct amap_eth_tx_compl,
					wrb_index, txcp));
	}

	if (work_done) {
		be_cq_notify(adapter, txo->cq.id, true, work_done);
		atomic_sub(num_wrbs, &txo->q.used);

		/* As Tx wrbs have been freed up, wake up netdev queue
		 * if it was stopped due to lack of tx wrbs. */
		if (__netif_subqueue_stopped(adapter->netdev, idx) &&
			atomic_read(&txo->q.used) < txo->q.len / 2) {
			netif_wake_subqueue(adapter->netdev, idx);
		}

		u64_stats_update_begin(&tx_stats(txo)->sync_compl);
		tx_stats(txo)->tx_compl += work_done;
		u64_stats_update_end(&tx_stats(txo)->sync_compl);
	}
	return (work_done < budget); /* Done */
}
Sathya Perla3c8def92011-06-12 20:01:58 +00002280
/* NAPI poll routine for one event queue.
 * Services every TX and RX queue mapped to this EQ, processes MCC
 * completions on the MCC EQ, then either completes NAPI and re-arms the
 * EQ (work < budget) or stays in polling mode with the EQ unarmed.
 * Returns the amount of work done (budget when polling continues).
 */
int be_poll(struct napi_struct *napi, int budget)
{
	struct be_eq_obj *eqo = container_of(napi, struct be_eq_obj, napi);
	struct be_adapter *adapter = eqo->adapter;
	int max_work = 0, work, i, num_evts;
	bool tx_done;

	num_evts = events_get(eqo);

	/* Process all TXQs serviced by this EQ */
	for (i = eqo->idx; i < adapter->num_tx_qs; i += adapter->num_evt_qs) {
		tx_done = be_process_tx(adapter, &adapter->tx_obj[i],
					eqo->tx_budget, i);
		/* An undrained TX CQ forces another poll round */
		if (!tx_done)
			max_work = budget;
	}

	/* This loop will iterate twice for EQ0 in which
	 * completions of the last RXQ (default one) are also processed
	 * For other EQs the loop iterates only once
	 */
	for (i = eqo->idx; i < adapter->num_rx_qs; i += adapter->num_evt_qs) {
		work = be_process_rx(&adapter->rx_obj[i], napi, budget);
		max_work = max(work, max_work);
	}

	if (is_mcc_eqo(eqo))
		be_process_mcc(adapter);

	if (max_work < budget) {
		napi_complete(napi);
		/* Re-arm the EQ and ack the events seen at entry */
		be_eq_notify(adapter, eqo->q.id, true, false, num_evts);
	} else {
		/* As we'll continue in polling mode, count and clear events */
		be_eq_notify(adapter, eqo->q.id, false, false, num_evts);
	}
	return max_work;
}
2319
Padmanabh Ratnakarf67ef7b2012-07-12 03:57:09 +00002320void be_detect_error(struct be_adapter *adapter)
Ajit Khaparde7c185272010-07-29 06:16:33 +00002321{
Padmanabh Ratnakare1cfb672011-11-03 01:50:08 +00002322 u32 ue_lo = 0, ue_hi = 0, ue_lo_mask = 0, ue_hi_mask = 0;
2323 u32 sliport_status = 0, sliport_err1 = 0, sliport_err2 = 0;
Ajit Khaparde7c185272010-07-29 06:16:33 +00002324 u32 i;
2325
Sathya Perlad23e9462012-12-17 19:38:51 +00002326 if (be_hw_error(adapter))
Sathya Perla72f02482011-11-10 19:17:58 +00002327 return;
2328
Padmanabh Ratnakare1cfb672011-11-03 01:50:08 +00002329 if (lancer_chip(adapter)) {
2330 sliport_status = ioread32(adapter->db + SLIPORT_STATUS_OFFSET);
2331 if (sliport_status & SLIPORT_STATUS_ERR_MASK) {
2332 sliport_err1 = ioread32(adapter->db +
2333 SLIPORT_ERROR1_OFFSET);
2334 sliport_err2 = ioread32(adapter->db +
2335 SLIPORT_ERROR2_OFFSET);
2336 }
2337 } else {
2338 pci_read_config_dword(adapter->pdev,
2339 PCICFG_UE_STATUS_LOW, &ue_lo);
2340 pci_read_config_dword(adapter->pdev,
2341 PCICFG_UE_STATUS_HIGH, &ue_hi);
2342 pci_read_config_dword(adapter->pdev,
2343 PCICFG_UE_STATUS_LOW_MASK, &ue_lo_mask);
2344 pci_read_config_dword(adapter->pdev,
2345 PCICFG_UE_STATUS_HI_MASK, &ue_hi_mask);
Ajit Khaparde7c185272010-07-29 06:16:33 +00002346
Padmanabh Ratnakarf67ef7b2012-07-12 03:57:09 +00002347 ue_lo = (ue_lo & ~ue_lo_mask);
2348 ue_hi = (ue_hi & ~ue_hi_mask);
Padmanabh Ratnakare1cfb672011-11-03 01:50:08 +00002349 }
Ajit Khaparde7c185272010-07-29 06:16:33 +00002350
Ajit Khaparde1451ae62012-10-08 18:18:21 +00002351 /* On certain platforms BE hardware can indicate spurious UEs.
2352 * Allow the h/w to stop working completely in case of a real UE.
2353 * Hence not setting the hw_error for UE detection.
2354 */
2355 if (sliport_status & SLIPORT_STATUS_ERR_MASK) {
Padmanabh Ratnakarf67ef7b2012-07-12 03:57:09 +00002356 adapter->hw_error = true;
Sathya Perla434b3642011-11-10 19:17:59 +00002357 dev_err(&adapter->pdev->dev,
Padmanabh Ratnakarf67ef7b2012-07-12 03:57:09 +00002358 "Error detected in the card\n");
2359 }
2360
2361 if (sliport_status & SLIPORT_STATUS_ERR_MASK) {
2362 dev_err(&adapter->pdev->dev,
2363 "ERR: sliport status 0x%x\n", sliport_status);
2364 dev_err(&adapter->pdev->dev,
2365 "ERR: sliport error1 0x%x\n", sliport_err1);
2366 dev_err(&adapter->pdev->dev,
2367 "ERR: sliport error2 0x%x\n", sliport_err2);
Ajit Khaparded053de92010-09-03 06:23:30 +00002368 }
2369
Padmanabh Ratnakare1cfb672011-11-03 01:50:08 +00002370 if (ue_lo) {
2371 for (i = 0; ue_lo; ue_lo >>= 1, i++) {
2372 if (ue_lo & 1)
Ajit Khaparde7c185272010-07-29 06:16:33 +00002373 dev_err(&adapter->pdev->dev,
2374 "UE: %s bit set\n", ue_status_low_desc[i]);
2375 }
2376 }
Padmanabh Ratnakarf67ef7b2012-07-12 03:57:09 +00002377
Padmanabh Ratnakare1cfb672011-11-03 01:50:08 +00002378 if (ue_hi) {
2379 for (i = 0; ue_hi; ue_hi >>= 1, i++) {
2380 if (ue_hi & 1)
Ajit Khaparde7c185272010-07-29 06:16:33 +00002381 dev_err(&adapter->pdev->dev,
2382 "UE: %s bit set\n", ue_status_hi_desc[i]);
2383 }
2384 }
2385
2386}
2387
Sathya Perla8d56ff12009-11-22 22:02:26 +00002388static void be_msix_disable(struct be_adapter *adapter)
2389{
Sathya Perlaac6a0c42011-03-21 20:49:25 +00002390 if (msix_enabled(adapter)) {
Sathya Perla8d56ff12009-11-22 22:02:26 +00002391 pci_disable_msix(adapter->pdev);
Sathya Perlaac6a0c42011-03-21 20:49:25 +00002392 adapter->num_msix_vec = 0;
Sathya Perla3abcded2010-10-03 22:12:27 -07002393 }
2394}
2395
Sathya Perla10ef9ab2012-02-09 18:05:27 +00002396static uint be_num_rss_want(struct be_adapter *adapter)
2397{
Yuval Mintz30e80b52012-07-01 03:19:00 +00002398 u32 num = 0;
Padmanabh Ratnakarabb93952012-10-20 06:01:41 +00002399
Sathya Perla10ef9ab2012-02-09 18:05:27 +00002400 if ((adapter->function_caps & BE_FUNCTION_CAPS_RSS) &&
Padmanabh Ratnakarabb93952012-10-20 06:01:41 +00002401 (lancer_chip(adapter) ||
2402 (!sriov_want(adapter) && be_physfn(adapter)))) {
2403 num = adapter->max_rss_queues;
Yuval Mintz30e80b52012-07-01 03:19:00 +00002404 num = min_t(u32, num, (u32)netif_get_num_default_rss_queues());
2405 }
2406 return num;
Sathya Perla10ef9ab2012-02-09 18:05:27 +00002407}
2408
/* Enable MSI-x and record how many vectors were granted.
 * Requests one vector per desired RSS ring (plus RoCE vectors when
 * supported); if the PCI core offers fewer vectors, retries with the
 * offered count. Returns 0 on success or when falling back to INTx on a
 * PF; returns the failure status for VFs, which cannot use INTx.
 */
static int be_msix_enable(struct be_adapter *adapter)
{
#define BE_MIN_MSIX_VECTORS 1
	int i, status, num_vec, num_roce_vec = 0;
	struct device *dev = &adapter->pdev->dev;

	/* If RSS queues are not used, need a vec for default RX Q */
	num_vec = min(be_num_rss_want(adapter), num_online_cpus());
	if (be_roce_supported(adapter)) {
		num_roce_vec = min_t(u32, MAX_ROCE_MSIX_VECTORS,
					(num_online_cpus() + 1));
		num_roce_vec = min(num_roce_vec, MAX_ROCE_EQS);
		num_vec += num_roce_vec;
		num_vec = min(num_vec, MAX_MSIX_VECTORS);
	}
	num_vec = max(num_vec, BE_MIN_MSIX_VECTORS);

	for (i = 0; i < num_vec; i++)
		adapter->msix_entries[i].entry = i;

	status = pci_enable_msix(adapter->pdev, adapter->msix_entries, num_vec);
	if (status == 0) {
		goto done;
	} else if (status >= BE_MIN_MSIX_VECTORS) {
		/* A positive return is the number of vectors that could be
		 * allocated; retry the request with that count.
		 */
		num_vec = status;
		status = pci_enable_msix(adapter->pdev, adapter->msix_entries,
					 num_vec);
		if (!status)
			goto done;
	}

	dev_warn(dev, "MSIx enable failed\n");
	/* INTx is not supported in VFs, so fail probe if enable_msix fails */
	if (!be_physfn(adapter))
		return status;
	return 0;
done:
	/* Split the granted vectors between NIC and RoCE when applicable */
	if (be_roce_supported(adapter)) {
		if (num_vec > num_roce_vec) {
			adapter->num_msix_vec = num_vec - num_roce_vec;
			adapter->num_msix_roce_vec =
				num_vec - adapter->num_msix_vec;
		} else {
			adapter->num_msix_vec = num_vec;
			adapter->num_msix_roce_vec = 0;
		}
	} else
		adapter->num_msix_vec = num_vec;
	dev_info(dev, "enabled %d MSI-x vector(s)\n", adapter->num_msix_vec);
	return 0;
}
2460
/* Return the MSI-x vector assigned to the given event queue; each EQ
 * owns the msix_entries slot at its own index.
 */
static inline int be_msix_vec_get(struct be_adapter *adapter,
			struct be_eq_obj *eqo)
{
	return adapter->msix_entries[eqo->idx].vector;
}
2466
/* Request one MSI-x irq per event queue.
 * On failure, unwinds by freeing every irq registered so far (in reverse)
 * and disables MSI-x. Returns 0 on success or the request_irq() status.
 */
static int be_msix_register(struct be_adapter *adapter)
{
	struct net_device *netdev = adapter->netdev;
	struct be_eq_obj *eqo;
	int status, i, vec;

	for_all_evt_queues(adapter, eqo, i) {
		sprintf(eqo->desc, "%s-q%d", netdev->name, i);
		vec = be_msix_vec_get(adapter, eqo);
		status = request_irq(vec, be_msix, 0, eqo->desc, eqo);
		if (status)
			goto err_msix;
	}

	return 0;
err_msix:
	/* i is the index that failed; free irqs for indices [0, i-1] */
	for (i--, eqo = &adapter->eq_obj[i]; i >= 0; i--, eqo--)
		free_irq(be_msix_vec_get(adapter, eqo), eqo);
	dev_warn(&adapter->pdev->dev, "MSIX Request IRQ failed - err %d\n",
		status);
	be_msix_disable(adapter);
	return status;
}
2490
/* Register interrupt handlers: MSI-x when enabled, otherwise fall back to
 * a shared INTx line on the first EQ (PFs only — VFs have no INTx).
 * Returns 0 on success and sets adapter->isr_registered.
 */
static int be_irq_register(struct be_adapter *adapter)
{
	struct net_device *netdev = adapter->netdev;
	int status;

	if (msix_enabled(adapter)) {
		status = be_msix_register(adapter);
		if (status == 0)
			goto done;
		/* INTx is not supported for VF */
		if (!be_physfn(adapter))
			return status;
	}

	/* INTx: only the first EQ is used */
	netdev->irq = adapter->pdev->irq;
	status = request_irq(netdev->irq, be_intx, IRQF_SHARED, netdev->name,
			     &adapter->eq_obj[0]);
	if (status) {
		dev_err(&adapter->pdev->dev,
			"INTx request IRQ failed - err %d\n", status);
		return status;
	}
done:
	adapter->isr_registered = true;
	return 0;
}
2518
2519static void be_irq_unregister(struct be_adapter *adapter)
2520{
2521 struct net_device *netdev = adapter->netdev;
Sathya Perla10ef9ab2012-02-09 18:05:27 +00002522 struct be_eq_obj *eqo;
Sathya Perla3abcded2010-10-03 22:12:27 -07002523 int i;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002524
2525 if (!adapter->isr_registered)
2526 return;
2527
2528 /* INTx */
Sathya Perlaac6a0c42011-03-21 20:49:25 +00002529 if (!msix_enabled(adapter)) {
Sathya Perlae49cc342012-11-27 19:50:02 +00002530 free_irq(netdev->irq, &adapter->eq_obj[0]);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002531 goto done;
2532 }
2533
2534 /* MSIx */
Sathya Perla10ef9ab2012-02-09 18:05:27 +00002535 for_all_evt_queues(adapter, eqo, i)
2536 free_irq(be_msix_vec_get(adapter, eqo), eqo);
Sathya Perla3abcded2010-10-03 22:12:27 -07002537
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002538done:
2539 adapter->isr_registered = false;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002540}
2541
Sathya Perla10ef9ab2012-02-09 18:05:27 +00002542static void be_rx_qs_destroy(struct be_adapter *adapter)
Sathya Perla482c9e72011-06-29 23:33:17 +00002543{
2544 struct be_queue_info *q;
2545 struct be_rx_obj *rxo;
2546 int i;
2547
2548 for_all_rx_queues(adapter, rxo, i) {
2549 q = &rxo->q;
2550 if (q->created) {
2551 be_cmd_rxq_destroy(adapter, q);
Sathya Perla10ef9ab2012-02-09 18:05:27 +00002552 be_rx_cq_clean(rxo);
Sathya Perla482c9e72011-06-29 23:33:17 +00002553 }
Sathya Perla10ef9ab2012-02-09 18:05:27 +00002554 be_queue_free(adapter, q);
Sathya Perla482c9e72011-06-29 23:33:17 +00002555 }
2556}
2557
/* ndo_stop: shut down the interface.
 * The teardown order matters: RoCE first, then NAPI, then async MCC;
 * pending TX completions are drained before TX is disabled and RX queues
 * destroyed; finally irqs are synchronized, EQs drained and unregistered.
 */
static int be_close(struct net_device *netdev)
{
	struct be_adapter *adapter = netdev_priv(netdev);
	struct be_eq_obj *eqo;
	int i;

	be_roce_dev_close(adapter);

	if (adapter->flags & BE_FLAGS_NAPI_ENABLED) {
		for_all_evt_queues(adapter, eqo, i)
			napi_disable(&eqo->napi);
		adapter->flags &= ~BE_FLAGS_NAPI_ENABLED;
	}

	be_async_mcc_disable(adapter);

	/* Wait for all pending tx completions to arrive so that
	 * all tx skbs are freed.
	 */
	be_tx_compl_clean(adapter);
	netif_tx_disable(netdev);

	be_rx_qs_destroy(adapter);

	for_all_evt_queues(adapter, eqo, i) {
		/* Make sure no in-flight handler still references the EQ */
		if (msix_enabled(adapter))
			synchronize_irq(be_msix_vec_get(adapter, eqo));
		else
			synchronize_irq(netdev->irq);
		be_eq_clean(eqo);
	}

	be_irq_unregister(adapter);

	return 0;
}
2594
/* Create all RX WRB queues and configure RSS.
 * Allocates every RX ring, creates the default (non-RSS) RXQ first as the
 * FW expects, then the RSS rings; builds the 128-entry RSS indirection
 * table round-robin over the RSS rings and programs the hash flags.
 * Finally posts the initial RX buffers. Returns 0 or a failure status.
 */
static int be_rx_qs_create(struct be_adapter *adapter)
{
	struct be_rx_obj *rxo;
	int rc, i, j;
	u8 rsstable[128];

	for_all_rx_queues(adapter, rxo, i) {
		rc = be_queue_alloc(adapter, &rxo->q, RX_Q_LEN,
				    sizeof(struct be_eth_rx_d));
		if (rc)
			return rc;
	}

	/* The FW would like the default RXQ to be created first */
	rxo = default_rxo(adapter);
	rc = be_cmd_rxq_create(adapter, &rxo->q, rxo->cq.id, rx_frag_size,
			       adapter->if_handle, false, &rxo->rss_id);
	if (rc)
		return rc;

	for_all_rss_queues(adapter, rxo, i) {
		rc = be_cmd_rxq_create(adapter, &rxo->q, rxo->cq.id,
				       rx_frag_size, adapter->if_handle,
				       true, &rxo->rss_id);
		if (rc)
			return rc;
	}

	if (be_multi_rxq(adapter)) {
		/* Fill the 128-slot indirection table by cycling through
		 * the RSS rings' ids.
		 */
		for (j = 0; j < 128; j += adapter->num_rx_qs - 1) {
			for_all_rss_queues(adapter, rxo, i) {
				if ((j + i) >= 128)
					break;
				rsstable[j + i] = rxo->rss_id;
			}
		}
		adapter->rss_flags = RSS_ENABLE_TCP_IPV4 | RSS_ENABLE_IPV4 |
					RSS_ENABLE_TCP_IPV6 | RSS_ENABLE_IPV6;

		/* UDP hashing is only available on post-BEx chips */
		if (!BEx_chip(adapter))
			adapter->rss_flags |= RSS_ENABLE_UDP_IPV4 |
						RSS_ENABLE_UDP_IPV6;

		rc = be_cmd_rss_config(adapter, rsstable, adapter->rss_flags,
				       128);
		if (rc) {
			adapter->rss_flags = 0;
			return rc;
		}
	}

	/* First time posting */
	for_all_rx_queues(adapter, rxo, i)
		be_post_rx_frags(rxo, GFP_KERNEL);
	return 0;
}
2651
/* Bring the interface up: create RX queues, register IRQs, arm the
 * completion/event queues, enable NAPI, then start the TX queues.
 * On any failure the partially-opened state is undone via be_close().
 */
static int be_open(struct net_device *netdev)
{
	struct be_adapter *adapter = netdev_priv(netdev);
	struct be_eq_obj *eqo;
	struct be_rx_obj *rxo;
	struct be_tx_obj *txo;
	u8 link_status;
	int status, i;

	status = be_rx_qs_create(adapter);
	if (status)
		goto err;

	status = be_irq_register(adapter);
	if (status)
		goto err;

	/* Arm RX/TX completion queues so the HW starts posting entries */
	for_all_rx_queues(adapter, rxo, i)
		be_cq_notify(adapter, rxo->cq.id, true, 0);

	for_all_tx_queues(adapter, txo, i)
		be_cq_notify(adapter, txo->cq.id, true, 0);

	be_async_mcc_enable(adapter);

	for_all_evt_queues(adapter, eqo, i) {
		napi_enable(&eqo->napi);
		be_eq_notify(adapter, eqo->q.id, true, false, 0);
	}
	/* Consulted by be_close() so NAPI is disabled only when enabled */
	adapter->flags |= BE_FLAGS_NAPI_ENABLED;

	status = be_cmd_link_status_query(adapter, NULL, &link_status, 0);
	if (!status)
		be_link_status_update(adapter, link_status);

	netif_tx_start_all_queues(netdev);
	be_roce_dev_open(adapter);
	return 0;
err:
	be_close(adapter->netdev);
	return -EIO;
}
2694
Ajit Khaparde71d8d1b2009-12-03 06:16:59 +00002695static int be_setup_wol(struct be_adapter *adapter, bool enable)
2696{
2697 struct be_dma_mem cmd;
2698 int status = 0;
2699 u8 mac[ETH_ALEN];
2700
2701 memset(mac, 0, ETH_ALEN);
2702
2703 cmd.size = sizeof(struct be_cmd_req_acpi_wol_magic_config);
Ivan Vecera2b7bceb2011-02-02 08:05:12 +00002704 cmd.va = dma_alloc_coherent(&adapter->pdev->dev, cmd.size, &cmd.dma,
Joe Perches1f9061d22013-03-15 07:23:58 +00002705 GFP_KERNEL | __GFP_ZERO);
Ajit Khaparde71d8d1b2009-12-03 06:16:59 +00002706 if (cmd.va == NULL)
2707 return -1;
Ajit Khaparde71d8d1b2009-12-03 06:16:59 +00002708
2709 if (enable) {
2710 status = pci_write_config_dword(adapter->pdev,
2711 PCICFG_PM_CONTROL_OFFSET, PCICFG_PM_CONTROL_MASK);
2712 if (status) {
2713 dev_err(&adapter->pdev->dev,
Frans Pop2381a552010-03-24 07:57:36 +00002714 "Could not enable Wake-on-lan\n");
Ivan Vecera2b7bceb2011-02-02 08:05:12 +00002715 dma_free_coherent(&adapter->pdev->dev, cmd.size, cmd.va,
2716 cmd.dma);
Ajit Khaparde71d8d1b2009-12-03 06:16:59 +00002717 return status;
2718 }
2719 status = be_cmd_enable_magic_wol(adapter,
2720 adapter->netdev->dev_addr, &cmd);
2721 pci_enable_wake(adapter->pdev, PCI_D3hot, 1);
2722 pci_enable_wake(adapter->pdev, PCI_D3cold, 1);
2723 } else {
2724 status = be_cmd_enable_magic_wol(adapter, mac, &cmd);
2725 pci_enable_wake(adapter->pdev, PCI_D3hot, 0);
2726 pci_enable_wake(adapter->pdev, PCI_D3cold, 0);
2727 }
2728
Ivan Vecera2b7bceb2011-02-02 08:05:12 +00002729 dma_free_coherent(&adapter->pdev->dev, cmd.size, cmd.va, cmd.dma);
Ajit Khaparde71d8d1b2009-12-03 06:16:59 +00002730 return status;
2731}
2732
Ajit Khaparde6d87f5c2010-08-25 00:32:33 +00002733/*
2734 * Generate a seed MAC address from the PF MAC Address using jhash.
2735 * MAC Address for VFs are assigned incrementally starting from the seed.
2736 * These addresses are programmed in the ASIC by the PF and the VF driver
2737 * queries for the MAC address during its probe.
2738 */
Sathya Perla4c876612013-02-03 20:30:11 +00002739static int be_vf_eth_addr_config(struct be_adapter *adapter)
Ajit Khaparde6d87f5c2010-08-25 00:32:33 +00002740{
Sathya Perlaf9449ab2011-10-24 02:45:01 +00002741 u32 vf;
Sathya Perla3abcded2010-10-03 22:12:27 -07002742 int status = 0;
Ajit Khaparde6d87f5c2010-08-25 00:32:33 +00002743 u8 mac[ETH_ALEN];
Sathya Perla11ac75e2011-12-13 00:58:50 +00002744 struct be_vf_cfg *vf_cfg;
Ajit Khaparde6d87f5c2010-08-25 00:32:33 +00002745
2746 be_vf_eth_addr_generate(adapter, mac);
2747
Sathya Perla11ac75e2011-12-13 00:58:50 +00002748 for_all_vfs(adapter, vf_cfg, vf) {
Padmanabh Ratnakar590c3912011-11-25 05:47:26 +00002749 if (lancer_chip(adapter)) {
2750 status = be_cmd_set_mac_list(adapter, mac, 1, vf + 1);
2751 } else {
2752 status = be_cmd_pmac_add(adapter, mac,
Sathya Perla11ac75e2011-12-13 00:58:50 +00002753 vf_cfg->if_handle,
2754 &vf_cfg->pmac_id, vf + 1);
Padmanabh Ratnakar590c3912011-11-25 05:47:26 +00002755 }
2756
Ajit Khaparde6d87f5c2010-08-25 00:32:33 +00002757 if (status)
2758 dev_err(&adapter->pdev->dev,
Padmanabh Ratnakar590c3912011-11-25 05:47:26 +00002759 "Mac address assignment failed for VF %d\n", vf);
Ajit Khaparde6d87f5c2010-08-25 00:32:33 +00002760 else
Sathya Perla11ac75e2011-12-13 00:58:50 +00002761 memcpy(vf_cfg->mac_addr, mac, ETH_ALEN);
Ajit Khaparde6d87f5c2010-08-25 00:32:33 +00002762
2763 mac[5] += 1;
2764 }
2765 return status;
2766}
2767
Sathya Perla4c876612013-02-03 20:30:11 +00002768static int be_vfs_mac_query(struct be_adapter *adapter)
2769{
2770 int status, vf;
2771 u8 mac[ETH_ALEN];
2772 struct be_vf_cfg *vf_cfg;
2773 bool active;
2774
2775 for_all_vfs(adapter, vf_cfg, vf) {
2776 be_cmd_get_mac_from_list(adapter, mac, &active,
2777 &vf_cfg->pmac_id, 0);
2778
2779 status = be_cmd_mac_addr_query(adapter, mac, false,
2780 vf_cfg->if_handle, 0);
2781 if (status)
2782 return status;
2783 memcpy(vf_cfg->mac_addr, mac, ETH_ALEN);
2784 }
2785 return 0;
2786}
2787
/* Undo be_vf_setup(): disable SR-IOV and release per-VF FW resources.
 * If any VF is still assigned to a VM, the FW/PCI side is left intact
 * and only the driver-side bookkeeping is freed.
 */
static void be_vf_clear(struct be_adapter *adapter)
{
	struct be_vf_cfg *vf_cfg;
	u32 vf;

	if (be_find_vfs(adapter, ASSIGNED)) {
		dev_warn(&adapter->pdev->dev,
			 "VFs are assigned to VMs: not disabling VFs\n");
		goto done;
	}

	pci_disable_sriov(adapter->pdev);

	for_all_vfs(adapter, vf_cfg, vf) {
		/* Lancer VFs hold their MAC via a mac-list; BE via a pmac */
		if (lancer_chip(adapter))
			be_cmd_set_mac_list(adapter, NULL, 0, vf + 1);
		else
			be_cmd_pmac_del(adapter, vf_cfg->if_handle,
					vf_cfg->pmac_id, vf + 1);

		be_cmd_if_destroy(adapter, vf_cfg->if_handle, vf + 1);
	}
done:
	kfree(adapter->vf_cfg);
	adapter->num_vfs = 0;
}
2814
/* Tear down everything be_setup() created: worker, VFs, extra unicast
 * MACs, the FW interface and all queue objects, then release MSI-X.
 */
static int be_clear(struct be_adapter *adapter)
{
	int i = 1;	/* pmac_id[0] is the primary MAC; extra UC MACs follow */

	/* Stop the worker before destroying queues it may be touching */
	if (adapter->flags & BE_FLAGS_WORKER_SCHEDULED) {
		cancel_delayed_work_sync(&adapter->work);
		adapter->flags &= ~BE_FLAGS_WORKER_SCHEDULED;
	}

	if (sriov_enabled(adapter))
		be_vf_clear(adapter);

	/* Delete the additional programmed unicast MACs (not the primary) */
	for (; adapter->uc_macs > 0; adapter->uc_macs--, i++)
		be_cmd_pmac_del(adapter, adapter->if_handle,
			adapter->pmac_id[i], 0);

	be_cmd_if_destroy(adapter, adapter->if_handle, 0);

	be_mcc_queues_destroy(adapter);
	be_rx_cqs_destroy(adapter);
	be_tx_queues_destroy(adapter);
	be_evt_queues_destroy(adapter);

	kfree(adapter->pmac_id);
	adapter->pmac_id = NULL;

	be_msix_disable(adapter);
	return 0;
}
2844
Sathya Perla4c876612013-02-03 20:30:11 +00002845static int be_vfs_if_create(struct be_adapter *adapter)
Padmanabh Ratnakarabb93952012-10-20 06:01:41 +00002846{
Sathya Perla4c876612013-02-03 20:30:11 +00002847 struct be_vf_cfg *vf_cfg;
2848 u32 cap_flags, en_flags, vf;
Padmanabh Ratnakarabb93952012-10-20 06:01:41 +00002849 int status;
2850
Sathya Perla4c876612013-02-03 20:30:11 +00002851 cap_flags = BE_IF_FLAGS_UNTAGGED | BE_IF_FLAGS_BROADCAST |
2852 BE_IF_FLAGS_MULTICAST;
Padmanabh Ratnakarabb93952012-10-20 06:01:41 +00002853
Sathya Perla4c876612013-02-03 20:30:11 +00002854 for_all_vfs(adapter, vf_cfg, vf) {
2855 if (!BE3_chip(adapter))
Vasundhara Volama05f99d2013-04-21 23:28:17 +00002856 be_cmd_get_profile_config(adapter, &cap_flags,
2857 NULL, vf + 1);
Sathya Perla4c876612013-02-03 20:30:11 +00002858
2859 /* If a FW profile exists, then cap_flags are updated */
2860 en_flags = cap_flags & (BE_IF_FLAGS_UNTAGGED |
2861 BE_IF_FLAGS_BROADCAST | BE_IF_FLAGS_MULTICAST);
2862 status = be_cmd_if_create(adapter, cap_flags, en_flags,
2863 &vf_cfg->if_handle, vf + 1);
2864 if (status)
2865 goto err;
2866 }
2867err:
2868 return status;
Padmanabh Ratnakarabb93952012-10-20 06:01:41 +00002869}
2870
Sathya Perla39f1d942012-05-08 19:41:24 +00002871static int be_vf_setup_init(struct be_adapter *adapter)
Sathya Perla30128032011-11-10 19:17:57 +00002872{
Sathya Perla11ac75e2011-12-13 00:58:50 +00002873 struct be_vf_cfg *vf_cfg;
Sathya Perla30128032011-11-10 19:17:57 +00002874 int vf;
2875
Sathya Perla39f1d942012-05-08 19:41:24 +00002876 adapter->vf_cfg = kcalloc(adapter->num_vfs, sizeof(*vf_cfg),
2877 GFP_KERNEL);
2878 if (!adapter->vf_cfg)
2879 return -ENOMEM;
2880
Sathya Perla11ac75e2011-12-13 00:58:50 +00002881 for_all_vfs(adapter, vf_cfg, vf) {
2882 vf_cfg->if_handle = -1;
2883 vf_cfg->pmac_id = -1;
Sathya Perla30128032011-11-10 19:17:57 +00002884 }
Sathya Perla39f1d942012-05-08 19:41:24 +00002885 return 0;
Sathya Perla30128032011-11-10 19:17:57 +00002886}
2887
/* Enable and configure SR-IOV VFs. Handles two cases: VFs already
 * enabled by a previous driver instance (old_vfs: reuse FW state,
 * query back if_handles/MACs) and a fresh enable (create interfaces,
 * assign MACs, then call pci_enable_sriov last). On failure all VF
 * state is torn down via be_vf_clear().
 */
static int be_vf_setup(struct be_adapter *adapter)
{
	struct be_vf_cfg *vf_cfg;
	u16 def_vlan, lnk_speed;
	int status, old_vfs, vf;
	struct device *dev = &adapter->pdev->dev;

	old_vfs = be_find_vfs(adapter, ENABLED);
	if (old_vfs) {
		/* Adopt the already-enabled VF count; module param ignored */
		dev_info(dev, "%d VFs are already enabled\n", old_vfs);
		if (old_vfs != num_vfs)
			dev_warn(dev, "Ignoring num_vfs=%d setting\n", num_vfs);
		adapter->num_vfs = old_vfs;
	} else {
		/* Clamp the requested count to what the device supports */
		if (num_vfs > adapter->dev_num_vfs)
			dev_info(dev, "Device supports %d VFs and not %d\n",
				 adapter->dev_num_vfs, num_vfs);
		adapter->num_vfs = min_t(u16, num_vfs, adapter->dev_num_vfs);
		if (!adapter->num_vfs)
			return 0;
	}

	status = be_vf_setup_init(adapter);
	if (status)
		goto err;

	if (old_vfs) {
		/* Interfaces already exist in FW; just fetch their ids */
		for_all_vfs(adapter, vf_cfg, vf) {
			status = be_cmd_get_if_id(adapter, vf_cfg, vf);
			if (status)
				goto err;
		}
	} else {
		status = be_vfs_if_create(adapter);
		if (status)
			goto err;
	}

	if (old_vfs) {
		/* MACs are already programmed; read them back */
		status = be_vfs_mac_query(adapter);
		if (status)
			goto err;
	} else {
		status = be_vf_eth_addr_config(adapter);
		if (status)
			goto err;
	}

	for_all_vfs(adapter, vf_cfg, vf) {
		/* BE3 FW, by default, caps VF TX-rate to 100mbps.
		 * Allow full available bandwidth
		 */
		if (BE3_chip(adapter) && !old_vfs)
			be_cmd_set_qos(adapter, 1000, vf+1);

		status = be_cmd_link_status_query(adapter, &lnk_speed,
						  NULL, vf + 1);
		if (!status)
			vf_cfg->tx_rate = lnk_speed;

		/* Cache the VF's default (port) VLAN from the HW switch */
		status = be_cmd_get_hsw_config(adapter, &def_vlan,
					       vf + 1, vf_cfg->if_handle);
		if (status)
			goto err;
		vf_cfg->def_vid = def_vlan;

		be_cmd_enable_vf(adapter, vf + 1);
	}

	/* Enable SR-IOV on the PCI side only after FW-side setup succeeded */
	if (!old_vfs) {
		status = pci_enable_sriov(adapter->pdev, adapter->num_vfs);
		if (status) {
			dev_err(dev, "SRIOV enable failed\n");
			adapter->num_vfs = 0;
			goto err;
		}
	}
	return 0;
err:
	dev_err(dev, "VF setup failed\n");
	be_vf_clear(adapter);
	return status;
}
2971
Sathya Perla30128032011-11-10 19:17:57 +00002972static void be_setup_init(struct be_adapter *adapter)
2973{
2974 adapter->vlan_prio_bmap = 0xff;
Ajit Khaparde42f11cf2012-04-21 18:53:22 +00002975 adapter->phy.link_speed = -1;
Sathya Perla30128032011-11-10 19:17:57 +00002976 adapter->if_handle = -1;
2977 adapter->be3_native = false;
2978 adapter->promiscuous = false;
Padmanabh Ratnakarf25b1192012-10-20 06:02:52 +00002979 if (be_physfn(adapter))
2980 adapter->cmd_privileges = MAX_PRIVILEGES;
2981 else
2982 adapter->cmd_privileges = MIN_PRIVILEGES;
Sathya Perla30128032011-11-10 19:17:57 +00002983}
2984
/* Determine the MAC address for an interface, and whether it is already
 * active (programmed) in the FW so the caller can skip be_cmd_pmac_add().
 * Fast path: reuse netdev->dev_addr when a permanent address is known.
 * Otherwise query the FW; the query method depends on chip/function type.
 */
static int be_get_mac_addr(struct be_adapter *adapter, u8 *mac, u32 if_handle,
			   bool *active_mac, u32 *pmac_id)
{
	int status = 0;

	if (!is_zero_ether_addr(adapter->netdev->perm_addr)) {
		memcpy(mac, adapter->netdev->dev_addr, ETH_ALEN);
		/* On BE-class VFs the PF has already programmed the MAC,
		 * so it is active; on Lancer or a PF it still must be added.
		 */
		if (!lancer_chip(adapter) && !be_physfn(adapter))
			*active_mac = true;
		else
			*active_mac = false;

		return status;
	}

	if (lancer_chip(adapter)) {
		status = be_cmd_get_mac_from_list(adapter, mac,
						  active_mac, pmac_id, 0);
		if (*active_mac) {
			status = be_cmd_mac_addr_query(adapter, mac, false,
						       if_handle, *pmac_id);
		}
	} else if (be_physfn(adapter)) {
		/* For BE3, for PF get permanent MAC */
		status = be_cmd_mac_addr_query(adapter, mac, true, 0, 0);
		*active_mac = false;
	} else {
		/* For BE3, for VF get soft MAC assigned by PF*/
		status = be_cmd_mac_addr_query(adapter, mac, false,
					       if_handle, 0);
		*active_mac = true;
	}
	return status;
}
3019
/* Populate the adapter's resource limits (MACs, VLANs, queue counts,
 * interface capabilities, max VFs). Newer chips report these via a FW
 * function-config profile; BE2/BE3 fall back to hard-coded defaults.
 */
static void be_get_resources(struct be_adapter *adapter)
{
	u16 dev_num_vfs;
	int pos, status;
	bool profile_present = false;
	u16 txq_count = 0;

	if (!BEx_chip(adapter)) {
		/* Skyhawk/Lancer: limits come from the FW func config */
		status = be_cmd_get_func_config(adapter);
		if (!status)
			profile_present = true;
	} else if (BE3_chip(adapter) && be_physfn(adapter)) {
		/* BE3 PF may still have a profile-provided TXQ count */
		be_cmd_get_profile_config(adapter, NULL, &txq_count, 0);
	}

	if (profile_present) {
		/* Sanity fixes for Lancer */
		adapter->max_pmac_cnt = min_t(u16, adapter->max_pmac_cnt,
					      BE_UC_PMAC_COUNT);
		adapter->max_vlans = min_t(u16, adapter->max_vlans,
					   BE_NUM_VLANS_SUPPORTED);
		adapter->max_mcast_mac = min_t(u16, adapter->max_mcast_mac,
					       BE_MAX_MC);
		adapter->max_tx_queues = min_t(u16, adapter->max_tx_queues,
					       MAX_TX_QS);
		adapter->max_rss_queues = min_t(u16, adapter->max_rss_queues,
						BE3_MAX_RSS_QS);
		adapter->max_event_queues = min_t(u16,
						  adapter->max_event_queues,
						  BE3_MAX_RSS_QS);

		/* Reserve one RX queue for the non-RSS default queue */
		if (adapter->max_rss_queues &&
		    adapter->max_rss_queues == adapter->max_rx_queues)
			adapter->max_rss_queues -= 1;

		/* Each RSS queue needs its own event queue */
		if (adapter->max_event_queues < adapter->max_rss_queues)
			adapter->max_rss_queues = adapter->max_event_queues;

	} else {
		if (be_physfn(adapter))
			adapter->max_pmac_cnt = BE_UC_PMAC_COUNT;
		else
			adapter->max_pmac_cnt = BE_VF_UC_PMAC_COUNT;

		/* In FLEX10 the VLAN space is shared among partitions */
		if (adapter->function_mode & FLEX10_MODE)
			adapter->max_vlans = BE_NUM_VLANS_SUPPORTED/8;
		else
			adapter->max_vlans = BE_NUM_VLANS_SUPPORTED;

		adapter->max_mcast_mac = BE_MAX_MC;
		adapter->max_tx_queues = txq_count ? txq_count : MAX_TX_QS;
		adapter->max_tx_queues = min_t(u16, adapter->max_tx_queues,
					       MAX_TX_QS);
		adapter->max_rss_queues = (adapter->be3_native) ?
					   BE3_MAX_RSS_QS : BE2_MAX_RSS_QS;
		adapter->max_event_queues = BE3_MAX_RSS_QS;

		adapter->if_cap_flags = BE_IF_FLAGS_UNTAGGED |
					BE_IF_FLAGS_BROADCAST |
					BE_IF_FLAGS_MULTICAST |
					BE_IF_FLAGS_PASS_L3L4_ERRORS |
					BE_IF_FLAGS_MCAST_PROMISCUOUS |
					BE_IF_FLAGS_VLAN_PROMISCUOUS |
					BE_IF_FLAGS_PROMISCUOUS;

		if (adapter->function_caps & BE_FUNCTION_CAPS_RSS)
			adapter->if_cap_flags |= BE_IF_FLAGS_RSS;
	}

	/* Read the device's SR-IOV capability for the supported VF count */
	pos = pci_find_ext_capability(adapter->pdev, PCI_EXT_CAP_ID_SRIOV);
	if (pos) {
		pci_read_config_word(adapter->pdev, pos + PCI_SRIOV_TOTAL_VF,
				     &dev_num_vfs);
		if (BE3_chip(adapter))
			dev_num_vfs = min_t(u16, dev_num_vfs, MAX_VFS);
		adapter->dev_num_vfs = dev_num_vfs;
	}
}
3098
Sathya Perla39f1d942012-05-08 19:41:24 +00003099/* Routine to query per function resource limits */
3100static int be_get_config(struct be_adapter *adapter)
3101{
Sathya Perla4c876612013-02-03 20:30:11 +00003102 int status;
Sathya Perla39f1d942012-05-08 19:41:24 +00003103
Padmanabh Ratnakarabb93952012-10-20 06:01:41 +00003104 status = be_cmd_query_fw_cfg(adapter, &adapter->port_num,
3105 &adapter->function_mode,
Vasundhara Volam0ad31572013-04-21 23:28:16 +00003106 &adapter->function_caps,
3107 &adapter->asic_rev);
Padmanabh Ratnakarabb93952012-10-20 06:01:41 +00003108 if (status)
3109 goto err;
3110
3111 be_get_resources(adapter);
3112
3113 /* primary mac needs 1 pmac entry */
3114 adapter->pmac_id = kcalloc(adapter->max_pmac_cnt + 1,
3115 sizeof(u32), GFP_KERNEL);
3116 if (!adapter->pmac_id) {
3117 status = -ENOMEM;
3118 goto err;
3119 }
3120
Padmanabh Ratnakarabb93952012-10-20 06:01:41 +00003121err:
3122 return status;
Sathya Perla39f1d942012-05-08 19:41:24 +00003123}
3124
/* Master initialization sequence: query FW config/limits, enable MSI-X,
 * create all queue objects, create the FW interface, program the MAC,
 * configure VLANs/RX-mode/flow-control, bring up SR-IOV VFs (PF only)
 * and finally schedule the periodic worker. Any failure unwinds
 * everything via be_clear().
 */
static int be_setup(struct be_adapter *adapter)
{
	struct device *dev = &adapter->pdev->dev;
	u32 en_flags;
	u32 tx_fc, rx_fc;
	int status;
	u8 mac[ETH_ALEN];
	bool active_mac;

	be_setup_init(adapter);

	if (!lancer_chip(adapter))
		be_cmd_req_native_mode(adapter);

	status = be_get_config(adapter);
	if (status)
		goto err;

	status = be_msix_enable(adapter);
	if (status)
		goto err;

	/* Queue creation order: EQs first, then CQs, then MCC */
	status = be_evt_queues_create(adapter);
	if (status)
		goto err;

	status = be_tx_cqs_create(adapter);
	if (status)
		goto err;

	status = be_rx_cqs_create(adapter);
	if (status)
		goto err;

	status = be_mcc_queues_create(adapter);
	if (status)
		goto err;

	be_cmd_get_fn_privileges(adapter, &adapter->cmd_privileges, 0);
	/* In UMC mode FW does not return right privileges.
	 * Override with correct privilege equivalent to PF.
	 */
	if (be_is_mc(adapter))
		adapter->cmd_privileges = MAX_PRIVILEGES;

	en_flags = BE_IF_FLAGS_UNTAGGED | BE_IF_FLAGS_BROADCAST |
			BE_IF_FLAGS_MULTICAST | BE_IF_FLAGS_PASS_L3L4_ERRORS;

	if (adapter->function_caps & BE_FUNCTION_CAPS_RSS)
		en_flags |= BE_IF_FLAGS_RSS;

	/* Enable only flags the function actually supports */
	en_flags = en_flags & adapter->if_cap_flags;

	status = be_cmd_if_create(adapter, adapter->if_cap_flags, en_flags,
				  &adapter->if_handle, 0);
	if (status != 0)
		goto err;

	memset(mac, 0, ETH_ALEN);
	active_mac = false;
	status = be_get_mac_addr(adapter, mac, adapter->if_handle,
				 &active_mac, &adapter->pmac_id[0]);
	if (status != 0)
		goto err;

	/* Program the MAC only if the FW doesn't already have it active */
	if (!active_mac) {
		status = be_cmd_pmac_add(adapter, mac, adapter->if_handle,
					 &adapter->pmac_id[0], 0);
		if (status != 0)
			goto err;
	}

	if (is_zero_ether_addr(adapter->netdev->dev_addr)) {
		memcpy(adapter->netdev->dev_addr, mac, ETH_ALEN);
		memcpy(adapter->netdev->perm_addr, mac, ETH_ALEN);
	}

	status = be_tx_qs_create(adapter);
	if (status)
		goto err;

	be_cmd_get_fw_ver(adapter, adapter->fw_ver, adapter->fw_on_flash);

	/* Re-program VLAN filters that survived an adapter reset */
	if (adapter->vlans_added)
		be_vid_config(adapter);

	be_set_rx_mode(adapter->netdev);

	be_cmd_get_flow_control(adapter, &tx_fc, &rx_fc);

	/* Only issue the FW command when the desired setting differs */
	if (rx_fc != adapter->rx_fc || tx_fc != adapter->tx_fc)
		be_cmd_set_flow_control(adapter, adapter->tx_fc,
					adapter->rx_fc);

	if (be_physfn(adapter)) {
		if (adapter->dev_num_vfs)
			be_vf_setup(adapter);
		else
			dev_warn(dev, "device doesn't support SRIOV\n");
	}

	status = be_cmd_get_phy_info(adapter);
	if (!status && be_pause_supported(adapter))
		adapter->phy.fc_autoneg = 1;

	schedule_delayed_work(&adapter->work, msecs_to_jiffies(1000));
	adapter->flags |= BE_FLAGS_WORKER_SCHEDULED;
	return 0;
err:
	be_clear(adapter);
	return status;
}
3237
#ifdef CONFIG_NET_POLL_CONTROLLER
/* Poll-mode "interrupt": kick every event queue and schedule its NAPI
 * context so packets are processed even when HW interrupts are not
 * available (e.g. netconsole).
 */
static void be_netpoll(struct net_device *netdev)
{
	struct be_adapter *adapter = netdev_priv(netdev);
	struct be_eq_obj *eq_obj;
	int idx;

	for_all_evt_queues(adapter, eq_obj, idx) {
		be_eq_notify(eq_obj->adapter, eq_obj->q.id, false, true, 0);
		napi_schedule(&eq_obj->napi);
	}
}
#endif
3253
Ajit Khaparde84517482009-09-04 03:12:16 +00003254#define FW_FILE_HDR_SIGN "ServerEngines Corp. "
Padmanabh Ratnakarc165541e2012-04-25 01:47:15 +00003255char flash_cookie[2][16] = {"*** SE FLAS", "H DIRECTORY *** "};
3256
Sarveshwar Bandifa9a6fe2009-11-20 14:23:47 -08003257static bool be_flash_redboot(struct be_adapter *adapter,
Ajit Khaparde3f0d4562010-02-09 01:30:35 +00003258 const u8 *p, u32 img_start, int image_size,
3259 int hdr_size)
Sarveshwar Bandifa9a6fe2009-11-20 14:23:47 -08003260{
3261 u32 crc_offset;
3262 u8 flashed_crc[4];
3263 int status;
Ajit Khaparde3f0d4562010-02-09 01:30:35 +00003264
3265 crc_offset = hdr_size + img_start + image_size - 4;
3266
Sarveshwar Bandifa9a6fe2009-11-20 14:23:47 -08003267 p += crc_offset;
Ajit Khaparde3f0d4562010-02-09 01:30:35 +00003268
3269 status = be_cmd_get_flash_crc(adapter, flashed_crc,
Ajit Khapardef510fc62010-03-31 01:47:45 +00003270 (image_size - 4));
Sarveshwar Bandifa9a6fe2009-11-20 14:23:47 -08003271 if (status) {
3272 dev_err(&adapter->pdev->dev,
3273 "could not get crc from flash, not flashing redboot\n");
3274 return false;
3275 }
3276
3277 /*update redboot only if crc does not match*/
3278 if (!memcmp(flashed_crc, p, 4))
3279 return false;
3280 else
3281 return true;
Sarveshwar Bandifa9a6fe2009-11-20 14:23:47 -08003282}
3283
Sathya Perla306f1342011-08-02 19:57:45 +00003284static bool phy_flashing_required(struct be_adapter *adapter)
3285{
Ajit Khaparde42f11cf2012-04-21 18:53:22 +00003286 return (adapter->phy.phy_type == TN_8022 &&
3287 adapter->phy.interface_type == PHY_TYPE_BASET_10GB);
Sathya Perla306f1342011-08-02 19:57:45 +00003288}
3289
Padmanabh Ratnakarc165541e2012-04-25 01:47:15 +00003290static bool is_comp_in_ufi(struct be_adapter *adapter,
3291 struct flash_section_info *fsec, int type)
3292{
3293 int i = 0, img_type = 0;
3294 struct flash_section_info_g2 *fsec_g2 = NULL;
3295
Sathya Perlaca34fe32012-11-06 17:48:56 +00003296 if (BE2_chip(adapter))
Padmanabh Ratnakarc165541e2012-04-25 01:47:15 +00003297 fsec_g2 = (struct flash_section_info_g2 *)fsec;
3298
3299 for (i = 0; i < MAX_FLASH_COMP; i++) {
3300 if (fsec_g2)
3301 img_type = le32_to_cpu(fsec_g2->fsec_entry[i].type);
3302 else
3303 img_type = le32_to_cpu(fsec->fsec_entry[i].type);
3304
3305 if (img_type == type)
3306 return true;
3307 }
3308 return false;
3309
3310}
3311
3312struct flash_section_info *get_fsec_info(struct be_adapter *adapter,
3313 int header_size,
3314 const struct firmware *fw)
3315{
3316 struct flash_section_info *fsec = NULL;
3317 const u8 *p = fw->data;
3318
3319 p += header_size;
3320 while (p < (fw->data + fw->size)) {
3321 fsec = (struct flash_section_info *)p;
3322 if (!memcmp(flash_cookie, fsec->cookie, sizeof(flash_cookie)))
3323 return fsec;
3324 p += 32;
3325 }
3326 return NULL;
3327}
3328
/* Write one firmware image to flash in 32KB chunks through the FW
 * mailbox. Intermediate chunks use the SAVE opcode (FW buffers them);
 * the final chunk uses the FLASH opcode, which commits the image.
 * Returns 0 on success or the failing FW status.
 */
static int be_flash(struct be_adapter *adapter, const u8 *img,
		struct be_dma_mem *flash_cmd, int optype, int img_size)
{
	u32 total_bytes = 0, flash_op, num_bytes = 0;
	int status = 0;
	struct be_cmd_write_flashrom *req = flash_cmd->va;

	total_bytes = img_size;
	while (total_bytes) {
		num_bytes = min_t(u32, 32*1024, total_bytes);

		total_bytes -= num_bytes;

		/* Last chunk commits (FLASH); earlier chunks buffer (SAVE).
		 * PHY firmware uses its own pair of opcodes.
		 */
		if (!total_bytes) {
			if (optype == OPTYPE_PHY_FW)
				flash_op = FLASHROM_OPER_PHY_FLASH;
			else
				flash_op = FLASHROM_OPER_FLASH;
		} else {
			if (optype == OPTYPE_PHY_FW)
				flash_op = FLASHROM_OPER_PHY_SAVE;
			else
				flash_op = FLASHROM_OPER_SAVE;
		}

		memcpy(req->data_buf, img, num_bytes);
		img += num_bytes;
		status = be_cmd_write_flashrom(adapter, flash_cmd, optype,
				flash_op, num_bytes);
		if (status) {
			/* FW rejecting PHY flashing is treated as benign
			 * (the PHY simply has no flashable firmware)
			 */
			if (status == ILLEGAL_IOCTL_REQ &&
			    optype == OPTYPE_PHY_FW)
				break;
			dev_err(&adapter->pdev->dev,
				"cmd to write to flash rom failed.\n");
			return status;
		}
	}
	return 0;
}
3369
Vasundhara Volam0ad31572013-04-21 23:28:16 +00003370/* For BE2, BE3 and BE3-R */
Sathya Perlaca34fe32012-11-06 17:48:56 +00003371static int be_flash_BEx(struct be_adapter *adapter,
Padmanabh Ratnakarc165541e2012-04-25 01:47:15 +00003372 const struct firmware *fw,
3373 struct be_dma_mem *flash_cmd,
3374 int num_of_images)
Ajit Khaparde3f0d4562010-02-09 01:30:35 +00003375
Ajit Khaparde84517482009-09-04 03:12:16 +00003376{
Ajit Khaparde3f0d4562010-02-09 01:30:35 +00003377 int status = 0, i, filehdr_size = 0;
Padmanabh Ratnakarc165541e2012-04-25 01:47:15 +00003378 int img_hdrs_size = (num_of_images * sizeof(struct image_hdr));
Ajit Khaparde84517482009-09-04 03:12:16 +00003379 const u8 *p = fw->data;
Joe Perches215faf92010-12-21 02:16:10 -08003380 const struct flash_comp *pflashcomp;
Padmanabh Ratnakar773a2d72012-10-20 06:04:16 +00003381 int num_comp, redboot;
Padmanabh Ratnakarc165541e2012-04-25 01:47:15 +00003382 struct flash_section_info *fsec = NULL;
Ajit Khaparde84517482009-09-04 03:12:16 +00003383
Padmanabh Ratnakarc165541e2012-04-25 01:47:15 +00003384 struct flash_comp gen3_flash_types[] = {
3385 { FLASH_iSCSI_PRIMARY_IMAGE_START_g3, OPTYPE_ISCSI_ACTIVE,
3386 FLASH_IMAGE_MAX_SIZE_g3, IMAGE_FIRMWARE_iSCSI},
3387 { FLASH_REDBOOT_START_g3, OPTYPE_REDBOOT,
3388 FLASH_REDBOOT_IMAGE_MAX_SIZE_g3, IMAGE_BOOT_CODE},
3389 { FLASH_iSCSI_BIOS_START_g3, OPTYPE_BIOS,
3390 FLASH_BIOS_IMAGE_MAX_SIZE_g3, IMAGE_OPTION_ROM_ISCSI},
3391 { FLASH_PXE_BIOS_START_g3, OPTYPE_PXE_BIOS,
3392 FLASH_BIOS_IMAGE_MAX_SIZE_g3, IMAGE_OPTION_ROM_PXE},
3393 { FLASH_FCoE_BIOS_START_g3, OPTYPE_FCOE_BIOS,
3394 FLASH_BIOS_IMAGE_MAX_SIZE_g3, IMAGE_OPTION_ROM_FCoE},
3395 { FLASH_iSCSI_BACKUP_IMAGE_START_g3, OPTYPE_ISCSI_BACKUP,
3396 FLASH_IMAGE_MAX_SIZE_g3, IMAGE_FIRMWARE_BACKUP_iSCSI},
3397 { FLASH_FCoE_PRIMARY_IMAGE_START_g3, OPTYPE_FCOE_FW_ACTIVE,
3398 FLASH_IMAGE_MAX_SIZE_g3, IMAGE_FIRMWARE_FCoE},
3399 { FLASH_FCoE_BACKUP_IMAGE_START_g3, OPTYPE_FCOE_FW_BACKUP,
3400 FLASH_IMAGE_MAX_SIZE_g3, IMAGE_FIRMWARE_BACKUP_FCoE},
3401 { FLASH_NCSI_START_g3, OPTYPE_NCSI_FW,
3402 FLASH_NCSI_IMAGE_MAX_SIZE_g3, IMAGE_NCSI},
3403 { FLASH_PHY_FW_START_g3, OPTYPE_PHY_FW,
3404 FLASH_PHY_FW_IMAGE_MAX_SIZE_g3, IMAGE_FIRMWARE_PHY}
Ajit Khaparde3f0d4562010-02-09 01:30:35 +00003405 };
Padmanabh Ratnakarc165541e2012-04-25 01:47:15 +00003406
3407 struct flash_comp gen2_flash_types[] = {
3408 { FLASH_iSCSI_PRIMARY_IMAGE_START_g2, OPTYPE_ISCSI_ACTIVE,
3409 FLASH_IMAGE_MAX_SIZE_g2, IMAGE_FIRMWARE_iSCSI},
3410 { FLASH_REDBOOT_START_g2, OPTYPE_REDBOOT,
3411 FLASH_REDBOOT_IMAGE_MAX_SIZE_g2, IMAGE_BOOT_CODE},
3412 { FLASH_iSCSI_BIOS_START_g2, OPTYPE_BIOS,
3413 FLASH_BIOS_IMAGE_MAX_SIZE_g2, IMAGE_OPTION_ROM_ISCSI},
3414 { FLASH_PXE_BIOS_START_g2, OPTYPE_PXE_BIOS,
3415 FLASH_BIOS_IMAGE_MAX_SIZE_g2, IMAGE_OPTION_ROM_PXE},
3416 { FLASH_FCoE_BIOS_START_g2, OPTYPE_FCOE_BIOS,
3417 FLASH_BIOS_IMAGE_MAX_SIZE_g2, IMAGE_OPTION_ROM_FCoE},
3418 { FLASH_iSCSI_BACKUP_IMAGE_START_g2, OPTYPE_ISCSI_BACKUP,
3419 FLASH_IMAGE_MAX_SIZE_g2, IMAGE_FIRMWARE_BACKUP_iSCSI},
3420 { FLASH_FCoE_PRIMARY_IMAGE_START_g2, OPTYPE_FCOE_FW_ACTIVE,
3421 FLASH_IMAGE_MAX_SIZE_g2, IMAGE_FIRMWARE_FCoE},
3422 { FLASH_FCoE_BACKUP_IMAGE_START_g2, OPTYPE_FCOE_FW_BACKUP,
3423 FLASH_IMAGE_MAX_SIZE_g2, IMAGE_FIRMWARE_BACKUP_FCoE}
Ajit Khaparde3f0d4562010-02-09 01:30:35 +00003424 };
3425
Sathya Perlaca34fe32012-11-06 17:48:56 +00003426 if (BE3_chip(adapter)) {
Ajit Khaparde3f0d4562010-02-09 01:30:35 +00003427 pflashcomp = gen3_flash_types;
3428 filehdr_size = sizeof(struct flash_file_hdr_g3);
Joe Perches215faf92010-12-21 02:16:10 -08003429 num_comp = ARRAY_SIZE(gen3_flash_types);
Ajit Khaparde3f0d4562010-02-09 01:30:35 +00003430 } else {
3431 pflashcomp = gen2_flash_types;
3432 filehdr_size = sizeof(struct flash_file_hdr_g2);
Joe Perches215faf92010-12-21 02:16:10 -08003433 num_comp = ARRAY_SIZE(gen2_flash_types);
Ajit Khaparde84517482009-09-04 03:12:16 +00003434 }
Sathya Perlaca34fe32012-11-06 17:48:56 +00003435
Padmanabh Ratnakarc165541e2012-04-25 01:47:15 +00003436 /* Get flash section info*/
3437 fsec = get_fsec_info(adapter, filehdr_size + img_hdrs_size, fw);
3438 if (!fsec) {
3439 dev_err(&adapter->pdev->dev,
3440 "Invalid Cookie. UFI corrupted ?\n");
3441 return -1;
3442 }
Sarveshwar Bandi9fe96932010-03-02 22:37:28 +00003443 for (i = 0; i < num_comp; i++) {
Padmanabh Ratnakarc165541e2012-04-25 01:47:15 +00003444 if (!is_comp_in_ufi(adapter, fsec, pflashcomp[i].img_type))
Sarveshwar Bandi9fe96932010-03-02 22:37:28 +00003445 continue;
Padmanabh Ratnakarc165541e2012-04-25 01:47:15 +00003446
3447 if ((pflashcomp[i].optype == OPTYPE_NCSI_FW) &&
3448 memcmp(adapter->fw_ver, "3.102.148.0", 11) < 0)
3449 continue;
3450
Padmanabh Ratnakar773a2d72012-10-20 06:04:16 +00003451 if (pflashcomp[i].optype == OPTYPE_PHY_FW &&
3452 !phy_flashing_required(adapter))
3453 continue;
3454
3455 if (pflashcomp[i].optype == OPTYPE_REDBOOT) {
3456 redboot = be_flash_redboot(adapter, fw->data,
3457 pflashcomp[i].offset, pflashcomp[i].size,
3458 filehdr_size + img_hdrs_size);
3459 if (!redboot)
Sathya Perla306f1342011-08-02 19:57:45 +00003460 continue;
3461 }
Padmanabh Ratnakarc165541e2012-04-25 01:47:15 +00003462
Ajit Khaparde3f0d4562010-02-09 01:30:35 +00003463 p = fw->data;
Padmanabh Ratnakarc165541e2012-04-25 01:47:15 +00003464 p += filehdr_size + pflashcomp[i].offset + img_hdrs_size;
Sathya Perla306f1342011-08-02 19:57:45 +00003465 if (p + pflashcomp[i].size > fw->data + fw->size)
3466 return -1;
Padmanabh Ratnakar773a2d72012-10-20 06:04:16 +00003467
3468 status = be_flash(adapter, p, flash_cmd, pflashcomp[i].optype,
3469 pflashcomp[i].size);
3470 if (status) {
3471 dev_err(&adapter->pdev->dev,
3472 "Flashing section type %d failed.\n",
3473 pflashcomp[i].img_type);
3474 return status;
Ajit Khaparde84517482009-09-04 03:12:16 +00003475 }
Ajit Khaparde84517482009-09-04 03:12:16 +00003476 }
Ajit Khaparde84517482009-09-04 03:12:16 +00003477 return 0;
3478}
3479
Padmanabh Ratnakar773a2d72012-10-20 06:04:16 +00003480static int be_flash_skyhawk(struct be_adapter *adapter,
3481 const struct firmware *fw,
3482 struct be_dma_mem *flash_cmd, int num_of_images)
Ajit Khaparde3f0d4562010-02-09 01:30:35 +00003483{
Padmanabh Ratnakar773a2d72012-10-20 06:04:16 +00003484 int status = 0, i, filehdr_size = 0;
3485 int img_offset, img_size, img_optype, redboot;
3486 int img_hdrs_size = num_of_images * sizeof(struct image_hdr);
3487 const u8 *p = fw->data;
3488 struct flash_section_info *fsec = NULL;
3489
3490 filehdr_size = sizeof(struct flash_file_hdr_g3);
3491 fsec = get_fsec_info(adapter, filehdr_size + img_hdrs_size, fw);
3492 if (!fsec) {
3493 dev_err(&adapter->pdev->dev,
3494 "Invalid Cookie. UFI corrupted ?\n");
3495 return -1;
3496 }
3497
3498 for (i = 0; i < le32_to_cpu(fsec->fsec_hdr.num_images); i++) {
3499 img_offset = le32_to_cpu(fsec->fsec_entry[i].offset);
3500 img_size = le32_to_cpu(fsec->fsec_entry[i].pad_size);
3501
3502 switch (le32_to_cpu(fsec->fsec_entry[i].type)) {
3503 case IMAGE_FIRMWARE_iSCSI:
3504 img_optype = OPTYPE_ISCSI_ACTIVE;
3505 break;
3506 case IMAGE_BOOT_CODE:
3507 img_optype = OPTYPE_REDBOOT;
3508 break;
3509 case IMAGE_OPTION_ROM_ISCSI:
3510 img_optype = OPTYPE_BIOS;
3511 break;
3512 case IMAGE_OPTION_ROM_PXE:
3513 img_optype = OPTYPE_PXE_BIOS;
3514 break;
3515 case IMAGE_OPTION_ROM_FCoE:
3516 img_optype = OPTYPE_FCOE_BIOS;
3517 break;
3518 case IMAGE_FIRMWARE_BACKUP_iSCSI:
3519 img_optype = OPTYPE_ISCSI_BACKUP;
3520 break;
3521 case IMAGE_NCSI:
3522 img_optype = OPTYPE_NCSI_FW;
3523 break;
3524 default:
3525 continue;
3526 }
3527
3528 if (img_optype == OPTYPE_REDBOOT) {
3529 redboot = be_flash_redboot(adapter, fw->data,
3530 img_offset, img_size,
3531 filehdr_size + img_hdrs_size);
3532 if (!redboot)
3533 continue;
3534 }
3535
3536 p = fw->data;
3537 p += filehdr_size + img_offset + img_hdrs_size;
3538 if (p + img_size > fw->data + fw->size)
3539 return -1;
3540
3541 status = be_flash(adapter, p, flash_cmd, img_optype, img_size);
3542 if (status) {
3543 dev_err(&adapter->pdev->dev,
3544 "Flashing section type %d failed.\n",
3545 fsec->fsec_entry[i].type);
3546 return status;
3547 }
3548 }
3549 return 0;
Ajit Khaparde3f0d4562010-02-09 01:30:35 +00003550}
3551
Padmanabh Ratnakarf67ef7b2012-07-12 03:57:09 +00003552static int lancer_wait_idle(struct be_adapter *adapter)
3553{
3554#define SLIPORT_IDLE_TIMEOUT 30
3555 u32 reg_val;
3556 int status = 0, i;
3557
3558 for (i = 0; i < SLIPORT_IDLE_TIMEOUT; i++) {
3559 reg_val = ioread32(adapter->db + PHYSDEV_CONTROL_OFFSET);
3560 if ((reg_val & PHYSDEV_CONTROL_INP_MASK) == 0)
3561 break;
3562
3563 ssleep(1);
3564 }
3565
3566 if (i == SLIPORT_IDLE_TIMEOUT)
3567 status = -1;
3568
3569 return status;
3570}
3571
3572static int lancer_fw_reset(struct be_adapter *adapter)
3573{
3574 int status = 0;
3575
3576 status = lancer_wait_idle(adapter);
3577 if (status)
3578 return status;
3579
3580 iowrite32(PHYSDEV_CONTROL_FW_RESET_MASK, adapter->db +
3581 PHYSDEV_CONTROL_OFFSET);
3582
3583 return status;
3584}
3585
Shripad Nunjundarao485bf562011-05-16 07:36:59 +00003586static int lancer_fw_download(struct be_adapter *adapter,
3587 const struct firmware *fw)
Ajit Khaparde84517482009-09-04 03:12:16 +00003588{
Shripad Nunjundarao485bf562011-05-16 07:36:59 +00003589#define LANCER_FW_DOWNLOAD_CHUNK (32 * 1024)
3590#define LANCER_FW_DOWNLOAD_LOCATION "/prg"
3591 struct be_dma_mem flash_cmd;
Shripad Nunjundarao485bf562011-05-16 07:36:59 +00003592 const u8 *data_ptr = NULL;
3593 u8 *dest_image_ptr = NULL;
3594 size_t image_size = 0;
3595 u32 chunk_size = 0;
3596 u32 data_written = 0;
3597 u32 offset = 0;
3598 int status = 0;
3599 u8 add_status = 0;
Padmanabh Ratnakarf67ef7b2012-07-12 03:57:09 +00003600 u8 change_status;
Shripad Nunjundarao485bf562011-05-16 07:36:59 +00003601
3602 if (!IS_ALIGNED(fw->size, sizeof(u32))) {
3603 dev_err(&adapter->pdev->dev,
3604 "FW Image not properly aligned. "
3605 "Length must be 4 byte aligned.\n");
3606 status = -EINVAL;
3607 goto lancer_fw_exit;
3608 }
3609
3610 flash_cmd.size = sizeof(struct lancer_cmd_req_write_object)
3611 + LANCER_FW_DOWNLOAD_CHUNK;
3612 flash_cmd.va = dma_alloc_coherent(&adapter->pdev->dev, flash_cmd.size,
Joe Perchesd0320f72013-03-14 13:07:21 +00003613 &flash_cmd.dma, GFP_KERNEL);
Shripad Nunjundarao485bf562011-05-16 07:36:59 +00003614 if (!flash_cmd.va) {
3615 status = -ENOMEM;
Shripad Nunjundarao485bf562011-05-16 07:36:59 +00003616 goto lancer_fw_exit;
3617 }
3618
Shripad Nunjundarao485bf562011-05-16 07:36:59 +00003619 dest_image_ptr = flash_cmd.va +
3620 sizeof(struct lancer_cmd_req_write_object);
3621 image_size = fw->size;
3622 data_ptr = fw->data;
3623
3624 while (image_size) {
3625 chunk_size = min_t(u32, image_size, LANCER_FW_DOWNLOAD_CHUNK);
3626
3627 /* Copy the image chunk content. */
3628 memcpy(dest_image_ptr, data_ptr, chunk_size);
3629
3630 status = lancer_cmd_write_object(adapter, &flash_cmd,
Padmanabh Ratnakarf67ef7b2012-07-12 03:57:09 +00003631 chunk_size, offset,
3632 LANCER_FW_DOWNLOAD_LOCATION,
3633 &data_written, &change_status,
3634 &add_status);
Shripad Nunjundarao485bf562011-05-16 07:36:59 +00003635 if (status)
3636 break;
3637
3638 offset += data_written;
3639 data_ptr += data_written;
3640 image_size -= data_written;
3641 }
3642
3643 if (!status) {
3644 /* Commit the FW written */
3645 status = lancer_cmd_write_object(adapter, &flash_cmd,
Padmanabh Ratnakarf67ef7b2012-07-12 03:57:09 +00003646 0, offset,
3647 LANCER_FW_DOWNLOAD_LOCATION,
3648 &data_written, &change_status,
3649 &add_status);
Shripad Nunjundarao485bf562011-05-16 07:36:59 +00003650 }
3651
3652 dma_free_coherent(&adapter->pdev->dev, flash_cmd.size, flash_cmd.va,
3653 flash_cmd.dma);
3654 if (status) {
3655 dev_err(&adapter->pdev->dev,
3656 "Firmware load error. "
3657 "Status code: 0x%x Additional Status: 0x%x\n",
3658 status, add_status);
3659 goto lancer_fw_exit;
3660 }
3661
Padmanabh Ratnakarf67ef7b2012-07-12 03:57:09 +00003662 if (change_status == LANCER_FW_RESET_NEEDED) {
3663 status = lancer_fw_reset(adapter);
3664 if (status) {
3665 dev_err(&adapter->pdev->dev,
3666 "Adapter busy for FW reset.\n"
3667 "New FW will not be active.\n");
3668 goto lancer_fw_exit;
3669 }
3670 } else if (change_status != LANCER_NO_RESET_NEEDED) {
3671 dev_err(&adapter->pdev->dev,
3672 "System reboot required for new FW"
3673 " to be active\n");
3674 }
3675
Shripad Nunjundarao485bf562011-05-16 07:36:59 +00003676 dev_info(&adapter->pdev->dev, "Firmware flashed successfully\n");
3677lancer_fw_exit:
3678 return status;
3679}
3680
Sathya Perlaca34fe32012-11-06 17:48:56 +00003681#define UFI_TYPE2 2
3682#define UFI_TYPE3 3
Vasundhara Volam0ad31572013-04-21 23:28:16 +00003683#define UFI_TYPE3R 10
Sathya Perlaca34fe32012-11-06 17:48:56 +00003684#define UFI_TYPE4 4
3685static int be_get_ufi_type(struct be_adapter *adapter,
Vasundhara Volam0ad31572013-04-21 23:28:16 +00003686 struct flash_file_hdr_g3 *fhdr)
Padmanabh Ratnakar773a2d72012-10-20 06:04:16 +00003687{
3688 if (fhdr == NULL)
3689 goto be_get_ufi_exit;
3690
Sathya Perlaca34fe32012-11-06 17:48:56 +00003691 if (skyhawk_chip(adapter) && fhdr->build[0] == '4')
3692 return UFI_TYPE4;
Vasundhara Volam0ad31572013-04-21 23:28:16 +00003693 else if (BE3_chip(adapter) && fhdr->build[0] == '3') {
3694 if (fhdr->asic_type_rev == 0x10)
3695 return UFI_TYPE3R;
3696 else
3697 return UFI_TYPE3;
3698 } else if (BE2_chip(adapter) && fhdr->build[0] == '2')
Sathya Perlaca34fe32012-11-06 17:48:56 +00003699 return UFI_TYPE2;
Padmanabh Ratnakar773a2d72012-10-20 06:04:16 +00003700
3701be_get_ufi_exit:
3702 dev_err(&adapter->pdev->dev,
3703 "UFI and Interface are not compatible for flashing\n");
3704 return -1;
3705}
3706
Shripad Nunjundarao485bf562011-05-16 07:36:59 +00003707static int be_fw_download(struct be_adapter *adapter, const struct firmware* fw)
3708{
Ajit Khaparde3f0d4562010-02-09 01:30:35 +00003709 struct flash_file_hdr_g3 *fhdr3;
3710 struct image_hdr *img_hdr_ptr = NULL;
Ajit Khaparde84517482009-09-04 03:12:16 +00003711 struct be_dma_mem flash_cmd;
Ajit Khaparde84517482009-09-04 03:12:16 +00003712 const u8 *p;
Padmanabh Ratnakar773a2d72012-10-20 06:04:16 +00003713 int status = 0, i = 0, num_imgs = 0, ufi_type = 0;
Ajit Khaparde84517482009-09-04 03:12:16 +00003714
Padmanabh Ratnakarbe716442012-10-22 23:02:44 +00003715 flash_cmd.size = sizeof(struct be_cmd_write_flashrom);
Ivan Vecera2b7bceb2011-02-02 08:05:12 +00003716 flash_cmd.va = dma_alloc_coherent(&adapter->pdev->dev, flash_cmd.size,
3717 &flash_cmd.dma, GFP_KERNEL);
Ajit Khaparde84517482009-09-04 03:12:16 +00003718 if (!flash_cmd.va) {
3719 status = -ENOMEM;
Shripad Nunjundarao485bf562011-05-16 07:36:59 +00003720 goto be_fw_exit;
Ajit Khaparde84517482009-09-04 03:12:16 +00003721 }
3722
Padmanabh Ratnakar773a2d72012-10-20 06:04:16 +00003723 p = fw->data;
Vasundhara Volam0ad31572013-04-21 23:28:16 +00003724 fhdr3 = (struct flash_file_hdr_g3 *)p;
Padmanabh Ratnakar773a2d72012-10-20 06:04:16 +00003725
Vasundhara Volam0ad31572013-04-21 23:28:16 +00003726 ufi_type = be_get_ufi_type(adapter, fhdr3);
Padmanabh Ratnakar773a2d72012-10-20 06:04:16 +00003727
Padmanabh Ratnakar773a2d72012-10-20 06:04:16 +00003728 num_imgs = le32_to_cpu(fhdr3->num_imgs);
3729 for (i = 0; i < num_imgs; i++) {
3730 img_hdr_ptr = (struct image_hdr *)(fw->data +
3731 (sizeof(struct flash_file_hdr_g3) +
3732 i * sizeof(struct image_hdr)));
3733 if (le32_to_cpu(img_hdr_ptr->imageid) == 1) {
Vasundhara Volam0ad31572013-04-21 23:28:16 +00003734 switch (ufi_type) {
3735 case UFI_TYPE4:
Padmanabh Ratnakar773a2d72012-10-20 06:04:16 +00003736 status = be_flash_skyhawk(adapter, fw,
3737 &flash_cmd, num_imgs);
Vasundhara Volam0ad31572013-04-21 23:28:16 +00003738 break;
3739 case UFI_TYPE3R:
Sathya Perlaca34fe32012-11-06 17:48:56 +00003740 status = be_flash_BEx(adapter, fw, &flash_cmd,
3741 num_imgs);
Vasundhara Volam0ad31572013-04-21 23:28:16 +00003742 break;
3743 case UFI_TYPE3:
3744 /* Do not flash this ufi on BE3-R cards */
3745 if (adapter->asic_rev < 0x10)
3746 status = be_flash_BEx(adapter, fw,
3747 &flash_cmd,
3748 num_imgs);
3749 else {
3750 status = -1;
3751 dev_err(&adapter->pdev->dev,
3752 "Can't load BE3 UFI on BE3R\n");
3753 }
3754 }
Ajit Khaparde3f0d4562010-02-09 01:30:35 +00003755 }
Ajit Khaparde84517482009-09-04 03:12:16 +00003756 }
3757
Sathya Perlaca34fe32012-11-06 17:48:56 +00003758 if (ufi_type == UFI_TYPE2)
3759 status = be_flash_BEx(adapter, fw, &flash_cmd, 0);
Padmanabh Ratnakar773a2d72012-10-20 06:04:16 +00003760 else if (ufi_type == -1)
3761 status = -1;
3762
Ivan Vecera2b7bceb2011-02-02 08:05:12 +00003763 dma_free_coherent(&adapter->pdev->dev, flash_cmd.size, flash_cmd.va,
3764 flash_cmd.dma);
Ajit Khaparde84517482009-09-04 03:12:16 +00003765 if (status) {
3766 dev_err(&adapter->pdev->dev, "Firmware load error\n");
Shripad Nunjundarao485bf562011-05-16 07:36:59 +00003767 goto be_fw_exit;
Ajit Khaparde84517482009-09-04 03:12:16 +00003768 }
3769
André Goddard Rosaaf901ca2009-11-14 13:09:05 -02003770 dev_info(&adapter->pdev->dev, "Firmware flashed successfully\n");
Ajit Khaparde84517482009-09-04 03:12:16 +00003771
Shripad Nunjundarao485bf562011-05-16 07:36:59 +00003772be_fw_exit:
3773 return status;
3774}
3775
3776int be_load_fw(struct be_adapter *adapter, u8 *fw_file)
3777{
3778 const struct firmware *fw;
3779 int status;
3780
3781 if (!netif_running(adapter->netdev)) {
3782 dev_err(&adapter->pdev->dev,
3783 "Firmware load not allowed (interface is down)\n");
3784 return -1;
3785 }
3786
3787 status = request_firmware(&fw, fw_file, &adapter->pdev->dev);
3788 if (status)
3789 goto fw_exit;
3790
3791 dev_info(&adapter->pdev->dev, "Flashing firmware file %s\n", fw_file);
3792
3793 if (lancer_chip(adapter))
3794 status = lancer_fw_download(adapter, fw);
3795 else
3796 status = be_fw_download(adapter, fw);
3797
Somnath Kotureeb65ce2013-05-26 21:08:36 +00003798 if (!status)
3799 be_cmd_get_fw_ver(adapter, adapter->fw_ver,
3800 adapter->fw_on_flash);
3801
Ajit Khaparde84517482009-09-04 03:12:16 +00003802fw_exit:
3803 release_firmware(fw);
3804 return status;
3805}
3806
stephen hemmingere5686ad2012-01-05 19:10:25 +00003807static const struct net_device_ops be_netdev_ops = {
Sathya Perla6b7c5b92009-03-11 23:32:03 -07003808 .ndo_open = be_open,
3809 .ndo_stop = be_close,
3810 .ndo_start_xmit = be_xmit,
Sathya Perlaa54769f2011-10-24 02:45:00 +00003811 .ndo_set_rx_mode = be_set_rx_mode,
Sathya Perla6b7c5b92009-03-11 23:32:03 -07003812 .ndo_set_mac_address = be_mac_addr_set,
3813 .ndo_change_mtu = be_change_mtu,
Sathya Perlaab1594e2011-07-25 19:10:15 +00003814 .ndo_get_stats64 = be_get_stats64,
Sathya Perla6b7c5b92009-03-11 23:32:03 -07003815 .ndo_validate_addr = eth_validate_addr,
Sathya Perla6b7c5b92009-03-11 23:32:03 -07003816 .ndo_vlan_rx_add_vid = be_vlan_add_vid,
3817 .ndo_vlan_rx_kill_vid = be_vlan_rem_vid,
Ajit Khaparde64600ea2010-07-23 01:50:34 +00003818 .ndo_set_vf_mac = be_set_vf_mac,
Ajit Khaparde1da87b72010-07-23 01:51:22 +00003819 .ndo_set_vf_vlan = be_set_vf_vlan,
Ajit Khapardee1d18732010-07-23 01:52:13 +00003820 .ndo_set_vf_tx_rate = be_set_vf_tx_rate,
Ivan Vecera66268732011-12-08 01:31:21 +00003821 .ndo_get_vf_config = be_get_vf_config,
3822#ifdef CONFIG_NET_POLL_CONTROLLER
3823 .ndo_poll_controller = be_netpoll,
3824#endif
Sathya Perla6b7c5b92009-03-11 23:32:03 -07003825};
3826
/* Initialize netdev feature flags, flags, ops, ethtool ops and register
 * one NAPI context per event queue.
 */
static void be_netdev_init(struct net_device *netdev)
{
	struct be_adapter *adapter = netdev_priv(netdev);
	struct be_eq_obj *eqo;
	int i;

	netdev->hw_features |= NETIF_F_SG | NETIF_F_TSO | NETIF_F_TSO6 |
		NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM | NETIF_F_RXCSUM |
		NETIF_F_HW_VLAN_CTAG_TX;
	/* RX hashing is advertised only when multiple RX queues exist */
	if (be_multi_rxq(adapter))
		netdev->hw_features |= NETIF_F_RXHASH;

	netdev->features |= netdev->hw_features |
		NETIF_F_HW_VLAN_CTAG_RX | NETIF_F_HW_VLAN_CTAG_FILTER;

	netdev->vlan_features |= NETIF_F_SG | NETIF_F_TSO | NETIF_F_TSO6 |
		NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM;

	netdev->priv_flags |= IFF_UNICAST_FLT;

	netdev->flags |= IFF_MULTICAST;

	netif_set_gso_max_size(netdev, 65535 - ETH_HLEN);

	netdev->netdev_ops = &be_netdev_ops;

	SET_ETHTOOL_OPS(netdev, &be_ethtool_ops);

	for_all_evt_queues(adapter, eqo, i)
		netif_napi_add(netdev, &eqo->napi, be_poll, BE_NAPI_WEIGHT);
}
3858
3859static void be_unmap_pci_bars(struct be_adapter *adapter)
3860{
Sathya Perlac5b3ad42013-03-05 22:23:20 +00003861 if (adapter->csr)
3862 pci_iounmap(adapter->pdev, adapter->csr);
Sathya Perla8788fdc2009-07-27 22:52:03 +00003863 if (adapter->db)
Sathya Perlace66f782012-11-06 17:48:58 +00003864 pci_iounmap(adapter->pdev, adapter->db);
Parav Pandit045508a2012-03-26 14:27:13 +00003865}
3866
Sathya Perlace66f782012-11-06 17:48:58 +00003867static int db_bar(struct be_adapter *adapter)
3868{
3869 if (lancer_chip(adapter) || !be_physfn(adapter))
3870 return 0;
3871 else
3872 return 4;
3873}
3874
3875static int be_roce_map_pci_bars(struct be_adapter *adapter)
Parav Pandit045508a2012-03-26 14:27:13 +00003876{
Sathya Perladbf0f2a2012-11-06 17:49:00 +00003877 if (skyhawk_chip(adapter)) {
Sathya Perlace66f782012-11-06 17:48:58 +00003878 adapter->roce_db.size = 4096;
3879 adapter->roce_db.io_addr = pci_resource_start(adapter->pdev,
3880 db_bar(adapter));
3881 adapter->roce_db.total_size = pci_resource_len(adapter->pdev,
3882 db_bar(adapter));
3883 }
Parav Pandit045508a2012-03-26 14:27:13 +00003884 return 0;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07003885}
3886
3887static int be_map_pci_bars(struct be_adapter *adapter)
3888{
3889 u8 __iomem *addr;
Sathya Perlace66f782012-11-06 17:48:58 +00003890 u32 sli_intf;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07003891
Sathya Perlace66f782012-11-06 17:48:58 +00003892 pci_read_config_dword(adapter->pdev, SLI_INTF_REG_OFFSET, &sli_intf);
3893 adapter->if_type = (sli_intf & SLI_INTF_IF_TYPE_MASK) >>
3894 SLI_INTF_IF_TYPE_SHIFT;
Sathya Perlafe6d2a32010-11-21 23:25:50 +00003895
Sathya Perlac5b3ad42013-03-05 22:23:20 +00003896 if (BEx_chip(adapter) && be_physfn(adapter)) {
3897 adapter->csr = pci_iomap(adapter->pdev, 2, 0);
3898 if (adapter->csr == NULL)
3899 return -ENOMEM;
3900 }
3901
Sathya Perlace66f782012-11-06 17:48:58 +00003902 addr = pci_iomap(adapter->pdev, db_bar(adapter), 0);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07003903 if (addr == NULL)
3904 goto pci_map_err;
Sathya Perla8788fdc2009-07-27 22:52:03 +00003905 adapter->db = addr;
Sathya Perlace66f782012-11-06 17:48:58 +00003906
3907 be_roce_map_pci_bars(adapter);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07003908 return 0;
Sathya Perlace66f782012-11-06 17:48:58 +00003909
Sathya Perla6b7c5b92009-03-11 23:32:03 -07003910pci_map_err:
3911 be_unmap_pci_bars(adapter);
3912 return -ENOMEM;
3913}
3914
Sathya Perla6b7c5b92009-03-11 23:32:03 -07003915static void be_ctrl_cleanup(struct be_adapter *adapter)
3916{
Sathya Perla8788fdc2009-07-27 22:52:03 +00003917 struct be_dma_mem *mem = &adapter->mbox_mem_alloced;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07003918
3919 be_unmap_pci_bars(adapter);
3920
3921 if (mem->va)
Ivan Vecera2b7bceb2011-02-02 08:05:12 +00003922 dma_free_coherent(&adapter->pdev->dev, mem->size, mem->va,
3923 mem->dma);
Sathya Perlae7b909a2009-11-22 22:01:10 +00003924
Sathya Perla5b8821b2011-08-02 19:57:44 +00003925 mem = &adapter->rx_filter;
Sathya Perlae7b909a2009-11-22 22:01:10 +00003926 if (mem->va)
Ivan Vecera2b7bceb2011-02-02 08:05:12 +00003927 dma_free_coherent(&adapter->pdev->dev, mem->size, mem->va,
3928 mem->dma);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07003929}
3930
Sathya Perla6b7c5b92009-03-11 23:32:03 -07003931static int be_ctrl_init(struct be_adapter *adapter)
3932{
Sathya Perla8788fdc2009-07-27 22:52:03 +00003933 struct be_dma_mem *mbox_mem_alloc = &adapter->mbox_mem_alloced;
3934 struct be_dma_mem *mbox_mem_align = &adapter->mbox_mem;
Sathya Perla5b8821b2011-08-02 19:57:44 +00003935 struct be_dma_mem *rx_filter = &adapter->rx_filter;
Sathya Perlace66f782012-11-06 17:48:58 +00003936 u32 sli_intf;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07003937 int status;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07003938
Sathya Perlace66f782012-11-06 17:48:58 +00003939 pci_read_config_dword(adapter->pdev, SLI_INTF_REG_OFFSET, &sli_intf);
3940 adapter->sli_family = (sli_intf & SLI_INTF_FAMILY_MASK) >>
3941 SLI_INTF_FAMILY_SHIFT;
3942 adapter->virtfn = (sli_intf & SLI_INTF_FT_MASK) ? 1 : 0;
3943
Sathya Perla6b7c5b92009-03-11 23:32:03 -07003944 status = be_map_pci_bars(adapter);
3945 if (status)
Sathya Perlae7b909a2009-11-22 22:01:10 +00003946 goto done;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07003947
3948 mbox_mem_alloc->size = sizeof(struct be_mcc_mailbox) + 16;
Ivan Vecera2b7bceb2011-02-02 08:05:12 +00003949 mbox_mem_alloc->va = dma_alloc_coherent(&adapter->pdev->dev,
3950 mbox_mem_alloc->size,
3951 &mbox_mem_alloc->dma,
3952 GFP_KERNEL);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07003953 if (!mbox_mem_alloc->va) {
Sathya Perlae7b909a2009-11-22 22:01:10 +00003954 status = -ENOMEM;
3955 goto unmap_pci_bars;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07003956 }
3957 mbox_mem_align->size = sizeof(struct be_mcc_mailbox);
3958 mbox_mem_align->va = PTR_ALIGN(mbox_mem_alloc->va, 16);
3959 mbox_mem_align->dma = PTR_ALIGN(mbox_mem_alloc->dma, 16);
3960 memset(mbox_mem_align->va, 0, sizeof(struct be_mcc_mailbox));
Sathya Perlae7b909a2009-11-22 22:01:10 +00003961
Sathya Perla5b8821b2011-08-02 19:57:44 +00003962 rx_filter->size = sizeof(struct be_cmd_req_rx_filter);
3963 rx_filter->va = dma_alloc_coherent(&adapter->pdev->dev, rx_filter->size,
Joe Perches1f9061d22013-03-15 07:23:58 +00003964 &rx_filter->dma,
3965 GFP_KERNEL | __GFP_ZERO);
Sathya Perla5b8821b2011-08-02 19:57:44 +00003966 if (rx_filter->va == NULL) {
Sathya Perlae7b909a2009-11-22 22:01:10 +00003967 status = -ENOMEM;
3968 goto free_mbox;
3969 }
Joe Perches1f9061d22013-03-15 07:23:58 +00003970
Ivan Vecera29849612010-12-14 05:43:19 +00003971 mutex_init(&adapter->mbox_lock);
Sathya Perla8788fdc2009-07-27 22:52:03 +00003972 spin_lock_init(&adapter->mcc_lock);
3973 spin_lock_init(&adapter->mcc_cq_lock);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07003974
Sarveshwar Bandidd131e72010-05-25 16:16:32 -07003975 init_completion(&adapter->flash_compl);
Sathya Perlacf588472010-02-14 21:22:01 +00003976 pci_save_state(adapter->pdev);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07003977 return 0;
Sathya Perlae7b909a2009-11-22 22:01:10 +00003978
3979free_mbox:
Ivan Vecera2b7bceb2011-02-02 08:05:12 +00003980 dma_free_coherent(&adapter->pdev->dev, mbox_mem_alloc->size,
3981 mbox_mem_alloc->va, mbox_mem_alloc->dma);
Sathya Perlae7b909a2009-11-22 22:01:10 +00003982
3983unmap_pci_bars:
3984 be_unmap_pci_bars(adapter);
3985
3986done:
3987 return status;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07003988}
3989
3990static void be_stats_cleanup(struct be_adapter *adapter)
3991{
Sathya Perla3abcded2010-10-03 22:12:27 -07003992 struct be_dma_mem *cmd = &adapter->stats_cmd;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07003993
3994 if (cmd->va)
Ivan Vecera2b7bceb2011-02-02 08:05:12 +00003995 dma_free_coherent(&adapter->pdev->dev, cmd->size,
3996 cmd->va, cmd->dma);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07003997}
3998
3999static int be_stats_init(struct be_adapter *adapter)
4000{
Sathya Perla3abcded2010-10-03 22:12:27 -07004001 struct be_dma_mem *cmd = &adapter->stats_cmd;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07004002
Sathya Perlaca34fe32012-11-06 17:48:56 +00004003 if (lancer_chip(adapter))
4004 cmd->size = sizeof(struct lancer_cmd_req_pport_stats);
4005 else if (BE2_chip(adapter))
Ajit Khaparde89a88ab2011-05-16 07:36:18 +00004006 cmd->size = sizeof(struct be_cmd_req_get_stats_v0);
Sathya Perlaca34fe32012-11-06 17:48:56 +00004007 else
4008 /* BE3 and Skyhawk */
4009 cmd->size = sizeof(struct be_cmd_req_get_stats_v1);
4010
Ivan Vecera2b7bceb2011-02-02 08:05:12 +00004011 cmd->va = dma_alloc_coherent(&adapter->pdev->dev, cmd->size, &cmd->dma,
Joe Perches1f9061d22013-03-15 07:23:58 +00004012 GFP_KERNEL | __GFP_ZERO);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07004013 if (cmd->va == NULL)
4014 return -1;
4015 return 0;
4016}
4017
Bill Pemberton3bc6b062012-12-03 09:23:09 -05004018static void be_remove(struct pci_dev *pdev)
Sathya Perla6b7c5b92009-03-11 23:32:03 -07004019{
4020 struct be_adapter *adapter = pci_get_drvdata(pdev);
Sathya Perla8d56ff12009-11-22 22:02:26 +00004021
Sathya Perla6b7c5b92009-03-11 23:32:03 -07004022 if (!adapter)
4023 return;
4024
Parav Pandit045508a2012-03-26 14:27:13 +00004025 be_roce_dev_remove(adapter);
Somnath Kotur8cef7a72013-03-14 02:41:51 +00004026 be_intr_set(adapter, false);
Parav Pandit045508a2012-03-26 14:27:13 +00004027
Padmanabh Ratnakarf67ef7b2012-07-12 03:57:09 +00004028 cancel_delayed_work_sync(&adapter->func_recovery_work);
4029
Sathya Perla6b7c5b92009-03-11 23:32:03 -07004030 unregister_netdev(adapter->netdev);
4031
Sathya Perla5fb379e2009-06-18 00:02:59 +00004032 be_clear(adapter);
4033
Padmanabh Ratnakarbf99e502012-07-12 03:56:58 +00004034 /* tell fw we're done with firing cmds */
4035 be_cmd_fw_clean(adapter);
4036
Sathya Perla6b7c5b92009-03-11 23:32:03 -07004037 be_stats_cleanup(adapter);
4038
4039 be_ctrl_cleanup(adapter);
4040
Sathya Perlad6b6d982012-09-05 01:56:48 +00004041 pci_disable_pcie_error_reporting(pdev);
4042
Sathya Perla6b7c5b92009-03-11 23:32:03 -07004043 pci_set_drvdata(pdev, NULL);
4044 pci_release_regions(pdev);
4045 pci_disable_device(pdev);
4046
4047 free_netdev(adapter->netdev);
4048}
4049
Ajit Khaparde4762f6c2012-03-18 06:23:11 +00004050bool be_is_wol_supported(struct be_adapter *adapter)
4051{
4052 return ((adapter->wol_cap & BE_WOL_CAP) &&
4053 !be_is_wol_excluded(adapter)) ? true : false;
4054}
4055
Somnath Kotur941a77d2012-05-17 22:59:03 +00004056u32 be_get_fw_log_level(struct be_adapter *adapter)
4057{
4058 struct be_dma_mem extfat_cmd;
4059 struct be_fat_conf_params *cfgs;
4060 int status;
4061 u32 level = 0;
4062 int j;
4063
Padmanabh Ratnakarf25b1192012-10-20 06:02:52 +00004064 if (lancer_chip(adapter))
4065 return 0;
4066
Somnath Kotur941a77d2012-05-17 22:59:03 +00004067 memset(&extfat_cmd, 0, sizeof(struct be_dma_mem));
4068 extfat_cmd.size = sizeof(struct be_cmd_resp_get_ext_fat_caps);
4069 extfat_cmd.va = pci_alloc_consistent(adapter->pdev, extfat_cmd.size,
4070 &extfat_cmd.dma);
4071
4072 if (!extfat_cmd.va) {
4073 dev_err(&adapter->pdev->dev, "%s: Memory allocation failure\n",
4074 __func__);
4075 goto err;
4076 }
4077
4078 status = be_cmd_get_ext_fat_capabilites(adapter, &extfat_cmd);
4079 if (!status) {
4080 cfgs = (struct be_fat_conf_params *)(extfat_cmd.va +
4081 sizeof(struct be_cmd_resp_hdr));
Anton Blanchardac46a462012-07-24 15:05:25 +00004082 for (j = 0; j < le32_to_cpu(cfgs->module[0].num_modes); j++) {
Somnath Kotur941a77d2012-05-17 22:59:03 +00004083 if (cfgs->module[0].trace_lvl[j].mode == MODE_UART)
4084 level = cfgs->module[0].trace_lvl[j].dbg_lvl;
4085 }
4086 }
4087 pci_free_consistent(adapter->pdev, extfat_cmd.size, extfat_cmd.va,
4088 extfat_cmd.dma);
4089err:
4090 return level;
4091}
Padmanabh Ratnakarabb93952012-10-20 06:01:41 +00004092
Sathya Perla39f1d942012-05-08 19:41:24 +00004093static int be_get_initial_config(struct be_adapter *adapter)
Sathya Perla6b7c5b92009-03-11 23:32:03 -07004094{
Sathya Perla6b7c5b92009-03-11 23:32:03 -07004095 int status;
Somnath Kotur941a77d2012-05-17 22:59:03 +00004096 u32 level;
Sathya Perla43a04fdc2009-10-14 20:21:17 +00004097
Ajit Khaparde9e1453c2011-02-20 11:42:22 +00004098 status = be_cmd_get_cntl_attributes(adapter);
4099 if (status)
4100 return status;
4101
Ajit Khaparde4762f6c2012-03-18 06:23:11 +00004102 status = be_cmd_get_acpi_wol_cap(adapter);
4103 if (status) {
4104 /* in case of a failure to get wol capabillities
4105 * check the exclusion list to determine WOL capability */
4106 if (!be_is_wol_excluded(adapter))
4107 adapter->wol_cap |= BE_WOL_CAP;
4108 }
4109
4110 if (be_is_wol_supported(adapter))
4111 adapter->wol = true;
4112
Padmanabh Ratnakar7aeb2152012-07-12 03:56:46 +00004113 /* Must be a power of 2 or else MODULO will BUG_ON */
4114 adapter->be_get_temp_freq = 64;
4115
Somnath Kotur941a77d2012-05-17 22:59:03 +00004116 level = be_get_fw_log_level(adapter);
4117 adapter->msg_enable = level <= FW_LOG_LEVEL_DEFAULT ? NETIF_MSG_HW : 0;
4118
Sathya Perla2243e2e2009-11-22 22:02:03 +00004119 return 0;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07004120}
4121
Padmanabh Ratnakarf67ef7b2012-07-12 03:57:09 +00004122static int lancer_recover_func(struct be_adapter *adapter)
Padmanabh Ratnakard8110f62011-11-25 05:48:23 +00004123{
4124 int status;
Padmanabh Ratnakard8110f62011-11-25 05:48:23 +00004125
Padmanabh Ratnakarf67ef7b2012-07-12 03:57:09 +00004126 status = lancer_test_and_set_rdy_state(adapter);
4127 if (status)
4128 goto err;
Padmanabh Ratnakard8110f62011-11-25 05:48:23 +00004129
Padmanabh Ratnakarf67ef7b2012-07-12 03:57:09 +00004130 if (netif_running(adapter->netdev))
4131 be_close(adapter->netdev);
Padmanabh Ratnakard8110f62011-11-25 05:48:23 +00004132
Padmanabh Ratnakarf67ef7b2012-07-12 03:57:09 +00004133 be_clear(adapter);
Padmanabh Ratnakard8110f62011-11-25 05:48:23 +00004134
Padmanabh Ratnakarf67ef7b2012-07-12 03:57:09 +00004135 adapter->hw_error = false;
4136 adapter->fw_timeout = false;
4137
4138 status = be_setup(adapter);
4139 if (status)
4140 goto err;
4141
4142 if (netif_running(adapter->netdev)) {
4143 status = be_open(adapter->netdev);
Padmanabh Ratnakard8110f62011-11-25 05:48:23 +00004144 if (status)
4145 goto err;
Padmanabh Ratnakard8110f62011-11-25 05:48:23 +00004146 }
Padmanabh Ratnakarf67ef7b2012-07-12 03:57:09 +00004147
4148 dev_err(&adapter->pdev->dev,
4149 "Adapter SLIPORT recovery succeeded\n");
4150 return 0;
Padmanabh Ratnakard8110f62011-11-25 05:48:23 +00004151err:
Padmanabh Ratnakar67297ad2012-10-20 06:02:27 +00004152 if (adapter->eeh_error)
4153 dev_err(&adapter->pdev->dev,
4154 "Adapter SLIPORT recovery failed\n");
Padmanabh Ratnakarf67ef7b2012-07-12 03:57:09 +00004155
4156 return status;
4157}
4158
4159static void be_func_recovery_task(struct work_struct *work)
4160{
4161 struct be_adapter *adapter =
4162 container_of(work, struct be_adapter, func_recovery_work.work);
4163 int status;
4164
4165 be_detect_error(adapter);
4166
4167 if (adapter->hw_error && lancer_chip(adapter)) {
4168
4169 if (adapter->eeh_error)
4170 goto out;
4171
4172 rtnl_lock();
4173 netif_device_detach(adapter->netdev);
4174 rtnl_unlock();
4175
4176 status = lancer_recover_func(adapter);
4177
4178 if (!status)
4179 netif_device_attach(adapter->netdev);
4180 }
4181
4182out:
4183 schedule_delayed_work(&adapter->func_recovery_work,
4184 msecs_to_jiffies(1000));
Padmanabh Ratnakard8110f62011-11-25 05:48:23 +00004185}
4186
4187static void be_worker(struct work_struct *work)
4188{
4189 struct be_adapter *adapter =
4190 container_of(work, struct be_adapter, work.work);
4191 struct be_rx_obj *rxo;
Sathya Perla10ef9ab2012-02-09 18:05:27 +00004192 struct be_eq_obj *eqo;
Padmanabh Ratnakard8110f62011-11-25 05:48:23 +00004193 int i;
4194
Padmanabh Ratnakard8110f62011-11-25 05:48:23 +00004195 /* when interrupts are not yet enabled, just reap any pending
4196 * mcc completions */
4197 if (!netif_running(adapter->netdev)) {
Amerigo Wang072a9c42012-08-24 21:41:11 +00004198 local_bh_disable();
Sathya Perla10ef9ab2012-02-09 18:05:27 +00004199 be_process_mcc(adapter);
Amerigo Wang072a9c42012-08-24 21:41:11 +00004200 local_bh_enable();
Padmanabh Ratnakard8110f62011-11-25 05:48:23 +00004201 goto reschedule;
4202 }
4203
4204 if (!adapter->stats_cmd_sent) {
4205 if (lancer_chip(adapter))
4206 lancer_cmd_get_pport_stats(adapter,
4207 &adapter->stats_cmd);
4208 else
4209 be_cmd_get_stats(adapter, &adapter->stats_cmd);
4210 }
4211
Padmanabh Ratnakar7aeb2152012-07-12 03:56:46 +00004212 if (MODULO(adapter->work_counter, adapter->be_get_temp_freq) == 0)
4213 be_cmd_get_die_temperature(adapter);
4214
Padmanabh Ratnakard8110f62011-11-25 05:48:23 +00004215 for_all_rx_queues(adapter, rxo, i) {
Padmanabh Ratnakard8110f62011-11-25 05:48:23 +00004216 if (rxo->rx_post_starved) {
4217 rxo->rx_post_starved = false;
4218 be_post_rx_frags(rxo, GFP_KERNEL);
4219 }
4220 }
4221
Sathya Perla10ef9ab2012-02-09 18:05:27 +00004222 for_all_evt_queues(adapter, eqo, i)
4223 be_eqd_update(adapter, eqo);
4224
Padmanabh Ratnakard8110f62011-11-25 05:48:23 +00004225reschedule:
4226 adapter->work_counter++;
4227 schedule_delayed_work(&adapter->work, msecs_to_jiffies(1000));
4228}
4229
Sathya Perla39f1d942012-05-08 19:41:24 +00004230static bool be_reset_required(struct be_adapter *adapter)
4231{
Sathya Perlad79c0a22012-06-05 19:37:22 +00004232 return be_find_vfs(adapter, ENABLED) > 0 ? false : true;
Sathya Perla39f1d942012-05-08 19:41:24 +00004233}
4234
Sathya Perlad3791422012-09-28 04:39:44 +00004235static char *mc_name(struct be_adapter *adapter)
4236{
4237 if (adapter->function_mode & FLEX10_MODE)
4238 return "FLEX10";
4239 else if (adapter->function_mode & VNIC_MODE)
4240 return "vNIC";
4241 else if (adapter->function_mode & UMC_ENABLED)
4242 return "UMC";
4243 else
4244 return "";
4245}
4246
/* "PF" for the physical function, "VF" for a virtual function */
static inline char *func_name(struct be_adapter *adapter)
{
	if (be_physfn(adapter))
		return "PF";
	return "VF";
}
4251
Greg Kroah-Hartman1dd06ae2012-12-06 14:30:56 +00004252static int be_probe(struct pci_dev *pdev, const struct pci_device_id *pdev_id)
Sathya Perla6b7c5b92009-03-11 23:32:03 -07004253{
4254 int status = 0;
4255 struct be_adapter *adapter;
4256 struct net_device *netdev;
Padmanabh Ratnakarb4e32a72012-07-12 03:57:35 +00004257 char port_name;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07004258
4259 status = pci_enable_device(pdev);
4260 if (status)
4261 goto do_none;
4262
4263 status = pci_request_regions(pdev, DRV_NAME);
4264 if (status)
4265 goto disable_dev;
4266 pci_set_master(pdev);
4267
Sathya Perla7f640062012-06-05 19:37:20 +00004268 netdev = alloc_etherdev_mqs(sizeof(*adapter), MAX_TX_QS, MAX_RX_QS);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07004269 if (netdev == NULL) {
4270 status = -ENOMEM;
4271 goto rel_reg;
4272 }
4273 adapter = netdev_priv(netdev);
4274 adapter->pdev = pdev;
4275 pci_set_drvdata(pdev, adapter);
4276 adapter->netdev = netdev;
Sathya Perla2243e2e2009-11-22 22:02:03 +00004277 SET_NETDEV_DEV(netdev, &pdev->dev);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07004278
Ivan Vecera2b7bceb2011-02-02 08:05:12 +00004279 status = dma_set_mask(&pdev->dev, DMA_BIT_MASK(64));
Sathya Perla6b7c5b92009-03-11 23:32:03 -07004280 if (!status) {
Craig Hada2bd92cd2013-04-21 23:28:18 +00004281 status = dma_set_coherent_mask(&pdev->dev, DMA_BIT_MASK(64));
4282 if (status < 0) {
4283 dev_err(&pdev->dev, "dma_set_coherent_mask failed\n");
4284 goto free_netdev;
4285 }
Sathya Perla6b7c5b92009-03-11 23:32:03 -07004286 netdev->features |= NETIF_F_HIGHDMA;
4287 } else {
Ivan Vecera2b7bceb2011-02-02 08:05:12 +00004288 status = dma_set_mask(&pdev->dev, DMA_BIT_MASK(32));
Sathya Perla6b7c5b92009-03-11 23:32:03 -07004289 if (status) {
4290 dev_err(&pdev->dev, "Could not set PCI DMA Mask\n");
4291 goto free_netdev;
4292 }
4293 }
4294
Sathya Perlad6b6d982012-09-05 01:56:48 +00004295 status = pci_enable_pcie_error_reporting(pdev);
4296 if (status)
4297 dev_err(&pdev->dev, "Could not use PCIe error reporting\n");
4298
Sathya Perla6b7c5b92009-03-11 23:32:03 -07004299 status = be_ctrl_init(adapter);
4300 if (status)
Sathya Perla39f1d942012-05-08 19:41:24 +00004301 goto free_netdev;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07004302
Sathya Perla2243e2e2009-11-22 22:02:03 +00004303 /* sync up with fw's ready state */
Sarveshwar Bandiba343c72010-03-31 02:56:12 +00004304 if (be_physfn(adapter)) {
Padmanabh Ratnakarbf99e502012-07-12 03:56:58 +00004305 status = be_fw_wait_ready(adapter);
Sarveshwar Bandiba343c72010-03-31 02:56:12 +00004306 if (status)
4307 goto ctrl_clean;
Sarveshwar Bandiba343c72010-03-31 02:56:12 +00004308 }
Sathya Perla2243e2e2009-11-22 22:02:03 +00004309
Sathya Perla39f1d942012-05-08 19:41:24 +00004310 if (be_reset_required(adapter)) {
4311 status = be_cmd_reset_function(adapter);
4312 if (status)
4313 goto ctrl_clean;
Sarveshwar Bandi556ae192010-05-24 18:38:25 -07004314
Kalesh AP2d177be2013-04-28 22:22:29 +00004315 /* Wait for interrupts to quiesce after an FLR */
4316 msleep(100);
4317 }
Somnath Kotur8cef7a72013-03-14 02:41:51 +00004318
4319 /* Allow interrupts for other ULPs running on NIC function */
4320 be_intr_set(adapter, true);
Sathya Perla10ef9ab2012-02-09 18:05:27 +00004321
Kalesh AP2d177be2013-04-28 22:22:29 +00004322 /* tell fw we're ready to fire cmds */
4323 status = be_cmd_fw_init(adapter);
4324 if (status)
4325 goto ctrl_clean;
4326
Sathya Perla6b7c5b92009-03-11 23:32:03 -07004327 status = be_stats_init(adapter);
4328 if (status)
4329 goto ctrl_clean;
4330
Sathya Perla39f1d942012-05-08 19:41:24 +00004331 status = be_get_initial_config(adapter);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07004332 if (status)
4333 goto stats_clean;
4334
Sathya Perla6b7c5b92009-03-11 23:32:03 -07004335 INIT_DELAYED_WORK(&adapter->work, be_worker);
Padmanabh Ratnakarf67ef7b2012-07-12 03:57:09 +00004336 INIT_DELAYED_WORK(&adapter->func_recovery_work, be_func_recovery_task);
Sathya Perlaa54769f2011-10-24 02:45:00 +00004337 adapter->rx_fc = adapter->tx_fc = true;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07004338
Sathya Perla5fb379e2009-06-18 00:02:59 +00004339 status = be_setup(adapter);
4340 if (status)
Sathya Perla55f5c3c2012-09-28 04:39:42 +00004341 goto stats_clean;
Sathya Perla2243e2e2009-11-22 22:02:03 +00004342
Sathya Perla3abcded2010-10-03 22:12:27 -07004343 be_netdev_init(netdev);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07004344 status = register_netdev(netdev);
4345 if (status != 0)
Sathya Perla5fb379e2009-06-18 00:02:59 +00004346 goto unsetup;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07004347
Parav Pandit045508a2012-03-26 14:27:13 +00004348 be_roce_dev_add(adapter);
4349
Padmanabh Ratnakarf67ef7b2012-07-12 03:57:09 +00004350 schedule_delayed_work(&adapter->func_recovery_work,
4351 msecs_to_jiffies(1000));
Padmanabh Ratnakarb4e32a72012-07-12 03:57:35 +00004352
4353 be_cmd_query_port_name(adapter, &port_name);
4354
Sathya Perlad3791422012-09-28 04:39:44 +00004355 dev_info(&pdev->dev, "%s: %s %s port %c\n", nic_name(pdev),
4356 func_name(adapter), mc_name(adapter), port_name);
Somnath Kotur34b1ef02011-06-01 00:33:22 +00004357
Sathya Perla6b7c5b92009-03-11 23:32:03 -07004358 return 0;
4359
Sathya Perla5fb379e2009-06-18 00:02:59 +00004360unsetup:
4361 be_clear(adapter);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07004362stats_clean:
4363 be_stats_cleanup(adapter);
4364ctrl_clean:
4365 be_ctrl_cleanup(adapter);
Sathya Perlaf9449ab2011-10-24 02:45:01 +00004366free_netdev:
Sathya Perlafe6d2a32010-11-21 23:25:50 +00004367 free_netdev(netdev);
Sathya Perla8d56ff12009-11-22 22:02:26 +00004368 pci_set_drvdata(pdev, NULL);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07004369rel_reg:
4370 pci_release_regions(pdev);
4371disable_dev:
4372 pci_disable_device(pdev);
4373do_none:
Ajit Khapardec4ca2372009-05-18 15:38:55 -07004374 dev_err(&pdev->dev, "%s initialization failed\n", nic_name(pdev));
Sathya Perla6b7c5b92009-03-11 23:32:03 -07004375 return status;
4376}
4377
4378static int be_suspend(struct pci_dev *pdev, pm_message_t state)
4379{
4380 struct be_adapter *adapter = pci_get_drvdata(pdev);
4381 struct net_device *netdev = adapter->netdev;
4382
Ajit Khaparde71d8d1b2009-12-03 06:16:59 +00004383 if (adapter->wol)
4384 be_setup_wol(adapter, true);
4385
Padmanabh Ratnakarf67ef7b2012-07-12 03:57:09 +00004386 cancel_delayed_work_sync(&adapter->func_recovery_work);
4387
Sathya Perla6b7c5b92009-03-11 23:32:03 -07004388 netif_device_detach(netdev);
4389 if (netif_running(netdev)) {
4390 rtnl_lock();
4391 be_close(netdev);
4392 rtnl_unlock();
4393 }
Sarveshwar Bandi9b0365f2009-08-12 21:01:29 +00004394 be_clear(adapter);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07004395
4396 pci_save_state(pdev);
4397 pci_disable_device(pdev);
4398 pci_set_power_state(pdev, pci_choose_state(pdev, state));
4399 return 0;
4400}
4401
4402static int be_resume(struct pci_dev *pdev)
4403{
4404 int status = 0;
4405 struct be_adapter *adapter = pci_get_drvdata(pdev);
4406 struct net_device *netdev = adapter->netdev;
4407
4408 netif_device_detach(netdev);
4409
4410 status = pci_enable_device(pdev);
4411 if (status)
4412 return status;
4413
4414 pci_set_power_state(pdev, 0);
4415 pci_restore_state(pdev);
4416
Sathya Perla2243e2e2009-11-22 22:02:03 +00004417 /* tell fw we're ready to fire cmds */
4418 status = be_cmd_fw_init(adapter);
4419 if (status)
4420 return status;
4421
Sarveshwar Bandi9b0365f2009-08-12 21:01:29 +00004422 be_setup(adapter);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07004423 if (netif_running(netdev)) {
4424 rtnl_lock();
Sathya Perla6b7c5b92009-03-11 23:32:03 -07004425 be_open(netdev);
4426 rtnl_unlock();
4427 }
Padmanabh Ratnakarf67ef7b2012-07-12 03:57:09 +00004428
4429 schedule_delayed_work(&adapter->func_recovery_work,
4430 msecs_to_jiffies(1000));
Sathya Perla6b7c5b92009-03-11 23:32:03 -07004431 netif_device_attach(netdev);
Ajit Khaparde71d8d1b2009-12-03 06:16:59 +00004432
4433 if (adapter->wol)
4434 be_setup_wol(adapter, false);
Ajit Khapardea4ca0552011-02-11 13:38:03 +00004435
Sathya Perla6b7c5b92009-03-11 23:32:03 -07004436 return 0;
4437}
4438
Sathya Perla82456b02010-02-17 01:35:37 +00004439/*
4440 * An FLR will stop BE from DMAing any data.
4441 */
4442static void be_shutdown(struct pci_dev *pdev)
4443{
4444 struct be_adapter *adapter = pci_get_drvdata(pdev);
Sathya Perla82456b02010-02-17 01:35:37 +00004445
Ajit Khaparde2d5d4152011-04-06 05:53:13 +00004446 if (!adapter)
4447 return;
Sathya Perla82456b02010-02-17 01:35:37 +00004448
Sathya Perla0f4a6822011-03-21 20:49:28 +00004449 cancel_delayed_work_sync(&adapter->work);
Padmanabh Ratnakarf67ef7b2012-07-12 03:57:09 +00004450 cancel_delayed_work_sync(&adapter->func_recovery_work);
Ajit Khapardea4ca0552011-02-11 13:38:03 +00004451
Ajit Khaparde2d5d4152011-04-06 05:53:13 +00004452 netif_device_detach(adapter->netdev);
Sathya Perla82456b02010-02-17 01:35:37 +00004453
Ajit Khaparde57841862011-04-06 18:08:43 +00004454 be_cmd_reset_function(adapter);
4455
Sathya Perla82456b02010-02-17 01:35:37 +00004456 pci_disable_device(pdev);
Sathya Perla82456b02010-02-17 01:35:37 +00004457}
4458
Sathya Perlacf588472010-02-14 21:22:01 +00004459static pci_ers_result_t be_eeh_err_detected(struct pci_dev *pdev,
4460 pci_channel_state_t state)
4461{
4462 struct be_adapter *adapter = pci_get_drvdata(pdev);
4463 struct net_device *netdev = adapter->netdev;
4464
4465 dev_err(&adapter->pdev->dev, "EEH error detected\n");
4466
Padmanabh Ratnakarf67ef7b2012-07-12 03:57:09 +00004467 adapter->eeh_error = true;
Sathya Perlacf588472010-02-14 21:22:01 +00004468
Padmanabh Ratnakarf67ef7b2012-07-12 03:57:09 +00004469 cancel_delayed_work_sync(&adapter->func_recovery_work);
4470
4471 rtnl_lock();
Sathya Perlacf588472010-02-14 21:22:01 +00004472 netif_device_detach(netdev);
Padmanabh Ratnakarf67ef7b2012-07-12 03:57:09 +00004473 rtnl_unlock();
Sathya Perlacf588472010-02-14 21:22:01 +00004474
4475 if (netif_running(netdev)) {
4476 rtnl_lock();
4477 be_close(netdev);
4478 rtnl_unlock();
4479 }
4480 be_clear(adapter);
4481
4482 if (state == pci_channel_io_perm_failure)
4483 return PCI_ERS_RESULT_DISCONNECT;
4484
4485 pci_disable_device(pdev);
4486
Somnath Kotureeb7fc72012-05-02 03:41:01 +00004487 /* The error could cause the FW to trigger a flash debug dump.
4488 * Resetting the card while flash dump is in progress
Padmanabh Ratnakarc8a54162012-10-20 06:03:37 +00004489 * can cause it not to recover; wait for it to finish.
4490 * Wait only for first function as it is needed only once per
4491 * adapter.
Somnath Kotureeb7fc72012-05-02 03:41:01 +00004492 */
Padmanabh Ratnakarc8a54162012-10-20 06:03:37 +00004493 if (pdev->devfn == 0)
4494 ssleep(30);
4495
Sathya Perlacf588472010-02-14 21:22:01 +00004496 return PCI_ERS_RESULT_NEED_RESET;
4497}
4498
4499static pci_ers_result_t be_eeh_reset(struct pci_dev *pdev)
4500{
4501 struct be_adapter *adapter = pci_get_drvdata(pdev);
4502 int status;
4503
4504 dev_info(&adapter->pdev->dev, "EEH reset\n");
Padmanabh Ratnakarf67ef7b2012-07-12 03:57:09 +00004505 be_clear_all_error(adapter);
Sathya Perlacf588472010-02-14 21:22:01 +00004506
4507 status = pci_enable_device(pdev);
4508 if (status)
4509 return PCI_ERS_RESULT_DISCONNECT;
4510
4511 pci_set_master(pdev);
4512 pci_set_power_state(pdev, 0);
4513 pci_restore_state(pdev);
4514
4515 /* Check if card is ok and fw is ready */
Sathya Perlac5b3ad42013-03-05 22:23:20 +00004516 dev_info(&adapter->pdev->dev,
4517 "Waiting for FW to be ready after EEH reset\n");
Padmanabh Ratnakarbf99e502012-07-12 03:56:58 +00004518 status = be_fw_wait_ready(adapter);
Sathya Perlacf588472010-02-14 21:22:01 +00004519 if (status)
4520 return PCI_ERS_RESULT_DISCONNECT;
4521
Sathya Perlad6b6d982012-09-05 01:56:48 +00004522 pci_cleanup_aer_uncorrect_error_status(pdev);
Sathya Perlacf588472010-02-14 21:22:01 +00004523 return PCI_ERS_RESULT_RECOVERED;
4524}
4525
4526static void be_eeh_resume(struct pci_dev *pdev)
4527{
4528 int status = 0;
4529 struct be_adapter *adapter = pci_get_drvdata(pdev);
4530 struct net_device *netdev = adapter->netdev;
4531
4532 dev_info(&adapter->pdev->dev, "EEH resume\n");
4533
4534 pci_save_state(pdev);
4535
Kalesh AP2d177be2013-04-28 22:22:29 +00004536 status = be_cmd_reset_function(adapter);
Sathya Perlacf588472010-02-14 21:22:01 +00004537 if (status)
4538 goto err;
4539
Kalesh AP2d177be2013-04-28 22:22:29 +00004540 /* tell fw we're ready to fire cmds */
4541 status = be_cmd_fw_init(adapter);
Padmanabh Ratnakarbf99e502012-07-12 03:56:58 +00004542 if (status)
4543 goto err;
4544
Sathya Perlacf588472010-02-14 21:22:01 +00004545 status = be_setup(adapter);
4546 if (status)
4547 goto err;
4548
4549 if (netif_running(netdev)) {
4550 status = be_open(netdev);
4551 if (status)
4552 goto err;
4553 }
Padmanabh Ratnakarf67ef7b2012-07-12 03:57:09 +00004554
4555 schedule_delayed_work(&adapter->func_recovery_work,
4556 msecs_to_jiffies(1000));
Sathya Perlacf588472010-02-14 21:22:01 +00004557 netif_device_attach(netdev);
4558 return;
4559err:
4560 dev_err(&adapter->pdev->dev, "EEH resume failed\n");
Sathya Perlacf588472010-02-14 21:22:01 +00004561}
4562
Stephen Hemminger3646f0e2012-09-07 09:33:15 -07004563static const struct pci_error_handlers be_eeh_handlers = {
Sathya Perlacf588472010-02-14 21:22:01 +00004564 .error_detected = be_eeh_err_detected,
4565 .slot_reset = be_eeh_reset,
4566 .resume = be_eeh_resume,
4567};
4568
Sathya Perla6b7c5b92009-03-11 23:32:03 -07004569static struct pci_driver be_driver = {
4570 .name = DRV_NAME,
4571 .id_table = be_dev_ids,
4572 .probe = be_probe,
4573 .remove = be_remove,
4574 .suspend = be_suspend,
Sathya Perlacf588472010-02-14 21:22:01 +00004575 .resume = be_resume,
Sathya Perla82456b02010-02-17 01:35:37 +00004576 .shutdown = be_shutdown,
Sathya Perlacf588472010-02-14 21:22:01 +00004577 .err_handler = &be_eeh_handlers
Sathya Perla6b7c5b92009-03-11 23:32:03 -07004578};
4579
4580static int __init be_init_module(void)
4581{
Joe Perches8e95a202009-12-03 07:58:21 +00004582 if (rx_frag_size != 8192 && rx_frag_size != 4096 &&
4583 rx_frag_size != 2048) {
Sathya Perla6b7c5b92009-03-11 23:32:03 -07004584 printk(KERN_WARNING DRV_NAME
4585 " : Module param rx_frag_size must be 2048/4096/8192."
4586 " Using 2048\n");
4587 rx_frag_size = 2048;
4588 }
Sathya Perla6b7c5b92009-03-11 23:32:03 -07004589
Sathya Perla6b7c5b92009-03-11 23:32:03 -07004590 return pci_register_driver(&be_driver);
4591}
4592module_init(be_init_module);
4593
4594static void __exit be_exit_module(void)
4595{
4596 pci_unregister_driver(&be_driver);
4597}
4598module_exit(be_exit_module);