blob: 98efc29eaa55631951faaae5fc298143ab6c1221 [file] [log] [blame]
/*
 * Copyright (C) 2005 - 2013 Emulex
 * All rights reserved.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License version 2
 * as published by the Free Software Foundation.  The full GNU General
 * Public License is included in this distribution in the file called COPYING.
 *
 * Contact Information:
 * linux-drivers@emulex.com
 *
 * Emulex
 * 3333 Susan Street
 * Costa Mesa, CA 92626
 */
17
Paul Gortmaker70c71602011-05-22 16:47:17 -040018#include <linux/prefetch.h>
Paul Gortmaker9d9779e2011-07-03 15:21:01 -040019#include <linux/module.h>
Sathya Perla6b7c5b92009-03-11 23:32:03 -070020#include "be.h"
Sathya Perla8788fdc2009-07-27 22:52:03 +000021#include "be_cmds.h"
Stephen Hemminger65f71b82009-03-27 00:25:24 -070022#include <asm/div64.h>
Sathya Perlad6b6d982012-09-05 01:56:48 +000023#include <linux/aer.h>
Sathya Perla6b7c5b92009-03-11 23:32:03 -070024
25MODULE_VERSION(DRV_VER);
26MODULE_DEVICE_TABLE(pci, be_dev_ids);
27MODULE_DESCRIPTION(DRV_DESC " " DRV_VER);
Sarveshwar Bandi00d3d512013-01-28 04:17:01 +000028MODULE_AUTHOR("Emulex Corporation");
Sathya Perla6b7c5b92009-03-11 23:32:03 -070029MODULE_LICENSE("GPL");
30
Sarveshwar Bandiba343c72010-03-31 02:56:12 +000031static unsigned int num_vfs;
Sarveshwar Bandiba343c72010-03-31 02:56:12 +000032module_param(num_vfs, uint, S_IRUGO);
Sarveshwar Bandiba343c72010-03-31 02:56:12 +000033MODULE_PARM_DESC(num_vfs, "Number of PCI VFs to initialize");
Sathya Perla6b7c5b92009-03-11 23:32:03 -070034
Sathya Perla11ac75e2011-12-13 00:58:50 +000035static ushort rx_frag_size = 2048;
36module_param(rx_frag_size, ushort, S_IRUGO);
37MODULE_PARM_DESC(rx_frag_size, "Size of a fragment that holds rcvd data.");
38
Sathya Perla6b7c5b92009-03-11 23:32:03 -070039static DEFINE_PCI_DEVICE_TABLE(be_dev_ids) = {
Ajit Khapardec4ca2372009-05-18 15:38:55 -070040 { PCI_DEVICE(BE_VENDOR_ID, BE_DEVICE_ID1) },
Ajit Khaparde59fd5d82009-10-29 01:11:06 -070041 { PCI_DEVICE(BE_VENDOR_ID, BE_DEVICE_ID2) },
Ajit Khapardec4ca2372009-05-18 15:38:55 -070042 { PCI_DEVICE(BE_VENDOR_ID, OC_DEVICE_ID1) },
43 { PCI_DEVICE(BE_VENDOR_ID, OC_DEVICE_ID2) },
Sathya Perlafe6d2a32010-11-21 23:25:50 +000044 { PCI_DEVICE(EMULEX_VENDOR_ID, OC_DEVICE_ID3)},
Mammatha Edhala12f4d0a2011-05-18 03:26:22 +000045 { PCI_DEVICE(EMULEX_VENDOR_ID, OC_DEVICE_ID4)},
Ajit Khapardeecedb6a2011-12-15 06:31:38 +000046 { PCI_DEVICE(EMULEX_VENDOR_ID, OC_DEVICE_ID5)},
Padmanabh Ratnakar76b73532012-10-20 06:04:40 +000047 { PCI_DEVICE(EMULEX_VENDOR_ID, OC_DEVICE_ID6)},
Sathya Perla6b7c5b92009-03-11 23:32:03 -070048 { 0 }
49};
50MODULE_DEVICE_TABLE(pci, be_dev_ids);
/* UE Status Low CSR: one description string per bit position, used when
 * decoding an unrecoverable-error status register dump.  The trailing
 * spaces in some entries are part of the original FW naming and are kept
 * verbatim.
 */
static const char * const ue_status_low_desc[] = {
	"CEV",
	"CTX",
	"DBUF",
	"ERX",
	"Host",
	"MPU",
	"NDMA",
	"PTC ",
	"RDMA ",
	"RXF ",
	"RXIPS ",
	"RXULP0 ",
	"RXULP1 ",
	"RXULP2 ",
	"TIM ",
	"TPOST ",
	"TPRE ",
	"TXIPS ",
	"TXULP0 ",
	"TXULP1 ",
	"UC ",
	"WDMA ",
	"TXULP2 ",
	"HOST1 ",
	"P0_OB_LINK ",
	"P1_OB_LINK ",
	"HOST_GPIO ",
	"MBOX ",
	"AXGMAC0",
	"AXGMAC1",
	"JTAG",
	"MPU_INTPEND"
};
/* UE Status High CSR: description string per bit position of the high
 * word of the unrecoverable-error status; unassigned bits are "Unknown".
 */
static const char * const ue_status_hi_desc[] = {
	"LPCMEMHOST",
	"MGMT_MAC",
	"PCS0ONLINE",
	"MPU_IRAM",
	"PCS1ONLINE",
	"PCTL0",
	"PCTL1",
	"PMEM",
	"RR",
	"TXPB",
	"RXPP",
	"XAUI",
	"TXP",
	"ARM",
	"IPC",
	"HOST2",
	"HOST3",
	"HOST4",
	"HOST5",
	"HOST6",
	"HOST7",
	"HOST8",
	"HOST9",
	"NETC",
	"Unknown",
	"Unknown",
	"Unknown",
	"Unknown",
	"Unknown",
	"Unknown",
	"Unknown",
	"Unknown"
};
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700121
Sathya Perla752961a2011-10-24 02:45:03 +0000122/* Is BE in a multi-channel mode */
123static inline bool be_is_mc(struct be_adapter *adapter) {
124 return (adapter->function_mode & FLEX10_MODE ||
125 adapter->function_mode & VNIC_MODE ||
126 adapter->function_mode & UMC_ENABLED);
127}
128
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700129static void be_queue_free(struct be_adapter *adapter, struct be_queue_info *q)
130{
131 struct be_dma_mem *mem = &q->dma_mem;
Sathya Perla1cfafab2012-02-23 18:50:15 +0000132 if (mem->va) {
Ivan Vecera2b7bceb2011-02-02 08:05:12 +0000133 dma_free_coherent(&adapter->pdev->dev, mem->size, mem->va,
134 mem->dma);
Sathya Perla1cfafab2012-02-23 18:50:15 +0000135 mem->va = NULL;
136 }
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700137}
138
139static int be_queue_alloc(struct be_adapter *adapter, struct be_queue_info *q,
140 u16 len, u16 entry_size)
141{
142 struct be_dma_mem *mem = &q->dma_mem;
143
144 memset(q, 0, sizeof(*q));
145 q->len = len;
146 q->entry_size = entry_size;
147 mem->size = len * entry_size;
Ivan Vecera2b7bceb2011-02-02 08:05:12 +0000148 mem->va = dma_alloc_coherent(&adapter->pdev->dev, mem->size, &mem->dma,
Joe Perches1f9061d22013-03-15 07:23:58 +0000149 GFP_KERNEL | __GFP_ZERO);
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700150 if (!mem->va)
Sathya Perla10ef9ab2012-02-09 18:05:27 +0000151 return -ENOMEM;
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700152 return 0;
153}
154
Somnath Kotur68c45a22013-03-14 02:42:07 +0000155static void be_reg_intr_set(struct be_adapter *adapter, bool enable)
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700156{
Sathya Perladb3ea782011-08-22 19:41:52 +0000157 u32 reg, enabled;
Sathya Perla5f0b8492009-07-27 22:52:56 +0000158
Sathya Perladb3ea782011-08-22 19:41:52 +0000159 pci_read_config_dword(adapter->pdev, PCICFG_MEMBAR_CTRL_INT_CTRL_OFFSET,
160 &reg);
161 enabled = reg & MEMBAR_CTRL_INT_CTRL_HOSTINTR_MASK;
162
Sathya Perla5f0b8492009-07-27 22:52:56 +0000163 if (!enabled && enable)
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700164 reg |= MEMBAR_CTRL_INT_CTRL_HOSTINTR_MASK;
Sathya Perla5f0b8492009-07-27 22:52:56 +0000165 else if (enabled && !enable)
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700166 reg &= ~MEMBAR_CTRL_INT_CTRL_HOSTINTR_MASK;
Sathya Perla5f0b8492009-07-27 22:52:56 +0000167 else
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700168 return;
Sathya Perla5f0b8492009-07-27 22:52:56 +0000169
Sathya Perladb3ea782011-08-22 19:41:52 +0000170 pci_write_config_dword(adapter->pdev,
171 PCICFG_MEMBAR_CTRL_INT_CTRL_OFFSET, reg);
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700172}
173
Somnath Kotur68c45a22013-03-14 02:42:07 +0000174static void be_intr_set(struct be_adapter *adapter, bool enable)
175{
176 int status = 0;
177
178 /* On lancer interrupts can't be controlled via this register */
179 if (lancer_chip(adapter))
180 return;
181
182 if (adapter->eeh_error)
183 return;
184
185 status = be_cmd_intr_set(adapter, enable);
186 if (status)
187 be_reg_intr_set(adapter, enable);
188}
189
Sathya Perla8788fdc2009-07-27 22:52:03 +0000190static void be_rxq_notify(struct be_adapter *adapter, u16 qid, u16 posted)
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700191{
192 u32 val = 0;
193 val |= qid & DB_RQ_RING_ID_MASK;
194 val |= posted << DB_RQ_NUM_POSTED_SHIFT;
Sathya Perlaf3eb62d2010-06-29 00:11:17 +0000195
196 wmb();
Sathya Perla8788fdc2009-07-27 22:52:03 +0000197 iowrite32(val, adapter->db + DB_RQ_OFFSET);
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700198}
199
Vasundhara Volam94d73aa2013-04-21 23:28:14 +0000200static void be_txq_notify(struct be_adapter *adapter, struct be_tx_obj *txo,
201 u16 posted)
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700202{
203 u32 val = 0;
Vasundhara Volam94d73aa2013-04-21 23:28:14 +0000204 val |= txo->q.id & DB_TXULP_RING_ID_MASK;
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700205 val |= (posted & DB_TXULP_NUM_POSTED_MASK) << DB_TXULP_NUM_POSTED_SHIFT;
Sathya Perlaf3eb62d2010-06-29 00:11:17 +0000206
207 wmb();
Vasundhara Volam94d73aa2013-04-21 23:28:14 +0000208 iowrite32(val, adapter->db + txo->db_offset);
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700209}
210
Sathya Perla8788fdc2009-07-27 22:52:03 +0000211static void be_eq_notify(struct be_adapter *adapter, u16 qid,
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700212 bool arm, bool clear_int, u16 num_popped)
213{
214 u32 val = 0;
215 val |= qid & DB_EQ_RING_ID_MASK;
Sathya Perlafe6d2a32010-11-21 23:25:50 +0000216 val |= ((qid & DB_EQ_RING_ID_EXT_MASK) <<
217 DB_EQ_RING_ID_EXT_MASK_SHIFT);
Sathya Perlacf588472010-02-14 21:22:01 +0000218
Padmanabh Ratnakarf67ef7b2012-07-12 03:57:09 +0000219 if (adapter->eeh_error)
Sathya Perlacf588472010-02-14 21:22:01 +0000220 return;
221
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700222 if (arm)
223 val |= 1 << DB_EQ_REARM_SHIFT;
224 if (clear_int)
225 val |= 1 << DB_EQ_CLR_SHIFT;
226 val |= 1 << DB_EQ_EVNT_SHIFT;
227 val |= num_popped << DB_EQ_NUM_POPPED_SHIFT;
Sathya Perla8788fdc2009-07-27 22:52:03 +0000228 iowrite32(val, adapter->db + DB_EQ_OFFSET);
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700229}
230
Sathya Perla8788fdc2009-07-27 22:52:03 +0000231void be_cq_notify(struct be_adapter *adapter, u16 qid, bool arm, u16 num_popped)
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700232{
233 u32 val = 0;
234 val |= qid & DB_CQ_RING_ID_MASK;
Sathya Perlafe6d2a32010-11-21 23:25:50 +0000235 val |= ((qid & DB_CQ_RING_ID_EXT_MASK) <<
236 DB_CQ_RING_ID_EXT_MASK_SHIFT);
Sathya Perlacf588472010-02-14 21:22:01 +0000237
Padmanabh Ratnakarf67ef7b2012-07-12 03:57:09 +0000238 if (adapter->eeh_error)
Sathya Perlacf588472010-02-14 21:22:01 +0000239 return;
240
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700241 if (arm)
242 val |= 1 << DB_CQ_REARM_SHIFT;
243 val |= num_popped << DB_CQ_NUM_POPPED_SHIFT;
Sathya Perla8788fdc2009-07-27 22:52:03 +0000244 iowrite32(val, adapter->db + DB_CQ_OFFSET);
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700245}
246
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700247static int be_mac_addr_set(struct net_device *netdev, void *p)
248{
249 struct be_adapter *adapter = netdev_priv(netdev);
250 struct sockaddr *addr = p;
251 int status = 0;
Somnath Koture3a7ae22011-10-27 07:14:05 +0000252 u8 current_mac[ETH_ALEN];
Ajit Khapardefbc13f02012-03-18 06:23:21 +0000253 u32 pmac_id = adapter->pmac_id[0];
Padmanabh Ratnakar704e4c82012-10-20 06:02:13 +0000254 bool active_mac = true;
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700255
Ajit Khapardeca9e4982009-11-29 17:56:26 +0000256 if (!is_valid_ether_addr(addr->sa_data))
257 return -EADDRNOTAVAIL;
258
Padmanabh Ratnakar704e4c82012-10-20 06:02:13 +0000259 /* For BE VF, MAC address is already activated by PF.
260 * Hence only operation left is updating netdev->devaddr.
261 * Update it if user is passing the same MAC which was used
262 * during configuring VF MAC from PF(Hypervisor).
263 */
264 if (!lancer_chip(adapter) && !be_physfn(adapter)) {
265 status = be_cmd_mac_addr_query(adapter, current_mac,
266 false, adapter->if_handle, 0);
267 if (!status && !memcmp(current_mac, addr->sa_data, ETH_ALEN))
268 goto done;
269 else
270 goto err;
271 }
272
273 if (!memcmp(addr->sa_data, netdev->dev_addr, ETH_ALEN))
274 goto done;
275
276 /* For Lancer check if any MAC is active.
277 * If active, get its mac id.
278 */
279 if (lancer_chip(adapter) && !be_physfn(adapter))
280 be_cmd_get_mac_from_list(adapter, current_mac, &active_mac,
281 &pmac_id, 0);
282
283 status = be_cmd_pmac_add(adapter, (u8 *)addr->sa_data,
284 adapter->if_handle,
285 &adapter->pmac_id[0], 0);
286
Sathya Perlaa65027e2009-08-17 00:58:04 +0000287 if (status)
Somnath Koture3a7ae22011-10-27 07:14:05 +0000288 goto err;
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700289
Padmanabh Ratnakar704e4c82012-10-20 06:02:13 +0000290 if (active_mac)
291 be_cmd_pmac_del(adapter, adapter->if_handle,
292 pmac_id, 0);
293done:
Somnath Koture3a7ae22011-10-27 07:14:05 +0000294 memcpy(netdev->dev_addr, addr->sa_data, netdev->addr_len);
295 return 0;
296err:
297 dev_err(&adapter->pdev->dev, "MAC %pM set Failed\n", addr->sa_data);
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700298 return status;
299}
300
Sathya Perlaca34fe32012-11-06 17:48:56 +0000301/* BE2 supports only v0 cmd */
302static void *hw_stats_from_cmd(struct be_adapter *adapter)
303{
304 if (BE2_chip(adapter)) {
305 struct be_cmd_resp_get_stats_v0 *cmd = adapter->stats_cmd.va;
306
307 return &cmd->hw_stats;
308 } else {
309 struct be_cmd_resp_get_stats_v1 *cmd = adapter->stats_cmd.va;
310
311 return &cmd->hw_stats;
312 }
313}
314
315/* BE2 supports only v0 cmd */
316static void *be_erx_stats_from_cmd(struct be_adapter *adapter)
317{
318 if (BE2_chip(adapter)) {
319 struct be_hw_stats_v0 *hw_stats = hw_stats_from_cmd(adapter);
320
321 return &hw_stats->erx;
322 } else {
323 struct be_hw_stats_v1 *hw_stats = hw_stats_from_cmd(adapter);
324
325 return &hw_stats->erx;
326 }
327}
328
329static void populate_be_v0_stats(struct be_adapter *adapter)
Ajit Khaparde89a88ab2011-05-16 07:36:18 +0000330{
Sathya Perlaac124ff2011-07-25 19:10:14 +0000331 struct be_hw_stats_v0 *hw_stats = hw_stats_from_cmd(adapter);
332 struct be_pmem_stats *pmem_sts = &hw_stats->pmem;
333 struct be_rxf_stats_v0 *rxf_stats = &hw_stats->rxf;
Ajit Khaparde89a88ab2011-05-16 07:36:18 +0000334 struct be_port_rxf_stats_v0 *port_stats =
Sathya Perlaac124ff2011-07-25 19:10:14 +0000335 &rxf_stats->port[adapter->port_num];
336 struct be_drv_stats *drvs = &adapter->drv_stats;
Ajit Khaparde89a88ab2011-05-16 07:36:18 +0000337
Sathya Perlaac124ff2011-07-25 19:10:14 +0000338 be_dws_le_to_cpu(hw_stats, sizeof(*hw_stats));
Ajit Khaparde89a88ab2011-05-16 07:36:18 +0000339 drvs->rx_pause_frames = port_stats->rx_pause_frames;
340 drvs->rx_crc_errors = port_stats->rx_crc_errors;
341 drvs->rx_control_frames = port_stats->rx_control_frames;
342 drvs->rx_in_range_errors = port_stats->rx_in_range_errors;
343 drvs->rx_frame_too_long = port_stats->rx_frame_too_long;
344 drvs->rx_dropped_runt = port_stats->rx_dropped_runt;
345 drvs->rx_ip_checksum_errs = port_stats->rx_ip_checksum_errs;
346 drvs->rx_tcp_checksum_errs = port_stats->rx_tcp_checksum_errs;
347 drvs->rx_udp_checksum_errs = port_stats->rx_udp_checksum_errs;
348 drvs->rxpp_fifo_overflow_drop = port_stats->rx_fifo_overflow;
349 drvs->rx_dropped_tcp_length = port_stats->rx_dropped_tcp_length;
350 drvs->rx_dropped_too_small = port_stats->rx_dropped_too_small;
351 drvs->rx_dropped_too_short = port_stats->rx_dropped_too_short;
352 drvs->rx_out_range_errors = port_stats->rx_out_range_errors;
Sathya Perlaac124ff2011-07-25 19:10:14 +0000353 drvs->rx_input_fifo_overflow_drop = port_stats->rx_input_fifo_overflow;
Ajit Khaparde89a88ab2011-05-16 07:36:18 +0000354 drvs->rx_dropped_header_too_small =
355 port_stats->rx_dropped_header_too_small;
Suresh Reddy18fb06a2013-04-25 23:03:21 +0000356 drvs->rx_address_filtered =
357 port_stats->rx_address_filtered +
358 port_stats->rx_vlan_filtered;
Ajit Khaparde89a88ab2011-05-16 07:36:18 +0000359 drvs->rx_alignment_symbol_errors =
360 port_stats->rx_alignment_symbol_errors;
361
362 drvs->tx_pauseframes = port_stats->tx_pauseframes;
363 drvs->tx_controlframes = port_stats->tx_controlframes;
364
365 if (adapter->port_num)
Sathya Perlaac124ff2011-07-25 19:10:14 +0000366 drvs->jabber_events = rxf_stats->port1_jabber_events;
Ajit Khaparde89a88ab2011-05-16 07:36:18 +0000367 else
Sathya Perlaac124ff2011-07-25 19:10:14 +0000368 drvs->jabber_events = rxf_stats->port0_jabber_events;
Ajit Khaparde89a88ab2011-05-16 07:36:18 +0000369 drvs->rx_drops_no_pbuf = rxf_stats->rx_drops_no_pbuf;
Ajit Khaparde89a88ab2011-05-16 07:36:18 +0000370 drvs->rx_drops_no_erx_descr = rxf_stats->rx_drops_no_erx_descr;
Ajit Khaparde89a88ab2011-05-16 07:36:18 +0000371 drvs->forwarded_packets = rxf_stats->forwarded_packets;
372 drvs->rx_drops_mtu = rxf_stats->rx_drops_mtu;
Sathya Perlaac124ff2011-07-25 19:10:14 +0000373 drvs->rx_drops_no_tpre_descr = rxf_stats->rx_drops_no_tpre_descr;
374 drvs->rx_drops_too_many_frags = rxf_stats->rx_drops_too_many_frags;
Ajit Khaparde89a88ab2011-05-16 07:36:18 +0000375 adapter->drv_stats.eth_red_drops = pmem_sts->eth_red_drops;
376}
377
Sathya Perlaca34fe32012-11-06 17:48:56 +0000378static void populate_be_v1_stats(struct be_adapter *adapter)
Ajit Khaparde89a88ab2011-05-16 07:36:18 +0000379{
Sathya Perlaac124ff2011-07-25 19:10:14 +0000380 struct be_hw_stats_v1 *hw_stats = hw_stats_from_cmd(adapter);
381 struct be_pmem_stats *pmem_sts = &hw_stats->pmem;
382 struct be_rxf_stats_v1 *rxf_stats = &hw_stats->rxf;
Ajit Khaparde89a88ab2011-05-16 07:36:18 +0000383 struct be_port_rxf_stats_v1 *port_stats =
Sathya Perlaac124ff2011-07-25 19:10:14 +0000384 &rxf_stats->port[adapter->port_num];
385 struct be_drv_stats *drvs = &adapter->drv_stats;
Ajit Khaparde89a88ab2011-05-16 07:36:18 +0000386
Sathya Perlaac124ff2011-07-25 19:10:14 +0000387 be_dws_le_to_cpu(hw_stats, sizeof(*hw_stats));
Ajit Khaparde02fe7022011-12-09 13:53:09 +0000388 drvs->pmem_fifo_overflow_drop = port_stats->pmem_fifo_overflow_drop;
389 drvs->rx_priority_pause_frames = port_stats->rx_priority_pause_frames;
Ajit Khaparde89a88ab2011-05-16 07:36:18 +0000390 drvs->rx_pause_frames = port_stats->rx_pause_frames;
391 drvs->rx_crc_errors = port_stats->rx_crc_errors;
392 drvs->rx_control_frames = port_stats->rx_control_frames;
393 drvs->rx_in_range_errors = port_stats->rx_in_range_errors;
394 drvs->rx_frame_too_long = port_stats->rx_frame_too_long;
395 drvs->rx_dropped_runt = port_stats->rx_dropped_runt;
396 drvs->rx_ip_checksum_errs = port_stats->rx_ip_checksum_errs;
397 drvs->rx_tcp_checksum_errs = port_stats->rx_tcp_checksum_errs;
398 drvs->rx_udp_checksum_errs = port_stats->rx_udp_checksum_errs;
399 drvs->rx_dropped_tcp_length = port_stats->rx_dropped_tcp_length;
400 drvs->rx_dropped_too_small = port_stats->rx_dropped_too_small;
401 drvs->rx_dropped_too_short = port_stats->rx_dropped_too_short;
402 drvs->rx_out_range_errors = port_stats->rx_out_range_errors;
403 drvs->rx_dropped_header_too_small =
404 port_stats->rx_dropped_header_too_small;
405 drvs->rx_input_fifo_overflow_drop =
406 port_stats->rx_input_fifo_overflow_drop;
Suresh Reddy18fb06a2013-04-25 23:03:21 +0000407 drvs->rx_address_filtered = port_stats->rx_address_filtered;
Ajit Khaparde89a88ab2011-05-16 07:36:18 +0000408 drvs->rx_alignment_symbol_errors =
409 port_stats->rx_alignment_symbol_errors;
Sathya Perlaac124ff2011-07-25 19:10:14 +0000410 drvs->rxpp_fifo_overflow_drop = port_stats->rxpp_fifo_overflow_drop;
Ajit Khaparde89a88ab2011-05-16 07:36:18 +0000411 drvs->tx_pauseframes = port_stats->tx_pauseframes;
412 drvs->tx_controlframes = port_stats->tx_controlframes;
Ajit Khapardeb5adffc42013-05-01 09:38:00 +0000413 drvs->tx_priority_pauseframes = port_stats->tx_priority_pauseframes;
Ajit Khaparde89a88ab2011-05-16 07:36:18 +0000414 drvs->jabber_events = port_stats->jabber_events;
415 drvs->rx_drops_no_pbuf = rxf_stats->rx_drops_no_pbuf;
Ajit Khaparde89a88ab2011-05-16 07:36:18 +0000416 drvs->rx_drops_no_erx_descr = rxf_stats->rx_drops_no_erx_descr;
Ajit Khaparde89a88ab2011-05-16 07:36:18 +0000417 drvs->forwarded_packets = rxf_stats->forwarded_packets;
418 drvs->rx_drops_mtu = rxf_stats->rx_drops_mtu;
Sathya Perlaac124ff2011-07-25 19:10:14 +0000419 drvs->rx_drops_no_tpre_descr = rxf_stats->rx_drops_no_tpre_descr;
420 drvs->rx_drops_too_many_frags = rxf_stats->rx_drops_too_many_frags;
Ajit Khaparde89a88ab2011-05-16 07:36:18 +0000421 adapter->drv_stats.eth_red_drops = pmem_sts->eth_red_drops;
422}
423
Selvin Xavier005d5692011-05-16 07:36:35 +0000424static void populate_lancer_stats(struct be_adapter *adapter)
425{
Ajit Khaparde89a88ab2011-05-16 07:36:18 +0000426
Selvin Xavier005d5692011-05-16 07:36:35 +0000427 struct be_drv_stats *drvs = &adapter->drv_stats;
Sathya Perlaac124ff2011-07-25 19:10:14 +0000428 struct lancer_pport_stats *pport_stats =
429 pport_stats_from_cmd(adapter);
430
431 be_dws_le_to_cpu(pport_stats, sizeof(*pport_stats));
432 drvs->rx_pause_frames = pport_stats->rx_pause_frames_lo;
433 drvs->rx_crc_errors = pport_stats->rx_crc_errors_lo;
434 drvs->rx_control_frames = pport_stats->rx_control_frames_lo;
Selvin Xavier005d5692011-05-16 07:36:35 +0000435 drvs->rx_in_range_errors = pport_stats->rx_in_range_errors;
Sathya Perlaac124ff2011-07-25 19:10:14 +0000436 drvs->rx_frame_too_long = pport_stats->rx_frames_too_long_lo;
Selvin Xavier005d5692011-05-16 07:36:35 +0000437 drvs->rx_dropped_runt = pport_stats->rx_dropped_runt;
438 drvs->rx_ip_checksum_errs = pport_stats->rx_ip_checksum_errors;
439 drvs->rx_tcp_checksum_errs = pport_stats->rx_tcp_checksum_errors;
440 drvs->rx_udp_checksum_errs = pport_stats->rx_udp_checksum_errors;
441 drvs->rx_dropped_tcp_length =
442 pport_stats->rx_dropped_invalid_tcp_length;
443 drvs->rx_dropped_too_small = pport_stats->rx_dropped_too_small;
444 drvs->rx_dropped_too_short = pport_stats->rx_dropped_too_short;
445 drvs->rx_out_range_errors = pport_stats->rx_out_of_range_errors;
446 drvs->rx_dropped_header_too_small =
447 pport_stats->rx_dropped_header_too_small;
448 drvs->rx_input_fifo_overflow_drop = pport_stats->rx_fifo_overflow;
Suresh Reddy18fb06a2013-04-25 23:03:21 +0000449 drvs->rx_address_filtered =
450 pport_stats->rx_address_filtered +
451 pport_stats->rx_vlan_filtered;
Sathya Perlaac124ff2011-07-25 19:10:14 +0000452 drvs->rx_alignment_symbol_errors = pport_stats->rx_symbol_errors_lo;
Selvin Xavier005d5692011-05-16 07:36:35 +0000453 drvs->rxpp_fifo_overflow_drop = pport_stats->rx_fifo_overflow;
Sathya Perlaac124ff2011-07-25 19:10:14 +0000454 drvs->tx_pauseframes = pport_stats->tx_pause_frames_lo;
455 drvs->tx_controlframes = pport_stats->tx_control_frames_lo;
Selvin Xavier005d5692011-05-16 07:36:35 +0000456 drvs->jabber_events = pport_stats->rx_jabbers;
Sathya Perlaac124ff2011-07-25 19:10:14 +0000457 drvs->forwarded_packets = pport_stats->num_forwards_lo;
458 drvs->rx_drops_mtu = pport_stats->rx_drops_mtu_lo;
Selvin Xavier005d5692011-05-16 07:36:35 +0000459 drvs->rx_drops_too_many_frags =
Sathya Perlaac124ff2011-07-25 19:10:14 +0000460 pport_stats->rx_drops_too_many_frags_lo;
Selvin Xavier005d5692011-05-16 07:36:35 +0000461}
Ajit Khaparde89a88ab2011-05-16 07:36:18 +0000462
Sathya Perla09c1c682011-08-22 19:41:53 +0000463static void accumulate_16bit_val(u32 *acc, u16 val)
464{
465#define lo(x) (x & 0xFFFF)
466#define hi(x) (x & 0xFFFF0000)
467 bool wrapped = val < lo(*acc);
468 u32 newacc = hi(*acc) + val;
469
470 if (wrapped)
471 newacc += 65536;
472 ACCESS_ONCE(*acc) = newacc;
473}
474
Ajit Khapardea6c578e2013-05-01 09:37:27 +0000475void populate_erx_stats(struct be_adapter *adapter,
476 struct be_rx_obj *rxo,
477 u32 erx_stat)
478{
479 if (!BEx_chip(adapter))
480 rx_stats(rxo)->rx_drops_no_frags = erx_stat;
481 else
482 /* below erx HW counter can actually wrap around after
483 * 65535. Driver accumulates a 32-bit value
484 */
485 accumulate_16bit_val(&rx_stats(rxo)->rx_drops_no_frags,
486 (u16)erx_stat);
487}
488
Ajit Khaparde89a88ab2011-05-16 07:36:18 +0000489void be_parse_stats(struct be_adapter *adapter)
490{
Sathya Perlaac124ff2011-07-25 19:10:14 +0000491 struct be_erx_stats_v1 *erx = be_erx_stats_from_cmd(adapter);
492 struct be_rx_obj *rxo;
493 int i;
Ajit Khapardea6c578e2013-05-01 09:37:27 +0000494 u32 erx_stat;
Sathya Perlaac124ff2011-07-25 19:10:14 +0000495
Sathya Perlaca34fe32012-11-06 17:48:56 +0000496 if (lancer_chip(adapter)) {
497 populate_lancer_stats(adapter);
Selvin Xavier005d5692011-05-16 07:36:35 +0000498 } else {
Sathya Perlaca34fe32012-11-06 17:48:56 +0000499 if (BE2_chip(adapter))
500 populate_be_v0_stats(adapter);
501 else
502 /* for BE3 and Skyhawk */
503 populate_be_v1_stats(adapter);
Sathya Perlaac124ff2011-07-25 19:10:14 +0000504
Sathya Perlaca34fe32012-11-06 17:48:56 +0000505 /* as erx_v1 is longer than v0, ok to use v1 for v0 access */
506 for_all_rx_queues(adapter, rxo, i) {
Ajit Khapardea6c578e2013-05-01 09:37:27 +0000507 erx_stat = erx->rx_drops_no_fragments[rxo->q.id];
508 populate_erx_stats(adapter, rxo, erx_stat);
Sathya Perlaca34fe32012-11-06 17:48:56 +0000509 }
Sathya Perla09c1c682011-08-22 19:41:53 +0000510 }
Ajit Khaparde89a88ab2011-05-16 07:36:18 +0000511}
512
Sathya Perlaab1594e2011-07-25 19:10:15 +0000513static struct rtnl_link_stats64 *be_get_stats64(struct net_device *netdev,
514 struct rtnl_link_stats64 *stats)
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700515{
Sathya Perlaab1594e2011-07-25 19:10:15 +0000516 struct be_adapter *adapter = netdev_priv(netdev);
Ajit Khaparde89a88ab2011-05-16 07:36:18 +0000517 struct be_drv_stats *drvs = &adapter->drv_stats;
Sathya Perla3abcded2010-10-03 22:12:27 -0700518 struct be_rx_obj *rxo;
Sathya Perla3c8def92011-06-12 20:01:58 +0000519 struct be_tx_obj *txo;
Sathya Perlaab1594e2011-07-25 19:10:15 +0000520 u64 pkts, bytes;
521 unsigned int start;
Sathya Perla3abcded2010-10-03 22:12:27 -0700522 int i;
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700523
Sathya Perla3abcded2010-10-03 22:12:27 -0700524 for_all_rx_queues(adapter, rxo, i) {
Sathya Perlaab1594e2011-07-25 19:10:15 +0000525 const struct be_rx_stats *rx_stats = rx_stats(rxo);
526 do {
527 start = u64_stats_fetch_begin_bh(&rx_stats->sync);
528 pkts = rx_stats(rxo)->rx_pkts;
529 bytes = rx_stats(rxo)->rx_bytes;
530 } while (u64_stats_fetch_retry_bh(&rx_stats->sync, start));
531 stats->rx_packets += pkts;
532 stats->rx_bytes += bytes;
533 stats->multicast += rx_stats(rxo)->rx_mcast_pkts;
534 stats->rx_dropped += rx_stats(rxo)->rx_drops_no_skbs +
535 rx_stats(rxo)->rx_drops_no_frags;
Sathya Perla3abcded2010-10-03 22:12:27 -0700536 }
537
Sathya Perla3c8def92011-06-12 20:01:58 +0000538 for_all_tx_queues(adapter, txo, i) {
Sathya Perlaab1594e2011-07-25 19:10:15 +0000539 const struct be_tx_stats *tx_stats = tx_stats(txo);
540 do {
541 start = u64_stats_fetch_begin_bh(&tx_stats->sync);
542 pkts = tx_stats(txo)->tx_pkts;
543 bytes = tx_stats(txo)->tx_bytes;
544 } while (u64_stats_fetch_retry_bh(&tx_stats->sync, start));
545 stats->tx_packets += pkts;
546 stats->tx_bytes += bytes;
Sathya Perla3c8def92011-06-12 20:01:58 +0000547 }
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700548
549 /* bad pkts received */
Sathya Perlaab1594e2011-07-25 19:10:15 +0000550 stats->rx_errors = drvs->rx_crc_errors +
Ajit Khaparde89a88ab2011-05-16 07:36:18 +0000551 drvs->rx_alignment_symbol_errors +
552 drvs->rx_in_range_errors +
553 drvs->rx_out_range_errors +
554 drvs->rx_frame_too_long +
555 drvs->rx_dropped_too_small +
556 drvs->rx_dropped_too_short +
557 drvs->rx_dropped_header_too_small +
558 drvs->rx_dropped_tcp_length +
Sathya Perlaab1594e2011-07-25 19:10:15 +0000559 drvs->rx_dropped_runt;
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700560
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700561 /* detailed rx errors */
Sathya Perlaab1594e2011-07-25 19:10:15 +0000562 stats->rx_length_errors = drvs->rx_in_range_errors +
Ajit Khaparde89a88ab2011-05-16 07:36:18 +0000563 drvs->rx_out_range_errors +
564 drvs->rx_frame_too_long;
Sathya Perla68110862009-06-10 02:21:16 +0000565
Sathya Perlaab1594e2011-07-25 19:10:15 +0000566 stats->rx_crc_errors = drvs->rx_crc_errors;
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700567
568 /* frame alignment errors */
Sathya Perlaab1594e2011-07-25 19:10:15 +0000569 stats->rx_frame_errors = drvs->rx_alignment_symbol_errors;
Sathya Perla68110862009-06-10 02:21:16 +0000570
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700571 /* receiver fifo overrun */
572 /* drops_no_pbuf is no per i/f, it's per BE card */
Sathya Perlaab1594e2011-07-25 19:10:15 +0000573 stats->rx_fifo_errors = drvs->rxpp_fifo_overflow_drop +
Ajit Khaparde89a88ab2011-05-16 07:36:18 +0000574 drvs->rx_input_fifo_overflow_drop +
575 drvs->rx_drops_no_pbuf;
Sathya Perlaab1594e2011-07-25 19:10:15 +0000576 return stats;
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700577}
578
Ajit Khapardeb236916a2011-12-30 12:15:40 +0000579void be_link_status_update(struct be_adapter *adapter, u8 link_status)
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700580{
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700581 struct net_device *netdev = adapter->netdev;
582
Ajit Khapardeb236916a2011-12-30 12:15:40 +0000583 if (!(adapter->flags & BE_FLAGS_LINK_STATUS_INIT)) {
Sathya Perlaea172a02011-08-02 19:57:42 +0000584 netif_carrier_off(netdev);
Ajit Khapardeb236916a2011-12-30 12:15:40 +0000585 adapter->flags |= BE_FLAGS_LINK_STATUS_INIT;
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700586 }
Ajit Khapardeb236916a2011-12-30 12:15:40 +0000587
588 if ((link_status & LINK_STATUS_MASK) == LINK_UP)
589 netif_carrier_on(netdev);
590 else
591 netif_carrier_off(netdev);
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700592}
593
Sathya Perla3c8def92011-06-12 20:01:58 +0000594static void be_tx_stats_update(struct be_tx_obj *txo,
Ajit Khaparde91992e42010-02-19 13:57:12 +0000595 u32 wrb_cnt, u32 copied, u32 gso_segs, bool stopped)
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700596{
Sathya Perla3c8def92011-06-12 20:01:58 +0000597 struct be_tx_stats *stats = tx_stats(txo);
598
Sathya Perlaab1594e2011-07-25 19:10:15 +0000599 u64_stats_update_begin(&stats->sync);
Sathya Perlaac124ff2011-07-25 19:10:14 +0000600 stats->tx_reqs++;
601 stats->tx_wrbs += wrb_cnt;
602 stats->tx_bytes += copied;
603 stats->tx_pkts += (gso_segs ? gso_segs : 1);
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700604 if (stopped)
Sathya Perlaac124ff2011-07-25 19:10:14 +0000605 stats->tx_stops++;
Sathya Perlaab1594e2011-07-25 19:10:15 +0000606 u64_stats_update_end(&stats->sync);
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700607}
608
609/* Determine number of WRB entries needed to xmit data in an skb */
Sathya Perlafe6d2a32010-11-21 23:25:50 +0000610static u32 wrb_cnt_for_skb(struct be_adapter *adapter, struct sk_buff *skb,
611 bool *dummy)
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700612{
David S. Millerebc8d2a2009-06-09 01:01:31 -0700613 int cnt = (skb->len > skb->data_len);
614
615 cnt += skb_shinfo(skb)->nr_frags;
616
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700617 /* to account for hdr wrb */
618 cnt++;
Sathya Perlafe6d2a32010-11-21 23:25:50 +0000619 if (lancer_chip(adapter) || !(cnt & 1)) {
620 *dummy = false;
621 } else {
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700622 /* add a dummy to make it an even num */
623 cnt++;
624 *dummy = true;
Sathya Perlafe6d2a32010-11-21 23:25:50 +0000625 }
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700626 BUG_ON(cnt > BE_MAX_TX_FRAG_COUNT);
627 return cnt;
628}
629
630static inline void wrb_fill(struct be_eth_wrb *wrb, u64 addr, int len)
631{
632 wrb->frag_pa_hi = upper_32_bits(addr);
633 wrb->frag_pa_lo = addr & 0xFFFFFFFF;
634 wrb->frag_len = len & ETH_WRB_FRAG_LEN_MASK;
Somnath Kotur89b1f492012-06-24 19:40:55 +0000635 wrb->rsvd0 = 0;
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700636}
637
Ajit Khaparde1ded1322011-12-09 13:53:17 +0000638static inline u16 be_get_tx_vlan_tag(struct be_adapter *adapter,
639 struct sk_buff *skb)
640{
641 u8 vlan_prio;
642 u16 vlan_tag;
643
644 vlan_tag = vlan_tx_tag_get(skb);
645 vlan_prio = (vlan_tag & VLAN_PRIO_MASK) >> VLAN_PRIO_SHIFT;
646 /* If vlan priority provided by OS is NOT in available bmap */
647 if (!(adapter->vlan_prio_bmap & (1 << vlan_prio)))
648 vlan_tag = (vlan_tag & ~VLAN_PRIO_MASK) |
649 adapter->recommended_prio;
650
651 return vlan_tag;
652}
653
/* Fill the per-packet header WRB that precedes the fragment WRBs of a
 * TX request.
 * @wrb_cnt: total WRBs for this pkt (header + frags + optional dummy)
 * @len:     total payload bytes covered by the fragment WRBs
 * @skip_hw_vlan: when true, request an event but no completion so that
 *		  HW VLAN tag insertion is skipped (evt = 1, compl = 0)
 */
static void wrb_fill_hdr(struct be_adapter *adapter, struct be_eth_hdr_wrb *hdr,
		struct sk_buff *skb, u32 wrb_cnt, u32 len, bool skip_hw_vlan)
{
	u16 vlan_tag;

	memset(hdr, 0, sizeof(*hdr));

	AMAP_SET_BITS(struct amap_eth_hdr_wrb, crc, hdr, 1);

	if (skb_is_gso(skb)) {
		AMAP_SET_BITS(struct amap_eth_hdr_wrb, lso, hdr, 1);
		AMAP_SET_BITS(struct amap_eth_hdr_wrb, lso_mss,
			hdr, skb_shinfo(skb)->gso_size);
		/* the lso6 bit is not used on Lancer chips */
		if (skb_is_gso_v6(skb) && !lancer_chip(adapter))
			AMAP_SET_BITS(struct amap_eth_hdr_wrb, lso6, hdr, 1);
	} else if (skb->ip_summed == CHECKSUM_PARTIAL) {
		/* request HW L4 checksum offload for TCP or UDP pkts */
		if (is_tcp_pkt(skb))
			AMAP_SET_BITS(struct amap_eth_hdr_wrb, tcpcs, hdr, 1);
		else if (is_udp_pkt(skb))
			AMAP_SET_BITS(struct amap_eth_hdr_wrb, udpcs, hdr, 1);
	}

	if (vlan_tx_tag_present(skb)) {
		AMAP_SET_BITS(struct amap_eth_hdr_wrb, vlan, hdr, 1);
		vlan_tag = be_get_tx_vlan_tag(adapter, skb);
		AMAP_SET_BITS(struct amap_eth_hdr_wrb, vlan_tag, hdr, vlan_tag);
	}

	/* To skip HW VLAN tagging: evt = 1, compl = 0 */
	AMAP_SET_BITS(struct amap_eth_hdr_wrb, complete, hdr, !skip_hw_vlan);
	AMAP_SET_BITS(struct amap_eth_hdr_wrb, event, hdr, 1);
	AMAP_SET_BITS(struct amap_eth_hdr_wrb, num_wrb, hdr, wrb_cnt);
	AMAP_SET_BITS(struct amap_eth_hdr_wrb, len, hdr, len);
}
688
Ivan Vecera2b7bceb2011-02-02 08:05:12 +0000689static void unmap_tx_frag(struct device *dev, struct be_eth_wrb *wrb,
Sathya Perla7101e112010-03-22 20:41:12 +0000690 bool unmap_single)
691{
692 dma_addr_t dma;
693
694 be_dws_le_to_cpu(wrb, sizeof(*wrb));
695
696 dma = (u64)wrb->frag_pa_hi << 32 | (u64)wrb->frag_pa_lo;
FUJITA Tomonorib681ee72010-04-04 21:40:18 +0000697 if (wrb->frag_len) {
Sathya Perla7101e112010-03-22 20:41:12 +0000698 if (unmap_single)
Ivan Vecera2b7bceb2011-02-02 08:05:12 +0000699 dma_unmap_single(dev, dma, wrb->frag_len,
700 DMA_TO_DEVICE);
Sathya Perla7101e112010-03-22 20:41:12 +0000701 else
Ivan Vecera2b7bceb2011-02-02 08:05:12 +0000702 dma_unmap_page(dev, dma, wrb->frag_len, DMA_TO_DEVICE);
Sathya Perla7101e112010-03-22 20:41:12 +0000703 }
704}
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700705
/* DMA-map the skb's linear part and page fragments and queue one WRB per
 * fragment on @txq, preceded by a header WRB (and followed by a
 * zero-length dummy WRB when @dummy_wrb is set, to keep the WRB count
 * even where the chip requires it).
 *
 * Returns the number of payload bytes queued, or 0 on a DMA mapping
 * failure, in which case all mappings made so far are undone and the
 * queue head is restored to its value on entry.
 */
static int make_tx_wrbs(struct be_adapter *adapter, struct be_queue_info *txq,
		struct sk_buff *skb, u32 wrb_cnt, bool dummy_wrb,
		bool skip_hw_vlan)
{
	dma_addr_t busaddr;
	int i, copied = 0;
	struct device *dev = &adapter->pdev->dev;
	struct sk_buff *first_skb = skb;
	struct be_eth_wrb *wrb;
	struct be_eth_hdr_wrb *hdr;
	bool map_single = false;
	u16 map_head;

	hdr = queue_head_node(txq);
	queue_head_inc(txq);
	map_head = txq->head;	/* first fragment WRB; unwind restarts here */

	/* linear (header) part of the skb, if any */
	if (skb->len > skb->data_len) {
		int len = skb_headlen(skb);
		busaddr = dma_map_single(dev, skb->data, len, DMA_TO_DEVICE);
		if (dma_mapping_error(dev, busaddr))
			goto dma_err;
		map_single = true;
		wrb = queue_head_node(txq);
		wrb_fill(wrb, busaddr, len);
		be_dws_cpu_to_le(wrb, sizeof(*wrb));
		queue_head_inc(txq);
		copied += len;
	}

	/* one WRB per page fragment */
	for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
		const struct skb_frag_struct *frag =
			&skb_shinfo(skb)->frags[i];
		busaddr = skb_frag_dma_map(dev, frag, 0,
				skb_frag_size(frag), DMA_TO_DEVICE);
		if (dma_mapping_error(dev, busaddr))
			goto dma_err;
		wrb = queue_head_node(txq);
		wrb_fill(wrb, busaddr, skb_frag_size(frag));
		be_dws_cpu_to_le(wrb, sizeof(*wrb));
		queue_head_inc(txq);
		copied += skb_frag_size(frag);
	}

	if (dummy_wrb) {
		wrb = queue_head_node(txq);
		wrb_fill(wrb, 0, 0);
		be_dws_cpu_to_le(wrb, sizeof(*wrb));
		queue_head_inc(txq);
	}

	wrb_fill_hdr(adapter, hdr, first_skb, wrb_cnt, copied, skip_hw_vlan);
	be_dws_cpu_to_le(hdr, sizeof(*hdr));

	return copied;
dma_err:
	/* Unmap everything queued so far; only the first WRB can be a
	 * single-buffer mapping (linear part), the rest are page frags.
	 */
	txq->head = map_head;
	while (copied) {
		wrb = queue_head_node(txq);
		unmap_tx_frag(dev, wrb, map_single);
		map_single = false;
		copied -= wrb->frag_len;
		queue_head_inc(txq);
	}
	return 0;
}
772
/* Insert the VLAN tag (and the outer QnQ tag, if configured) into the
 * packet data itself instead of letting the HW tag the packet.
 *
 * May return a different skb (skb_share_check()/__vlan_put_tag() can
 * reallocate) or NULL when insertion failed and the skb was freed; the
 * caller owns whatever is returned.  When a tag has been inlined and
 * @skip_hw_vlan is non-NULL, *skip_hw_vlan is set to true so the caller
 * disables HW VLAN insertion in the WRB.
 */
static struct sk_buff *be_insert_vlan_in_pkt(struct be_adapter *adapter,
		struct sk_buff *skb,
		bool *skip_hw_vlan)
{
	u16 vlan_tag = 0;

	skb = skb_share_check(skb, GFP_ATOMIC);
	if (unlikely(!skb))
		return skb;

	if (vlan_tx_tag_present(skb))
		vlan_tag = be_get_tx_vlan_tag(adapter, skb);
	else if (qnq_async_evt_rcvd(adapter) && adapter->pvid)
		vlan_tag = adapter->pvid;	/* fall back to port VLAN */

	if (vlan_tag) {
		skb = __vlan_put_tag(skb, htons(ETH_P_8021Q), vlan_tag);
		if (unlikely(!skb))
			return skb;
		skb->vlan_tci = 0;	/* tag now lives in the payload */
		if (skip_hw_vlan)
			*skip_hw_vlan = true;
	}

	/* Insert the outer VLAN, if any */
	if (adapter->qnq_vid) {
		vlan_tag = adapter->qnq_vid;
		skb = __vlan_put_tag(skb, htons(ETH_P_8021Q), vlan_tag);
		if (unlikely(!skb))
			return skb;
		if (skip_hw_vlan)
			*skip_hw_vlan = true;
	}

	return skb;
}
809
Ajit Khapardebc0c3402013-04-24 11:52:50 +0000810static bool be_ipv6_exthdr_check(struct sk_buff *skb)
811{
812 struct ethhdr *eh = (struct ethhdr *)skb->data;
813 u16 offset = ETH_HLEN;
814
815 if (eh->h_proto == htons(ETH_P_IPV6)) {
816 struct ipv6hdr *ip6h = (struct ipv6hdr *)(skb->data + offset);
817
818 offset += sizeof(struct ipv6hdr);
819 if (ip6h->nexthdr != NEXTHDR_TCP &&
820 ip6h->nexthdr != NEXTHDR_UDP) {
821 struct ipv6_opt_hdr *ehdr =
822 (struct ipv6_opt_hdr *) (skb->data + offset);
823
824 /* offending pkt: 2nd byte following IPv6 hdr is 0xff */
825 if (ehdr->hdrlen == 0xff)
826 return true;
827 }
828 }
829 return false;
830}
831
832static int be_vlan_tag_tx_chk(struct be_adapter *adapter, struct sk_buff *skb)
833{
834 return vlan_tx_tag_present(skb) || adapter->pvid || adapter->qnq_vid;
835}
836
Sathya Perlaee9c7992013-05-22 23:04:55 +0000837static int be_ipv6_tx_stall_chk(struct be_adapter *adapter,
838 struct sk_buff *skb)
Ajit Khapardebc0c3402013-04-24 11:52:50 +0000839{
Sathya Perlaee9c7992013-05-22 23:04:55 +0000840 return BE3_chip(adapter) && be_ipv6_exthdr_check(skb);
Ajit Khapardebc0c3402013-04-24 11:52:50 +0000841}
842
/* Apply chip-specific TX errata workarounds to @skb before queueing.
 * Returns the (possibly reallocated) skb to transmit, or NULL when the
 * packet had to be dropped.  *skip_hw_vlan is set to true when the VLAN
 * tag has been inserted in software and HW tagging must be skipped.
 */
static struct sk_buff *be_xmit_workarounds(struct be_adapter *adapter,
		struct sk_buff *skb,
		bool *skip_hw_vlan)
{
	struct vlan_ethhdr *veh = (struct vlan_ethhdr *)skb->data;
	unsigned int eth_hdr_len;
	struct iphdr *ip;

	/* Lancer ASIC has a bug wherein packets that are 32 bytes or less
	 * may cause a transmit stall on that port. So the work-around is to
	 * pad such packets to a 36-byte length.
	 */
	if (unlikely(lancer_chip(adapter) && skb->len <= 32)) {
		if (skb_padto(skb, 36))
			goto tx_drop;
		skb->len = 36;
	}

	/* For padded packets, BE HW modifies tot_len field in IP header
	 * incorrecly when VLAN tag is inserted by HW.
	 * For padded packets, Lancer computes incorrect checksum.
	 */
	eth_hdr_len = ntohs(skb->protocol) == ETH_P_8021Q ?
						VLAN_ETH_HLEN : ETH_HLEN;
	if (skb->len <= 60 &&
	    (lancer_chip(adapter) || vlan_tx_tag_present(skb)) &&
	    is_ipv4_pkt(skb)) {
		ip = (struct iphdr *)ip_hdr(skb);
		/* trim off the pad bytes so HW never sees a padded pkt */
		pskb_trim(skb, eth_hdr_len + ntohs(ip->tot_len));
	}

	/* If vlan tag is already inlined in the packet, skip HW VLAN
	 * tagging in UMC mode
	 */
	if ((adapter->function_mode & UMC_ENABLED) &&
	    veh->h_vlan_proto == htons(ETH_P_8021Q))
			*skip_hw_vlan = true;

	/* HW has a bug wherein it will calculate CSUM for VLAN
	 * pkts even though it is disabled.
	 * Manually insert VLAN in pkt.
	 */
	if (skb->ip_summed != CHECKSUM_PARTIAL &&
	    vlan_tx_tag_present(skb)) {
		skb = be_insert_vlan_in_pkt(adapter, skb, skip_hw_vlan);
		if (unlikely(!skb))
			goto tx_drop;
	}

	/* HW may lockup when VLAN HW tagging is requested on
	 * certain ipv6 packets. Drop such pkts if the HW workaround to
	 * skip HW tagging is not enabled by FW.
	 */
	if (unlikely(be_ipv6_tx_stall_chk(adapter, skb) &&
	    (adapter->pvid || adapter->qnq_vid) &&
	    !qnq_async_evt_rcvd(adapter)))
		goto tx_drop;

	/* Manual VLAN tag insertion to prevent:
	 * ASIC lockup when the ASIC inserts VLAN tag into
	 * certain ipv6 packets. Insert VLAN tags in driver,
	 * and set event, completion, vlan bits accordingly
	 * in the Tx WRB.
	 */
	if (be_ipv6_tx_stall_chk(adapter, skb) &&
	    be_vlan_tag_tx_chk(adapter, skb)) {
		skb = be_insert_vlan_in_pkt(adapter, skb, skip_hw_vlan);
		if (unlikely(!skb))
			goto tx_drop;
	}

	return skb;
tx_drop:
	dev_kfree_skb_any(skb);
	return NULL;
}
919
/* ndo_start_xmit handler: apply TX workarounds, build the WRBs for the
 * skb on the queue selected by the stack, then ring the TX doorbell.
 * Always returns NETDEV_TX_OK; undeliverable skbs are freed here.
 */
static netdev_tx_t be_xmit(struct sk_buff *skb, struct net_device *netdev)
{
	struct be_adapter *adapter = netdev_priv(netdev);
	struct be_tx_obj *txo = &adapter->tx_obj[skb_get_queue_mapping(skb)];
	struct be_queue_info *txq = &txo->q;
	bool dummy_wrb, stopped = false;
	u32 wrb_cnt = 0, copied = 0;
	bool skip_hw_vlan = false;
	u32 start = txq->head;

	skb = be_xmit_workarounds(adapter, skb, &skip_hw_vlan);
	if (!skb)
		return NETDEV_TX_OK;	/* dropped by the workarounds */

	wrb_cnt = wrb_cnt_for_skb(adapter, skb, &dummy_wrb);

	copied = make_tx_wrbs(adapter, txq, skb, wrb_cnt, dummy_wrb,
			      skip_hw_vlan);
	if (copied) {
		int gso_segs = skb_shinfo(skb)->gso_segs;

		/* record the sent skb in the sent_skb table */
		BUG_ON(txo->sent_skb_list[start]);
		txo->sent_skb_list[start] = skb;

		/* Ensure txq has space for the next skb; Else stop the queue
		 * *BEFORE* ringing the tx doorbell, so that we serialze the
		 * tx compls of the current transmit which'll wake up the queue
		 */
		atomic_add(wrb_cnt, &txq->used);
		if ((BE_MAX_TX_FRAG_COUNT + atomic_read(&txq->used)) >=
								txq->len) {
			netif_stop_subqueue(netdev, skb_get_queue_mapping(skb));
			stopped = true;
		}

		be_txq_notify(adapter, txo, wrb_cnt);

		be_tx_stats_update(txo, wrb_cnt, copied, gso_segs, stopped);
	} else {
		/* DMA mapping failed: restore the queue head and drop */
		txq->head = start;
		dev_kfree_skb_any(skb);
	}
	return NETDEV_TX_OK;
}
965
966static int be_change_mtu(struct net_device *netdev, int new_mtu)
967{
968 struct be_adapter *adapter = netdev_priv(netdev);
969 if (new_mtu < BE_MIN_MTU ||
Ajit Khaparde34a89b82010-02-09 01:32:43 +0000970 new_mtu > (BE_MAX_JUMBO_FRAME_SIZE -
971 (ETH_HLEN + ETH_FCS_LEN))) {
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700972 dev_info(&adapter->pdev->dev,
973 "MTU must be between %d and %d bytes\n",
Ajit Khaparde34a89b82010-02-09 01:32:43 +0000974 BE_MIN_MTU,
975 (BE_MAX_JUMBO_FRAME_SIZE - (ETH_HLEN + ETH_FCS_LEN)));
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700976 return -EINVAL;
977 }
978 dev_info(&adapter->pdev->dev, "MTU changed from %d to %d bytes\n",
979 netdev->mtu, new_mtu);
980 netdev->mtu = new_mtu;
981 return 0;
982}
983
984/*
Ajit Khaparde82903e42010-02-09 01:34:57 +0000985 * A max of 64 (BE_NUM_VLANS_SUPPORTED) vlans can be configured in BE.
986 * If the user configures more, place BE in vlan promiscuous mode.
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700987 */
Sathya Perla10329df2012-06-05 19:37:18 +0000988static int be_vid_config(struct be_adapter *adapter)
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700989{
Sathya Perla10329df2012-06-05 19:37:18 +0000990 u16 vids[BE_NUM_VLANS_SUPPORTED];
991 u16 num = 0, i;
Ajit Khaparde82903e42010-02-09 01:34:57 +0000992 int status = 0;
Ajit Khaparde1da87b72010-07-23 01:51:22 +0000993
Sathya Perlac0e64ef2011-08-02 19:57:43 +0000994 /* No need to further configure vids if in promiscuous mode */
995 if (adapter->promiscuous)
996 return 0;
997
Padmanabh Ratnakar0fc16eb2012-04-25 01:46:06 +0000998 if (adapter->vlans_added > adapter->max_vlans)
999 goto set_vlan_promisc;
1000
1001 /* Construct VLAN Table to give to HW */
1002 for (i = 0; i < VLAN_N_VID; i++)
1003 if (adapter->vlan_tag[i])
Sathya Perla10329df2012-06-05 19:37:18 +00001004 vids[num++] = cpu_to_le16(i);
Padmanabh Ratnakar0fc16eb2012-04-25 01:46:06 +00001005
1006 status = be_cmd_vlan_config(adapter, adapter->if_handle,
Sathya Perla10329df2012-06-05 19:37:18 +00001007 vids, num, 1, 0);
Padmanabh Ratnakar0fc16eb2012-04-25 01:46:06 +00001008
1009 /* Set to VLAN promisc mode as setting VLAN filter failed */
1010 if (status) {
1011 dev_info(&adapter->pdev->dev, "Exhausted VLAN HW filters.\n");
1012 dev_info(&adapter->pdev->dev, "Disabling HW VLAN filtering.\n");
1013 goto set_vlan_promisc;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001014 }
Ajit Khaparde1da87b72010-07-23 01:51:22 +00001015
Sathya Perlab31c50a2009-09-17 10:30:13 -07001016 return status;
Padmanabh Ratnakar0fc16eb2012-04-25 01:46:06 +00001017
1018set_vlan_promisc:
1019 status = be_cmd_vlan_config(adapter, adapter->if_handle,
1020 NULL, 0, 1, 1);
1021 return status;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001022}
1023
Patrick McHardy80d5c362013-04-19 02:04:28 +00001024static int be_vlan_add_vid(struct net_device *netdev, __be16 proto, u16 vid)
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001025{
1026 struct be_adapter *adapter = netdev_priv(netdev);
Ajit Khaparde80817cb2011-12-30 12:15:12 +00001027 int status = 0;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001028
Padmanabh Ratnakara85e9982012-10-20 06:02:40 +00001029 if (!lancer_chip(adapter) && !be_physfn(adapter)) {
Ajit Khaparde80817cb2011-12-30 12:15:12 +00001030 status = -EINVAL;
1031 goto ret;
1032 }
Sarveshwar Bandiba343c72010-03-31 02:56:12 +00001033
Padmanabh Ratnakara85e9982012-10-20 06:02:40 +00001034 /* Packets with VID 0 are always received by Lancer by default */
1035 if (lancer_chip(adapter) && vid == 0)
1036 goto ret;
1037
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001038 adapter->vlan_tag[vid] = 1;
Ajit Khaparde82903e42010-02-09 01:34:57 +00001039 if (adapter->vlans_added <= (adapter->max_vlans + 1))
Sathya Perla10329df2012-06-05 19:37:18 +00001040 status = be_vid_config(adapter);
Jiri Pirko8e586132011-12-08 19:52:37 -05001041
Ajit Khaparde80817cb2011-12-30 12:15:12 +00001042 if (!status)
1043 adapter->vlans_added++;
1044 else
1045 adapter->vlan_tag[vid] = 0;
1046ret:
1047 return status;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001048}
1049
Patrick McHardy80d5c362013-04-19 02:04:28 +00001050static int be_vlan_rem_vid(struct net_device *netdev, __be16 proto, u16 vid)
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001051{
1052 struct be_adapter *adapter = netdev_priv(netdev);
Ajit Khaparde80817cb2011-12-30 12:15:12 +00001053 int status = 0;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001054
Padmanabh Ratnakara85e9982012-10-20 06:02:40 +00001055 if (!lancer_chip(adapter) && !be_physfn(adapter)) {
Ajit Khaparde80817cb2011-12-30 12:15:12 +00001056 status = -EINVAL;
1057 goto ret;
1058 }
Sarveshwar Bandiba343c72010-03-31 02:56:12 +00001059
Padmanabh Ratnakara85e9982012-10-20 06:02:40 +00001060 /* Packets with VID 0 are always received by Lancer by default */
1061 if (lancer_chip(adapter) && vid == 0)
1062 goto ret;
1063
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001064 adapter->vlan_tag[vid] = 0;
Ajit Khaparde82903e42010-02-09 01:34:57 +00001065 if (adapter->vlans_added <= adapter->max_vlans)
Sathya Perla10329df2012-06-05 19:37:18 +00001066 status = be_vid_config(adapter);
Jiri Pirko8e586132011-12-08 19:52:37 -05001067
Ajit Khaparde80817cb2011-12-30 12:15:12 +00001068 if (!status)
1069 adapter->vlans_added--;
1070 else
1071 adapter->vlan_tag[vid] = 1;
1072ret:
1073 return status;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001074}
1075
/* ndo_set_rx_mode handler: program promiscuous / multicast / unicast RX
 * filters to match the netdev's flags and address lists, falling back
 * to (m)cast promiscuous mode when HW filter slots are exhausted.
 */
static void be_set_rx_mode(struct net_device *netdev)
{
	struct be_adapter *adapter = netdev_priv(netdev);
	int status;

	if (netdev->flags & IFF_PROMISC) {
		be_cmd_rx_filter(adapter, IFF_PROMISC, ON);
		adapter->promiscuous = true;
		goto done;
	}

	/* BE was previously in promiscuous mode; disable it */
	if (adapter->promiscuous) {
		adapter->promiscuous = false;
		be_cmd_rx_filter(adapter, IFF_PROMISC, OFF);

		/* re-program the VLAN filters skipped while promiscuous */
		if (adapter->vlans_added)
			be_vid_config(adapter);
	}

	/* Enable multicast promisc if num configured exceeds what we support */
	if (netdev->flags & IFF_ALLMULTI ||
	    netdev_mc_count(netdev) > adapter->max_mcast_mac) {
		be_cmd_rx_filter(adapter, IFF_ALLMULTI, ON);
		goto done;
	}

	if (netdev_uc_count(netdev) != adapter->uc_macs) {
		struct netdev_hw_addr *ha;
		int i = 1; /* First slot is claimed by the Primary MAC */

		/* delete all previously programmed secondary UC MACs */
		for (; adapter->uc_macs > 0; adapter->uc_macs--, i++) {
			be_cmd_pmac_del(adapter, adapter->if_handle,
					adapter->pmac_id[i], 0);
		}

		/* more UC addresses than pmac slots: go fully promiscuous */
		if (netdev_uc_count(netdev) > adapter->max_pmac_cnt) {
			be_cmd_rx_filter(adapter, IFF_PROMISC, ON);
			adapter->promiscuous = true;
			goto done;
		}

		netdev_for_each_uc_addr(ha, adapter->netdev) {
			adapter->uc_macs++; /* First slot is for Primary MAC */
			be_cmd_pmac_add(adapter, (u8 *)ha->addr,
					adapter->if_handle,
					&adapter->pmac_id[adapter->uc_macs], 0);
		}
	}

	status = be_cmd_rx_filter(adapter, IFF_MULTICAST, ON);

	/* Set to MCAST promisc mode if setting MULTICAST address fails */
	if (status) {
		dev_info(&adapter->pdev->dev, "Exhausted multicast HW filters.\n");
		dev_info(&adapter->pdev->dev, "Disabling HW multicast filtering.\n");
		be_cmd_rx_filter(adapter, IFF_ALLMULTI, ON);
	}
done:
	return;
}
1137
/* ndo_set_vf_mac handler: replace the MAC programmed for @vf.
 * Deletes the currently programmed pmac (if any) and programs @mac on
 * the VF's interface; the cached vf_cfg->mac_addr is updated on success.
 */
static int be_set_vf_mac(struct net_device *netdev, int vf, u8 *mac)
{
	struct be_adapter *adapter = netdev_priv(netdev);
	struct be_vf_cfg *vf_cfg = &adapter->vf_cfg[vf];
	int status;
	bool active_mac = false;
	u32 pmac_id;
	u8 old_mac[ETH_ALEN];

	if (!sriov_enabled(adapter))
		return -EPERM;

	if (!is_valid_ether_addr(mac) || vf >= adapter->num_vfs)
		return -EINVAL;

	if (lancer_chip(adapter)) {
		/* Lancer: delete the currently active MAC (if any) before
		 * setting the new one via the mac-list command
		 */
		status = be_cmd_get_mac_from_list(adapter, old_mac, &active_mac,
						&pmac_id, vf + 1);
		if (!status && active_mac)
			be_cmd_pmac_del(adapter, vf_cfg->if_handle,
					pmac_id, vf + 1);

		status = be_cmd_set_mac_list(adapter, mac, 1, vf + 1);
	} else {
		/* NOTE(review): the status of pmac_del is overwritten by
		 * pmac_add below, so a failed delete goes unreported.
		 */
		status = be_cmd_pmac_del(adapter, vf_cfg->if_handle,
				vf_cfg->pmac_id, vf + 1);

		status = be_cmd_pmac_add(adapter, mac, vf_cfg->if_handle,
					 &vf_cfg->pmac_id, vf + 1);
	}

	if (status)
		dev_err(&adapter->pdev->dev, "MAC %pM set on VF %d Failed\n",
				mac, vf);
	else
		memcpy(vf_cfg->mac_addr, mac, ETH_ALEN);

	return status;
}
1177
Ajit Khaparde64600ea2010-07-23 01:50:34 +00001178static int be_get_vf_config(struct net_device *netdev, int vf,
1179 struct ifla_vf_info *vi)
1180{
1181 struct be_adapter *adapter = netdev_priv(netdev);
Sathya Perla11ac75e2011-12-13 00:58:50 +00001182 struct be_vf_cfg *vf_cfg = &adapter->vf_cfg[vf];
Ajit Khaparde64600ea2010-07-23 01:50:34 +00001183
Sathya Perla11ac75e2011-12-13 00:58:50 +00001184 if (!sriov_enabled(adapter))
Ajit Khaparde64600ea2010-07-23 01:50:34 +00001185 return -EPERM;
1186
Sathya Perla11ac75e2011-12-13 00:58:50 +00001187 if (vf >= adapter->num_vfs)
Ajit Khaparde64600ea2010-07-23 01:50:34 +00001188 return -EINVAL;
1189
1190 vi->vf = vf;
Sathya Perla11ac75e2011-12-13 00:58:50 +00001191 vi->tx_rate = vf_cfg->tx_rate;
1192 vi->vlan = vf_cfg->vlan_tag;
Ajit Khaparde64600ea2010-07-23 01:50:34 +00001193 vi->qos = 0;
Sathya Perla11ac75e2011-12-13 00:58:50 +00001194 memcpy(&vi->mac, vf_cfg->mac_addr, ETH_ALEN);
Ajit Khaparde64600ea2010-07-23 01:50:34 +00001195
1196 return 0;
1197}
1198
Ajit Khaparde1da87b72010-07-23 01:51:22 +00001199static int be_set_vf_vlan(struct net_device *netdev,
1200 int vf, u16 vlan, u8 qos)
1201{
1202 struct be_adapter *adapter = netdev_priv(netdev);
1203 int status = 0;
1204
Sathya Perla11ac75e2011-12-13 00:58:50 +00001205 if (!sriov_enabled(adapter))
Ajit Khaparde1da87b72010-07-23 01:51:22 +00001206 return -EPERM;
1207
Sathya Perla11ac75e2011-12-13 00:58:50 +00001208 if (vf >= adapter->num_vfs || vlan > 4095)
Ajit Khaparde1da87b72010-07-23 01:51:22 +00001209 return -EINVAL;
1210
1211 if (vlan) {
Ajit Khapardef1f3ee12012-03-18 06:23:41 +00001212 if (adapter->vf_cfg[vf].vlan_tag != vlan) {
1213 /* If this is new value, program it. Else skip. */
1214 adapter->vf_cfg[vf].vlan_tag = vlan;
1215
1216 status = be_cmd_set_hsw_config(adapter, vlan,
1217 vf + 1, adapter->vf_cfg[vf].if_handle);
1218 }
Ajit Khaparde1da87b72010-07-23 01:51:22 +00001219 } else {
Ajit Khapardef1f3ee12012-03-18 06:23:41 +00001220 /* Reset Transparent Vlan Tagging. */
Sathya Perla11ac75e2011-12-13 00:58:50 +00001221 adapter->vf_cfg[vf].vlan_tag = 0;
Ajit Khapardef1f3ee12012-03-18 06:23:41 +00001222 vlan = adapter->vf_cfg[vf].def_vid;
1223 status = be_cmd_set_hsw_config(adapter, vlan, vf + 1,
1224 adapter->vf_cfg[vf].if_handle);
Ajit Khaparde1da87b72010-07-23 01:51:22 +00001225 }
1226
Ajit Khaparde1da87b72010-07-23 01:51:22 +00001227
1228 if (status)
1229 dev_info(&adapter->pdev->dev,
1230 "VLAN %d config on VF %d failed\n", vlan, vf);
1231 return status;
1232}
1233
Ajit Khapardee1d18732010-07-23 01:52:13 +00001234static int be_set_vf_tx_rate(struct net_device *netdev,
1235 int vf, int rate)
1236{
1237 struct be_adapter *adapter = netdev_priv(netdev);
1238 int status = 0;
1239
Sathya Perla11ac75e2011-12-13 00:58:50 +00001240 if (!sriov_enabled(adapter))
Ajit Khapardee1d18732010-07-23 01:52:13 +00001241 return -EPERM;
1242
Ajit Khaparde94f434c2011-12-30 12:15:30 +00001243 if (vf >= adapter->num_vfs)
Ajit Khapardee1d18732010-07-23 01:52:13 +00001244 return -EINVAL;
1245
Ajit Khaparde94f434c2011-12-30 12:15:30 +00001246 if (rate < 100 || rate > 10000) {
1247 dev_err(&adapter->pdev->dev,
1248 "tx rate must be between 100 and 10000 Mbps\n");
1249 return -EINVAL;
1250 }
Ajit Khapardee1d18732010-07-23 01:52:13 +00001251
Padmanabh Ratnakard5c18472012-10-20 06:01:53 +00001252 if (lancer_chip(adapter))
1253 status = be_cmd_set_profile_config(adapter, rate / 10, vf + 1);
1254 else
1255 status = be_cmd_set_qos(adapter, rate / 10, vf + 1);
Ajit Khapardee1d18732010-07-23 01:52:13 +00001256
1257 if (status)
Ajit Khaparde94f434c2011-12-30 12:15:30 +00001258 dev_err(&adapter->pdev->dev,
Ajit Khapardee1d18732010-07-23 01:52:13 +00001259 "tx rate %d on VF %d failed\n", rate, vf);
Ajit Khaparde94f434c2011-12-30 12:15:30 +00001260 else
1261 adapter->vf_cfg[vf].tx_rate = rate;
Ajit Khapardee1d18732010-07-23 01:52:13 +00001262 return status;
1263}
1264
/* Count this PF's VFs by walking the PCI device list.
 * Returns the number of VFs currently assigned to a guest when
 * @vf_state == ASSIGNED, otherwise the total number of VFs found.
 * Returns 0 when the device has no SR-IOV capability.
 */
static int be_find_vfs(struct be_adapter *adapter, int vf_state)
{
	struct pci_dev *dev, *pdev = adapter->pdev;
	int vfs = 0, assigned_vfs = 0, pos;
	u16 offset, stride;

	pos = pci_find_ext_capability(pdev, PCI_EXT_CAP_ID_SRIOV);
	if (!pos)
		return 0;
	/* NOTE(review): offset/stride are read but never used below */
	pci_read_config_word(pdev, pos + PCI_SRIOV_VF_OFFSET, &offset);
	pci_read_config_word(pdev, pos + PCI_SRIOV_VF_STRIDE, &stride);

	/* iterate all devices with our vendor id; pci_get_device() drops
	 * the reference on the previous device and holds one on the next,
	 * so no explicit pci_dev_put() is needed in this loop
	 */
	dev = pci_get_device(pdev->vendor, PCI_ANY_ID, NULL);
	while (dev) {
		if (dev->is_virtfn && pci_physfn(dev) == pdev) {
			vfs++;
			if (dev->dev_flags & PCI_DEV_FLAGS_ASSIGNED)
				assigned_vfs++;
		}
		dev = pci_get_device(pdev->vendor, PCI_ANY_ID, dev);
	}
	return (vf_state == ASSIGNED) ? assigned_vfs : vfs;
}
1288
/* Adaptive interrupt coalescing: recompute and program the EQ delay for
 * @eqo from the RX packet rate observed over the last second.  When AIC
 * is disabled, the statically configured eqo->eqd is (re)programmed.
 * The FW command is issued only when the computed value changes.
 */
static void be_eqd_update(struct be_adapter *adapter, struct be_eq_obj *eqo)
{
	struct be_rx_stats *stats = rx_stats(&adapter->rx_obj[eqo->idx]);
	ulong now = jiffies;
	ulong delta = now - stats->rx_jiffies;
	u64 pkts;
	unsigned int start, eqd;

	if (!eqo->enable_aic) {
		eqd = eqo->eqd;		/* fixed, user-configured delay */
		goto modify_eqd;
	}

	/* EQs beyond the RX queues have no RX stats to base AIC on */
	if (eqo->idx >= adapter->num_rx_qs)
		return;

	stats = rx_stats(&adapter->rx_obj[eqo->idx]);

	/* Wrapped around */
	if (time_before(now, stats->rx_jiffies)) {
		stats->rx_jiffies = now;
		return;
	}

	/* Update once a second */
	if (delta < HZ)
		return;

	/* fetch rx_pkts consistently against the stats writer */
	do {
		start = u64_stats_fetch_begin_bh(&stats->sync);
		pkts = stats->rx_pkts;
	} while (u64_stats_fetch_retry_bh(&stats->sync, start));

	stats->rx_pps = (unsigned long)(pkts - stats->rx_pkts_prev) / (delta / HZ);
	stats->rx_pkts_prev = pkts;
	stats->rx_jiffies = now;
	/* heuristic: scale pps into an eqd, clamped to [min_eqd, max_eqd];
	 * very low rates disable the delay entirely
	 */
	eqd = (stats->rx_pps / 110000) << 3;
	eqd = min(eqd, eqo->max_eqd);
	eqd = max(eqd, eqo->min_eqd);
	if (eqd < 10)
		eqd = 0;

modify_eqd:
	if (eqd != eqo->cur_eqd) {
		be_cmd_modify_eqd(adapter, eqo->q.id, eqd);
		eqo->cur_eqd = eqd;
	}
}
1337
Sathya Perla3abcded2010-10-03 22:12:27 -07001338static void be_rx_stats_update(struct be_rx_obj *rxo,
Sathya Perla2e588f82011-03-11 02:49:26 +00001339 struct be_rx_compl_info *rxcp)
Sathya Perla4097f662009-03-24 16:40:13 -07001340{
Sathya Perlaac124ff2011-07-25 19:10:14 +00001341 struct be_rx_stats *stats = rx_stats(rxo);
Sathya Perla4097f662009-03-24 16:40:13 -07001342
Sathya Perlaab1594e2011-07-25 19:10:15 +00001343 u64_stats_update_begin(&stats->sync);
Sathya Perla3abcded2010-10-03 22:12:27 -07001344 stats->rx_compl++;
Sathya Perla2e588f82011-03-11 02:49:26 +00001345 stats->rx_bytes += rxcp->pkt_size;
Sathya Perla3abcded2010-10-03 22:12:27 -07001346 stats->rx_pkts++;
Sathya Perla2e588f82011-03-11 02:49:26 +00001347 if (rxcp->pkt_type == BE_MULTICAST_PACKET)
Sathya Perla3abcded2010-10-03 22:12:27 -07001348 stats->rx_mcast_pkts++;
Sathya Perla2e588f82011-03-11 02:49:26 +00001349 if (rxcp->err)
Sathya Perlaac124ff2011-07-25 19:10:14 +00001350 stats->rx_compl_err++;
Sathya Perlaab1594e2011-07-25 19:10:15 +00001351 u64_stats_update_end(&stats->sync);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001352}
1353
Sathya Perla2e588f82011-03-11 02:49:26 +00001354static inline bool csum_passed(struct be_rx_compl_info *rxcp)
Ajit Khaparde728a9972009-04-13 15:41:22 -07001355{
Padmanabh Ratnakar19fad862011-03-07 03:08:16 +00001356 /* L4 checksum is not reliable for non TCP/UDP packets.
1357 * Also ignore ipcksm for ipv6 pkts */
Sathya Perla2e588f82011-03-11 02:49:26 +00001358 return (rxcp->tcpf || rxcp->udpf) && rxcp->l4_csum &&
1359 (rxcp->ip_csum || rxcp->ipv6);
Ajit Khaparde728a9972009-04-13 15:41:22 -07001360}
1361
Sathya Perla10ef9ab2012-02-09 18:05:27 +00001362static struct be_rx_page_info *get_rx_page_info(struct be_rx_obj *rxo,
1363 u16 frag_idx)
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001364{
Sathya Perla10ef9ab2012-02-09 18:05:27 +00001365 struct be_adapter *adapter = rxo->adapter;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001366 struct be_rx_page_info *rx_page_info;
Sathya Perla3abcded2010-10-03 22:12:27 -07001367 struct be_queue_info *rxq = &rxo->q;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001368
Sathya Perla3abcded2010-10-03 22:12:27 -07001369 rx_page_info = &rxo->page_info_tbl[frag_idx];
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001370 BUG_ON(!rx_page_info->page);
1371
Ajit Khaparde205859a2010-02-09 01:34:21 +00001372 if (rx_page_info->last_page_user) {
Ivan Vecera2b7bceb2011-02-02 08:05:12 +00001373 dma_unmap_page(&adapter->pdev->dev,
1374 dma_unmap_addr(rx_page_info, bus),
1375 adapter->big_page_size, DMA_FROM_DEVICE);
Ajit Khaparde205859a2010-02-09 01:34:21 +00001376 rx_page_info->last_page_user = false;
1377 }
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001378
1379 atomic_dec(&rxq->used);
1380 return rx_page_info;
1381}
1382
1383/* Throwaway the data in the Rx completion */
Sathya Perla10ef9ab2012-02-09 18:05:27 +00001384static void be_rx_compl_discard(struct be_rx_obj *rxo,
1385 struct be_rx_compl_info *rxcp)
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001386{
Sathya Perla3abcded2010-10-03 22:12:27 -07001387 struct be_queue_info *rxq = &rxo->q;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001388 struct be_rx_page_info *page_info;
Sathya Perla2e588f82011-03-11 02:49:26 +00001389 u16 i, num_rcvd = rxcp->num_rcvd;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001390
Padmanabh Ratnakare80d9da2011-03-07 03:07:58 +00001391 for (i = 0; i < num_rcvd; i++) {
Sathya Perla10ef9ab2012-02-09 18:05:27 +00001392 page_info = get_rx_page_info(rxo, rxcp->rxq_idx);
Padmanabh Ratnakare80d9da2011-03-07 03:07:58 +00001393 put_page(page_info->page);
1394 memset(page_info, 0, sizeof(*page_info));
Sathya Perla2e588f82011-03-11 02:49:26 +00001395 index_inc(&rxcp->rxq_idx, rxq->len);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001396 }
1397}
1398
/*
 * skb_fill_rx_data forms a complete skb for an ether frame
 * indicated by rxcp: the first fragment (or a tiny whole packet) is
 * copied into the skb's linear area, and all remaining fragments are
 * attached as page frags. Fragments from the same physical big page
 * are coalesced into a single frag slot.
 */
static void skb_fill_rx_data(struct be_rx_obj *rxo, struct sk_buff *skb,
			struct be_rx_compl_info *rxcp)
{
	struct be_queue_info *rxq = &rxo->q;
	struct be_rx_page_info *page_info;
	u16 i, j;
	u16 hdr_len, curr_frag_len, remaining;
	u8 *start;

	page_info = get_rx_page_info(rxo, rxcp->rxq_idx);
	start = page_address(page_info->page) + page_info->page_offset;
	prefetch(start);

	/* Copy data in the first descriptor of this completion */
	curr_frag_len = min(rxcp->pkt_size, rx_frag_size);

	skb->len = curr_frag_len;
	if (curr_frag_len <= BE_HDR_LEN) { /* tiny packet */
		memcpy(skb->data, start, curr_frag_len);
		/* Complete packet has now been moved to data */
		put_page(page_info->page);
		skb->data_len = 0;
		skb->tail += curr_frag_len;
	} else {
		/* Pull only the Ethernet header into the linear area; the
		 * rest of the first fragment stays in the page as frag[0].
		 */
		hdr_len = ETH_HLEN;
		memcpy(skb->data, start, hdr_len);
		skb_shinfo(skb)->nr_frags = 1;
		skb_frag_set_page(skb, 0, page_info->page);
		skb_shinfo(skb)->frags[0].page_offset =
					page_info->page_offset + hdr_len;
		skb_frag_size_set(&skb_shinfo(skb)->frags[0], curr_frag_len - hdr_len);
		skb->data_len = curr_frag_len - hdr_len;
		skb->truesize += rx_frag_size;
		skb->tail += hdr_len;
	}
	/* Ownership of the page moved into the skb (or was dropped) */
	page_info->page = NULL;

	if (rxcp->pkt_size <= rx_frag_size) {
		/* Single-fragment packet: nothing more to attach */
		BUG_ON(rxcp->num_rcvd != 1);
		return;
	}

	/* More frags present for this completion */
	index_inc(&rxcp->rxq_idx, rxq->len);
	remaining = rxcp->pkt_size - curr_frag_len;
	for (i = 1, j = 0; i < rxcp->num_rcvd; i++) {
		page_info = get_rx_page_info(rxo, rxcp->rxq_idx);
		curr_frag_len = min(remaining, rx_frag_size);

		/* Coalesce all frags from the same physical page in one slot */
		if (page_info->page_offset == 0) {
			/* Fresh page */
			j++;
			skb_frag_set_page(skb, j, page_info->page);
			skb_shinfo(skb)->frags[j].page_offset =
							page_info->page_offset;
			skb_frag_size_set(&skb_shinfo(skb)->frags[j], 0);
			skb_shinfo(skb)->nr_frags++;
		} else {
			/* Same big page as frag[j]; drop the extra ref taken
			 * at post time and just grow the existing frag.
			 */
			put_page(page_info->page);
		}

		skb_frag_size_add(&skb_shinfo(skb)->frags[j], curr_frag_len);
		skb->len += curr_frag_len;
		skb->data_len += curr_frag_len;
		skb->truesize += rx_frag_size;
		remaining -= curr_frag_len;
		index_inc(&rxcp->rxq_idx, rxq->len);
		page_info->page = NULL;
	}
	BUG_ON(j > MAX_SKB_FRAGS);
}
1475
Ajit Khaparde5be93b92009-07-21 12:36:19 -07001476/* Process the RX completion indicated by rxcp when GRO is disabled */
Sathya Perla10ef9ab2012-02-09 18:05:27 +00001477static void be_rx_compl_process(struct be_rx_obj *rxo,
1478 struct be_rx_compl_info *rxcp)
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001479{
Sathya Perla10ef9ab2012-02-09 18:05:27 +00001480 struct be_adapter *adapter = rxo->adapter;
Michał Mirosław6332c8d2011-04-07 02:43:48 +00001481 struct net_device *netdev = adapter->netdev;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001482 struct sk_buff *skb;
Sathya Perla89420422010-02-17 01:35:26 +00001483
Eric Dumazetbb349bb2012-01-25 03:56:30 +00001484 skb = netdev_alloc_skb_ip_align(netdev, BE_RX_SKB_ALLOC_SIZE);
Sathya Perlaa058a632010-02-17 01:34:22 +00001485 if (unlikely(!skb)) {
Sathya Perlaac124ff2011-07-25 19:10:14 +00001486 rx_stats(rxo)->rx_drops_no_skbs++;
Sathya Perla10ef9ab2012-02-09 18:05:27 +00001487 be_rx_compl_discard(rxo, rxcp);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001488 return;
1489 }
1490
Sathya Perla10ef9ab2012-02-09 18:05:27 +00001491 skb_fill_rx_data(rxo, skb, rxcp);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001492
Michał Mirosław6332c8d2011-04-07 02:43:48 +00001493 if (likely((netdev->features & NETIF_F_RXCSUM) && csum_passed(rxcp)))
Ajit Khaparde728a9972009-04-13 15:41:22 -07001494 skb->ip_summed = CHECKSUM_UNNECESSARY;
Somnath Koturc6ce2f42010-10-25 01:11:58 +00001495 else
1496 skb_checksum_none_assert(skb);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001497
Michał Mirosław6332c8d2011-04-07 02:43:48 +00001498 skb->protocol = eth_type_trans(skb, netdev);
Somnath Koturaaa6dae2012-05-02 03:40:49 +00001499 skb_record_rx_queue(skb, rxo - &adapter->rx_obj[0]);
Sathya Perla10ef9ab2012-02-09 18:05:27 +00001500 if (netdev->features & NETIF_F_RXHASH)
Ajit Khaparde4b972912011-04-06 18:07:43 +00001501 skb->rxhash = rxcp->rss_hash;
1502
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001503
Jiri Pirko343e43c2011-08-25 02:50:51 +00001504 if (rxcp->vlanf)
Patrick McHardy86a9bad2013-04-19 02:04:30 +00001505 __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), rxcp->vlan_tag);
Ajit Khaparde4c5102f2011-07-12 22:10:01 -07001506
1507 netif_receive_skb(skb);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001508}
1509
/* Process the RX completion indicated by rxcp when GRO is enabled:
 * attach all received fragments as page frags to a napi-managed skb
 * and hand it to the GRO engine. Fragments from the same physical
 * big page are coalesced into one frag slot.
 */
void be_rx_compl_process_gro(struct be_rx_obj *rxo, struct napi_struct *napi,
			     struct be_rx_compl_info *rxcp)
{
	struct be_adapter *adapter = rxo->adapter;
	struct be_rx_page_info *page_info;
	struct sk_buff *skb = NULL;
	struct be_queue_info *rxq = &rxo->q;
	u16 remaining, curr_frag_len;
	u16 i, j;

	skb = napi_get_frags(napi);
	if (!skb) {
		/* No skb available: drop the frame, recycling its pages */
		be_rx_compl_discard(rxo, rxcp);
		return;
	}

	remaining = rxcp->pkt_size;
	/* j starts at -1 so the first iteration always opens frag slot 0 */
	for (i = 0, j = -1; i < rxcp->num_rcvd; i++) {
		page_info = get_rx_page_info(rxo, rxcp->rxq_idx);

		curr_frag_len = min(remaining, rx_frag_size);

		/* Coalesce all frags from the same physical page in one slot */
		if (i == 0 || page_info->page_offset == 0) {
			/* First frag or Fresh page */
			j++;
			skb_frag_set_page(skb, j, page_info->page);
			skb_shinfo(skb)->frags[j].page_offset =
							page_info->page_offset;
			skb_frag_size_set(&skb_shinfo(skb)->frags[j], 0);
		} else {
			/* Same big page as frag[j]: drop the extra ref and
			 * grow the existing frag instead.
			 */
			put_page(page_info->page);
		}
		skb_frag_size_add(&skb_shinfo(skb)->frags[j], curr_frag_len);
		skb->truesize += rx_frag_size;
		remaining -= curr_frag_len;
		index_inc(&rxcp->rxq_idx, rxq->len);
		memset(page_info, 0, sizeof(*page_info));
	}
	BUG_ON(j > MAX_SKB_FRAGS);

	skb_shinfo(skb)->nr_frags = j + 1;
	skb->len = rxcp->pkt_size;
	skb->data_len = rxcp->pkt_size;
	/* GRO path is only taken for frames whose csum HW verified */
	skb->ip_summed = CHECKSUM_UNNECESSARY;
	skb_record_rx_queue(skb, rxo - &adapter->rx_obj[0]);
	if (adapter->netdev->features & NETIF_F_RXHASH)
		skb->rxhash = rxcp->rss_hash;

	if (rxcp->vlanf)
		__vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), rxcp->vlan_tag);

	napi_gro_frags(napi);
}
1565
Sathya Perla10ef9ab2012-02-09 18:05:27 +00001566static void be_parse_rx_compl_v1(struct be_eth_rx_compl *compl,
1567 struct be_rx_compl_info *rxcp)
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001568{
Sathya Perla2e588f82011-03-11 02:49:26 +00001569 rxcp->pkt_size =
1570 AMAP_GET_BITS(struct amap_eth_rx_compl_v1, pktsize, compl);
1571 rxcp->vlanf = AMAP_GET_BITS(struct amap_eth_rx_compl_v1, vtp, compl);
1572 rxcp->err = AMAP_GET_BITS(struct amap_eth_rx_compl_v1, err, compl);
1573 rxcp->tcpf = AMAP_GET_BITS(struct amap_eth_rx_compl_v1, tcpf, compl);
Padmanabh Ratnakar9ecb42f2011-03-15 14:57:09 -07001574 rxcp->udpf = AMAP_GET_BITS(struct amap_eth_rx_compl_v1, udpf, compl);
Sathya Perla2e588f82011-03-11 02:49:26 +00001575 rxcp->ip_csum =
1576 AMAP_GET_BITS(struct amap_eth_rx_compl_v1, ipcksm, compl);
1577 rxcp->l4_csum =
1578 AMAP_GET_BITS(struct amap_eth_rx_compl_v1, l4_cksm, compl);
1579 rxcp->ipv6 =
1580 AMAP_GET_BITS(struct amap_eth_rx_compl_v1, ip_version, compl);
1581 rxcp->rxq_idx =
1582 AMAP_GET_BITS(struct amap_eth_rx_compl_v1, fragndx, compl);
1583 rxcp->num_rcvd =
1584 AMAP_GET_BITS(struct amap_eth_rx_compl_v1, numfrags, compl);
1585 rxcp->pkt_type =
1586 AMAP_GET_BITS(struct amap_eth_rx_compl_v1, cast_enc, compl);
Ajit Khaparde4b972912011-04-06 18:07:43 +00001587 rxcp->rss_hash =
Sarveshwar Bandic2979772012-07-25 21:29:50 +00001588 AMAP_GET_BITS(struct amap_eth_rx_compl_v1, rsshash, compl);
Sathya Perla15d72182011-03-21 20:49:26 +00001589 if (rxcp->vlanf) {
1590 rxcp->vtm = AMAP_GET_BITS(struct amap_eth_rx_compl_v1, vtm,
David S. Miller3c709f82011-05-11 14:26:15 -04001591 compl);
1592 rxcp->vlan_tag = AMAP_GET_BITS(struct amap_eth_rx_compl_v1, vlan_tag,
1593 compl);
Sathya Perla15d72182011-03-21 20:49:26 +00001594 }
Sathya Perla12004ae2011-08-02 19:57:46 +00001595 rxcp->port = AMAP_GET_BITS(struct amap_eth_rx_compl_v1, port, compl);
Sathya Perla2e588f82011-03-11 02:49:26 +00001596}
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001597
Sathya Perla10ef9ab2012-02-09 18:05:27 +00001598static void be_parse_rx_compl_v0(struct be_eth_rx_compl *compl,
1599 struct be_rx_compl_info *rxcp)
Sathya Perla2e588f82011-03-11 02:49:26 +00001600{
1601 rxcp->pkt_size =
1602 AMAP_GET_BITS(struct amap_eth_rx_compl_v0, pktsize, compl);
1603 rxcp->vlanf = AMAP_GET_BITS(struct amap_eth_rx_compl_v0, vtp, compl);
1604 rxcp->err = AMAP_GET_BITS(struct amap_eth_rx_compl_v0, err, compl);
1605 rxcp->tcpf = AMAP_GET_BITS(struct amap_eth_rx_compl_v0, tcpf, compl);
Padmanabh Ratnakar9ecb42f2011-03-15 14:57:09 -07001606 rxcp->udpf = AMAP_GET_BITS(struct amap_eth_rx_compl_v0, udpf, compl);
Sathya Perla2e588f82011-03-11 02:49:26 +00001607 rxcp->ip_csum =
1608 AMAP_GET_BITS(struct amap_eth_rx_compl_v0, ipcksm, compl);
1609 rxcp->l4_csum =
1610 AMAP_GET_BITS(struct amap_eth_rx_compl_v0, l4_cksm, compl);
1611 rxcp->ipv6 =
1612 AMAP_GET_BITS(struct amap_eth_rx_compl_v0, ip_version, compl);
1613 rxcp->rxq_idx =
1614 AMAP_GET_BITS(struct amap_eth_rx_compl_v0, fragndx, compl);
1615 rxcp->num_rcvd =
1616 AMAP_GET_BITS(struct amap_eth_rx_compl_v0, numfrags, compl);
1617 rxcp->pkt_type =
1618 AMAP_GET_BITS(struct amap_eth_rx_compl_v0, cast_enc, compl);
Ajit Khaparde4b972912011-04-06 18:07:43 +00001619 rxcp->rss_hash =
Sarveshwar Bandic2979772012-07-25 21:29:50 +00001620 AMAP_GET_BITS(struct amap_eth_rx_compl_v0, rsshash, compl);
Sathya Perla15d72182011-03-21 20:49:26 +00001621 if (rxcp->vlanf) {
1622 rxcp->vtm = AMAP_GET_BITS(struct amap_eth_rx_compl_v0, vtm,
David S. Miller3c709f82011-05-11 14:26:15 -04001623 compl);
1624 rxcp->vlan_tag = AMAP_GET_BITS(struct amap_eth_rx_compl_v0, vlan_tag,
1625 compl);
Sathya Perla15d72182011-03-21 20:49:26 +00001626 }
Sathya Perla12004ae2011-08-02 19:57:46 +00001627 rxcp->port = AMAP_GET_BITS(struct amap_eth_rx_compl_v0, port, compl);
Somnath Koture38b1702013-05-29 22:55:56 +00001628 rxcp->ip_frag = AMAP_GET_BITS(struct amap_eth_rx_compl_v0,
1629 ip_frag, compl);
Sathya Perla2e588f82011-03-11 02:49:26 +00001630}
1631
/* Pop the next valid RX completion from @rxo's CQ, or return NULL if
 * none is pending. The entry is parsed into the per-rxo rxcp buffer
 * (which is reused across calls) and its valid bit is cleared so it
 * is consumed exactly once.
 */
static struct be_rx_compl_info *be_rx_compl_get(struct be_rx_obj *rxo)
{
	struct be_eth_rx_compl *compl = queue_tail_node(&rxo->cq);
	struct be_rx_compl_info *rxcp = &rxo->rxcp;
	struct be_adapter *adapter = rxo->adapter;

	/* For checking the valid bit it is Ok to use either definition as the
	 * valid bit is at the same position in both v0 and v1 Rx compl */
	if (compl->dw[offsetof(struct amap_eth_rx_compl_v1, valid) / 32] == 0)
		return NULL;

	/* Read the rest of the entry only after seeing the valid bit set */
	rmb();
	be_dws_le_to_cpu(compl, sizeof(*compl));

	if (adapter->be3_native)
		be_parse_rx_compl_v1(compl, rxcp);
	else
		be_parse_rx_compl_v0(compl, rxcp);

	/* HW's L4 checksum is not valid for IP-fragmented packets */
	if (rxcp->ip_frag)
		rxcp->l4_csum = 0;

	if (rxcp->vlanf) {
		/* vlanf could be wrongly set in some cards.
		 * ignore if vtm is not set */
		if ((adapter->function_mode & FLEX10_MODE) && !rxcp->vtm)
			rxcp->vlanf = 0;

		/* BE/Skyhawk report the tag byte-swapped; Lancer does not */
		if (!lancer_chip(adapter))
			rxcp->vlan_tag = swab16(rxcp->vlan_tag);

		/* Hide the port's own pvid tag unless it was explicitly
		 * configured by the user as an RX vlan filter.
		 */
		if (adapter->pvid == (rxcp->vlan_tag & VLAN_VID_MASK) &&
		    !adapter->vlan_tag[rxcp->vlan_tag])
			rxcp->vlanf = 0;
	}

	/* As the compl has been parsed, reset it; we wont touch it again */
	compl->dw[offsetof(struct amap_eth_rx_compl_v1, valid) / 32] = 0;

	queue_tail_inc(&rxo->cq);
	return rxcp;
}
1674
Eric Dumazet1829b082011-03-01 05:48:12 +00001675static inline struct page *be_alloc_pages(u32 size, gfp_t gfp)
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001676{
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001677 u32 order = get_order(size);
Eric Dumazet1829b082011-03-01 05:48:12 +00001678
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001679 if (order > 0)
Eric Dumazet1829b082011-03-01 05:48:12 +00001680 gfp |= __GFP_COMP;
1681 return alloc_pages(gfp, order);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001682}
1683
1684/*
1685 * Allocate a page, split it to fragments of size rx_frag_size and post as
1686 * receive buffers to BE
1687 */
Eric Dumazet1829b082011-03-01 05:48:12 +00001688static void be_post_rx_frags(struct be_rx_obj *rxo, gfp_t gfp)
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001689{
Sathya Perla3abcded2010-10-03 22:12:27 -07001690 struct be_adapter *adapter = rxo->adapter;
Sathya Perla26d92f92010-01-21 22:52:08 -08001691 struct be_rx_page_info *page_info = NULL, *prev_page_info = NULL;
Sathya Perla3abcded2010-10-03 22:12:27 -07001692 struct be_queue_info *rxq = &rxo->q;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001693 struct page *pagep = NULL;
1694 struct be_eth_rx_d *rxd;
1695 u64 page_dmaaddr = 0, frag_dmaaddr;
1696 u32 posted, page_offset = 0;
1697
Sathya Perla3abcded2010-10-03 22:12:27 -07001698 page_info = &rxo->page_info_tbl[rxq->head];
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001699 for (posted = 0; posted < MAX_RX_POST && !page_info->page; posted++) {
1700 if (!pagep) {
Eric Dumazet1829b082011-03-01 05:48:12 +00001701 pagep = be_alloc_pages(adapter->big_page_size, gfp);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001702 if (unlikely(!pagep)) {
Sathya Perlaac124ff2011-07-25 19:10:14 +00001703 rx_stats(rxo)->rx_post_fail++;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001704 break;
1705 }
Ivan Vecera2b7bceb2011-02-02 08:05:12 +00001706 page_dmaaddr = dma_map_page(&adapter->pdev->dev, pagep,
1707 0, adapter->big_page_size,
1708 DMA_FROM_DEVICE);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001709 page_info->page_offset = 0;
1710 } else {
1711 get_page(pagep);
1712 page_info->page_offset = page_offset + rx_frag_size;
1713 }
1714 page_offset = page_info->page_offset;
1715 page_info->page = pagep;
FUJITA Tomonorifac6da52010-04-01 16:53:22 +00001716 dma_unmap_addr_set(page_info, bus, page_dmaaddr);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001717 frag_dmaaddr = page_dmaaddr + page_info->page_offset;
1718
1719 rxd = queue_head_node(rxq);
1720 rxd->fragpa_lo = cpu_to_le32(frag_dmaaddr & 0xFFFFFFFF);
1721 rxd->fragpa_hi = cpu_to_le32(upper_32_bits(frag_dmaaddr));
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001722
1723 /* Any space left in the current big page for another frag? */
1724 if ((page_offset + rx_frag_size + rx_frag_size) >
1725 adapter->big_page_size) {
1726 pagep = NULL;
1727 page_info->last_page_user = true;
1728 }
Sathya Perla26d92f92010-01-21 22:52:08 -08001729
1730 prev_page_info = page_info;
1731 queue_head_inc(rxq);
Sathya Perla10ef9ab2012-02-09 18:05:27 +00001732 page_info = &rxo->page_info_tbl[rxq->head];
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001733 }
1734 if (pagep)
Sathya Perla26d92f92010-01-21 22:52:08 -08001735 prev_page_info->last_page_user = true;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001736
1737 if (posted) {
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001738 atomic_add(posted, &rxq->used);
Sathya Perla8788fdc2009-07-27 22:52:03 +00001739 be_rxq_notify(adapter, rxq->id, posted);
Sathya Perlaea1dae12009-03-19 23:56:20 -07001740 } else if (atomic_read(&rxq->used) == 0) {
1741 /* Let be_worker replenish when memory is available */
Sathya Perla3abcded2010-10-03 22:12:27 -07001742 rxo->rx_post_starved = true;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001743 }
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001744}
1745
Sathya Perla5fb379e2009-06-18 00:02:59 +00001746static struct be_eth_tx_compl *be_tx_compl_get(struct be_queue_info *tx_cq)
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001747{
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001748 struct be_eth_tx_compl *txcp = queue_tail_node(tx_cq);
1749
1750 if (txcp->dw[offsetof(struct amap_eth_tx_compl, valid) / 32] == 0)
1751 return NULL;
1752
Sathya Perlaf3eb62d2010-06-29 00:11:17 +00001753 rmb();
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001754 be_dws_le_to_cpu(txcp, sizeof(*txcp));
1755
1756 txcp->dw[offsetof(struct amap_eth_tx_compl, valid) / 32] = 0;
1757
1758 queue_tail_inc(tx_cq);
1759 return txcp;
1760}
1761
/* Reclaim the TX WRBs of one transmitted skb, walking the TX queue
 * tail up to @last_index (taken from the TX completion): unmap each
 * fragment's DMA and free the skb. Returns the number of WRBs
 * consumed (including the header WRB) so the caller can credit the
 * queue. Does NOT itself decrement txq->used.
 */
static u16 be_tx_compl_process(struct be_adapter *adapter,
		struct be_tx_obj *txo, u16 last_index)
{
	struct be_queue_info *txq = &txo->q;
	struct be_eth_wrb *wrb;
	struct sk_buff **sent_skbs = txo->sent_skb_list;
	struct sk_buff *sent_skb;
	u16 cur_index, num_wrbs = 1; /* account for hdr wrb */
	bool unmap_skb_hdr = true;

	sent_skb = sent_skbs[txq->tail];
	BUG_ON(!sent_skb);
	sent_skbs[txq->tail] = NULL;

	/* skip header wrb */
	queue_tail_inc(txq);

	do {
		cur_index = txq->tail;
		wrb = queue_tail_node(txq);
		/* The first data WRB may also carry the linear header
		 * mapping; only unmap it once.
		 */
		unmap_tx_frag(&adapter->pdev->dev, wrb,
			      (unmap_skb_hdr && skb_headlen(sent_skb)));
		unmap_skb_hdr = false;

		num_wrbs++;
		queue_tail_inc(txq);
	} while (cur_index != last_index);

	kfree_skb(sent_skb);
	return num_wrbs;
}
1793
Sathya Perla10ef9ab2012-02-09 18:05:27 +00001794/* Return the number of events in the event queue */
1795static inline int events_get(struct be_eq_obj *eqo)
Sathya Perla859b1e42009-08-10 03:43:51 +00001796{
1797 struct be_eq_entry *eqe;
Sathya Perla10ef9ab2012-02-09 18:05:27 +00001798 int num = 0;
Sathya Perla859b1e42009-08-10 03:43:51 +00001799
Sathya Perla10ef9ab2012-02-09 18:05:27 +00001800 do {
1801 eqe = queue_tail_node(&eqo->q);
1802 if (eqe->evt == 0)
1803 break;
1804
1805 rmb();
Sathya Perla859b1e42009-08-10 03:43:51 +00001806 eqe->evt = 0;
1807 num++;
Sathya Perla10ef9ab2012-02-09 18:05:27 +00001808 queue_tail_inc(&eqo->q);
1809 } while (true);
Sathya Perla859b1e42009-08-10 03:43:51 +00001810
1811 return num;
1812}
1813
Sathya Perla10ef9ab2012-02-09 18:05:27 +00001814/* Leaves the EQ is disarmed state */
1815static void be_eq_clean(struct be_eq_obj *eqo)
1816{
1817 int num = events_get(eqo);
1818
1819 be_eq_notify(eqo->adapter, eqo->q.id, false, true, num);
1820}
1821
/* Drain @rxo's completion queue and reclaim every posted RX buffer.
 * Used during queue teardown; leaves the CQ unarmed and the RX queue
 * empty with head/tail reset.
 */
static void be_rx_cq_clean(struct be_rx_obj *rxo)
{
	struct be_rx_page_info *page_info;
	struct be_queue_info *rxq = &rxo->q;
	struct be_queue_info *rx_cq = &rxo->cq;
	struct be_rx_compl_info *rxcp;
	struct be_adapter *adapter = rxo->adapter;
	int flush_wait = 0;
	u16 tail;

	/* Consume pending rx completions.
	 * Wait for the flush completion (identified by zero num_rcvd)
	 * to arrive. Notify CQ even when there are no more CQ entries
	 * for HW to flush partially coalesced CQ entries.
	 * In Lancer, there is no need to wait for flush compl.
	 */
	for (;;) {
		rxcp = be_rx_compl_get(rxo);
		if (rxcp == NULL) {
			if (lancer_chip(adapter))
				break;

			/* Give up after ~10ms or on a detected HW error */
			if (flush_wait++ > 10 || be_hw_error(adapter)) {
				dev_warn(&adapter->pdev->dev,
					 "did not receive flush compl\n");
				break;
			}
			be_cq_notify(adapter, rx_cq->id, true, 0);
			mdelay(1);
		} else {
			be_rx_compl_discard(rxo, rxcp);
			be_cq_notify(adapter, rx_cq->id, false, 1);
			if (rxcp->num_rcvd == 0)
				break;
		}
	}

	/* After cleanup, leave the CQ in unarmed state */
	be_cq_notify(adapter, rx_cq->id, false, 0);

	/* Then free posted rx buffers that were not used */
	tail = (rxq->head + rxq->len - atomic_read(&rxq->used)) % rxq->len;
	for (; atomic_read(&rxq->used) > 0; index_inc(&tail, rxq->len)) {
		page_info = get_rx_page_info(rxo, tail);
		put_page(page_info->page);
		memset(page_info, 0, sizeof(*page_info));
	}
	BUG_ON(atomic_read(&rxq->used));
	rxq->tail = rxq->head = 0;
}
1872
/* Drain all TX completion queues during teardown: process arriving
 * completions for up to ~200ms, then forcibly free any posted skbs
 * whose completions will never arrive.
 */
static void be_tx_compl_clean(struct be_adapter *adapter)
{
	struct be_tx_obj *txo;
	struct be_queue_info *txq;
	struct be_eth_tx_compl *txcp;
	u16 end_idx, cmpl = 0, timeo = 0, num_wrbs = 0;
	struct sk_buff *sent_skb;
	bool dummy_wrb;
	int i, pending_txqs;

	/* Wait for a max of 200ms for all the tx-completions to arrive. */
	do {
		pending_txqs = adapter->num_tx_qs;

		for_all_tx_queues(adapter, txo, i) {
			txq = &txo->q;
			while ((txcp = be_tx_compl_get(&txo->cq))) {
				end_idx =
					AMAP_GET_BITS(struct amap_eth_tx_compl,
						      wrb_index, txcp);
				num_wrbs += be_tx_compl_process(adapter, txo,
								end_idx);
				cmpl++;
			}
			/* Ack what was processed and credit the queue;
			 * reset the accumulators for the next txq.
			 */
			if (cmpl) {
				be_cq_notify(adapter, txo->cq.id, false, cmpl);
				atomic_sub(num_wrbs, &txq->used);
				cmpl = 0;
				num_wrbs = 0;
			}
			if (atomic_read(&txq->used) == 0)
				pending_txqs--;
		}

		if (pending_txqs == 0 || ++timeo > 200)
			break;

		mdelay(1);
	} while (true);

	for_all_tx_queues(adapter, txo, i) {
		txq = &txo->q;
		if (atomic_read(&txq->used))
			dev_err(&adapter->pdev->dev, "%d pending tx-compls\n",
				atomic_read(&txq->used));

		/* free posted tx for which compls will never arrive */
		while (atomic_read(&txq->used)) {
			sent_skb = txo->sent_skb_list[txq->tail];
			end_idx = txq->tail;
			/* Recompute this skb's WRB span to find its last
			 * index, then reclaim it as if completed.
			 */
			num_wrbs = wrb_cnt_for_skb(adapter, sent_skb,
						   &dummy_wrb);
			index_adv(&end_idx, num_wrbs - 1, txq->len);
			num_wrbs = be_tx_compl_process(adapter, txo, end_idx);
			atomic_sub(num_wrbs, &txq->used);
		}
	}
}
1931
Sathya Perla10ef9ab2012-02-09 18:05:27 +00001932static void be_evt_queues_destroy(struct be_adapter *adapter)
1933{
1934 struct be_eq_obj *eqo;
1935 int i;
1936
1937 for_all_evt_queues(adapter, eqo, i) {
Padmanabh Ratnakar19d59aa2012-07-12 03:57:21 +00001938 if (eqo->q.created) {
1939 be_eq_clean(eqo);
Sathya Perla10ef9ab2012-02-09 18:05:27 +00001940 be_cmd_q_destroy(adapter, &eqo->q, QTYPE_EQ);
Padmanabh Ratnakar19d59aa2012-07-12 03:57:21 +00001941 }
Sathya Perla10ef9ab2012-02-09 18:05:27 +00001942 be_queue_free(adapter, &eqo->q);
1943 }
1944}
1945
1946static int be_evt_queues_create(struct be_adapter *adapter)
1947{
1948 struct be_queue_info *eq;
1949 struct be_eq_obj *eqo;
1950 int i, rc;
1951
1952 adapter->num_evt_qs = num_irqs(adapter);
1953
1954 for_all_evt_queues(adapter, eqo, i) {
1955 eqo->adapter = adapter;
1956 eqo->tx_budget = BE_TX_BUDGET;
1957 eqo->idx = i;
1958 eqo->max_eqd = BE_MAX_EQD;
1959 eqo->enable_aic = true;
1960
1961 eq = &eqo->q;
1962 rc = be_queue_alloc(adapter, eq, EVNT_Q_LEN,
1963 sizeof(struct be_eq_entry));
1964 if (rc)
1965 return rc;
1966
1967 rc = be_cmd_eq_create(adapter, eq, eqo->cur_eqd);
1968 if (rc)
1969 return rc;
1970 }
Sathya Perla1cfafab2012-02-23 18:50:15 +00001971 return 0;
Sathya Perla10ef9ab2012-02-09 18:05:27 +00001972}
1973
Sathya Perla5fb379e2009-06-18 00:02:59 +00001974static void be_mcc_queues_destroy(struct be_adapter *adapter)
1975{
1976 struct be_queue_info *q;
Sathya Perla5fb379e2009-06-18 00:02:59 +00001977
Sathya Perla8788fdc2009-07-27 22:52:03 +00001978 q = &adapter->mcc_obj.q;
Sathya Perla5fb379e2009-06-18 00:02:59 +00001979 if (q->created)
Sathya Perla8788fdc2009-07-27 22:52:03 +00001980 be_cmd_q_destroy(adapter, q, QTYPE_MCCQ);
Sathya Perla5fb379e2009-06-18 00:02:59 +00001981 be_queue_free(adapter, q);
1982
Sathya Perla8788fdc2009-07-27 22:52:03 +00001983 q = &adapter->mcc_obj.cq;
Sathya Perla5fb379e2009-06-18 00:02:59 +00001984 if (q->created)
Sathya Perla8788fdc2009-07-27 22:52:03 +00001985 be_cmd_q_destroy(adapter, q, QTYPE_CQ);
Sathya Perla5fb379e2009-06-18 00:02:59 +00001986 be_queue_free(adapter, q);
1987}
1988
1989/* Must be called only after TX qs are created as MCC shares TX EQ */
1990static int be_mcc_queues_create(struct be_adapter *adapter)
1991{
1992 struct be_queue_info *q, *cq;
Sathya Perla5fb379e2009-06-18 00:02:59 +00001993
Sathya Perla8788fdc2009-07-27 22:52:03 +00001994 cq = &adapter->mcc_obj.cq;
Sathya Perla5fb379e2009-06-18 00:02:59 +00001995 if (be_queue_alloc(adapter, cq, MCC_CQ_LEN,
Sathya Perlaefd2e402009-07-27 22:53:10 +00001996 sizeof(struct be_mcc_compl)))
Sathya Perla5fb379e2009-06-18 00:02:59 +00001997 goto err;
1998
Sathya Perla10ef9ab2012-02-09 18:05:27 +00001999 /* Use the default EQ for MCC completions */
2000 if (be_cmd_cq_create(adapter, cq, &mcc_eqo(adapter)->q, true, 0))
Sathya Perla5fb379e2009-06-18 00:02:59 +00002001 goto mcc_cq_free;
2002
Sathya Perla8788fdc2009-07-27 22:52:03 +00002003 q = &adapter->mcc_obj.q;
Sathya Perla5fb379e2009-06-18 00:02:59 +00002004 if (be_queue_alloc(adapter, q, MCC_Q_LEN, sizeof(struct be_mcc_wrb)))
2005 goto mcc_cq_destroy;
2006
Sathya Perla8788fdc2009-07-27 22:52:03 +00002007 if (be_cmd_mccq_create(adapter, q, cq))
Sathya Perla5fb379e2009-06-18 00:02:59 +00002008 goto mcc_q_free;
2009
2010 return 0;
2011
2012mcc_q_free:
2013 be_queue_free(adapter, q);
2014mcc_cq_destroy:
Sathya Perla8788fdc2009-07-27 22:52:03 +00002015 be_cmd_q_destroy(adapter, cq, QTYPE_CQ);
Sathya Perla5fb379e2009-06-18 00:02:59 +00002016mcc_cq_free:
2017 be_queue_free(adapter, cq);
2018err:
2019 return -1;
2020}
2021
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002022static void be_tx_queues_destroy(struct be_adapter *adapter)
2023{
2024 struct be_queue_info *q;
Sathya Perla3c8def92011-06-12 20:01:58 +00002025 struct be_tx_obj *txo;
2026 u8 i;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002027
Sathya Perla3c8def92011-06-12 20:01:58 +00002028 for_all_tx_queues(adapter, txo, i) {
2029 q = &txo->q;
2030 if (q->created)
2031 be_cmd_q_destroy(adapter, q, QTYPE_TXQ);
2032 be_queue_free(adapter, q);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002033
Sathya Perla3c8def92011-06-12 20:01:58 +00002034 q = &txo->cq;
2035 if (q->created)
2036 be_cmd_q_destroy(adapter, q, QTYPE_CQ);
2037 be_queue_free(adapter, q);
2038 }
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002039}
2040
Sathya Perladafc0fe2011-10-24 02:45:02 +00002041static int be_num_txqs_want(struct be_adapter *adapter)
2042{
Padmanabh Ratnakarabb93952012-10-20 06:01:41 +00002043 if ((!lancer_chip(adapter) && sriov_want(adapter)) ||
2044 be_is_mc(adapter) ||
2045 (!lancer_chip(adapter) && !be_physfn(adapter)) ||
Sathya Perlaca34fe32012-11-06 17:48:56 +00002046 BE2_chip(adapter))
Sathya Perladafc0fe2011-10-24 02:45:02 +00002047 return 1;
2048 else
Padmanabh Ratnakarabb93952012-10-20 06:01:41 +00002049 return adapter->max_tx_queues;
Sathya Perladafc0fe2011-10-24 02:45:02 +00002050}
2051
/* Decide the number of TX queues, publish it to the net stack, and create
 * one TX completion queue per TXQ.  TX CQs are distributed round-robin
 * over the event queues.  Returns 0 or the first failing status.
 */
static int be_tx_cqs_create(struct be_adapter *adapter)
{
	struct be_queue_info *cq, *eq;
	int status;
	struct be_tx_obj *txo;
	u8 i;

	adapter->num_tx_qs = be_num_txqs_want(adapter);
	if (adapter->num_tx_qs != MAX_TX_QS) {
		/* netif_set_real_num_tx_queues() must run under rtnl */
		rtnl_lock();
		netif_set_real_num_tx_queues(adapter->netdev,
			adapter->num_tx_qs);
		rtnl_unlock();
	}

	for_all_tx_queues(adapter, txo, i) {
		cq = &txo->cq;
		status = be_queue_alloc(adapter, cq, TX_CQ_LEN,
					sizeof(struct be_eth_tx_compl));
		if (status)
			return status;

		/* If num_evt_qs is less than num_tx_qs, then more than
		 * one txq share an eq
		 */
		eq = &adapter->eq_obj[i % adapter->num_evt_qs].q;
		status = be_cmd_cq_create(adapter, cq, eq, false, 3);
		if (status)
			return status;
	}
	return 0;
}
2084
Sathya Perla10ef9ab2012-02-09 18:05:27 +00002085static int be_tx_qs_create(struct be_adapter *adapter)
2086{
2087 struct be_tx_obj *txo;
2088 int i, status;
2089
2090 for_all_tx_queues(adapter, txo, i) {
2091 status = be_queue_alloc(adapter, &txo->q, TX_Q_LEN,
2092 sizeof(struct be_eth_wrb));
2093 if (status)
2094 return status;
2095
Vasundhara Volam94d73aa2013-04-21 23:28:14 +00002096 status = be_cmd_txq_create(adapter, txo);
Sathya Perla10ef9ab2012-02-09 18:05:27 +00002097 if (status)
2098 return status;
2099 }
2100
Sathya Perlad3791422012-09-28 04:39:44 +00002101 dev_info(&adapter->pdev->dev, "created %d TX queue(s)\n",
2102 adapter->num_tx_qs);
Sathya Perla10ef9ab2012-02-09 18:05:27 +00002103 return 0;
2104}
2105
2106static void be_rx_cqs_destroy(struct be_adapter *adapter)
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002107{
2108 struct be_queue_info *q;
Sathya Perla3abcded2010-10-03 22:12:27 -07002109 struct be_rx_obj *rxo;
2110 int i;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002111
Sathya Perla3abcded2010-10-03 22:12:27 -07002112 for_all_rx_queues(adapter, rxo, i) {
Sathya Perla3abcded2010-10-03 22:12:27 -07002113 q = &rxo->cq;
2114 if (q->created)
2115 be_cmd_q_destroy(adapter, q, QTYPE_CQ);
2116 be_queue_free(adapter, q);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002117 }
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002118}
2119
/* Decide the RX-queue count (one RSS ring per irq plus the default RXQ,
 * or a single RXQ when only one irq is available), publish it to the net
 * stack, and create one RX completion queue per ring.  RX CQs are spread
 * round-robin over the event queues.  Returns 0 or the first failure.
 */
static int be_rx_cqs_create(struct be_adapter *adapter)
{
	struct be_queue_info *eq, *cq;
	struct be_rx_obj *rxo;
	int rc, i;

	/* We'll create as many RSS rings as there are irqs.
	 * But when there's only one irq there's no use creating RSS rings
	 */
	adapter->num_rx_qs = (num_irqs(adapter) > 1) ?
				num_irqs(adapter) + 1 : 1;
	if (adapter->num_rx_qs != MAX_RX_QS) {
		/* netif_set_real_num_rx_queues() must run under rtnl */
		rtnl_lock();
		netif_set_real_num_rx_queues(adapter->netdev,
					     adapter->num_rx_qs);
		rtnl_unlock();
	}

	adapter->big_page_size = (1 << get_order(rx_frag_size)) * PAGE_SIZE;
	for_all_rx_queues(adapter, rxo, i) {
		rxo->adapter = adapter;
		cq = &rxo->cq;
		rc = be_queue_alloc(adapter, cq, RX_CQ_LEN,
				sizeof(struct be_eth_rx_compl));
		if (rc)
			return rc;

		eq = &adapter->eq_obj[i % adapter->num_evt_qs].q;
		rc = be_cmd_cq_create(adapter, cq, eq, false, 3);
		if (rc)
			return rc;
	}

	dev_info(&adapter->pdev->dev,
		 "created %d RSS queue(s) and 1 default RX queue\n",
		 adapter->num_rx_qs - 1);
	return 0;
}
2158
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002159static irqreturn_t be_intx(int irq, void *dev)
2160{
Sathya Perlae49cc342012-11-27 19:50:02 +00002161 struct be_eq_obj *eqo = dev;
2162 struct be_adapter *adapter = eqo->adapter;
2163 int num_evts = 0;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002164
Sathya Perlad0b9cec2013-01-11 22:47:02 +00002165 /* IRQ is not expected when NAPI is scheduled as the EQ
2166 * will not be armed.
2167 * But, this can happen on Lancer INTx where it takes
2168 * a while to de-assert INTx or in BE2 where occasionaly
2169 * an interrupt may be raised even when EQ is unarmed.
2170 * If NAPI is already scheduled, then counting & notifying
2171 * events will orphan them.
Sathya Perlae49cc342012-11-27 19:50:02 +00002172 */
Sathya Perlad0b9cec2013-01-11 22:47:02 +00002173 if (napi_schedule_prep(&eqo->napi)) {
Sathya Perlae49cc342012-11-27 19:50:02 +00002174 num_evts = events_get(eqo);
Sathya Perlad0b9cec2013-01-11 22:47:02 +00002175 __napi_schedule(&eqo->napi);
2176 if (num_evts)
2177 eqo->spurious_intr = 0;
2178 }
Sathya Perlae49cc342012-11-27 19:50:02 +00002179 be_eq_notify(adapter, eqo->q.id, false, true, num_evts);
Sathya Perlad0b9cec2013-01-11 22:47:02 +00002180
2181 /* Return IRQ_HANDLED only for the the first spurious intr
2182 * after a valid intr to stop the kernel from branding
2183 * this irq as a bad one!
2184 */
2185 if (num_evts || eqo->spurious_intr++ == 0)
2186 return IRQ_HANDLED;
2187 else
2188 return IRQ_NONE;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002189}
2190
/* MSI-x interrupt handler; one vector is registered per event queue.
 * Notifies the EQ (arm=false here; be_poll() re-arms when done —
 * NOTE(review): parameter semantics inferred from be_poll(), confirm
 * against be_eq_notify()) and hands processing to NAPI.
 */
static irqreturn_t be_msix(int irq, void *dev)
{
	struct be_eq_obj *eqo = dev;

	be_eq_notify(eqo->adapter, eqo->q.id, false, true, 0);
	napi_schedule(&eqo->napi);
	return IRQ_HANDLED;
}
2199
Sathya Perla2e588f82011-03-11 02:49:26 +00002200static inline bool do_gro(struct be_rx_compl_info *rxcp)
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002201{
Somnath Koture38b1702013-05-29 22:55:56 +00002202 return (rxcp->tcpf && !rxcp->err && rxcp->l4_csum) ? true : false;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002203}
2204
/* NAPI RX worker for one RX ring: reap up to @budget completions,
 * deliver good frames (via GRO when do_gro() allows), discard the rest,
 * then notify the CQ and replenish RX buffers when the ring runs low.
 * Returns the number of completions processed.
 */
static int be_process_rx(struct be_rx_obj *rxo, struct napi_struct *napi,
			int budget)
{
	struct be_adapter *adapter = rxo->adapter;
	struct be_queue_info *rx_cq = &rxo->cq;
	struct be_rx_compl_info *rxcp;
	u32 work_done;

	for (work_done = 0; work_done < budget; work_done++) {
		rxcp = be_rx_compl_get(rxo);
		if (!rxcp)
			break;

		/* Is it a flush compl that has no data */
		if (unlikely(rxcp->num_rcvd == 0))
			goto loop_continue;

		/* Discard compl with partial DMA Lancer B0 */
		if (unlikely(!rxcp->pkt_size)) {
			be_rx_compl_discard(rxo, rxcp);
			goto loop_continue;
		}

		/* On BE drop pkts that arrive due to imperfect filtering in
		 * promiscuous mode on some skews
		 */
		if (unlikely(rxcp->port != adapter->port_num &&
				!lancer_chip(adapter))) {
			be_rx_compl_discard(rxo, rxcp);
			goto loop_continue;
		}

		if (do_gro(rxcp))
			be_rx_compl_process_gro(rxo, napi, rxcp);
		else
			be_rx_compl_process(rxo, rxcp);
loop_continue:
		/* stats are updated for discarded frames as well */
		be_rx_stats_update(rxo, rxcp);
	}

	if (work_done) {
		be_cq_notify(adapter, rx_cq->id, true, work_done);

		/* refill the RX ring before it runs dry */
		if (atomic_read(&rxo->q.used) < RX_FRAGS_REFILL_WM)
			be_post_rx_frags(rxo, GFP_ATOMIC);
	}

	return work_done;
}
2254
/* NAPI TX worker for one TX ring: reap up to @budget completions, free
 * the completed skbs and reclaim their wrbs, then wake the netdev
 * subqueue @idx if it had been stopped for lack of wrbs.
 * Returns true when the CQ was drained (TX work is done for this poll).
 */
static bool be_process_tx(struct be_adapter *adapter, struct be_tx_obj *txo,
			int budget, int idx)
{
	struct be_eth_tx_compl *txcp;
	int num_wrbs = 0, work_done;

	for (work_done = 0; work_done < budget; work_done++) {
		txcp = be_tx_compl_get(&txo->cq);
		if (!txcp)
			break;
		num_wrbs += be_tx_compl_process(adapter, txo,
				AMAP_GET_BITS(struct amap_eth_tx_compl,
					wrb_index, txcp));
	}

	if (work_done) {
		be_cq_notify(adapter, txo->cq.id, true, work_done);
		atomic_sub(num_wrbs, &txo->q.used);

		/* As Tx wrbs have been freed up, wake up netdev queue
		 * if it was stopped due to lack of tx wrbs. */
		if (__netif_subqueue_stopped(adapter->netdev, idx) &&
			atomic_read(&txo->q.used) < txo->q.len / 2) {
			netif_wake_subqueue(adapter->netdev, idx);
		}

		u64_stats_update_begin(&tx_stats(txo)->sync_compl);
		tx_stats(txo)->tx_compl += work_done;
		u64_stats_update_end(&tx_stats(txo)->sync_compl);
	}
	return (work_done < budget); /* Done */
}
Sathya Perla3c8def92011-06-12 20:01:58 +00002287
/* NAPI poll handler for one event queue: services every TXQ and RXQ
 * mapped to this EQ (queues are striped over EQs by index), processes
 * MCC completions on the MCC EQ, and either completes NAPI and re-arms
 * the EQ or keeps polling.  Returns the amount of work done, per the
 * NAPI contract.
 */
int be_poll(struct napi_struct *napi, int budget)
{
	struct be_eq_obj *eqo = container_of(napi, struct be_eq_obj, napi);
	struct be_adapter *adapter = eqo->adapter;
	int max_work = 0, work, i, num_evts;
	bool tx_done;

	/* count EQ entries up-front; they are acked in be_eq_notify() below */
	num_evts = events_get(eqo);

	/* Process all TXQs serviced by this EQ */
	for (i = eqo->idx; i < adapter->num_tx_qs; i += adapter->num_evt_qs) {
		tx_done = be_process_tx(adapter, &adapter->tx_obj[i],
					eqo->tx_budget, i);
		if (!tx_done)
			/* force another poll round for the unfinished TXQ */
			max_work = budget;
	}

	/* This loop will iterate twice for EQ0 in which
	 * completions of the last RXQ (default one) are also processed
	 * For other EQs the loop iterates only once
	 */
	for (i = eqo->idx; i < adapter->num_rx_qs; i += adapter->num_evt_qs) {
		work = be_process_rx(&adapter->rx_obj[i], napi, budget);
		max_work = max(work, max_work);
	}

	if (is_mcc_eqo(eqo))
		be_process_mcc(adapter);

	if (max_work < budget) {
		napi_complete(napi);
		/* re-arm the EQ and ack the consumed events */
		be_eq_notify(adapter, eqo->q.id, true, false, num_evts);
	} else {
		/* As we'll continue in polling mode, count and clear events */
		be_eq_notify(adapter, eqo->q.id, false, false, num_evts);
	}
	return max_work;
}
2326
Padmanabh Ratnakarf67ef7b2012-07-12 03:57:09 +00002327void be_detect_error(struct be_adapter *adapter)
Ajit Khaparde7c185272010-07-29 06:16:33 +00002328{
Padmanabh Ratnakare1cfb672011-11-03 01:50:08 +00002329 u32 ue_lo = 0, ue_hi = 0, ue_lo_mask = 0, ue_hi_mask = 0;
2330 u32 sliport_status = 0, sliport_err1 = 0, sliport_err2 = 0;
Ajit Khaparde7c185272010-07-29 06:16:33 +00002331 u32 i;
2332
Sathya Perlad23e9462012-12-17 19:38:51 +00002333 if (be_hw_error(adapter))
Sathya Perla72f02482011-11-10 19:17:58 +00002334 return;
2335
Padmanabh Ratnakare1cfb672011-11-03 01:50:08 +00002336 if (lancer_chip(adapter)) {
2337 sliport_status = ioread32(adapter->db + SLIPORT_STATUS_OFFSET);
2338 if (sliport_status & SLIPORT_STATUS_ERR_MASK) {
2339 sliport_err1 = ioread32(adapter->db +
2340 SLIPORT_ERROR1_OFFSET);
2341 sliport_err2 = ioread32(adapter->db +
2342 SLIPORT_ERROR2_OFFSET);
2343 }
2344 } else {
2345 pci_read_config_dword(adapter->pdev,
2346 PCICFG_UE_STATUS_LOW, &ue_lo);
2347 pci_read_config_dword(adapter->pdev,
2348 PCICFG_UE_STATUS_HIGH, &ue_hi);
2349 pci_read_config_dword(adapter->pdev,
2350 PCICFG_UE_STATUS_LOW_MASK, &ue_lo_mask);
2351 pci_read_config_dword(adapter->pdev,
2352 PCICFG_UE_STATUS_HI_MASK, &ue_hi_mask);
Ajit Khaparde7c185272010-07-29 06:16:33 +00002353
Padmanabh Ratnakarf67ef7b2012-07-12 03:57:09 +00002354 ue_lo = (ue_lo & ~ue_lo_mask);
2355 ue_hi = (ue_hi & ~ue_hi_mask);
Padmanabh Ratnakare1cfb672011-11-03 01:50:08 +00002356 }
Ajit Khaparde7c185272010-07-29 06:16:33 +00002357
Ajit Khaparde1451ae62012-10-08 18:18:21 +00002358 /* On certain platforms BE hardware can indicate spurious UEs.
2359 * Allow the h/w to stop working completely in case of a real UE.
2360 * Hence not setting the hw_error for UE detection.
2361 */
2362 if (sliport_status & SLIPORT_STATUS_ERR_MASK) {
Padmanabh Ratnakarf67ef7b2012-07-12 03:57:09 +00002363 adapter->hw_error = true;
Sathya Perla434b3642011-11-10 19:17:59 +00002364 dev_err(&adapter->pdev->dev,
Padmanabh Ratnakarf67ef7b2012-07-12 03:57:09 +00002365 "Error detected in the card\n");
2366 }
2367
2368 if (sliport_status & SLIPORT_STATUS_ERR_MASK) {
2369 dev_err(&adapter->pdev->dev,
2370 "ERR: sliport status 0x%x\n", sliport_status);
2371 dev_err(&adapter->pdev->dev,
2372 "ERR: sliport error1 0x%x\n", sliport_err1);
2373 dev_err(&adapter->pdev->dev,
2374 "ERR: sliport error2 0x%x\n", sliport_err2);
Ajit Khaparded053de92010-09-03 06:23:30 +00002375 }
2376
Padmanabh Ratnakare1cfb672011-11-03 01:50:08 +00002377 if (ue_lo) {
2378 for (i = 0; ue_lo; ue_lo >>= 1, i++) {
2379 if (ue_lo & 1)
Ajit Khaparde7c185272010-07-29 06:16:33 +00002380 dev_err(&adapter->pdev->dev,
2381 "UE: %s bit set\n", ue_status_low_desc[i]);
2382 }
2383 }
Padmanabh Ratnakarf67ef7b2012-07-12 03:57:09 +00002384
Padmanabh Ratnakare1cfb672011-11-03 01:50:08 +00002385 if (ue_hi) {
2386 for (i = 0; ue_hi; ue_hi >>= 1, i++) {
2387 if (ue_hi & 1)
Ajit Khaparde7c185272010-07-29 06:16:33 +00002388 dev_err(&adapter->pdev->dev,
2389 "UE: %s bit set\n", ue_status_hi_desc[i]);
2390 }
2391 }
2392
2393}
2394
Sathya Perla8d56ff12009-11-22 22:02:26 +00002395static void be_msix_disable(struct be_adapter *adapter)
2396{
Sathya Perlaac6a0c42011-03-21 20:49:25 +00002397 if (msix_enabled(adapter)) {
Sathya Perla8d56ff12009-11-22 22:02:26 +00002398 pci_disable_msix(adapter->pdev);
Sathya Perlaac6a0c42011-03-21 20:49:25 +00002399 adapter->num_msix_vec = 0;
Sathya Perla3abcded2010-10-03 22:12:27 -07002400 }
2401}
2402
Sathya Perla10ef9ab2012-02-09 18:05:27 +00002403static uint be_num_rss_want(struct be_adapter *adapter)
2404{
Yuval Mintz30e80b52012-07-01 03:19:00 +00002405 u32 num = 0;
Padmanabh Ratnakarabb93952012-10-20 06:01:41 +00002406
Sathya Perla10ef9ab2012-02-09 18:05:27 +00002407 if ((adapter->function_caps & BE_FUNCTION_CAPS_RSS) &&
Padmanabh Ratnakarabb93952012-10-20 06:01:41 +00002408 (lancer_chip(adapter) ||
2409 (!sriov_want(adapter) && be_physfn(adapter)))) {
2410 num = adapter->max_rss_queues;
Yuval Mintz30e80b52012-07-01 03:19:00 +00002411 num = min_t(u32, num, (u32)netif_get_num_default_rss_queues());
2412 }
2413 return num;
Sathya Perla10ef9ab2012-02-09 18:05:27 +00002414}
2415
/* Enable MSI-x with as many vectors as the RSS/RoCE configuration wants.
 * Uses the legacy pci_enable_msix() protocol: a positive return is the
 * number of vectors the system can grant, so the request is retried once
 * at that size.  On total failure, PFs fall back to INTx (return 0) while
 * VFs fail the probe (INTx is not supported there).
 */
static int be_msix_enable(struct be_adapter *adapter)
{
#define BE_MIN_MSIX_VECTORS 1
	int i, status, num_vec, num_roce_vec = 0;
	struct device *dev = &adapter->pdev->dev;

	/* If RSS queues are not used, need a vec for default RX Q */
	num_vec = min(be_num_rss_want(adapter), num_online_cpus());
	if (be_roce_supported(adapter)) {
		/* reserve additional vectors for the RoCE function */
		num_roce_vec = min_t(u32, MAX_ROCE_MSIX_VECTORS,
					(num_online_cpus() + 1));
		num_roce_vec = min(num_roce_vec, MAX_ROCE_EQS);
		num_vec += num_roce_vec;
		num_vec = min(num_vec, MAX_MSIX_VECTORS);
	}
	num_vec = max(num_vec, BE_MIN_MSIX_VECTORS);

	for (i = 0; i < num_vec; i++)
		adapter->msix_entries[i].entry = i;

	status = pci_enable_msix(adapter->pdev, adapter->msix_entries, num_vec);
	if (status == 0) {
		goto done;
	} else if (status >= BE_MIN_MSIX_VECTORS) {
		/* positive return = vectors available; retry at that count */
		num_vec = status;
		status = pci_enable_msix(adapter->pdev, adapter->msix_entries,
					 num_vec);
		if (!status)
			goto done;
	}

	dev_warn(dev, "MSIx enable failed\n");
	/* INTx is not supported in VFs, so fail probe if enable_msix fails */
	if (!be_physfn(adapter))
		return status;
	return 0;
done:
	if (be_roce_supported(adapter)) {
		/* split the granted vectors between NIC and RoCE */
		if (num_vec > num_roce_vec) {
			adapter->num_msix_vec = num_vec - num_roce_vec;
			adapter->num_msix_roce_vec =
				num_vec - adapter->num_msix_vec;
		} else {
			adapter->num_msix_vec = num_vec;
			adapter->num_msix_roce_vec = 0;
		}
	} else
		adapter->num_msix_vec = num_vec;
	dev_info(dev, "enabled %d MSI-x vector(s)\n", adapter->num_msix_vec);
	return 0;
}
2467
/* Return the MSI-x vector number assigned to @eqo's event queue. */
static inline int be_msix_vec_get(struct be_adapter *adapter,
				struct be_eq_obj *eqo)
{
	return adapter->msix_entries[eqo->idx].vector;
}
2473
/* request_irq() one MSI-x vector per event queue.  On failure, free the
 * vectors already requested (walking backwards from the failed index)
 * and disable MSI-x so the caller can fall back to INTx.
 */
static int be_msix_register(struct be_adapter *adapter)
{
	struct net_device *netdev = adapter->netdev;
	struct be_eq_obj *eqo;
	int status, i, vec;

	for_all_evt_queues(adapter, eqo, i) {
		sprintf(eqo->desc, "%s-q%d", netdev->name, i);
		vec = be_msix_vec_get(adapter, eqo);
		status = request_irq(vec, be_msix, 0, eqo->desc, eqo);
		if (status)
			goto err_msix;
	}

	return 0;
err_msix:
	/* i currently indexes the EQ that failed; unwind the ones before it */
	for (i--, eqo = &adapter->eq_obj[i]; i >= 0; i--, eqo--)
		free_irq(be_msix_vec_get(adapter, eqo), eqo);
	dev_warn(&adapter->pdev->dev, "MSIX Request IRQ failed - err %d\n",
		status);
	be_msix_disable(adapter);
	return status;
}
2497
2498static int be_irq_register(struct be_adapter *adapter)
2499{
2500 struct net_device *netdev = adapter->netdev;
2501 int status;
2502
Sathya Perlaac6a0c42011-03-21 20:49:25 +00002503 if (msix_enabled(adapter)) {
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002504 status = be_msix_register(adapter);
2505 if (status == 0)
2506 goto done;
Sarveshwar Bandiba343c72010-03-31 02:56:12 +00002507 /* INTx is not supported for VF */
2508 if (!be_physfn(adapter))
2509 return status;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002510 }
2511
Sathya Perlae49cc342012-11-27 19:50:02 +00002512 /* INTx: only the first EQ is used */
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002513 netdev->irq = adapter->pdev->irq;
2514 status = request_irq(netdev->irq, be_intx, IRQF_SHARED, netdev->name,
Sathya Perlae49cc342012-11-27 19:50:02 +00002515 &adapter->eq_obj[0]);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002516 if (status) {
2517 dev_err(&adapter->pdev->dev,
2518 "INTx request IRQ failed - err %d\n", status);
2519 return status;
2520 }
2521done:
2522 adapter->isr_registered = true;
2523 return 0;
2524}
2525
2526static void be_irq_unregister(struct be_adapter *adapter)
2527{
2528 struct net_device *netdev = adapter->netdev;
Sathya Perla10ef9ab2012-02-09 18:05:27 +00002529 struct be_eq_obj *eqo;
Sathya Perla3abcded2010-10-03 22:12:27 -07002530 int i;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002531
2532 if (!adapter->isr_registered)
2533 return;
2534
2535 /* INTx */
Sathya Perlaac6a0c42011-03-21 20:49:25 +00002536 if (!msix_enabled(adapter)) {
Sathya Perlae49cc342012-11-27 19:50:02 +00002537 free_irq(netdev->irq, &adapter->eq_obj[0]);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002538 goto done;
2539 }
2540
2541 /* MSIx */
Sathya Perla10ef9ab2012-02-09 18:05:27 +00002542 for_all_evt_queues(adapter, eqo, i)
2543 free_irq(be_msix_vec_get(adapter, eqo), eqo);
Sathya Perla3abcded2010-10-03 22:12:27 -07002544
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002545done:
2546 adapter->isr_registered = false;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002547}
2548
Sathya Perla10ef9ab2012-02-09 18:05:27 +00002549static void be_rx_qs_destroy(struct be_adapter *adapter)
Sathya Perla482c9e72011-06-29 23:33:17 +00002550{
2551 struct be_queue_info *q;
2552 struct be_rx_obj *rxo;
2553 int i;
2554
2555 for_all_rx_queues(adapter, rxo, i) {
2556 q = &rxo->q;
2557 if (q->created) {
2558 be_cmd_rxq_destroy(adapter, q);
Sathya Perla10ef9ab2012-02-09 18:05:27 +00002559 be_rx_cq_clean(rxo);
Sathya Perla482c9e72011-06-29 23:33:17 +00002560 }
Sathya Perla10ef9ab2012-02-09 18:05:27 +00002561 be_queue_free(adapter, q);
Sathya Perla482c9e72011-06-29 23:33:17 +00002562 }
2563}
2564
/* ndo_stop handler.  Teardown order matters:
 * RoCE close -> NAPI off -> async MCC off -> drain TX completions ->
 * stop the TX path -> destroy RX queues -> quiesce and clean each EQ ->
 * unregister irqs.
 */
static int be_close(struct net_device *netdev)
{
	struct be_adapter *adapter = netdev_priv(netdev);
	struct be_eq_obj *eqo;
	int i;

	be_roce_dev_close(adapter);

	if (adapter->flags & BE_FLAGS_NAPI_ENABLED) {
		for_all_evt_queues(adapter, eqo, i)
			napi_disable(&eqo->napi);
		adapter->flags &= ~BE_FLAGS_NAPI_ENABLED;
	}

	be_async_mcc_disable(adapter);

	/* Wait for all pending tx completions to arrive so that
	 * all tx skbs are freed.
	 */
	be_tx_compl_clean(adapter);
	netif_tx_disable(netdev);

	be_rx_qs_destroy(adapter);

	for_all_evt_queues(adapter, eqo, i) {
		/* make sure no in-flight handler still references the EQ */
		if (msix_enabled(adapter))
			synchronize_irq(be_msix_vec_get(adapter, eqo));
		else
			synchronize_irq(netdev->irq);
		be_eq_clean(eqo);
	}

	be_irq_unregister(adapter);

	return 0;
}
2601
/* Allocate and create all RX queues, program the RSS indirection table
 * (128 entries) when multiple RX queues are in use, and post the initial
 * set of receive buffers. Returns 0 on success or a FW-command error.
 */
static int be_rx_qs_create(struct be_adapter *adapter)
{
	struct be_rx_obj *rxo;
	int rc, i, j;
	u8 rsstable[128];	/* RSS indirection table: 128 entries */

	for_all_rx_queues(adapter, rxo, i) {
		rc = be_queue_alloc(adapter, &rxo->q, RX_Q_LEN,
				    sizeof(struct be_eth_rx_d));
		if (rc)
			return rc;
	}

	/* The FW would like the default RXQ to be created first */
	rxo = default_rxo(adapter);
	rc = be_cmd_rxq_create(adapter, &rxo->q, rxo->cq.id, rx_frag_size,
			       adapter->if_handle, false, &rxo->rss_id);
	if (rc)
		return rc;

	for_all_rss_queues(adapter, rxo, i) {
		rc = be_cmd_rxq_create(adapter, &rxo->q, rxo->cq.id,
				       rx_frag_size, adapter->if_handle,
				       true, &rxo->rss_id);
		if (rc)
			return rc;
	}

	if (be_multi_rxq(adapter)) {
		/* Fill the indirection table by cycling through the RSS
		 * queue ids until all 128 slots are populated.
		 */
		for (j = 0; j < 128; j += adapter->num_rx_qs - 1) {
			for_all_rss_queues(adapter, rxo, i) {
				if ((j + i) >= 128)
					break;
				rsstable[j + i] = rxo->rss_id;
			}
		}
		adapter->rss_flags = RSS_ENABLE_TCP_IPV4 | RSS_ENABLE_IPV4 |
					RSS_ENABLE_TCP_IPV6 | RSS_ENABLE_IPV6;

		/* UDP RSS is supported only on chips newer than BE2/BE3 */
		if (!BEx_chip(adapter))
			adapter->rss_flags |= RSS_ENABLE_UDP_IPV4 |
						RSS_ENABLE_UDP_IPV6;

		rc = be_cmd_rss_config(adapter, rsstable, adapter->rss_flags,
				       128);
		if (rc) {
			adapter->rss_flags = 0;
			return rc;
		}
	}

	/* First time posting */
	for_all_rx_queues(adapter, rxo, i)
		be_post_rx_frags(rxo, GFP_KERNEL);
	return 0;
}
2658
/* ndo_open handler: create RX queues, register IRQs, arm all completion
 * and event queues, enable NAPI and start the TX queues. On any failure,
 * be_close() unwinds whatever was set up and -EIO is returned.
 */
static int be_open(struct net_device *netdev)
{
	struct be_adapter *adapter = netdev_priv(netdev);
	struct be_eq_obj *eqo;
	struct be_rx_obj *rxo;
	struct be_tx_obj *txo;
	u8 link_status;
	int status, i;

	status = be_rx_qs_create(adapter);
	if (status)
		goto err;

	status = be_irq_register(adapter);
	if (status)
		goto err;

	/* Arm RX and TX completion queues before enabling event delivery */
	for_all_rx_queues(adapter, rxo, i)
		be_cq_notify(adapter, rxo->cq.id, true, 0);

	for_all_tx_queues(adapter, txo, i)
		be_cq_notify(adapter, txo->cq.id, true, 0);

	be_async_mcc_enable(adapter);

	for_all_evt_queues(adapter, eqo, i) {
		napi_enable(&eqo->napi);
		be_eq_notify(adapter, eqo->q.id, true, false, 0);
	}
	adapter->flags |= BE_FLAGS_NAPI_ENABLED;

	/* Report current link state; ignore a query failure here */
	status = be_cmd_link_status_query(adapter, NULL, &link_status, 0);
	if (!status)
		be_link_status_update(adapter, link_status);

	netif_tx_start_all_queues(netdev);
	be_roce_dev_open(adapter);
	return 0;
err:
	be_close(adapter->netdev);
	return -EIO;
}
2701
Ajit Khaparde71d8d1b2009-12-03 06:16:59 +00002702static int be_setup_wol(struct be_adapter *adapter, bool enable)
2703{
2704 struct be_dma_mem cmd;
2705 int status = 0;
2706 u8 mac[ETH_ALEN];
2707
2708 memset(mac, 0, ETH_ALEN);
2709
2710 cmd.size = sizeof(struct be_cmd_req_acpi_wol_magic_config);
Ivan Vecera2b7bceb2011-02-02 08:05:12 +00002711 cmd.va = dma_alloc_coherent(&adapter->pdev->dev, cmd.size, &cmd.dma,
Joe Perches1f9061d22013-03-15 07:23:58 +00002712 GFP_KERNEL | __GFP_ZERO);
Ajit Khaparde71d8d1b2009-12-03 06:16:59 +00002713 if (cmd.va == NULL)
2714 return -1;
Ajit Khaparde71d8d1b2009-12-03 06:16:59 +00002715
2716 if (enable) {
2717 status = pci_write_config_dword(adapter->pdev,
2718 PCICFG_PM_CONTROL_OFFSET, PCICFG_PM_CONTROL_MASK);
2719 if (status) {
2720 dev_err(&adapter->pdev->dev,
Frans Pop2381a552010-03-24 07:57:36 +00002721 "Could not enable Wake-on-lan\n");
Ivan Vecera2b7bceb2011-02-02 08:05:12 +00002722 dma_free_coherent(&adapter->pdev->dev, cmd.size, cmd.va,
2723 cmd.dma);
Ajit Khaparde71d8d1b2009-12-03 06:16:59 +00002724 return status;
2725 }
2726 status = be_cmd_enable_magic_wol(adapter,
2727 adapter->netdev->dev_addr, &cmd);
2728 pci_enable_wake(adapter->pdev, PCI_D3hot, 1);
2729 pci_enable_wake(adapter->pdev, PCI_D3cold, 1);
2730 } else {
2731 status = be_cmd_enable_magic_wol(adapter, mac, &cmd);
2732 pci_enable_wake(adapter->pdev, PCI_D3hot, 0);
2733 pci_enable_wake(adapter->pdev, PCI_D3cold, 0);
2734 }
2735
Ivan Vecera2b7bceb2011-02-02 08:05:12 +00002736 dma_free_coherent(&adapter->pdev->dev, cmd.size, cmd.va, cmd.dma);
Ajit Khaparde71d8d1b2009-12-03 06:16:59 +00002737 return status;
2738}
2739
/*
 * Generate a seed MAC address from the PF MAC Address using jhash.
 * MAC Address for VFs are assigned incrementally starting from the seed.
 * These addresses are programmed in the ASIC by the PF and the VF driver
 * queries for the MAC address during its probe.
 */
static int be_vf_eth_addr_config(struct be_adapter *adapter)
{
	u32 vf;
	int status = 0;
	u8 mac[ETH_ALEN];
	struct be_vf_cfg *vf_cfg;

	be_vf_eth_addr_generate(adapter, mac);

	for_all_vfs(adapter, vf_cfg, vf) {
		/* Lancer uses a MAC-list command; BEx programs a pmac entry */
		if (lancer_chip(adapter)) {
			status = be_cmd_set_mac_list(adapter, mac, 1, vf + 1);
		} else {
			status = be_cmd_pmac_add(adapter, mac,
						 vf_cfg->if_handle,
						 &vf_cfg->pmac_id, vf + 1);
		}

		/* On failure, log and keep going with the remaining VFs;
		 * the last error (if any) is returned to the caller.
		 */
		if (status)
			dev_err(&adapter->pdev->dev,
				"Mac address assignment failed for VF %d\n", vf);
		else
			memcpy(vf_cfg->mac_addr, mac, ETH_ALEN);

		/* Next VF gets the next consecutive address */
		mac[5] += 1;
	}
	return status;
}
2774
Sathya Perla4c876612013-02-03 20:30:11 +00002775static int be_vfs_mac_query(struct be_adapter *adapter)
2776{
2777 int status, vf;
2778 u8 mac[ETH_ALEN];
2779 struct be_vf_cfg *vf_cfg;
2780 bool active;
2781
2782 for_all_vfs(adapter, vf_cfg, vf) {
2783 be_cmd_get_mac_from_list(adapter, mac, &active,
2784 &vf_cfg->pmac_id, 0);
2785
2786 status = be_cmd_mac_addr_query(adapter, mac, false,
2787 vf_cfg->if_handle, 0);
2788 if (status)
2789 return status;
2790 memcpy(vf_cfg->mac_addr, mac, ETH_ALEN);
2791 }
2792 return 0;
2793}
2794
/* Tear down SR-IOV state: disable SR-IOV, delete each VF's MAC entry and
 * interface, then free the vf_cfg array. If any VF is still assigned to
 * a VM, SR-IOV is left enabled and only the bookkeeping is released.
 */
static void be_vf_clear(struct be_adapter *adapter)
{
	struct be_vf_cfg *vf_cfg;
	u32 vf;

	if (be_find_vfs(adapter, ASSIGNED)) {
		dev_warn(&adapter->pdev->dev,
			 "VFs are assigned to VMs: not disabling VFs\n");
		goto done;
	}

	pci_disable_sriov(adapter->pdev);

	for_all_vfs(adapter, vf_cfg, vf) {
		/* Lancer uses MAC-list; BEx uses per-VF pmac entries */
		if (lancer_chip(adapter))
			be_cmd_set_mac_list(adapter, NULL, 0, vf + 1);
		else
			be_cmd_pmac_del(adapter, vf_cfg->if_handle,
					vf_cfg->pmac_id, vf + 1);

		be_cmd_if_destroy(adapter, vf_cfg->if_handle, vf + 1);
	}
done:
	kfree(adapter->vf_cfg);
	adapter->num_vfs = 0;
}
2821
Sathya Perlaa54769f2011-10-24 02:45:00 +00002822static int be_clear(struct be_adapter *adapter)
2823{
Ajit Khapardefbc13f02012-03-18 06:23:21 +00002824 int i = 1;
2825
Sathya Perla191eb752012-02-23 18:50:13 +00002826 if (adapter->flags & BE_FLAGS_WORKER_SCHEDULED) {
2827 cancel_delayed_work_sync(&adapter->work);
2828 adapter->flags &= ~BE_FLAGS_WORKER_SCHEDULED;
2829 }
2830
Sathya Perla11ac75e2011-12-13 00:58:50 +00002831 if (sriov_enabled(adapter))
Sathya Perlaf9449ab2011-10-24 02:45:01 +00002832 be_vf_clear(adapter);
2833
Ajit Khapardefbc13f02012-03-18 06:23:21 +00002834 for (; adapter->uc_macs > 0; adapter->uc_macs--, i++)
2835 be_cmd_pmac_del(adapter, adapter->if_handle,
2836 adapter->pmac_id[i], 0);
2837
Sathya Perlaf9449ab2011-10-24 02:45:01 +00002838 be_cmd_if_destroy(adapter, adapter->if_handle, 0);
Sathya Perlaa54769f2011-10-24 02:45:00 +00002839
2840 be_mcc_queues_destroy(adapter);
Sathya Perla10ef9ab2012-02-09 18:05:27 +00002841 be_rx_cqs_destroy(adapter);
Sathya Perlaa54769f2011-10-24 02:45:00 +00002842 be_tx_queues_destroy(adapter);
Sathya Perla10ef9ab2012-02-09 18:05:27 +00002843 be_evt_queues_destroy(adapter);
Sathya Perlaa54769f2011-10-24 02:45:00 +00002844
Padmanabh Ratnakarabb93952012-10-20 06:01:41 +00002845 kfree(adapter->pmac_id);
2846 adapter->pmac_id = NULL;
2847
Sathya Perla10ef9ab2012-02-09 18:05:27 +00002848 be_msix_disable(adapter);
Sathya Perlaa54769f2011-10-24 02:45:00 +00002849 return 0;
2850}
2851
Sathya Perla4c876612013-02-03 20:30:11 +00002852static int be_vfs_if_create(struct be_adapter *adapter)
Padmanabh Ratnakarabb93952012-10-20 06:01:41 +00002853{
Sathya Perla4c876612013-02-03 20:30:11 +00002854 struct be_vf_cfg *vf_cfg;
2855 u32 cap_flags, en_flags, vf;
Padmanabh Ratnakarabb93952012-10-20 06:01:41 +00002856 int status;
2857
Sathya Perla4c876612013-02-03 20:30:11 +00002858 cap_flags = BE_IF_FLAGS_UNTAGGED | BE_IF_FLAGS_BROADCAST |
2859 BE_IF_FLAGS_MULTICAST;
Padmanabh Ratnakarabb93952012-10-20 06:01:41 +00002860
Sathya Perla4c876612013-02-03 20:30:11 +00002861 for_all_vfs(adapter, vf_cfg, vf) {
2862 if (!BE3_chip(adapter))
Vasundhara Volama05f99d2013-04-21 23:28:17 +00002863 be_cmd_get_profile_config(adapter, &cap_flags,
2864 NULL, vf + 1);
Sathya Perla4c876612013-02-03 20:30:11 +00002865
2866 /* If a FW profile exists, then cap_flags are updated */
2867 en_flags = cap_flags & (BE_IF_FLAGS_UNTAGGED |
2868 BE_IF_FLAGS_BROADCAST | BE_IF_FLAGS_MULTICAST);
2869 status = be_cmd_if_create(adapter, cap_flags, en_flags,
2870 &vf_cfg->if_handle, vf + 1);
2871 if (status)
2872 goto err;
2873 }
2874err:
2875 return status;
Padmanabh Ratnakarabb93952012-10-20 06:01:41 +00002876}
2877
Sathya Perla39f1d942012-05-08 19:41:24 +00002878static int be_vf_setup_init(struct be_adapter *adapter)
Sathya Perla30128032011-11-10 19:17:57 +00002879{
Sathya Perla11ac75e2011-12-13 00:58:50 +00002880 struct be_vf_cfg *vf_cfg;
Sathya Perla30128032011-11-10 19:17:57 +00002881 int vf;
2882
Sathya Perla39f1d942012-05-08 19:41:24 +00002883 adapter->vf_cfg = kcalloc(adapter->num_vfs, sizeof(*vf_cfg),
2884 GFP_KERNEL);
2885 if (!adapter->vf_cfg)
2886 return -ENOMEM;
2887
Sathya Perla11ac75e2011-12-13 00:58:50 +00002888 for_all_vfs(adapter, vf_cfg, vf) {
2889 vf_cfg->if_handle = -1;
2890 vf_cfg->pmac_id = -1;
Sathya Perla30128032011-11-10 19:17:57 +00002891 }
Sathya Perla39f1d942012-05-08 19:41:24 +00002892 return 0;
Sathya Perla30128032011-11-10 19:17:57 +00002893}
2894
/* Bring up SR-IOV: decide the VF count (reusing already-enabled VFs if
 * present, otherwise honoring the num_vfs module parameter capped by the
 * device limit), create or discover per-VF interfaces and MACs, program
 * per-VF QoS/link/VLAN state, and finally enable SR-IOV in PCI config
 * space for the fresh-enable path. On any error the partial setup is
 * unwound via be_vf_clear().
 */
static int be_vf_setup(struct be_adapter *adapter)
{
	struct be_vf_cfg *vf_cfg;
	u16 def_vlan, lnk_speed;
	int status, old_vfs, vf;
	struct device *dev = &adapter->pdev->dev;

	/* If VFs were left enabled (e.g. by a previous driver load), adopt
	 * them instead of re-enabling SR-IOV.
	 */
	old_vfs = be_find_vfs(adapter, ENABLED);
	if (old_vfs) {
		dev_info(dev, "%d VFs are already enabled\n", old_vfs);
		if (old_vfs != num_vfs)
			dev_warn(dev, "Ignoring num_vfs=%d setting\n", num_vfs);
		adapter->num_vfs = old_vfs;
	} else {
		if (num_vfs > adapter->dev_num_vfs)
			dev_info(dev, "Device supports %d VFs and not %d\n",
				 adapter->dev_num_vfs, num_vfs);
		adapter->num_vfs = min_t(u16, num_vfs, adapter->dev_num_vfs);
		if (!adapter->num_vfs)
			return 0;
	}

	status = be_vf_setup_init(adapter);
	if (status)
		goto err;

	/* Existing VFs already have interfaces: query their ids.
	 * Fresh VFs need interfaces created.
	 */
	if (old_vfs) {
		for_all_vfs(adapter, vf_cfg, vf) {
			status = be_cmd_get_if_id(adapter, vf_cfg, vf);
			if (status)
				goto err;
		}
	} else {
		status = be_vfs_if_create(adapter);
		if (status)
			goto err;
	}

	/* Likewise for MACs: query existing ones or assign new ones */
	if (old_vfs) {
		status = be_vfs_mac_query(adapter);
		if (status)
			goto err;
	} else {
		status = be_vf_eth_addr_config(adapter);
		if (status)
			goto err;
	}

	for_all_vfs(adapter, vf_cfg, vf) {
		/* BE3 FW, by default, caps VF TX-rate to 100mbps.
		 * Allow full available bandwidth
		 */
		if (BE3_chip(adapter) && !old_vfs)
			be_cmd_set_qos(adapter, 1000, vf+1);

		/* Link-speed query failure is non-fatal; tx_rate just
		 * stays unset.
		 */
		status = be_cmd_link_status_query(adapter, &lnk_speed,
						  NULL, vf + 1);
		if (!status)
			vf_cfg->tx_rate = lnk_speed;

		status = be_cmd_get_hsw_config(adapter, &def_vlan,
					       vf + 1, vf_cfg->if_handle);
		if (status)
			goto err;
		vf_cfg->def_vid = def_vlan;

		be_cmd_enable_vf(adapter, vf + 1);
	}

	/* Enable SR-IOV last so VFs probe only after FW-side setup */
	if (!old_vfs) {
		status = pci_enable_sriov(adapter->pdev, adapter->num_vfs);
		if (status) {
			dev_err(dev, "SRIOV enable failed\n");
			adapter->num_vfs = 0;
			goto err;
		}
	}
	return 0;
err:
	dev_err(dev, "VF setup failed\n");
	be_vf_clear(adapter);
	return status;
}
2978
Sathya Perla30128032011-11-10 19:17:57 +00002979static void be_setup_init(struct be_adapter *adapter)
2980{
2981 adapter->vlan_prio_bmap = 0xff;
Ajit Khaparde42f11cf2012-04-21 18:53:22 +00002982 adapter->phy.link_speed = -1;
Sathya Perla30128032011-11-10 19:17:57 +00002983 adapter->if_handle = -1;
2984 adapter->be3_native = false;
2985 adapter->promiscuous = false;
Padmanabh Ratnakarf25b1192012-10-20 06:02:52 +00002986 if (be_physfn(adapter))
2987 adapter->cmd_privileges = MAX_PRIVILEGES;
2988 else
2989 adapter->cmd_privileges = MIN_PRIVILEGES;
Sathya Perla30128032011-11-10 19:17:57 +00002990}
2991
/* Determine the MAC address for the given interface and whether it is
 * already active (programmed) in the FW. The lookup strategy depends on
 * chip family and PF/VF role; *active_mac tells the caller whether it
 * still needs to program the address via be_cmd_pmac_add().
 * Returns 0 on success or a FW-command error code.
 */
static int be_get_mac_addr(struct be_adapter *adapter, u8 *mac, u32 if_handle,
			   bool *active_mac, u32 *pmac_id)
{
	int status = 0;

	/* If a permanent MAC is already known, just reuse dev_addr */
	if (!is_zero_ether_addr(adapter->netdev->perm_addr)) {
		memcpy(mac, adapter->netdev->dev_addr, ETH_ALEN);
		if (!lancer_chip(adapter) && !be_physfn(adapter))
			*active_mac = true;
		else
			*active_mac = false;

		return status;
	}

	if (lancer_chip(adapter)) {
		status = be_cmd_get_mac_from_list(adapter, mac,
						  active_mac, pmac_id, 0);
		if (*active_mac) {
			status = be_cmd_mac_addr_query(adapter, mac, false,
						       if_handle, *pmac_id);
		}
	} else if (be_physfn(adapter)) {
		/* For BE3, for PF get permanent MAC */
		status = be_cmd_mac_addr_query(adapter, mac, true, 0, 0);
		*active_mac = false;
	} else {
		/* For BE3, for VF get soft MAC assigned by PF*/
		status = be_cmd_mac_addr_query(adapter, mac, false,
					       if_handle, 0);
		*active_mac = true;
	}
	return status;
}
3026
/* Populate the adapter's resource limits (MACs, VLANs, queue counts,
 * interface capability flags and max VF count), either from a FW-provided
 * function profile (non-BEx chips) or from chip-family defaults.
 */
static void be_get_resources(struct be_adapter *adapter)
{
	u16 dev_num_vfs;
	int pos, status;
	bool profile_present = false;
	u16 txq_count = 0;

	if (!BEx_chip(adapter)) {
		/* Newer chips report limits via a FW function profile */
		status = be_cmd_get_func_config(adapter);
		if (!status)
			profile_present = true;
	} else if (BE3_chip(adapter) && be_physfn(adapter)) {
		/* BE3 PF may have a profile-provided TX queue count */
		be_cmd_get_profile_config(adapter, NULL, &txq_count, 0);
	}

	if (profile_present) {
		/* Sanity fixes for Lancer */
		adapter->max_pmac_cnt = min_t(u16, adapter->max_pmac_cnt,
					      BE_UC_PMAC_COUNT);
		adapter->max_vlans = min_t(u16, adapter->max_vlans,
					   BE_NUM_VLANS_SUPPORTED);
		adapter->max_mcast_mac = min_t(u16, adapter->max_mcast_mac,
					       BE_MAX_MC);
		adapter->max_tx_queues = min_t(u16, adapter->max_tx_queues,
					       MAX_TX_QS);
		adapter->max_rss_queues = min_t(u16, adapter->max_rss_queues,
						BE3_MAX_RSS_QS);
		adapter->max_event_queues = min_t(u16,
						  adapter->max_event_queues,
						  BE3_MAX_RSS_QS);

		/* Reserve one RX queue for the non-RSS default queue */
		if (adapter->max_rss_queues &&
		    adapter->max_rss_queues == adapter->max_rx_queues)
			adapter->max_rss_queues -= 1;

		/* Each RSS queue needs its own event queue */
		if (adapter->max_event_queues < adapter->max_rss_queues)
			adapter->max_rss_queues = adapter->max_event_queues;

	} else {
		/* No FW profile: derive limits from chip family defaults */
		if (be_physfn(adapter))
			adapter->max_pmac_cnt = BE_UC_PMAC_COUNT;
		else
			adapter->max_pmac_cnt = BE_VF_UC_PMAC_COUNT;

		/* In FLEX10 mode the VLAN table is shared 8 ways */
		if (adapter->function_mode & FLEX10_MODE)
			adapter->max_vlans = BE_NUM_VLANS_SUPPORTED/8;
		else
			adapter->max_vlans = BE_NUM_VLANS_SUPPORTED;

		adapter->max_mcast_mac = BE_MAX_MC;
		adapter->max_tx_queues = txq_count ? txq_count : MAX_TX_QS;
		adapter->max_tx_queues = min_t(u16, adapter->max_tx_queues,
					       MAX_TX_QS);
		adapter->max_rss_queues = (adapter->be3_native) ?
					   BE3_MAX_RSS_QS : BE2_MAX_RSS_QS;
		adapter->max_event_queues = BE3_MAX_RSS_QS;

		adapter->if_cap_flags = BE_IF_FLAGS_UNTAGGED |
					BE_IF_FLAGS_BROADCAST |
					BE_IF_FLAGS_MULTICAST |
					BE_IF_FLAGS_PASS_L3L4_ERRORS |
					BE_IF_FLAGS_MCAST_PROMISCUOUS |
					BE_IF_FLAGS_VLAN_PROMISCUOUS |
					BE_IF_FLAGS_PROMISCUOUS;

		if (adapter->function_caps & BE_FUNCTION_CAPS_RSS)
			adapter->if_cap_flags |= BE_IF_FLAGS_RSS;
	}

	/* Read the device's total VF capability from the SR-IOV capability */
	pos = pci_find_ext_capability(adapter->pdev, PCI_EXT_CAP_ID_SRIOV);
	if (pos) {
		pci_read_config_word(adapter->pdev, pos + PCI_SRIOV_TOTAL_VF,
				     &dev_num_vfs);
		if (BE3_chip(adapter))
			dev_num_vfs = min_t(u16, dev_num_vfs, MAX_VFS);
		adapter->dev_num_vfs = dev_num_vfs;
	}
}
3105
Sathya Perla39f1d942012-05-08 19:41:24 +00003106/* Routine to query per function resource limits */
3107static int be_get_config(struct be_adapter *adapter)
3108{
Sathya Perla4c876612013-02-03 20:30:11 +00003109 int status;
Sathya Perla39f1d942012-05-08 19:41:24 +00003110
Padmanabh Ratnakarabb93952012-10-20 06:01:41 +00003111 status = be_cmd_query_fw_cfg(adapter, &adapter->port_num,
3112 &adapter->function_mode,
Vasundhara Volam0ad31572013-04-21 23:28:16 +00003113 &adapter->function_caps,
3114 &adapter->asic_rev);
Padmanabh Ratnakarabb93952012-10-20 06:01:41 +00003115 if (status)
3116 goto err;
3117
3118 be_get_resources(adapter);
3119
3120 /* primary mac needs 1 pmac entry */
3121 adapter->pmac_id = kcalloc(adapter->max_pmac_cnt + 1,
3122 sizeof(u32), GFP_KERNEL);
3123 if (!adapter->pmac_id) {
3124 status = -ENOMEM;
3125 goto err;
3126 }
3127
Padmanabh Ratnakarabb93952012-10-20 06:01:41 +00003128err:
3129 return status;
Sathya Perla39f1d942012-05-08 19:41:24 +00003130}
3131
/* Master initialization sequence for the adapter: query FW config and
 * resource limits, enable MSI-X, create all event/completion/MCC queues,
 * create the interface, establish the MAC address, create TX queues,
 * apply VLAN/RX-mode/flow-control settings, set up SR-IOV on the PF, and
 * start the periodic worker. Any failure unwinds via be_clear().
 * The step order follows FW requirements; do not reorder casually.
 */
static int be_setup(struct be_adapter *adapter)
{
	struct device *dev = &adapter->pdev->dev;
	u32 en_flags;
	u32 tx_fc, rx_fc;
	int status;
	u8 mac[ETH_ALEN];
	bool active_mac;

	be_setup_init(adapter);

	if (!lancer_chip(adapter))
		be_cmd_req_native_mode(adapter);

	status = be_get_config(adapter);
	if (status)
		goto err;

	status = be_msix_enable(adapter);
	if (status)
		goto err;

	status = be_evt_queues_create(adapter);
	if (status)
		goto err;

	status = be_tx_cqs_create(adapter);
	if (status)
		goto err;

	status = be_rx_cqs_create(adapter);
	if (status)
		goto err;

	status = be_mcc_queues_create(adapter);
	if (status)
		goto err;

	be_cmd_get_fn_privileges(adapter, &adapter->cmd_privileges, 0);
	/* In UMC mode FW does not return right privileges.
	 * Override with correct privilege equivalent to PF.
	 */
	if (be_is_mc(adapter))
		adapter->cmd_privileges = MAX_PRIVILEGES;

	en_flags = BE_IF_FLAGS_UNTAGGED | BE_IF_FLAGS_BROADCAST |
		   BE_IF_FLAGS_MULTICAST | BE_IF_FLAGS_PASS_L3L4_ERRORS;

	if (adapter->function_caps & BE_FUNCTION_CAPS_RSS)
		en_flags |= BE_IF_FLAGS_RSS;

	/* Enable only the flags the interface actually supports */
	en_flags = en_flags & adapter->if_cap_flags;

	status = be_cmd_if_create(adapter, adapter->if_cap_flags, en_flags,
				  &adapter->if_handle, 0);
	if (status != 0)
		goto err;

	/* Discover the MAC; program it only if the FW doesn't already
	 * have it active (active_mac == false).
	 */
	memset(mac, 0, ETH_ALEN);
	active_mac = false;
	status = be_get_mac_addr(adapter, mac, adapter->if_handle,
				 &active_mac, &adapter->pmac_id[0]);
	if (status != 0)
		goto err;

	if (!active_mac) {
		status = be_cmd_pmac_add(adapter, mac, adapter->if_handle,
					 &adapter->pmac_id[0], 0);
		if (status != 0)
			goto err;
	}

	if (is_zero_ether_addr(adapter->netdev->dev_addr)) {
		memcpy(adapter->netdev->dev_addr, mac, ETH_ALEN);
		memcpy(adapter->netdev->perm_addr, mac, ETH_ALEN);
	}

	status = be_tx_qs_create(adapter);
	if (status)
		goto err;

	be_cmd_get_fw_ver(adapter, adapter->fw_ver, adapter->fw_on_flash);

	/* Re-program VLAN filters that were configured before this setup */
	if (adapter->vlans_added)
		be_vid_config(adapter);

	be_set_rx_mode(adapter->netdev);

	be_cmd_get_flow_control(adapter, &tx_fc, &rx_fc);

	/* Push desired flow-control settings only if they differ from
	 * what the FW currently reports.
	 */
	if (rx_fc != adapter->rx_fc || tx_fc != adapter->tx_fc)
		be_cmd_set_flow_control(adapter, adapter->tx_fc,
					adapter->rx_fc);

	if (be_physfn(adapter)) {
		if (adapter->dev_num_vfs)
			be_vf_setup(adapter);
		else
			dev_warn(dev, "device doesn't support SRIOV\n");
	}

	status = be_cmd_get_phy_info(adapter);
	if (!status && be_pause_supported(adapter))
		adapter->phy.fc_autoneg = 1;

	schedule_delayed_work(&adapter->work, msecs_to_jiffies(1000));
	adapter->flags |= BE_FLAGS_WORKER_SCHEDULED;
	return 0;
err:
	be_clear(adapter);
	return status;
}
3244
#ifdef CONFIG_NET_POLL_CONTROLLER
/* ndo_poll_controller handler: re-arm every event queue and schedule its
 * NAPI context so RX/TX completions are processed even when interrupts
 * are unavailable (netconsole/netpoll path).
 */
static void be_netpoll(struct net_device *netdev)
{
	struct be_adapter *adapter = netdev_priv(netdev);
	struct be_eq_obj *eqo;
	int i;

	for_all_evt_queues(adapter, eqo, i) {
		be_eq_notify(eqo->adapter, eqo->q.id, false, true, 0);
		napi_schedule(&eqo->napi);
	}
	/* dropped the redundant trailing "return;" of a void function */
}
#endif
3260
/* Signature string expected in a UFI firmware file header */
#define FW_FILE_HDR_SIGN 	"ServerEngines Corp. "
/* 32-byte cookie marking a flash section directory inside a UFI image;
 * compared against fsec->cookie when scanning (see get_fsec_info()).
 */
char flash_cookie[2][16] =      {"*** SE FLAS", "H DIRECTORY *** "};
3263
Sarveshwar Bandifa9a6fe2009-11-20 14:23:47 -08003264static bool be_flash_redboot(struct be_adapter *adapter,
Ajit Khaparde3f0d4562010-02-09 01:30:35 +00003265 const u8 *p, u32 img_start, int image_size,
3266 int hdr_size)
Sarveshwar Bandifa9a6fe2009-11-20 14:23:47 -08003267{
3268 u32 crc_offset;
3269 u8 flashed_crc[4];
3270 int status;
Ajit Khaparde3f0d4562010-02-09 01:30:35 +00003271
3272 crc_offset = hdr_size + img_start + image_size - 4;
3273
Sarveshwar Bandifa9a6fe2009-11-20 14:23:47 -08003274 p += crc_offset;
Ajit Khaparde3f0d4562010-02-09 01:30:35 +00003275
3276 status = be_cmd_get_flash_crc(adapter, flashed_crc,
Ajit Khapardef510fc62010-03-31 01:47:45 +00003277 (image_size - 4));
Sarveshwar Bandifa9a6fe2009-11-20 14:23:47 -08003278 if (status) {
3279 dev_err(&adapter->pdev->dev,
3280 "could not get crc from flash, not flashing redboot\n");
3281 return false;
3282 }
3283
3284 /*update redboot only if crc does not match*/
3285 if (!memcmp(flashed_crc, p, 4))
3286 return false;
3287 else
3288 return true;
Sarveshwar Bandifa9a6fe2009-11-20 14:23:47 -08003289}
3290
Sathya Perla306f1342011-08-02 19:57:45 +00003291static bool phy_flashing_required(struct be_adapter *adapter)
3292{
Ajit Khaparde42f11cf2012-04-21 18:53:22 +00003293 return (adapter->phy.phy_type == TN_8022 &&
3294 adapter->phy.interface_type == PHY_TYPE_BASET_10GB);
Sathya Perla306f1342011-08-02 19:57:45 +00003295}
3296
Padmanabh Ratnakarc165541e2012-04-25 01:47:15 +00003297static bool is_comp_in_ufi(struct be_adapter *adapter,
3298 struct flash_section_info *fsec, int type)
3299{
3300 int i = 0, img_type = 0;
3301 struct flash_section_info_g2 *fsec_g2 = NULL;
3302
Sathya Perlaca34fe32012-11-06 17:48:56 +00003303 if (BE2_chip(adapter))
Padmanabh Ratnakarc165541e2012-04-25 01:47:15 +00003304 fsec_g2 = (struct flash_section_info_g2 *)fsec;
3305
3306 for (i = 0; i < MAX_FLASH_COMP; i++) {
3307 if (fsec_g2)
3308 img_type = le32_to_cpu(fsec_g2->fsec_entry[i].type);
3309 else
3310 img_type = le32_to_cpu(fsec->fsec_entry[i].type);
3311
3312 if (img_type == type)
3313 return true;
3314 }
3315 return false;
3316
3317}
3318
3319struct flash_section_info *get_fsec_info(struct be_adapter *adapter,
3320 int header_size,
3321 const struct firmware *fw)
3322{
3323 struct flash_section_info *fsec = NULL;
3324 const u8 *p = fw->data;
3325
3326 p += header_size;
3327 while (p < (fw->data + fw->size)) {
3328 fsec = (struct flash_section_info *)p;
3329 if (!memcmp(flash_cookie, fsec->cookie, sizeof(flash_cookie)))
3330 return fsec;
3331 p += 32;
3332 }
3333 return NULL;
3334}
3335
/* Write one image (@img, @img_size bytes) to flash in 32KB chunks via the
 * write_flashrom mailbox command.  Every chunk except the last is sent with
 * a SAVE opcode (staged in adapter memory); the final chunk uses a FLASH
 * opcode, which commits the whole accumulated image.  PHY firmware uses its
 * own opcode pair.  Returns 0 on success or the command status on failure.
 */
static int be_flash(struct be_adapter *adapter, const u8 *img,
		    struct be_dma_mem *flash_cmd, int optype, int img_size)
{
	u32 total_bytes = 0, flash_op, num_bytes = 0;
	int status = 0;
	struct be_cmd_write_flashrom *req = flash_cmd->va;

	total_bytes = img_size;
	while (total_bytes) {
		/* 32KB is the payload limit of one write_flashrom command */
		num_bytes = min_t(u32, 32*1024, total_bytes);

		total_bytes -= num_bytes;

		if (!total_bytes) {
			/* last chunk: commit the image to flash */
			if (optype == OPTYPE_PHY_FW)
				flash_op = FLASHROM_OPER_PHY_FLASH;
			else
				flash_op = FLASHROM_OPER_FLASH;
		} else {
			/* intermediate chunk: stage only, don't commit */
			if (optype == OPTYPE_PHY_FW)
				flash_op = FLASHROM_OPER_PHY_SAVE;
			else
				flash_op = FLASHROM_OPER_SAVE;
		}

		memcpy(req->data_buf, img, num_bytes);
		img += num_bytes;
		status = be_cmd_write_flashrom(adapter, flash_cmd, optype,
				flash_op, num_bytes);
		if (status) {
			/* PHY fw flashing may be rejected by the adapter;
			 * treat that as non-fatal and stop quietly.
			 */
			if (status == ILLEGAL_IOCTL_REQ &&
			    optype == OPTYPE_PHY_FW)
				break;
			dev_err(&adapter->pdev->dev,
				"cmd to write to flash rom failed.\n");
			return status;
		}
	}
	return 0;
}
3376
/* For BE2, BE3 and BE3-R */
/* Flash every component present in the UFI image onto a BE2/BE3/BE3-R
 * adapter.  The gen2/gen3 tables below describe where each component type
 * lives in flash and its maximum size; a component is flashed only if it
 * is present in the UFI's section table and passes the per-type gating
 * checks (NCSI firmware-version floor, PHY capability, redboot CRC).
 * Returns 0 on success, -1 on a corrupt UFI, or the flash command status.
 */
static int be_flash_BEx(struct be_adapter *adapter,
			 const struct firmware *fw,
			 struct be_dma_mem *flash_cmd,
			 int num_of_images)

{
	int status = 0, i, filehdr_size = 0;
	int img_hdrs_size = (num_of_images * sizeof(struct image_hdr));
	const u8 *p = fw->data;
	const struct flash_comp *pflashcomp;
	int num_comp, redboot;
	struct flash_section_info *fsec = NULL;

	/* flash offset / optype / max size / UFI image type, per component */
	struct flash_comp gen3_flash_types[] = {
		{ FLASH_iSCSI_PRIMARY_IMAGE_START_g3, OPTYPE_ISCSI_ACTIVE,
			FLASH_IMAGE_MAX_SIZE_g3, IMAGE_FIRMWARE_iSCSI},
		{ FLASH_REDBOOT_START_g3, OPTYPE_REDBOOT,
			FLASH_REDBOOT_IMAGE_MAX_SIZE_g3, IMAGE_BOOT_CODE},
		{ FLASH_iSCSI_BIOS_START_g3, OPTYPE_BIOS,
			FLASH_BIOS_IMAGE_MAX_SIZE_g3, IMAGE_OPTION_ROM_ISCSI},
		{ FLASH_PXE_BIOS_START_g3, OPTYPE_PXE_BIOS,
			FLASH_BIOS_IMAGE_MAX_SIZE_g3, IMAGE_OPTION_ROM_PXE},
		{ FLASH_FCoE_BIOS_START_g3, OPTYPE_FCOE_BIOS,
			FLASH_BIOS_IMAGE_MAX_SIZE_g3, IMAGE_OPTION_ROM_FCoE},
		{ FLASH_iSCSI_BACKUP_IMAGE_START_g3, OPTYPE_ISCSI_BACKUP,
			FLASH_IMAGE_MAX_SIZE_g3, IMAGE_FIRMWARE_BACKUP_iSCSI},
		{ FLASH_FCoE_PRIMARY_IMAGE_START_g3, OPTYPE_FCOE_FW_ACTIVE,
			FLASH_IMAGE_MAX_SIZE_g3, IMAGE_FIRMWARE_FCoE},
		{ FLASH_FCoE_BACKUP_IMAGE_START_g3, OPTYPE_FCOE_FW_BACKUP,
			FLASH_IMAGE_MAX_SIZE_g3, IMAGE_FIRMWARE_BACKUP_FCoE},
		{ FLASH_NCSI_START_g3, OPTYPE_NCSI_FW,
			FLASH_NCSI_IMAGE_MAX_SIZE_g3, IMAGE_NCSI},
		{ FLASH_PHY_FW_START_g3, OPTYPE_PHY_FW,
			FLASH_PHY_FW_IMAGE_MAX_SIZE_g3, IMAGE_FIRMWARE_PHY}
	};

	struct flash_comp gen2_flash_types[] = {
		{ FLASH_iSCSI_PRIMARY_IMAGE_START_g2, OPTYPE_ISCSI_ACTIVE,
			FLASH_IMAGE_MAX_SIZE_g2, IMAGE_FIRMWARE_iSCSI},
		{ FLASH_REDBOOT_START_g2, OPTYPE_REDBOOT,
			FLASH_REDBOOT_IMAGE_MAX_SIZE_g2, IMAGE_BOOT_CODE},
		{ FLASH_iSCSI_BIOS_START_g2, OPTYPE_BIOS,
			FLASH_BIOS_IMAGE_MAX_SIZE_g2, IMAGE_OPTION_ROM_ISCSI},
		{ FLASH_PXE_BIOS_START_g2, OPTYPE_PXE_BIOS,
			FLASH_BIOS_IMAGE_MAX_SIZE_g2, IMAGE_OPTION_ROM_PXE},
		{ FLASH_FCoE_BIOS_START_g2, OPTYPE_FCOE_BIOS,
			FLASH_BIOS_IMAGE_MAX_SIZE_g2, IMAGE_OPTION_ROM_FCoE},
		{ FLASH_iSCSI_BACKUP_IMAGE_START_g2, OPTYPE_ISCSI_BACKUP,
			FLASH_IMAGE_MAX_SIZE_g2, IMAGE_FIRMWARE_BACKUP_iSCSI},
		{ FLASH_FCoE_PRIMARY_IMAGE_START_g2, OPTYPE_FCOE_FW_ACTIVE,
			FLASH_IMAGE_MAX_SIZE_g2, IMAGE_FIRMWARE_FCoE},
		{ FLASH_FCoE_BACKUP_IMAGE_START_g2, OPTYPE_FCOE_FW_BACKUP,
			FLASH_IMAGE_MAX_SIZE_g2, IMAGE_FIRMWARE_BACKUP_FCoE}
	};

	/* BE3 (and BE3-R) use the gen3 layout; BE2 uses gen2 */
	if (BE3_chip(adapter)) {
		pflashcomp = gen3_flash_types;
		filehdr_size = sizeof(struct flash_file_hdr_g3);
		num_comp = ARRAY_SIZE(gen3_flash_types);
	} else {
		pflashcomp = gen2_flash_types;
		filehdr_size = sizeof(struct flash_file_hdr_g2);
		num_comp = ARRAY_SIZE(gen2_flash_types);
	}

	/* Get flash section info*/
	fsec = get_fsec_info(adapter, filehdr_size + img_hdrs_size, fw);
	if (!fsec) {
		dev_err(&adapter->pdev->dev,
			"Invalid Cookie. UFI corrupted ?\n");
		return -1;
	}
	for (i = 0; i < num_comp; i++) {
		if (!is_comp_in_ufi(adapter, fsec, pflashcomp[i].img_type))
			continue;

		/* skip NCSI fw if the running fw is older than 3.102.148.0 */
		if ((pflashcomp[i].optype == OPTYPE_NCSI_FW) &&
		    memcmp(adapter->fw_ver, "3.102.148.0", 11) < 0)
			continue;

		if (pflashcomp[i].optype == OPTYPE_PHY_FW &&
		    !phy_flashing_required(adapter))
			continue;

		/* flash redboot only when its CRC differs from what's
		 * already in flash
		 */
		if (pflashcomp[i].optype == OPTYPE_REDBOOT) {
			redboot = be_flash_redboot(adapter, fw->data,
				pflashcomp[i].offset, pflashcomp[i].size,
				filehdr_size + img_hdrs_size);
			if (!redboot)
				continue;
		}

		p = fw->data;
		p += filehdr_size + pflashcomp[i].offset + img_hdrs_size;
		/* reject a component that would read past the UFI image */
		if (p + pflashcomp[i].size > fw->data + fw->size)
			return -1;

		status = be_flash(adapter, p, flash_cmd, pflashcomp[i].optype,
					pflashcomp[i].size);
		if (status) {
			dev_err(&adapter->pdev->dev,
				"Flashing section type %d failed.\n",
				pflashcomp[i].img_type);
			return status;
		}
	}
	return 0;
}
3486
Padmanabh Ratnakar773a2d72012-10-20 06:04:16 +00003487static int be_flash_skyhawk(struct be_adapter *adapter,
3488 const struct firmware *fw,
3489 struct be_dma_mem *flash_cmd, int num_of_images)
Ajit Khaparde3f0d4562010-02-09 01:30:35 +00003490{
Padmanabh Ratnakar773a2d72012-10-20 06:04:16 +00003491 int status = 0, i, filehdr_size = 0;
3492 int img_offset, img_size, img_optype, redboot;
3493 int img_hdrs_size = num_of_images * sizeof(struct image_hdr);
3494 const u8 *p = fw->data;
3495 struct flash_section_info *fsec = NULL;
3496
3497 filehdr_size = sizeof(struct flash_file_hdr_g3);
3498 fsec = get_fsec_info(adapter, filehdr_size + img_hdrs_size, fw);
3499 if (!fsec) {
3500 dev_err(&adapter->pdev->dev,
3501 "Invalid Cookie. UFI corrupted ?\n");
3502 return -1;
3503 }
3504
3505 for (i = 0; i < le32_to_cpu(fsec->fsec_hdr.num_images); i++) {
3506 img_offset = le32_to_cpu(fsec->fsec_entry[i].offset);
3507 img_size = le32_to_cpu(fsec->fsec_entry[i].pad_size);
3508
3509 switch (le32_to_cpu(fsec->fsec_entry[i].type)) {
3510 case IMAGE_FIRMWARE_iSCSI:
3511 img_optype = OPTYPE_ISCSI_ACTIVE;
3512 break;
3513 case IMAGE_BOOT_CODE:
3514 img_optype = OPTYPE_REDBOOT;
3515 break;
3516 case IMAGE_OPTION_ROM_ISCSI:
3517 img_optype = OPTYPE_BIOS;
3518 break;
3519 case IMAGE_OPTION_ROM_PXE:
3520 img_optype = OPTYPE_PXE_BIOS;
3521 break;
3522 case IMAGE_OPTION_ROM_FCoE:
3523 img_optype = OPTYPE_FCOE_BIOS;
3524 break;
3525 case IMAGE_FIRMWARE_BACKUP_iSCSI:
3526 img_optype = OPTYPE_ISCSI_BACKUP;
3527 break;
3528 case IMAGE_NCSI:
3529 img_optype = OPTYPE_NCSI_FW;
3530 break;
3531 default:
3532 continue;
3533 }
3534
3535 if (img_optype == OPTYPE_REDBOOT) {
3536 redboot = be_flash_redboot(adapter, fw->data,
3537 img_offset, img_size,
3538 filehdr_size + img_hdrs_size);
3539 if (!redboot)
3540 continue;
3541 }
3542
3543 p = fw->data;
3544 p += filehdr_size + img_offset + img_hdrs_size;
3545 if (p + img_size > fw->data + fw->size)
3546 return -1;
3547
3548 status = be_flash(adapter, p, flash_cmd, img_optype, img_size);
3549 if (status) {
3550 dev_err(&adapter->pdev->dev,
3551 "Flashing section type %d failed.\n",
3552 fsec->fsec_entry[i].type);
3553 return status;
3554 }
3555 }
3556 return 0;
Ajit Khaparde3f0d4562010-02-09 01:30:35 +00003557}
3558
/* Download a firmware image to a Lancer adapter via the write_object
 * command: stream the image to the "/prg" object in 32KB chunks, issue a
 * zero-length write to commit it, then act on the reset status the
 * firmware reports (self-reset via lancer_physdev_ctrl, reboot required,
 * or nothing).  Returns 0 on success or a negative/command error code.
 */
static int lancer_fw_download(struct be_adapter *adapter,
				const struct firmware *fw)
{
#define LANCER_FW_DOWNLOAD_CHUNK      (32 * 1024)
#define LANCER_FW_DOWNLOAD_LOCATION "/prg"
	struct be_dma_mem flash_cmd;
	const u8 *data_ptr = NULL;
	u8 *dest_image_ptr = NULL;
	size_t image_size = 0;
	u32 chunk_size = 0;
	u32 data_written = 0;
	u32 offset = 0;
	int status = 0;
	u8 add_status = 0;
	u8 change_status;

	/* fw requires the image length to be a multiple of 4 bytes */
	if (!IS_ALIGNED(fw->size, sizeof(u32))) {
		dev_err(&adapter->pdev->dev,
			"FW Image not properly aligned. "
			"Length must be 4 byte aligned.\n");
		status = -EINVAL;
		goto lancer_fw_exit;
	}

	/* one DMA buffer holds the command header plus one image chunk */
	flash_cmd.size = sizeof(struct lancer_cmd_req_write_object)
				+ LANCER_FW_DOWNLOAD_CHUNK;
	flash_cmd.va = dma_alloc_coherent(&adapter->pdev->dev, flash_cmd.size,
					  &flash_cmd.dma, GFP_KERNEL);
	if (!flash_cmd.va) {
		status = -ENOMEM;
		goto lancer_fw_exit;
	}

	dest_image_ptr = flash_cmd.va +
				sizeof(struct lancer_cmd_req_write_object);
	image_size = fw->size;
	data_ptr = fw->data;

	while (image_size) {
		chunk_size = min_t(u32, image_size, LANCER_FW_DOWNLOAD_CHUNK);

		/* Copy the image chunk content. */
		memcpy(dest_image_ptr, data_ptr, chunk_size);

		status = lancer_cmd_write_object(adapter, &flash_cmd,
						 chunk_size, offset,
						 LANCER_FW_DOWNLOAD_LOCATION,
						 &data_written, &change_status,
						 &add_status);
		if (status)
			break;

		/* advance by what the fw actually consumed, which may be
		 * less than chunk_size
		 */
		offset += data_written;
		data_ptr += data_written;
		image_size -= data_written;
	}

	if (!status) {
		/* Commit the FW written */
		status = lancer_cmd_write_object(adapter, &flash_cmd,
						 0, offset,
						 LANCER_FW_DOWNLOAD_LOCATION,
						 &data_written, &change_status,
						 &add_status);
	}

	dma_free_coherent(&adapter->pdev->dev, flash_cmd.size, flash_cmd.va,
			  flash_cmd.dma);
	if (status) {
		dev_err(&adapter->pdev->dev,
			"Firmware load error. "
			"Status code: 0x%x Additional Status: 0x%x\n",
			status, add_status);
		goto lancer_fw_exit;
	}

	if (change_status == LANCER_FW_RESET_NEEDED) {
		/* fw can activate the new image itself after a reset */
		status = lancer_physdev_ctrl(adapter,
					     PHYSDEV_CONTROL_FW_RESET_MASK);
		if (status) {
			dev_err(&adapter->pdev->dev,
				"Adapter busy for FW reset.\n"
				"New FW will not be active.\n");
			goto lancer_fw_exit;
		}
	} else if (change_status != LANCER_NO_RESET_NEEDED) {
		dev_err(&adapter->pdev->dev,
			"System reboot required for new FW"
			" to be active\n");
	}

	dev_info(&adapter->pdev->dev, "Firmware flashed successfully\n");
lancer_fw_exit:
	return status;
}
3654
Sathya Perlaca34fe32012-11-06 17:48:56 +00003655#define UFI_TYPE2 2
3656#define UFI_TYPE3 3
Vasundhara Volam0ad31572013-04-21 23:28:16 +00003657#define UFI_TYPE3R 10
Sathya Perlaca34fe32012-11-06 17:48:56 +00003658#define UFI_TYPE4 4
3659static int be_get_ufi_type(struct be_adapter *adapter,
Vasundhara Volam0ad31572013-04-21 23:28:16 +00003660 struct flash_file_hdr_g3 *fhdr)
Padmanabh Ratnakar773a2d72012-10-20 06:04:16 +00003661{
3662 if (fhdr == NULL)
3663 goto be_get_ufi_exit;
3664
Sathya Perlaca34fe32012-11-06 17:48:56 +00003665 if (skyhawk_chip(adapter) && fhdr->build[0] == '4')
3666 return UFI_TYPE4;
Vasundhara Volam0ad31572013-04-21 23:28:16 +00003667 else if (BE3_chip(adapter) && fhdr->build[0] == '3') {
3668 if (fhdr->asic_type_rev == 0x10)
3669 return UFI_TYPE3R;
3670 else
3671 return UFI_TYPE3;
3672 } else if (BE2_chip(adapter) && fhdr->build[0] == '2')
Sathya Perlaca34fe32012-11-06 17:48:56 +00003673 return UFI_TYPE2;
Padmanabh Ratnakar773a2d72012-10-20 06:04:16 +00003674
3675be_get_ufi_exit:
3676 dev_err(&adapter->pdev->dev,
3677 "UFI and Interface are not compatible for flashing\n");
3678 return -1;
3679}
3680
/* Flash a UFI image on non-Lancer adapters.  Determines the UFI flavor
 * from the file header, then dispatches each image entry with imageid 1
 * to the Skyhawk or BEx flashing routine.  Type-2 UFIs carry no image
 * headers and are flashed in one shot after the loop.  Returns 0 on
 * success or a negative error.
 */
static int be_fw_download(struct be_adapter *adapter, const struct firmware* fw)
{
	struct flash_file_hdr_g3 *fhdr3;
	struct image_hdr *img_hdr_ptr = NULL;
	struct be_dma_mem flash_cmd;
	const u8 *p;
	int status = 0, i = 0, num_imgs = 0, ufi_type = 0;

	flash_cmd.size = sizeof(struct be_cmd_write_flashrom);
	flash_cmd.va = dma_alloc_coherent(&adapter->pdev->dev, flash_cmd.size,
					  &flash_cmd.dma, GFP_KERNEL);
	if (!flash_cmd.va) {
		status = -ENOMEM;
		goto be_fw_exit;
	}

	p = fw->data;
	fhdr3 = (struct flash_file_hdr_g3 *)p;

	ufi_type = be_get_ufi_type(adapter, fhdr3);

	num_imgs = le32_to_cpu(fhdr3->num_imgs);
	for (i = 0; i < num_imgs; i++) {
		img_hdr_ptr = (struct image_hdr *)(fw->data +
				(sizeof(struct flash_file_hdr_g3) +
				 i * sizeof(struct image_hdr)));
		/* only image entries with imageid == 1 are flashed */
		if (le32_to_cpu(img_hdr_ptr->imageid) == 1) {
			switch (ufi_type) {
			case UFI_TYPE4:
				status = be_flash_skyhawk(adapter, fw,
							&flash_cmd, num_imgs);
				break;
			case UFI_TYPE3R:
				status = be_flash_BEx(adapter, fw, &flash_cmd,
						      num_imgs);
				break;
			case UFI_TYPE3:
				/* Do not flash this ufi on BE3-R cards */
				if (adapter->asic_rev < 0x10)
					status = be_flash_BEx(adapter, fw,
							      &flash_cmd,
							      num_imgs);
				else {
					status = -1;
					dev_err(&adapter->pdev->dev,
						"Can't load BE3 UFI on BE3R\n");
				}
			}
		}
	}

	/* gen2 UFIs have no image headers; flash directly */
	if (ufi_type == UFI_TYPE2)
		status = be_flash_BEx(adapter, fw, &flash_cmd, 0);
	else if (ufi_type == -1)
		status = -1;

	dma_free_coherent(&adapter->pdev->dev, flash_cmd.size, flash_cmd.va,
			  flash_cmd.dma);
	if (status) {
		dev_err(&adapter->pdev->dev, "Firmware load error\n");
		goto be_fw_exit;
	}

	dev_info(&adapter->pdev->dev, "Firmware flashed successfully\n");

be_fw_exit:
	return status;
}
3749
3750int be_load_fw(struct be_adapter *adapter, u8 *fw_file)
3751{
3752 const struct firmware *fw;
3753 int status;
3754
3755 if (!netif_running(adapter->netdev)) {
3756 dev_err(&adapter->pdev->dev,
3757 "Firmware load not allowed (interface is down)\n");
3758 return -1;
3759 }
3760
3761 status = request_firmware(&fw, fw_file, &adapter->pdev->dev);
3762 if (status)
3763 goto fw_exit;
3764
3765 dev_info(&adapter->pdev->dev, "Flashing firmware file %s\n", fw_file);
3766
3767 if (lancer_chip(adapter))
3768 status = lancer_fw_download(adapter, fw);
3769 else
3770 status = be_fw_download(adapter, fw);
3771
Somnath Kotureeb65ce2013-05-26 21:08:36 +00003772 if (!status)
3773 be_cmd_get_fw_ver(adapter, adapter->fw_ver,
3774 adapter->fw_on_flash);
3775
Ajit Khaparde84517482009-09-04 03:12:16 +00003776fw_exit:
3777 release_firmware(fw);
3778 return status;
3779}
3780
/* Netdev entry points for be2net interfaces; installed on the netdev in
 * be_netdev_init().
 */
static const struct net_device_ops be_netdev_ops = {
	.ndo_open		= be_open,
	.ndo_stop		= be_close,
	.ndo_start_xmit		= be_xmit,
	.ndo_set_rx_mode	= be_set_rx_mode,
	.ndo_set_mac_address	= be_mac_addr_set,
	.ndo_change_mtu		= be_change_mtu,
	.ndo_get_stats64	= be_get_stats64,
	.ndo_validate_addr	= eth_validate_addr,
	.ndo_vlan_rx_add_vid	= be_vlan_add_vid,
	.ndo_vlan_rx_kill_vid	= be_vlan_rem_vid,
	.ndo_set_vf_mac		= be_set_vf_mac,
	.ndo_set_vf_vlan	= be_set_vf_vlan,
	.ndo_set_vf_tx_rate	= be_set_vf_tx_rate,
	.ndo_get_vf_config	= be_get_vf_config,
#ifdef CONFIG_NET_POLL_CONTROLLER
	.ndo_poll_controller	= be_netpoll,
#endif
};
3800
/* One-time netdev setup: advertise offload feature flags, install the
 * netdev/ethtool ops, and register one NAPI context per event queue.
 */
static void be_netdev_init(struct net_device *netdev)
{
	struct be_adapter *adapter = netdev_priv(netdev);
	struct be_eq_obj *eqo;
	int i;

	/* user-toggleable offloads: scatter-gather, TSO, checksum, VLAN tx */
	netdev->hw_features |= NETIF_F_SG | NETIF_F_TSO | NETIF_F_TSO6 |
		NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM | NETIF_F_RXCSUM |
		NETIF_F_HW_VLAN_CTAG_TX;
	if (be_multi_rxq(adapter))
		netdev->hw_features |= NETIF_F_RXHASH;

	/* everything above is enabled by default, plus VLAN rx offloads
	 * that cannot be toggled
	 */
	netdev->features |= netdev->hw_features |
		NETIF_F_HW_VLAN_CTAG_RX | NETIF_F_HW_VLAN_CTAG_FILTER;

	netdev->vlan_features |= NETIF_F_SG | NETIF_F_TSO | NETIF_F_TSO6 |
		NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM;

	netdev->priv_flags |= IFF_UNICAST_FLT;

	netdev->flags |= IFF_MULTICAST;

	netif_set_gso_max_size(netdev, 65535 - ETH_HLEN);

	netdev->netdev_ops = &be_netdev_ops;

	SET_ETHTOOL_OPS(netdev, &be_ethtool_ops);

	for_all_evt_queues(adapter, eqo, i)
		netif_napi_add(netdev, &eqo->napi, be_poll, BE_NAPI_WEIGHT);
}
3832
3833static void be_unmap_pci_bars(struct be_adapter *adapter)
3834{
Sathya Perlac5b3ad42013-03-05 22:23:20 +00003835 if (adapter->csr)
3836 pci_iounmap(adapter->pdev, adapter->csr);
Sathya Perla8788fdc2009-07-27 22:52:03 +00003837 if (adapter->db)
Sathya Perlace66f782012-11-06 17:48:58 +00003838 pci_iounmap(adapter->pdev, adapter->db);
Parav Pandit045508a2012-03-26 14:27:13 +00003839}
3840
/* Which PCI BAR exposes the doorbells: BAR 0 on Lancer and on virtual
 * functions, BAR 4 otherwise.
 */
static int db_bar(struct be_adapter *adapter)
{
	return (lancer_chip(adapter) || !be_physfn(adapter)) ? 0 : 4;
}
3848
3849static int be_roce_map_pci_bars(struct be_adapter *adapter)
Parav Pandit045508a2012-03-26 14:27:13 +00003850{
Sathya Perladbf0f2a2012-11-06 17:49:00 +00003851 if (skyhawk_chip(adapter)) {
Sathya Perlace66f782012-11-06 17:48:58 +00003852 adapter->roce_db.size = 4096;
3853 adapter->roce_db.io_addr = pci_resource_start(adapter->pdev,
3854 db_bar(adapter));
3855 adapter->roce_db.total_size = pci_resource_len(adapter->pdev,
3856 db_bar(adapter));
3857 }
Parav Pandit045508a2012-03-26 14:27:13 +00003858 return 0;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07003859}
3860
/* Map the PCI BARs used by the driver: the CSR BAR (BAR 2, BEx physical
 * functions only), the doorbell BAR, and the RoCE doorbell window on
 * Skyhawk.  Also latches the interface type from the SLI_INTF config
 * register.  Returns 0 on success, -ENOMEM if a mapping fails (any BAR
 * mapped so far is unwound).
 */
static int be_map_pci_bars(struct be_adapter *adapter)
{
	u8 __iomem *addr;
	u32 sli_intf;

	pci_read_config_dword(adapter->pdev, SLI_INTF_REG_OFFSET, &sli_intf);
	adapter->if_type = (sli_intf & SLI_INTF_IF_TYPE_MASK) >>
				SLI_INTF_IF_TYPE_SHIFT;

	if (BEx_chip(adapter) && be_physfn(adapter)) {
		adapter->csr = pci_iomap(adapter->pdev, 2, 0);
		if (adapter->csr == NULL)
			return -ENOMEM;
	}

	addr = pci_iomap(adapter->pdev, db_bar(adapter), 0);
	if (addr == NULL)
		goto pci_map_err;
	adapter->db = addr;

	be_roce_map_pci_bars(adapter);
	return 0;

pci_map_err:
	/* unwinds the csr mapping made above, if any */
	be_unmap_pci_bars(adapter);
	return -ENOMEM;
}
3888
Sathya Perla6b7c5b92009-03-11 23:32:03 -07003889static void be_ctrl_cleanup(struct be_adapter *adapter)
3890{
Sathya Perla8788fdc2009-07-27 22:52:03 +00003891 struct be_dma_mem *mem = &adapter->mbox_mem_alloced;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07003892
3893 be_unmap_pci_bars(adapter);
3894
3895 if (mem->va)
Ivan Vecera2b7bceb2011-02-02 08:05:12 +00003896 dma_free_coherent(&adapter->pdev->dev, mem->size, mem->va,
3897 mem->dma);
Sathya Perlae7b909a2009-11-22 22:01:10 +00003898
Sathya Perla5b8821b2011-08-02 19:57:44 +00003899 mem = &adapter->rx_filter;
Sathya Perlae7b909a2009-11-22 22:01:10 +00003900 if (mem->va)
Ivan Vecera2b7bceb2011-02-02 08:05:12 +00003901 dma_free_coherent(&adapter->pdev->dev, mem->size, mem->va,
3902 mem->dma);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07003903}
3904
/* Set up everything needed to talk to the adapter's control path: latch
 * SLI family/VF status from config space, map the PCI BARs, allocate the
 * 16-byte-aligned mailbox and the rx-filter DMA buffers, and initialize
 * the locks protecting mailbox/MCC access.  On failure, resources
 * acquired so far are released via the goto chain.  Returns 0 or a
 * negative error.
 */
static int be_ctrl_init(struct be_adapter *adapter)
{
	struct be_dma_mem *mbox_mem_alloc = &adapter->mbox_mem_alloced;
	struct be_dma_mem *mbox_mem_align = &adapter->mbox_mem;
	struct be_dma_mem *rx_filter = &adapter->rx_filter;
	u32 sli_intf;
	int status;

	pci_read_config_dword(adapter->pdev, SLI_INTF_REG_OFFSET, &sli_intf);
	adapter->sli_family = (sli_intf & SLI_INTF_FAMILY_MASK) >>
				SLI_INTF_FAMILY_SHIFT;
	adapter->virtfn = (sli_intf & SLI_INTF_FT_MASK) ? 1 : 0;

	status = be_map_pci_bars(adapter);
	if (status)
		goto done;

	/* over-allocate by 16 bytes so the mailbox itself can be aligned
	 * to a 16-byte boundary below
	 */
	mbox_mem_alloc->size = sizeof(struct be_mcc_mailbox) + 16;
	mbox_mem_alloc->va = dma_alloc_coherent(&adapter->pdev->dev,
						mbox_mem_alloc->size,
						&mbox_mem_alloc->dma,
						GFP_KERNEL);
	if (!mbox_mem_alloc->va) {
		status = -ENOMEM;
		goto unmap_pci_bars;
	}
	mbox_mem_align->size = sizeof(struct be_mcc_mailbox);
	mbox_mem_align->va = PTR_ALIGN(mbox_mem_alloc->va, 16);
	mbox_mem_align->dma = PTR_ALIGN(mbox_mem_alloc->dma, 16);
	memset(mbox_mem_align->va, 0, sizeof(struct be_mcc_mailbox));

	rx_filter->size = sizeof(struct be_cmd_req_rx_filter);
	rx_filter->va = dma_alloc_coherent(&adapter->pdev->dev, rx_filter->size,
					   &rx_filter->dma,
					   GFP_KERNEL | __GFP_ZERO);
	if (rx_filter->va == NULL) {
		status = -ENOMEM;
		goto free_mbox;
	}

	mutex_init(&adapter->mbox_lock);
	spin_lock_init(&adapter->mcc_lock);
	spin_lock_init(&adapter->mcc_cq_lock);

	init_completion(&adapter->flash_compl);
	pci_save_state(adapter->pdev);
	return 0;

free_mbox:
	dma_free_coherent(&adapter->pdev->dev, mbox_mem_alloc->size,
			  mbox_mem_alloc->va, mbox_mem_alloc->dma);

unmap_pci_bars:
	be_unmap_pci_bars(adapter);

done:
	return status;
}
3963
3964static void be_stats_cleanup(struct be_adapter *adapter)
3965{
Sathya Perla3abcded2010-10-03 22:12:27 -07003966 struct be_dma_mem *cmd = &adapter->stats_cmd;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07003967
3968 if (cmd->va)
Ivan Vecera2b7bceb2011-02-02 08:05:12 +00003969 dma_free_coherent(&adapter->pdev->dev, cmd->size,
3970 cmd->va, cmd->dma);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07003971}
3972
3973static int be_stats_init(struct be_adapter *adapter)
3974{
Sathya Perla3abcded2010-10-03 22:12:27 -07003975 struct be_dma_mem *cmd = &adapter->stats_cmd;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07003976
Sathya Perlaca34fe32012-11-06 17:48:56 +00003977 if (lancer_chip(adapter))
3978 cmd->size = sizeof(struct lancer_cmd_req_pport_stats);
3979 else if (BE2_chip(adapter))
Ajit Khaparde89a88ab2011-05-16 07:36:18 +00003980 cmd->size = sizeof(struct be_cmd_req_get_stats_v0);
Sathya Perlaca34fe32012-11-06 17:48:56 +00003981 else
3982 /* BE3 and Skyhawk */
3983 cmd->size = sizeof(struct be_cmd_req_get_stats_v1);
3984
Ivan Vecera2b7bceb2011-02-02 08:05:12 +00003985 cmd->va = dma_alloc_coherent(&adapter->pdev->dev, cmd->size, &cmd->dma,
Joe Perches1f9061d22013-03-15 07:23:58 +00003986 GFP_KERNEL | __GFP_ZERO);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07003987 if (cmd->va == NULL)
3988 return -1;
3989 return 0;
3990}
3991
/* PCI remove callback.  Tears down in the reverse order of probe:
 * detach the RoCE function, mask interrupts, stop the recovery worker,
 * unregister the netdev, release rings/queues, notify fw, free the
 * stats and control-path resources, then drop the PCI device.
 */
static void be_remove(struct pci_dev *pdev)
{
	struct be_adapter *adapter = pci_get_drvdata(pdev);

	if (!adapter)
		return;

	be_roce_dev_remove(adapter);
	be_intr_set(adapter, false);

	/* must complete before the netdev (and its work context) goes away */
	cancel_delayed_work_sync(&adapter->func_recovery_work);

	unregister_netdev(adapter->netdev);

	be_clear(adapter);

	/* tell fw we're done with firing cmds */
	be_cmd_fw_clean(adapter);

	be_stats_cleanup(adapter);

	be_ctrl_cleanup(adapter);

	pci_disable_pcie_error_reporting(pdev);

	pci_set_drvdata(pdev, NULL);
	pci_release_regions(pdev);
	pci_disable_device(pdev);

	free_netdev(adapter->netdev);
}
4023
Ajit Khaparde4762f6c2012-03-18 06:23:11 +00004024bool be_is_wol_supported(struct be_adapter *adapter)
4025{
4026 return ((adapter->wol_cap & BE_WOL_CAP) &&
4027 !be_is_wol_excluded(adapter)) ? true : false;
4028}
4029
Somnath Kotur941a77d2012-05-17 22:59:03 +00004030u32 be_get_fw_log_level(struct be_adapter *adapter)
4031{
4032 struct be_dma_mem extfat_cmd;
4033 struct be_fat_conf_params *cfgs;
4034 int status;
4035 u32 level = 0;
4036 int j;
4037
Padmanabh Ratnakarf25b1192012-10-20 06:02:52 +00004038 if (lancer_chip(adapter))
4039 return 0;
4040
Somnath Kotur941a77d2012-05-17 22:59:03 +00004041 memset(&extfat_cmd, 0, sizeof(struct be_dma_mem));
4042 extfat_cmd.size = sizeof(struct be_cmd_resp_get_ext_fat_caps);
4043 extfat_cmd.va = pci_alloc_consistent(adapter->pdev, extfat_cmd.size,
4044 &extfat_cmd.dma);
4045
4046 if (!extfat_cmd.va) {
4047 dev_err(&adapter->pdev->dev, "%s: Memory allocation failure\n",
4048 __func__);
4049 goto err;
4050 }
4051
4052 status = be_cmd_get_ext_fat_capabilites(adapter, &extfat_cmd);
4053 if (!status) {
4054 cfgs = (struct be_fat_conf_params *)(extfat_cmd.va +
4055 sizeof(struct be_cmd_resp_hdr));
Anton Blanchardac46a462012-07-24 15:05:25 +00004056 for (j = 0; j < le32_to_cpu(cfgs->module[0].num_modes); j++) {
Somnath Kotur941a77d2012-05-17 22:59:03 +00004057 if (cfgs->module[0].trace_lvl[j].mode == MODE_UART)
4058 level = cfgs->module[0].trace_lvl[j].dbg_lvl;
4059 }
4060 }
4061 pci_free_consistent(adapter->pdev, extfat_cmd.size, extfat_cmd.va,
4062 extfat_cmd.dma);
4063err:
4064 return level;
4065}
Padmanabh Ratnakarabb93952012-10-20 06:01:41 +00004066
Sathya Perla39f1d942012-05-08 19:41:24 +00004067static int be_get_initial_config(struct be_adapter *adapter)
Sathya Perla6b7c5b92009-03-11 23:32:03 -07004068{
Sathya Perla6b7c5b92009-03-11 23:32:03 -07004069 int status;
Somnath Kotur941a77d2012-05-17 22:59:03 +00004070 u32 level;
Sathya Perla43a04fdc2009-10-14 20:21:17 +00004071
Ajit Khaparde9e1453c2011-02-20 11:42:22 +00004072 status = be_cmd_get_cntl_attributes(adapter);
4073 if (status)
4074 return status;
4075
Ajit Khaparde4762f6c2012-03-18 06:23:11 +00004076 status = be_cmd_get_acpi_wol_cap(adapter);
4077 if (status) {
4078 /* in case of a failure to get wol capabillities
4079 * check the exclusion list to determine WOL capability */
4080 if (!be_is_wol_excluded(adapter))
4081 adapter->wol_cap |= BE_WOL_CAP;
4082 }
4083
4084 if (be_is_wol_supported(adapter))
4085 adapter->wol = true;
4086
Padmanabh Ratnakar7aeb2152012-07-12 03:56:46 +00004087 /* Must be a power of 2 or else MODULO will BUG_ON */
4088 adapter->be_get_temp_freq = 64;
4089
Somnath Kotur941a77d2012-05-17 22:59:03 +00004090 level = be_get_fw_log_level(adapter);
4091 adapter->msg_enable = level <= FW_LOG_LEVEL_DEFAULT ? NETIF_MSG_HW : 0;
4092
Sathya Perla2243e2e2009-11-22 22:02:03 +00004093 return 0;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07004094}
4095
Padmanabh Ratnakarf67ef7b2012-07-12 03:57:09 +00004096static int lancer_recover_func(struct be_adapter *adapter)
Padmanabh Ratnakard8110f62011-11-25 05:48:23 +00004097{
Somnath Kotur01e5b2c2013-05-29 22:56:17 +00004098 struct device *dev = &adapter->pdev->dev;
Padmanabh Ratnakard8110f62011-11-25 05:48:23 +00004099 int status;
Padmanabh Ratnakard8110f62011-11-25 05:48:23 +00004100
Padmanabh Ratnakarf67ef7b2012-07-12 03:57:09 +00004101 status = lancer_test_and_set_rdy_state(adapter);
4102 if (status)
4103 goto err;
Padmanabh Ratnakard8110f62011-11-25 05:48:23 +00004104
Padmanabh Ratnakarf67ef7b2012-07-12 03:57:09 +00004105 if (netif_running(adapter->netdev))
4106 be_close(adapter->netdev);
Padmanabh Ratnakard8110f62011-11-25 05:48:23 +00004107
Padmanabh Ratnakarf67ef7b2012-07-12 03:57:09 +00004108 be_clear(adapter);
Padmanabh Ratnakard8110f62011-11-25 05:48:23 +00004109
Somnath Kotur01e5b2c2013-05-29 22:56:17 +00004110 be_clear_all_error(adapter);
Padmanabh Ratnakarf67ef7b2012-07-12 03:57:09 +00004111
4112 status = be_setup(adapter);
4113 if (status)
4114 goto err;
4115
4116 if (netif_running(adapter->netdev)) {
4117 status = be_open(adapter->netdev);
Padmanabh Ratnakard8110f62011-11-25 05:48:23 +00004118 if (status)
4119 goto err;
Padmanabh Ratnakard8110f62011-11-25 05:48:23 +00004120 }
Padmanabh Ratnakarf67ef7b2012-07-12 03:57:09 +00004121
Somnath Kotur01e5b2c2013-05-29 22:56:17 +00004122 dev_err(dev, "Error recovery successful\n");
Padmanabh Ratnakarf67ef7b2012-07-12 03:57:09 +00004123 return 0;
Padmanabh Ratnakard8110f62011-11-25 05:48:23 +00004124err:
Somnath Kotur01e5b2c2013-05-29 22:56:17 +00004125 if (status == -EAGAIN)
4126 dev_err(dev, "Waiting for resource provisioning\n");
4127 else
4128 dev_err(dev, "Error recovery failed\n");
Padmanabh Ratnakarf67ef7b2012-07-12 03:57:09 +00004129
4130 return status;
4131}
4132
4133static void be_func_recovery_task(struct work_struct *work)
4134{
4135 struct be_adapter *adapter =
4136 container_of(work, struct be_adapter, func_recovery_work.work);
Somnath Kotur01e5b2c2013-05-29 22:56:17 +00004137 int status = 0;
Padmanabh Ratnakarf67ef7b2012-07-12 03:57:09 +00004138
4139 be_detect_error(adapter);
4140
4141 if (adapter->hw_error && lancer_chip(adapter)) {
4142
Padmanabh Ratnakarf67ef7b2012-07-12 03:57:09 +00004143 rtnl_lock();
4144 netif_device_detach(adapter->netdev);
4145 rtnl_unlock();
4146
4147 status = lancer_recover_func(adapter);
Padmanabh Ratnakarf67ef7b2012-07-12 03:57:09 +00004148 if (!status)
4149 netif_device_attach(adapter->netdev);
4150 }
4151
Somnath Kotur01e5b2c2013-05-29 22:56:17 +00004152 /* In Lancer, for all errors other than provisioning error (-EAGAIN),
4153 * no need to attempt further recovery.
4154 */
4155 if (!status || status == -EAGAIN)
4156 schedule_delayed_work(&adapter->func_recovery_work,
4157 msecs_to_jiffies(1000));
Padmanabh Ratnakard8110f62011-11-25 05:48:23 +00004158}
4159
/* Periodic (1s) housekeeping worker: reaps MCC completions while the
 * interface is down, and otherwise refreshes stats, polls the die
 * temperature, replenishes starved RX rings and updates EQ delays.
 */
static void be_worker(struct work_struct *work)
{
	struct be_adapter *adapter =
		container_of(work, struct be_adapter, work.work);
	struct be_rx_obj *rxo;
	struct be_eq_obj *eqo;
	int i;

	/* when interrupts are not yet enabled, just reap any pending
	 * mcc completions */
	if (!netif_running(adapter->netdev)) {
		local_bh_disable();
		be_process_mcc(adapter);
		local_bh_enable();
		goto reschedule;
	}

	/* Only fire a new stats request once the previous one completed */
	if (!adapter->stats_cmd_sent) {
		if (lancer_chip(adapter))
			lancer_cmd_get_pport_stats(adapter,
						   &adapter->stats_cmd);
		else
			be_cmd_get_stats(adapter, &adapter->stats_cmd);
	}

	/* Temperature is polled only every be_get_temp_freq iterations */
	if (MODULO(adapter->work_counter, adapter->be_get_temp_freq) == 0)
		be_cmd_get_die_temperature(adapter);

	/* Refill any RX queue that ran out of buffers since the last pass */
	for_all_rx_queues(adapter, rxo, i) {
		if (rxo->rx_post_starved) {
			rxo->rx_post_starved = false;
			be_post_rx_frags(rxo, GFP_KERNEL);
		}
	}

	for_all_evt_queues(adapter, eqo, i)
		be_eqd_update(adapter, eqo);

reschedule:
	adapter->work_counter++;
	schedule_delayed_work(&adapter->work, msecs_to_jiffies(1000));
}
4202
Sathya Perla39f1d942012-05-08 19:41:24 +00004203static bool be_reset_required(struct be_adapter *adapter)
4204{
Sathya Perlad79c0a22012-06-05 19:37:22 +00004205 return be_find_vfs(adapter, ENABLED) > 0 ? false : true;
Sathya Perla39f1d942012-05-08 19:41:24 +00004206}
4207
Sathya Perlad3791422012-09-28 04:39:44 +00004208static char *mc_name(struct be_adapter *adapter)
4209{
4210 if (adapter->function_mode & FLEX10_MODE)
4211 return "FLEX10";
4212 else if (adapter->function_mode & VNIC_MODE)
4213 return "vNIC";
4214 else if (adapter->function_mode & UMC_ENABLED)
4215 return "UMC";
4216 else
4217 return "";
4218}
4219
/* "PF" for the physical function, "VF" for a virtual function */
static inline char *func_name(struct be_adapter *adapter)
{
	if (be_physfn(adapter))
		return "PF";
	return "VF";
}
4224
Greg Kroah-Hartman1dd06ae2012-12-06 14:30:56 +00004225static int be_probe(struct pci_dev *pdev, const struct pci_device_id *pdev_id)
Sathya Perla6b7c5b92009-03-11 23:32:03 -07004226{
4227 int status = 0;
4228 struct be_adapter *adapter;
4229 struct net_device *netdev;
Padmanabh Ratnakarb4e32a72012-07-12 03:57:35 +00004230 char port_name;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07004231
4232 status = pci_enable_device(pdev);
4233 if (status)
4234 goto do_none;
4235
4236 status = pci_request_regions(pdev, DRV_NAME);
4237 if (status)
4238 goto disable_dev;
4239 pci_set_master(pdev);
4240
Sathya Perla7f640062012-06-05 19:37:20 +00004241 netdev = alloc_etherdev_mqs(sizeof(*adapter), MAX_TX_QS, MAX_RX_QS);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07004242 if (netdev == NULL) {
4243 status = -ENOMEM;
4244 goto rel_reg;
4245 }
4246 adapter = netdev_priv(netdev);
4247 adapter->pdev = pdev;
4248 pci_set_drvdata(pdev, adapter);
4249 adapter->netdev = netdev;
Sathya Perla2243e2e2009-11-22 22:02:03 +00004250 SET_NETDEV_DEV(netdev, &pdev->dev);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07004251
Ivan Vecera2b7bceb2011-02-02 08:05:12 +00004252 status = dma_set_mask(&pdev->dev, DMA_BIT_MASK(64));
Sathya Perla6b7c5b92009-03-11 23:32:03 -07004253 if (!status) {
Craig Hada2bd92cd2013-04-21 23:28:18 +00004254 status = dma_set_coherent_mask(&pdev->dev, DMA_BIT_MASK(64));
4255 if (status < 0) {
4256 dev_err(&pdev->dev, "dma_set_coherent_mask failed\n");
4257 goto free_netdev;
4258 }
Sathya Perla6b7c5b92009-03-11 23:32:03 -07004259 netdev->features |= NETIF_F_HIGHDMA;
4260 } else {
Ivan Vecera2b7bceb2011-02-02 08:05:12 +00004261 status = dma_set_mask(&pdev->dev, DMA_BIT_MASK(32));
Somnath Kotur0c5fed02013-06-11 17:18:22 +05304262 if (!status)
4263 status = dma_set_coherent_mask(&pdev->dev,
4264 DMA_BIT_MASK(32));
Sathya Perla6b7c5b92009-03-11 23:32:03 -07004265 if (status) {
4266 dev_err(&pdev->dev, "Could not set PCI DMA Mask\n");
4267 goto free_netdev;
4268 }
4269 }
4270
Sathya Perlad6b6d982012-09-05 01:56:48 +00004271 status = pci_enable_pcie_error_reporting(pdev);
4272 if (status)
4273 dev_err(&pdev->dev, "Could not use PCIe error reporting\n");
4274
Sathya Perla6b7c5b92009-03-11 23:32:03 -07004275 status = be_ctrl_init(adapter);
4276 if (status)
Sathya Perla39f1d942012-05-08 19:41:24 +00004277 goto free_netdev;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07004278
Sathya Perla2243e2e2009-11-22 22:02:03 +00004279 /* sync up with fw's ready state */
Sarveshwar Bandiba343c72010-03-31 02:56:12 +00004280 if (be_physfn(adapter)) {
Padmanabh Ratnakarbf99e502012-07-12 03:56:58 +00004281 status = be_fw_wait_ready(adapter);
Sarveshwar Bandiba343c72010-03-31 02:56:12 +00004282 if (status)
4283 goto ctrl_clean;
Sarveshwar Bandiba343c72010-03-31 02:56:12 +00004284 }
Sathya Perla2243e2e2009-11-22 22:02:03 +00004285
Sathya Perla39f1d942012-05-08 19:41:24 +00004286 if (be_reset_required(adapter)) {
4287 status = be_cmd_reset_function(adapter);
4288 if (status)
4289 goto ctrl_clean;
Sarveshwar Bandi556ae192010-05-24 18:38:25 -07004290
Kalesh AP2d177be2013-04-28 22:22:29 +00004291 /* Wait for interrupts to quiesce after an FLR */
4292 msleep(100);
4293 }
Somnath Kotur8cef7a72013-03-14 02:41:51 +00004294
4295 /* Allow interrupts for other ULPs running on NIC function */
4296 be_intr_set(adapter, true);
Sathya Perla10ef9ab2012-02-09 18:05:27 +00004297
Kalesh AP2d177be2013-04-28 22:22:29 +00004298 /* tell fw we're ready to fire cmds */
4299 status = be_cmd_fw_init(adapter);
4300 if (status)
4301 goto ctrl_clean;
4302
Sathya Perla6b7c5b92009-03-11 23:32:03 -07004303 status = be_stats_init(adapter);
4304 if (status)
4305 goto ctrl_clean;
4306
Sathya Perla39f1d942012-05-08 19:41:24 +00004307 status = be_get_initial_config(adapter);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07004308 if (status)
4309 goto stats_clean;
4310
Sathya Perla6b7c5b92009-03-11 23:32:03 -07004311 INIT_DELAYED_WORK(&adapter->work, be_worker);
Padmanabh Ratnakarf67ef7b2012-07-12 03:57:09 +00004312 INIT_DELAYED_WORK(&adapter->func_recovery_work, be_func_recovery_task);
Sathya Perlaa54769f2011-10-24 02:45:00 +00004313 adapter->rx_fc = adapter->tx_fc = true;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07004314
Sathya Perla5fb379e2009-06-18 00:02:59 +00004315 status = be_setup(adapter);
4316 if (status)
Sathya Perla55f5c3c2012-09-28 04:39:42 +00004317 goto stats_clean;
Sathya Perla2243e2e2009-11-22 22:02:03 +00004318
Sathya Perla3abcded2010-10-03 22:12:27 -07004319 be_netdev_init(netdev);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07004320 status = register_netdev(netdev);
4321 if (status != 0)
Sathya Perla5fb379e2009-06-18 00:02:59 +00004322 goto unsetup;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07004323
Parav Pandit045508a2012-03-26 14:27:13 +00004324 be_roce_dev_add(adapter);
4325
Padmanabh Ratnakarf67ef7b2012-07-12 03:57:09 +00004326 schedule_delayed_work(&adapter->func_recovery_work,
4327 msecs_to_jiffies(1000));
Padmanabh Ratnakarb4e32a72012-07-12 03:57:35 +00004328
4329 be_cmd_query_port_name(adapter, &port_name);
4330
Sathya Perlad3791422012-09-28 04:39:44 +00004331 dev_info(&pdev->dev, "%s: %s %s port %c\n", nic_name(pdev),
4332 func_name(adapter), mc_name(adapter), port_name);
Somnath Kotur34b1ef02011-06-01 00:33:22 +00004333
Sathya Perla6b7c5b92009-03-11 23:32:03 -07004334 return 0;
4335
Sathya Perla5fb379e2009-06-18 00:02:59 +00004336unsetup:
4337 be_clear(adapter);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07004338stats_clean:
4339 be_stats_cleanup(adapter);
4340ctrl_clean:
4341 be_ctrl_cleanup(adapter);
Sathya Perlaf9449ab2011-10-24 02:45:01 +00004342free_netdev:
Sathya Perlafe6d2a32010-11-21 23:25:50 +00004343 free_netdev(netdev);
Sathya Perla8d56ff12009-11-22 22:02:26 +00004344 pci_set_drvdata(pdev, NULL);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07004345rel_reg:
4346 pci_release_regions(pdev);
4347disable_dev:
4348 pci_disable_device(pdev);
4349do_none:
Ajit Khapardec4ca2372009-05-18 15:38:55 -07004350 dev_err(&pdev->dev, "%s initialization failed\n", nic_name(pdev));
Sathya Perla6b7c5b92009-03-11 23:32:03 -07004351 return status;
4352}
4353
4354static int be_suspend(struct pci_dev *pdev, pm_message_t state)
4355{
4356 struct be_adapter *adapter = pci_get_drvdata(pdev);
4357 struct net_device *netdev = adapter->netdev;
4358
Ajit Khaparde71d8d1b2009-12-03 06:16:59 +00004359 if (adapter->wol)
4360 be_setup_wol(adapter, true);
4361
Padmanabh Ratnakarf67ef7b2012-07-12 03:57:09 +00004362 cancel_delayed_work_sync(&adapter->func_recovery_work);
4363
Sathya Perla6b7c5b92009-03-11 23:32:03 -07004364 netif_device_detach(netdev);
4365 if (netif_running(netdev)) {
4366 rtnl_lock();
4367 be_close(netdev);
4368 rtnl_unlock();
4369 }
Sarveshwar Bandi9b0365f2009-08-12 21:01:29 +00004370 be_clear(adapter);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07004371
4372 pci_save_state(pdev);
4373 pci_disable_device(pdev);
4374 pci_set_power_state(pdev, pci_choose_state(pdev, state));
4375 return 0;
4376}
4377
4378static int be_resume(struct pci_dev *pdev)
4379{
4380 int status = 0;
4381 struct be_adapter *adapter = pci_get_drvdata(pdev);
4382 struct net_device *netdev = adapter->netdev;
4383
4384 netif_device_detach(netdev);
4385
4386 status = pci_enable_device(pdev);
4387 if (status)
4388 return status;
4389
4390 pci_set_power_state(pdev, 0);
4391 pci_restore_state(pdev);
4392
Sathya Perla2243e2e2009-11-22 22:02:03 +00004393 /* tell fw we're ready to fire cmds */
4394 status = be_cmd_fw_init(adapter);
4395 if (status)
4396 return status;
4397
Sarveshwar Bandi9b0365f2009-08-12 21:01:29 +00004398 be_setup(adapter);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07004399 if (netif_running(netdev)) {
4400 rtnl_lock();
Sathya Perla6b7c5b92009-03-11 23:32:03 -07004401 be_open(netdev);
4402 rtnl_unlock();
4403 }
Padmanabh Ratnakarf67ef7b2012-07-12 03:57:09 +00004404
4405 schedule_delayed_work(&adapter->func_recovery_work,
4406 msecs_to_jiffies(1000));
Sathya Perla6b7c5b92009-03-11 23:32:03 -07004407 netif_device_attach(netdev);
Ajit Khaparde71d8d1b2009-12-03 06:16:59 +00004408
4409 if (adapter->wol)
4410 be_setup_wol(adapter, false);
Ajit Khapardea4ca0552011-02-11 13:38:03 +00004411
Sathya Perla6b7c5b92009-03-11 23:32:03 -07004412 return 0;
4413}
4414
Sathya Perla82456b02010-02-17 01:35:37 +00004415/*
4416 * An FLR will stop BE from DMAing any data.
4417 */
4418static void be_shutdown(struct pci_dev *pdev)
4419{
4420 struct be_adapter *adapter = pci_get_drvdata(pdev);
Sathya Perla82456b02010-02-17 01:35:37 +00004421
Ajit Khaparde2d5d4152011-04-06 05:53:13 +00004422 if (!adapter)
4423 return;
Sathya Perla82456b02010-02-17 01:35:37 +00004424
Sathya Perla0f4a6822011-03-21 20:49:28 +00004425 cancel_delayed_work_sync(&adapter->work);
Padmanabh Ratnakarf67ef7b2012-07-12 03:57:09 +00004426 cancel_delayed_work_sync(&adapter->func_recovery_work);
Ajit Khapardea4ca0552011-02-11 13:38:03 +00004427
Ajit Khaparde2d5d4152011-04-06 05:53:13 +00004428 netif_device_detach(adapter->netdev);
Sathya Perla82456b02010-02-17 01:35:37 +00004429
Ajit Khaparde57841862011-04-06 18:08:43 +00004430 be_cmd_reset_function(adapter);
4431
Sathya Perla82456b02010-02-17 01:35:37 +00004432 pci_disable_device(pdev);
Sathya Perla82456b02010-02-17 01:35:37 +00004433}
4434
Sathya Perlacf588472010-02-14 21:22:01 +00004435static pci_ers_result_t be_eeh_err_detected(struct pci_dev *pdev,
4436 pci_channel_state_t state)
4437{
4438 struct be_adapter *adapter = pci_get_drvdata(pdev);
4439 struct net_device *netdev = adapter->netdev;
4440
4441 dev_err(&adapter->pdev->dev, "EEH error detected\n");
4442
Somnath Kotur01e5b2c2013-05-29 22:56:17 +00004443 if (!adapter->eeh_error) {
4444 adapter->eeh_error = true;
Sathya Perlacf588472010-02-14 21:22:01 +00004445
Somnath Kotur01e5b2c2013-05-29 22:56:17 +00004446 cancel_delayed_work_sync(&adapter->func_recovery_work);
Padmanabh Ratnakarf67ef7b2012-07-12 03:57:09 +00004447
Sathya Perlacf588472010-02-14 21:22:01 +00004448 rtnl_lock();
Somnath Kotur01e5b2c2013-05-29 22:56:17 +00004449 netif_device_detach(netdev);
4450 if (netif_running(netdev))
4451 be_close(netdev);
Sathya Perlacf588472010-02-14 21:22:01 +00004452 rtnl_unlock();
Somnath Kotur01e5b2c2013-05-29 22:56:17 +00004453
4454 be_clear(adapter);
Sathya Perlacf588472010-02-14 21:22:01 +00004455 }
Sathya Perlacf588472010-02-14 21:22:01 +00004456
4457 if (state == pci_channel_io_perm_failure)
4458 return PCI_ERS_RESULT_DISCONNECT;
4459
4460 pci_disable_device(pdev);
4461
Somnath Kotureeb7fc72012-05-02 03:41:01 +00004462 /* The error could cause the FW to trigger a flash debug dump.
4463 * Resetting the card while flash dump is in progress
Padmanabh Ratnakarc8a54162012-10-20 06:03:37 +00004464 * can cause it not to recover; wait for it to finish.
4465 * Wait only for first function as it is needed only once per
4466 * adapter.
Somnath Kotureeb7fc72012-05-02 03:41:01 +00004467 */
Padmanabh Ratnakarc8a54162012-10-20 06:03:37 +00004468 if (pdev->devfn == 0)
4469 ssleep(30);
4470
Sathya Perlacf588472010-02-14 21:22:01 +00004471 return PCI_ERS_RESULT_NEED_RESET;
4472}
4473
4474static pci_ers_result_t be_eeh_reset(struct pci_dev *pdev)
4475{
4476 struct be_adapter *adapter = pci_get_drvdata(pdev);
4477 int status;
4478
4479 dev_info(&adapter->pdev->dev, "EEH reset\n");
Sathya Perlacf588472010-02-14 21:22:01 +00004480
4481 status = pci_enable_device(pdev);
4482 if (status)
4483 return PCI_ERS_RESULT_DISCONNECT;
4484
4485 pci_set_master(pdev);
4486 pci_set_power_state(pdev, 0);
4487 pci_restore_state(pdev);
4488
4489 /* Check if card is ok and fw is ready */
Sathya Perlac5b3ad42013-03-05 22:23:20 +00004490 dev_info(&adapter->pdev->dev,
4491 "Waiting for FW to be ready after EEH reset\n");
Padmanabh Ratnakarbf99e502012-07-12 03:56:58 +00004492 status = be_fw_wait_ready(adapter);
Sathya Perlacf588472010-02-14 21:22:01 +00004493 if (status)
4494 return PCI_ERS_RESULT_DISCONNECT;
4495
Sathya Perlad6b6d982012-09-05 01:56:48 +00004496 pci_cleanup_aer_uncorrect_error_status(pdev);
Somnath Kotur01e5b2c2013-05-29 22:56:17 +00004497 be_clear_all_error(adapter);
Sathya Perlacf588472010-02-14 21:22:01 +00004498 return PCI_ERS_RESULT_RECOVERED;
4499}
4500
4501static void be_eeh_resume(struct pci_dev *pdev)
4502{
4503 int status = 0;
4504 struct be_adapter *adapter = pci_get_drvdata(pdev);
4505 struct net_device *netdev = adapter->netdev;
4506
4507 dev_info(&adapter->pdev->dev, "EEH resume\n");
4508
4509 pci_save_state(pdev);
4510
Kalesh AP2d177be2013-04-28 22:22:29 +00004511 status = be_cmd_reset_function(adapter);
Sathya Perlacf588472010-02-14 21:22:01 +00004512 if (status)
4513 goto err;
4514
Kalesh AP2d177be2013-04-28 22:22:29 +00004515 /* tell fw we're ready to fire cmds */
4516 status = be_cmd_fw_init(adapter);
Padmanabh Ratnakarbf99e502012-07-12 03:56:58 +00004517 if (status)
4518 goto err;
4519
Sathya Perlacf588472010-02-14 21:22:01 +00004520 status = be_setup(adapter);
4521 if (status)
4522 goto err;
4523
4524 if (netif_running(netdev)) {
4525 status = be_open(netdev);
4526 if (status)
4527 goto err;
4528 }
Padmanabh Ratnakarf67ef7b2012-07-12 03:57:09 +00004529
4530 schedule_delayed_work(&adapter->func_recovery_work,
4531 msecs_to_jiffies(1000));
Sathya Perlacf588472010-02-14 21:22:01 +00004532 netif_device_attach(netdev);
4533 return;
4534err:
4535 dev_err(&adapter->pdev->dev, "EEH resume failed\n");
Sathya Perlacf588472010-02-14 21:22:01 +00004536}
4537
Stephen Hemminger3646f0e2012-09-07 09:33:15 -07004538static const struct pci_error_handlers be_eeh_handlers = {
Sathya Perlacf588472010-02-14 21:22:01 +00004539 .error_detected = be_eeh_err_detected,
4540 .slot_reset = be_eeh_reset,
4541 .resume = be_eeh_resume,
4542};
4543
Sathya Perla6b7c5b92009-03-11 23:32:03 -07004544static struct pci_driver be_driver = {
4545 .name = DRV_NAME,
4546 .id_table = be_dev_ids,
4547 .probe = be_probe,
4548 .remove = be_remove,
4549 .suspend = be_suspend,
Sathya Perlacf588472010-02-14 21:22:01 +00004550 .resume = be_resume,
Sathya Perla82456b02010-02-17 01:35:37 +00004551 .shutdown = be_shutdown,
Sathya Perlacf588472010-02-14 21:22:01 +00004552 .err_handler = &be_eeh_handlers
Sathya Perla6b7c5b92009-03-11 23:32:03 -07004553};
4554
4555static int __init be_init_module(void)
4556{
Joe Perches8e95a202009-12-03 07:58:21 +00004557 if (rx_frag_size != 8192 && rx_frag_size != 4096 &&
4558 rx_frag_size != 2048) {
Sathya Perla6b7c5b92009-03-11 23:32:03 -07004559 printk(KERN_WARNING DRV_NAME
4560 " : Module param rx_frag_size must be 2048/4096/8192."
4561 " Using 2048\n");
4562 rx_frag_size = 2048;
4563 }
Sathya Perla6b7c5b92009-03-11 23:32:03 -07004564
Sathya Perla6b7c5b92009-03-11 23:32:03 -07004565 return pci_register_driver(&be_driver);
4566}
4567module_init(be_init_module);
4568
/* Module unload: unregister the PCI driver; per-device teardown runs
 * through the driver's .remove callback.
 */
static void __exit be_exit_module(void)
{
	pci_unregister_driver(&be_driver);
}
4573module_exit(be_exit_module);