blob: a444110b060fd74361759be2f9f3a25a4f0fe095 [file] [log] [blame]
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001/*
Vasundhara Volamc7bb15a2013-03-06 20:05:05 +00002 * Copyright (C) 2005 - 2013 Emulex
Sathya Perla6b7c5b92009-03-11 23:32:03 -07003 * All rights reserved.
4 *
5 * This program is free software; you can redistribute it and/or
6 * modify it under the terms of the GNU General Public License version 2
7 * as published by the Free Software Foundation. The full GNU General
8 * Public License is included in this distribution in the file called COPYING.
9 *
10 * Contact Information:
Ajit Khaparded2145cd2011-03-16 08:20:46 +000011 * linux-drivers@emulex.com
Sathya Perla6b7c5b92009-03-11 23:32:03 -070012 *
Ajit Khaparded2145cd2011-03-16 08:20:46 +000013 * Emulex
14 * 3333 Susan Street
15 * Costa Mesa, CA 92626
Sathya Perla6b7c5b92009-03-11 23:32:03 -070016 */
17
Paul Gortmaker70c71602011-05-22 16:47:17 -040018#include <linux/prefetch.h>
Paul Gortmaker9d9779e2011-07-03 15:21:01 -040019#include <linux/module.h>
Sathya Perla6b7c5b92009-03-11 23:32:03 -070020#include "be.h"
Sathya Perla8788fdc2009-07-27 22:52:03 +000021#include "be_cmds.h"
Stephen Hemminger65f71b82009-03-27 00:25:24 -070022#include <asm/div64.h>
Sathya Perlad6b6d982012-09-05 01:56:48 +000023#include <linux/aer.h>
Sathya Perla6b7c5b92009-03-11 23:32:03 -070024
25MODULE_VERSION(DRV_VER);
26MODULE_DEVICE_TABLE(pci, be_dev_ids);
27MODULE_DESCRIPTION(DRV_DESC " " DRV_VER);
Sarveshwar Bandi00d3d512013-01-28 04:17:01 +000028MODULE_AUTHOR("Emulex Corporation");
Sathya Perla6b7c5b92009-03-11 23:32:03 -070029MODULE_LICENSE("GPL");
30
Sarveshwar Bandiba343c72010-03-31 02:56:12 +000031static unsigned int num_vfs;
Sarveshwar Bandiba343c72010-03-31 02:56:12 +000032module_param(num_vfs, uint, S_IRUGO);
Sarveshwar Bandiba343c72010-03-31 02:56:12 +000033MODULE_PARM_DESC(num_vfs, "Number of PCI VFs to initialize");
Sathya Perla6b7c5b92009-03-11 23:32:03 -070034
Sathya Perla11ac75e2011-12-13 00:58:50 +000035static ushort rx_frag_size = 2048;
36module_param(rx_frag_size, ushort, S_IRUGO);
37MODULE_PARM_DESC(rx_frag_size, "Size of a fragment that holds rcvd data.");
38
Sathya Perla6b7c5b92009-03-11 23:32:03 -070039static DEFINE_PCI_DEVICE_TABLE(be_dev_ids) = {
Ajit Khapardec4ca2372009-05-18 15:38:55 -070040 { PCI_DEVICE(BE_VENDOR_ID, BE_DEVICE_ID1) },
Ajit Khaparde59fd5d82009-10-29 01:11:06 -070041 { PCI_DEVICE(BE_VENDOR_ID, BE_DEVICE_ID2) },
Ajit Khapardec4ca2372009-05-18 15:38:55 -070042 { PCI_DEVICE(BE_VENDOR_ID, OC_DEVICE_ID1) },
43 { PCI_DEVICE(BE_VENDOR_ID, OC_DEVICE_ID2) },
Sathya Perlafe6d2a32010-11-21 23:25:50 +000044 { PCI_DEVICE(EMULEX_VENDOR_ID, OC_DEVICE_ID3)},
Mammatha Edhala12f4d0a2011-05-18 03:26:22 +000045 { PCI_DEVICE(EMULEX_VENDOR_ID, OC_DEVICE_ID4)},
Ajit Khapardeecedb6a2011-12-15 06:31:38 +000046 { PCI_DEVICE(EMULEX_VENDOR_ID, OC_DEVICE_ID5)},
Padmanabh Ratnakar76b73532012-10-20 06:04:40 +000047 { PCI_DEVICE(EMULEX_VENDOR_ID, OC_DEVICE_ID6)},
Sathya Perla6b7c5b92009-03-11 23:32:03 -070048 { 0 }
49};
50MODULE_DEVICE_TABLE(pci, be_dev_ids);
Ajit Khaparde7c185272010-07-29 06:16:33 +000051/* UE Status Low CSR */
Joe Perches42c8b112011-07-09 02:56:56 -070052static const char * const ue_status_low_desc[] = {
Ajit Khaparde7c185272010-07-29 06:16:33 +000053 "CEV",
54 "CTX",
55 "DBUF",
56 "ERX",
57 "Host",
58 "MPU",
59 "NDMA",
60 "PTC ",
61 "RDMA ",
62 "RXF ",
63 "RXIPS ",
64 "RXULP0 ",
65 "RXULP1 ",
66 "RXULP2 ",
67 "TIM ",
68 "TPOST ",
69 "TPRE ",
70 "TXIPS ",
71 "TXULP0 ",
72 "TXULP1 ",
73 "UC ",
74 "WDMA ",
75 "TXULP2 ",
76 "HOST1 ",
77 "P0_OB_LINK ",
78 "P1_OB_LINK ",
79 "HOST_GPIO ",
80 "MBOX ",
81 "AXGMAC0",
82 "AXGMAC1",
83 "JTAG",
84 "MPU_INTPEND"
85};
86/* UE Status High CSR */
Joe Perches42c8b112011-07-09 02:56:56 -070087static const char * const ue_status_hi_desc[] = {
Ajit Khaparde7c185272010-07-29 06:16:33 +000088 "LPCMEMHOST",
89 "MGMT_MAC",
90 "PCS0ONLINE",
91 "MPU_IRAM",
92 "PCS1ONLINE",
93 "PCTL0",
94 "PCTL1",
95 "PMEM",
96 "RR",
97 "TXPB",
98 "RXPP",
99 "XAUI",
100 "TXP",
101 "ARM",
102 "IPC",
103 "HOST2",
104 "HOST3",
105 "HOST4",
106 "HOST5",
107 "HOST6",
108 "HOST7",
109 "HOST8",
110 "HOST9",
Joe Perches42c8b112011-07-09 02:56:56 -0700111 "NETC",
Ajit Khaparde7c185272010-07-29 06:16:33 +0000112 "Unknown",
113 "Unknown",
114 "Unknown",
115 "Unknown",
116 "Unknown",
117 "Unknown",
118 "Unknown",
119 "Unknown"
120};
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700121
Sathya Perla752961a2011-10-24 02:45:03 +0000122/* Is BE in a multi-channel mode */
123static inline bool be_is_mc(struct be_adapter *adapter) {
124 return (adapter->function_mode & FLEX10_MODE ||
125 adapter->function_mode & VNIC_MODE ||
126 adapter->function_mode & UMC_ENABLED);
127}
128
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700129static void be_queue_free(struct be_adapter *adapter, struct be_queue_info *q)
130{
131 struct be_dma_mem *mem = &q->dma_mem;
Sathya Perla1cfafab2012-02-23 18:50:15 +0000132 if (mem->va) {
Ivan Vecera2b7bceb2011-02-02 08:05:12 +0000133 dma_free_coherent(&adapter->pdev->dev, mem->size, mem->va,
134 mem->dma);
Sathya Perla1cfafab2012-02-23 18:50:15 +0000135 mem->va = NULL;
136 }
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700137}
138
139static int be_queue_alloc(struct be_adapter *adapter, struct be_queue_info *q,
140 u16 len, u16 entry_size)
141{
142 struct be_dma_mem *mem = &q->dma_mem;
143
144 memset(q, 0, sizeof(*q));
145 q->len = len;
146 q->entry_size = entry_size;
147 mem->size = len * entry_size;
Ivan Vecera2b7bceb2011-02-02 08:05:12 +0000148 mem->va = dma_alloc_coherent(&adapter->pdev->dev, mem->size, &mem->dma,
Joe Perches1f9061d22013-03-15 07:23:58 +0000149 GFP_KERNEL | __GFP_ZERO);
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700150 if (!mem->va)
Sathya Perla10ef9ab2012-02-09 18:05:27 +0000151 return -ENOMEM;
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700152 return 0;
153}
154
Somnath Kotur68c45a22013-03-14 02:42:07 +0000155static void be_reg_intr_set(struct be_adapter *adapter, bool enable)
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700156{
Sathya Perladb3ea782011-08-22 19:41:52 +0000157 u32 reg, enabled;
Sathya Perla5f0b8492009-07-27 22:52:56 +0000158
Sathya Perladb3ea782011-08-22 19:41:52 +0000159 pci_read_config_dword(adapter->pdev, PCICFG_MEMBAR_CTRL_INT_CTRL_OFFSET,
160 &reg);
161 enabled = reg & MEMBAR_CTRL_INT_CTRL_HOSTINTR_MASK;
162
Sathya Perla5f0b8492009-07-27 22:52:56 +0000163 if (!enabled && enable)
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700164 reg |= MEMBAR_CTRL_INT_CTRL_HOSTINTR_MASK;
Sathya Perla5f0b8492009-07-27 22:52:56 +0000165 else if (enabled && !enable)
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700166 reg &= ~MEMBAR_CTRL_INT_CTRL_HOSTINTR_MASK;
Sathya Perla5f0b8492009-07-27 22:52:56 +0000167 else
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700168 return;
Sathya Perla5f0b8492009-07-27 22:52:56 +0000169
Sathya Perladb3ea782011-08-22 19:41:52 +0000170 pci_write_config_dword(adapter->pdev,
171 PCICFG_MEMBAR_CTRL_INT_CTRL_OFFSET, reg);
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700172}
173
Somnath Kotur68c45a22013-03-14 02:42:07 +0000174static void be_intr_set(struct be_adapter *adapter, bool enable)
175{
176 int status = 0;
177
178 /* On lancer interrupts can't be controlled via this register */
179 if (lancer_chip(adapter))
180 return;
181
182 if (adapter->eeh_error)
183 return;
184
185 status = be_cmd_intr_set(adapter, enable);
186 if (status)
187 be_reg_intr_set(adapter, enable);
188}
189
Sathya Perla8788fdc2009-07-27 22:52:03 +0000190static void be_rxq_notify(struct be_adapter *adapter, u16 qid, u16 posted)
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700191{
192 u32 val = 0;
193 val |= qid & DB_RQ_RING_ID_MASK;
194 val |= posted << DB_RQ_NUM_POSTED_SHIFT;
Sathya Perlaf3eb62d2010-06-29 00:11:17 +0000195
196 wmb();
Sathya Perla8788fdc2009-07-27 22:52:03 +0000197 iowrite32(val, adapter->db + DB_RQ_OFFSET);
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700198}
199
Vasundhara Volam94d73aa2013-04-21 23:28:14 +0000200static void be_txq_notify(struct be_adapter *adapter, struct be_tx_obj *txo,
201 u16 posted)
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700202{
203 u32 val = 0;
Vasundhara Volam94d73aa2013-04-21 23:28:14 +0000204 val |= txo->q.id & DB_TXULP_RING_ID_MASK;
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700205 val |= (posted & DB_TXULP_NUM_POSTED_MASK) << DB_TXULP_NUM_POSTED_SHIFT;
Sathya Perlaf3eb62d2010-06-29 00:11:17 +0000206
207 wmb();
Vasundhara Volam94d73aa2013-04-21 23:28:14 +0000208 iowrite32(val, adapter->db + txo->db_offset);
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700209}
210
Sathya Perla8788fdc2009-07-27 22:52:03 +0000211static void be_eq_notify(struct be_adapter *adapter, u16 qid,
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700212 bool arm, bool clear_int, u16 num_popped)
213{
214 u32 val = 0;
215 val |= qid & DB_EQ_RING_ID_MASK;
Sathya Perlafe6d2a32010-11-21 23:25:50 +0000216 val |= ((qid & DB_EQ_RING_ID_EXT_MASK) <<
217 DB_EQ_RING_ID_EXT_MASK_SHIFT);
Sathya Perlacf588472010-02-14 21:22:01 +0000218
Padmanabh Ratnakarf67ef7b2012-07-12 03:57:09 +0000219 if (adapter->eeh_error)
Sathya Perlacf588472010-02-14 21:22:01 +0000220 return;
221
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700222 if (arm)
223 val |= 1 << DB_EQ_REARM_SHIFT;
224 if (clear_int)
225 val |= 1 << DB_EQ_CLR_SHIFT;
226 val |= 1 << DB_EQ_EVNT_SHIFT;
227 val |= num_popped << DB_EQ_NUM_POPPED_SHIFT;
Sathya Perla8788fdc2009-07-27 22:52:03 +0000228 iowrite32(val, adapter->db + DB_EQ_OFFSET);
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700229}
230
Sathya Perla8788fdc2009-07-27 22:52:03 +0000231void be_cq_notify(struct be_adapter *adapter, u16 qid, bool arm, u16 num_popped)
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700232{
233 u32 val = 0;
234 val |= qid & DB_CQ_RING_ID_MASK;
Sathya Perlafe6d2a32010-11-21 23:25:50 +0000235 val |= ((qid & DB_CQ_RING_ID_EXT_MASK) <<
236 DB_CQ_RING_ID_EXT_MASK_SHIFT);
Sathya Perlacf588472010-02-14 21:22:01 +0000237
Padmanabh Ratnakarf67ef7b2012-07-12 03:57:09 +0000238 if (adapter->eeh_error)
Sathya Perlacf588472010-02-14 21:22:01 +0000239 return;
240
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700241 if (arm)
242 val |= 1 << DB_CQ_REARM_SHIFT;
243 val |= num_popped << DB_CQ_NUM_POPPED_SHIFT;
Sathya Perla8788fdc2009-07-27 22:52:03 +0000244 iowrite32(val, adapter->db + DB_CQ_OFFSET);
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700245}
246
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700247static int be_mac_addr_set(struct net_device *netdev, void *p)
248{
249 struct be_adapter *adapter = netdev_priv(netdev);
250 struct sockaddr *addr = p;
251 int status = 0;
Somnath Koture3a7ae22011-10-27 07:14:05 +0000252 u8 current_mac[ETH_ALEN];
Ajit Khapardefbc13f02012-03-18 06:23:21 +0000253 u32 pmac_id = adapter->pmac_id[0];
Padmanabh Ratnakar704e4c82012-10-20 06:02:13 +0000254 bool active_mac = true;
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700255
Ajit Khapardeca9e4982009-11-29 17:56:26 +0000256 if (!is_valid_ether_addr(addr->sa_data))
257 return -EADDRNOTAVAIL;
258
Padmanabh Ratnakar704e4c82012-10-20 06:02:13 +0000259 /* For BE VF, MAC address is already activated by PF.
260 * Hence only operation left is updating netdev->devaddr.
261 * Update it if user is passing the same MAC which was used
262 * during configuring VF MAC from PF(Hypervisor).
263 */
264 if (!lancer_chip(adapter) && !be_physfn(adapter)) {
265 status = be_cmd_mac_addr_query(adapter, current_mac,
266 false, adapter->if_handle, 0);
267 if (!status && !memcmp(current_mac, addr->sa_data, ETH_ALEN))
268 goto done;
269 else
270 goto err;
271 }
272
273 if (!memcmp(addr->sa_data, netdev->dev_addr, ETH_ALEN))
274 goto done;
275
276 /* For Lancer check if any MAC is active.
277 * If active, get its mac id.
278 */
279 if (lancer_chip(adapter) && !be_physfn(adapter))
280 be_cmd_get_mac_from_list(adapter, current_mac, &active_mac,
281 &pmac_id, 0);
282
283 status = be_cmd_pmac_add(adapter, (u8 *)addr->sa_data,
284 adapter->if_handle,
285 &adapter->pmac_id[0], 0);
286
Sathya Perlaa65027e2009-08-17 00:58:04 +0000287 if (status)
Somnath Koture3a7ae22011-10-27 07:14:05 +0000288 goto err;
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700289
Padmanabh Ratnakar704e4c82012-10-20 06:02:13 +0000290 if (active_mac)
291 be_cmd_pmac_del(adapter, adapter->if_handle,
292 pmac_id, 0);
293done:
Somnath Koture3a7ae22011-10-27 07:14:05 +0000294 memcpy(netdev->dev_addr, addr->sa_data, netdev->addr_len);
295 return 0;
296err:
297 dev_err(&adapter->pdev->dev, "MAC %pM set Failed\n", addr->sa_data);
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700298 return status;
299}
300
Sathya Perlaca34fe32012-11-06 17:48:56 +0000301/* BE2 supports only v0 cmd */
302static void *hw_stats_from_cmd(struct be_adapter *adapter)
303{
304 if (BE2_chip(adapter)) {
305 struct be_cmd_resp_get_stats_v0 *cmd = adapter->stats_cmd.va;
306
307 return &cmd->hw_stats;
308 } else {
309 struct be_cmd_resp_get_stats_v1 *cmd = adapter->stats_cmd.va;
310
311 return &cmd->hw_stats;
312 }
313}
314
315/* BE2 supports only v0 cmd */
316static void *be_erx_stats_from_cmd(struct be_adapter *adapter)
317{
318 if (BE2_chip(adapter)) {
319 struct be_hw_stats_v0 *hw_stats = hw_stats_from_cmd(adapter);
320
321 return &hw_stats->erx;
322 } else {
323 struct be_hw_stats_v1 *hw_stats = hw_stats_from_cmd(adapter);
324
325 return &hw_stats->erx;
326 }
327}
328
/* Copy the v0-layout (BE2) firmware stats into the driver's normalized
 * drv_stats structure, byte-swapping the whole buffer first.
 */
static void populate_be_v0_stats(struct be_adapter *adapter)
{
	struct be_hw_stats_v0 *hw_stats = hw_stats_from_cmd(adapter);
	struct be_pmem_stats *pmem_sts = &hw_stats->pmem;
	struct be_rxf_stats_v0 *rxf_stats = &hw_stats->rxf;
	/* per-port counters for the port this function is bound to */
	struct be_port_rxf_stats_v0 *port_stats =
					&rxf_stats->port[adapter->port_num];
	struct be_drv_stats *drvs = &adapter->drv_stats;

	/* firmware returns little-endian dwords; convert in place */
	be_dws_le_to_cpu(hw_stats, sizeof(*hw_stats));
	drvs->rx_pause_frames = port_stats->rx_pause_frames;
	drvs->rx_crc_errors = port_stats->rx_crc_errors;
	drvs->rx_control_frames = port_stats->rx_control_frames;
	drvs->rx_in_range_errors = port_stats->rx_in_range_errors;
	drvs->rx_frame_too_long = port_stats->rx_frame_too_long;
	drvs->rx_dropped_runt = port_stats->rx_dropped_runt;
	drvs->rx_ip_checksum_errs = port_stats->rx_ip_checksum_errs;
	drvs->rx_tcp_checksum_errs = port_stats->rx_tcp_checksum_errs;
	drvs->rx_udp_checksum_errs = port_stats->rx_udp_checksum_errs;
	drvs->rxpp_fifo_overflow_drop = port_stats->rx_fifo_overflow;
	drvs->rx_dropped_tcp_length = port_stats->rx_dropped_tcp_length;
	drvs->rx_dropped_too_small = port_stats->rx_dropped_too_small;
	drvs->rx_dropped_too_short = port_stats->rx_dropped_too_short;
	drvs->rx_out_range_errors = port_stats->rx_out_range_errors;
	drvs->rx_input_fifo_overflow_drop = port_stats->rx_input_fifo_overflow;
	drvs->rx_dropped_header_too_small =
		port_stats->rx_dropped_header_too_small;
	/* v0 reports address- and vlan-filtered drops separately; fold them */
	drvs->rx_address_filtered =
					port_stats->rx_address_filtered +
					port_stats->rx_vlan_filtered;
	drvs->rx_alignment_symbol_errors =
		port_stats->rx_alignment_symbol_errors;

	drvs->tx_pauseframes = port_stats->tx_pauseframes;
	drvs->tx_controlframes = port_stats->tx_controlframes;

	/* jabber events are reported per physical port in the rxf block */
	if (adapter->port_num)
		drvs->jabber_events = rxf_stats->port1_jabber_events;
	else
		drvs->jabber_events = rxf_stats->port0_jabber_events;
	drvs->rx_drops_no_pbuf = rxf_stats->rx_drops_no_pbuf;
	drvs->rx_drops_no_erx_descr = rxf_stats->rx_drops_no_erx_descr;
	drvs->forwarded_packets = rxf_stats->forwarded_packets;
	drvs->rx_drops_mtu = rxf_stats->rx_drops_mtu;
	drvs->rx_drops_no_tpre_descr = rxf_stats->rx_drops_no_tpre_descr;
	drvs->rx_drops_too_many_frags = rxf_stats->rx_drops_too_many_frags;
	adapter->drv_stats.eth_red_drops = pmem_sts->eth_red_drops;
}
377
Sathya Perlaca34fe32012-11-06 17:48:56 +0000378static void populate_be_v1_stats(struct be_adapter *adapter)
Ajit Khaparde89a88ab2011-05-16 07:36:18 +0000379{
Sathya Perlaac124ff2011-07-25 19:10:14 +0000380 struct be_hw_stats_v1 *hw_stats = hw_stats_from_cmd(adapter);
381 struct be_pmem_stats *pmem_sts = &hw_stats->pmem;
382 struct be_rxf_stats_v1 *rxf_stats = &hw_stats->rxf;
Ajit Khaparde89a88ab2011-05-16 07:36:18 +0000383 struct be_port_rxf_stats_v1 *port_stats =
Sathya Perlaac124ff2011-07-25 19:10:14 +0000384 &rxf_stats->port[adapter->port_num];
385 struct be_drv_stats *drvs = &adapter->drv_stats;
Ajit Khaparde89a88ab2011-05-16 07:36:18 +0000386
Sathya Perlaac124ff2011-07-25 19:10:14 +0000387 be_dws_le_to_cpu(hw_stats, sizeof(*hw_stats));
Ajit Khaparde02fe7022011-12-09 13:53:09 +0000388 drvs->pmem_fifo_overflow_drop = port_stats->pmem_fifo_overflow_drop;
389 drvs->rx_priority_pause_frames = port_stats->rx_priority_pause_frames;
Ajit Khaparde89a88ab2011-05-16 07:36:18 +0000390 drvs->rx_pause_frames = port_stats->rx_pause_frames;
391 drvs->rx_crc_errors = port_stats->rx_crc_errors;
392 drvs->rx_control_frames = port_stats->rx_control_frames;
393 drvs->rx_in_range_errors = port_stats->rx_in_range_errors;
394 drvs->rx_frame_too_long = port_stats->rx_frame_too_long;
395 drvs->rx_dropped_runt = port_stats->rx_dropped_runt;
396 drvs->rx_ip_checksum_errs = port_stats->rx_ip_checksum_errs;
397 drvs->rx_tcp_checksum_errs = port_stats->rx_tcp_checksum_errs;
398 drvs->rx_udp_checksum_errs = port_stats->rx_udp_checksum_errs;
399 drvs->rx_dropped_tcp_length = port_stats->rx_dropped_tcp_length;
400 drvs->rx_dropped_too_small = port_stats->rx_dropped_too_small;
401 drvs->rx_dropped_too_short = port_stats->rx_dropped_too_short;
402 drvs->rx_out_range_errors = port_stats->rx_out_range_errors;
403 drvs->rx_dropped_header_too_small =
404 port_stats->rx_dropped_header_too_small;
405 drvs->rx_input_fifo_overflow_drop =
406 port_stats->rx_input_fifo_overflow_drop;
Suresh Reddy18fb06a2013-04-25 23:03:21 +0000407 drvs->rx_address_filtered = port_stats->rx_address_filtered;
Ajit Khaparde89a88ab2011-05-16 07:36:18 +0000408 drvs->rx_alignment_symbol_errors =
409 port_stats->rx_alignment_symbol_errors;
Sathya Perlaac124ff2011-07-25 19:10:14 +0000410 drvs->rxpp_fifo_overflow_drop = port_stats->rxpp_fifo_overflow_drop;
Ajit Khaparde89a88ab2011-05-16 07:36:18 +0000411 drvs->tx_pauseframes = port_stats->tx_pauseframes;
412 drvs->tx_controlframes = port_stats->tx_controlframes;
Ajit Khapardeb5adffc42013-05-01 09:38:00 +0000413 drvs->tx_priority_pauseframes = port_stats->tx_priority_pauseframes;
Ajit Khaparde89a88ab2011-05-16 07:36:18 +0000414 drvs->jabber_events = port_stats->jabber_events;
415 drvs->rx_drops_no_pbuf = rxf_stats->rx_drops_no_pbuf;
Ajit Khaparde89a88ab2011-05-16 07:36:18 +0000416 drvs->rx_drops_no_erx_descr = rxf_stats->rx_drops_no_erx_descr;
Ajit Khaparde89a88ab2011-05-16 07:36:18 +0000417 drvs->forwarded_packets = rxf_stats->forwarded_packets;
418 drvs->rx_drops_mtu = rxf_stats->rx_drops_mtu;
Sathya Perlaac124ff2011-07-25 19:10:14 +0000419 drvs->rx_drops_no_tpre_descr = rxf_stats->rx_drops_no_tpre_descr;
420 drvs->rx_drops_too_many_frags = rxf_stats->rx_drops_too_many_frags;
Ajit Khaparde89a88ab2011-05-16 07:36:18 +0000421 adapter->drv_stats.eth_red_drops = pmem_sts->eth_red_drops;
422}
423
Selvin Xavier005d5692011-05-16 07:36:35 +0000424static void populate_lancer_stats(struct be_adapter *adapter)
425{
Ajit Khaparde89a88ab2011-05-16 07:36:18 +0000426
Selvin Xavier005d5692011-05-16 07:36:35 +0000427 struct be_drv_stats *drvs = &adapter->drv_stats;
Sathya Perlaac124ff2011-07-25 19:10:14 +0000428 struct lancer_pport_stats *pport_stats =
429 pport_stats_from_cmd(adapter);
430
431 be_dws_le_to_cpu(pport_stats, sizeof(*pport_stats));
432 drvs->rx_pause_frames = pport_stats->rx_pause_frames_lo;
433 drvs->rx_crc_errors = pport_stats->rx_crc_errors_lo;
434 drvs->rx_control_frames = pport_stats->rx_control_frames_lo;
Selvin Xavier005d5692011-05-16 07:36:35 +0000435 drvs->rx_in_range_errors = pport_stats->rx_in_range_errors;
Sathya Perlaac124ff2011-07-25 19:10:14 +0000436 drvs->rx_frame_too_long = pport_stats->rx_frames_too_long_lo;
Selvin Xavier005d5692011-05-16 07:36:35 +0000437 drvs->rx_dropped_runt = pport_stats->rx_dropped_runt;
438 drvs->rx_ip_checksum_errs = pport_stats->rx_ip_checksum_errors;
439 drvs->rx_tcp_checksum_errs = pport_stats->rx_tcp_checksum_errors;
440 drvs->rx_udp_checksum_errs = pport_stats->rx_udp_checksum_errors;
441 drvs->rx_dropped_tcp_length =
442 pport_stats->rx_dropped_invalid_tcp_length;
443 drvs->rx_dropped_too_small = pport_stats->rx_dropped_too_small;
444 drvs->rx_dropped_too_short = pport_stats->rx_dropped_too_short;
445 drvs->rx_out_range_errors = pport_stats->rx_out_of_range_errors;
446 drvs->rx_dropped_header_too_small =
447 pport_stats->rx_dropped_header_too_small;
448 drvs->rx_input_fifo_overflow_drop = pport_stats->rx_fifo_overflow;
Suresh Reddy18fb06a2013-04-25 23:03:21 +0000449 drvs->rx_address_filtered =
450 pport_stats->rx_address_filtered +
451 pport_stats->rx_vlan_filtered;
Sathya Perlaac124ff2011-07-25 19:10:14 +0000452 drvs->rx_alignment_symbol_errors = pport_stats->rx_symbol_errors_lo;
Selvin Xavier005d5692011-05-16 07:36:35 +0000453 drvs->rxpp_fifo_overflow_drop = pport_stats->rx_fifo_overflow;
Sathya Perlaac124ff2011-07-25 19:10:14 +0000454 drvs->tx_pauseframes = pport_stats->tx_pause_frames_lo;
455 drvs->tx_controlframes = pport_stats->tx_control_frames_lo;
Selvin Xavier005d5692011-05-16 07:36:35 +0000456 drvs->jabber_events = pport_stats->rx_jabbers;
Sathya Perlaac124ff2011-07-25 19:10:14 +0000457 drvs->forwarded_packets = pport_stats->num_forwards_lo;
458 drvs->rx_drops_mtu = pport_stats->rx_drops_mtu_lo;
Selvin Xavier005d5692011-05-16 07:36:35 +0000459 drvs->rx_drops_too_many_frags =
Sathya Perlaac124ff2011-07-25 19:10:14 +0000460 pport_stats->rx_drops_too_many_frags_lo;
Selvin Xavier005d5692011-05-16 07:36:35 +0000461}
Ajit Khaparde89a88ab2011-05-16 07:36:18 +0000462
Sathya Perla09c1c682011-08-22 19:41:53 +0000463static void accumulate_16bit_val(u32 *acc, u16 val)
464{
465#define lo(x) (x & 0xFFFF)
466#define hi(x) (x & 0xFFFF0000)
467 bool wrapped = val < lo(*acc);
468 u32 newacc = hi(*acc) + val;
469
470 if (wrapped)
471 newacc += 65536;
472 ACCESS_ONCE(*acc) = newacc;
473}
474
Ajit Khapardea6c578e2013-05-01 09:37:27 +0000475void populate_erx_stats(struct be_adapter *adapter,
476 struct be_rx_obj *rxo,
477 u32 erx_stat)
478{
479 if (!BEx_chip(adapter))
480 rx_stats(rxo)->rx_drops_no_frags = erx_stat;
481 else
482 /* below erx HW counter can actually wrap around after
483 * 65535. Driver accumulates a 32-bit value
484 */
485 accumulate_16bit_val(&rx_stats(rxo)->rx_drops_no_frags,
486 (u16)erx_stat);
487}
488
Ajit Khaparde89a88ab2011-05-16 07:36:18 +0000489void be_parse_stats(struct be_adapter *adapter)
490{
Sathya Perlaac124ff2011-07-25 19:10:14 +0000491 struct be_erx_stats_v1 *erx = be_erx_stats_from_cmd(adapter);
492 struct be_rx_obj *rxo;
493 int i;
Ajit Khapardea6c578e2013-05-01 09:37:27 +0000494 u32 erx_stat;
Sathya Perlaac124ff2011-07-25 19:10:14 +0000495
Sathya Perlaca34fe32012-11-06 17:48:56 +0000496 if (lancer_chip(adapter)) {
497 populate_lancer_stats(adapter);
Selvin Xavier005d5692011-05-16 07:36:35 +0000498 } else {
Sathya Perlaca34fe32012-11-06 17:48:56 +0000499 if (BE2_chip(adapter))
500 populate_be_v0_stats(adapter);
501 else
502 /* for BE3 and Skyhawk */
503 populate_be_v1_stats(adapter);
Sathya Perlaac124ff2011-07-25 19:10:14 +0000504
Sathya Perlaca34fe32012-11-06 17:48:56 +0000505 /* as erx_v1 is longer than v0, ok to use v1 for v0 access */
506 for_all_rx_queues(adapter, rxo, i) {
Ajit Khapardea6c578e2013-05-01 09:37:27 +0000507 erx_stat = erx->rx_drops_no_fragments[rxo->q.id];
508 populate_erx_stats(adapter, rxo, erx_stat);
Sathya Perlaca34fe32012-11-06 17:48:56 +0000509 }
Sathya Perla09c1c682011-08-22 19:41:53 +0000510 }
Ajit Khaparde89a88ab2011-05-16 07:36:18 +0000511}
512
Sathya Perlaab1594e2011-07-25 19:10:15 +0000513static struct rtnl_link_stats64 *be_get_stats64(struct net_device *netdev,
514 struct rtnl_link_stats64 *stats)
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700515{
Sathya Perlaab1594e2011-07-25 19:10:15 +0000516 struct be_adapter *adapter = netdev_priv(netdev);
Ajit Khaparde89a88ab2011-05-16 07:36:18 +0000517 struct be_drv_stats *drvs = &adapter->drv_stats;
Sathya Perla3abcded2010-10-03 22:12:27 -0700518 struct be_rx_obj *rxo;
Sathya Perla3c8def92011-06-12 20:01:58 +0000519 struct be_tx_obj *txo;
Sathya Perlaab1594e2011-07-25 19:10:15 +0000520 u64 pkts, bytes;
521 unsigned int start;
Sathya Perla3abcded2010-10-03 22:12:27 -0700522 int i;
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700523
Sathya Perla3abcded2010-10-03 22:12:27 -0700524 for_all_rx_queues(adapter, rxo, i) {
Sathya Perlaab1594e2011-07-25 19:10:15 +0000525 const struct be_rx_stats *rx_stats = rx_stats(rxo);
526 do {
527 start = u64_stats_fetch_begin_bh(&rx_stats->sync);
528 pkts = rx_stats(rxo)->rx_pkts;
529 bytes = rx_stats(rxo)->rx_bytes;
530 } while (u64_stats_fetch_retry_bh(&rx_stats->sync, start));
531 stats->rx_packets += pkts;
532 stats->rx_bytes += bytes;
533 stats->multicast += rx_stats(rxo)->rx_mcast_pkts;
534 stats->rx_dropped += rx_stats(rxo)->rx_drops_no_skbs +
535 rx_stats(rxo)->rx_drops_no_frags;
Sathya Perla3abcded2010-10-03 22:12:27 -0700536 }
537
Sathya Perla3c8def92011-06-12 20:01:58 +0000538 for_all_tx_queues(adapter, txo, i) {
Sathya Perlaab1594e2011-07-25 19:10:15 +0000539 const struct be_tx_stats *tx_stats = tx_stats(txo);
540 do {
541 start = u64_stats_fetch_begin_bh(&tx_stats->sync);
542 pkts = tx_stats(txo)->tx_pkts;
543 bytes = tx_stats(txo)->tx_bytes;
544 } while (u64_stats_fetch_retry_bh(&tx_stats->sync, start));
545 stats->tx_packets += pkts;
546 stats->tx_bytes += bytes;
Sathya Perla3c8def92011-06-12 20:01:58 +0000547 }
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700548
549 /* bad pkts received */
Sathya Perlaab1594e2011-07-25 19:10:15 +0000550 stats->rx_errors = drvs->rx_crc_errors +
Ajit Khaparde89a88ab2011-05-16 07:36:18 +0000551 drvs->rx_alignment_symbol_errors +
552 drvs->rx_in_range_errors +
553 drvs->rx_out_range_errors +
554 drvs->rx_frame_too_long +
555 drvs->rx_dropped_too_small +
556 drvs->rx_dropped_too_short +
557 drvs->rx_dropped_header_too_small +
558 drvs->rx_dropped_tcp_length +
Sathya Perlaab1594e2011-07-25 19:10:15 +0000559 drvs->rx_dropped_runt;
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700560
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700561 /* detailed rx errors */
Sathya Perlaab1594e2011-07-25 19:10:15 +0000562 stats->rx_length_errors = drvs->rx_in_range_errors +
Ajit Khaparde89a88ab2011-05-16 07:36:18 +0000563 drvs->rx_out_range_errors +
564 drvs->rx_frame_too_long;
Sathya Perla68110862009-06-10 02:21:16 +0000565
Sathya Perlaab1594e2011-07-25 19:10:15 +0000566 stats->rx_crc_errors = drvs->rx_crc_errors;
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700567
568 /* frame alignment errors */
Sathya Perlaab1594e2011-07-25 19:10:15 +0000569 stats->rx_frame_errors = drvs->rx_alignment_symbol_errors;
Sathya Perla68110862009-06-10 02:21:16 +0000570
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700571 /* receiver fifo overrun */
572 /* drops_no_pbuf is no per i/f, it's per BE card */
Sathya Perlaab1594e2011-07-25 19:10:15 +0000573 stats->rx_fifo_errors = drvs->rxpp_fifo_overflow_drop +
Ajit Khaparde89a88ab2011-05-16 07:36:18 +0000574 drvs->rx_input_fifo_overflow_drop +
575 drvs->rx_drops_no_pbuf;
Sathya Perlaab1594e2011-07-25 19:10:15 +0000576 return stats;
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700577}
578
Ajit Khapardeb236916a2011-12-30 12:15:40 +0000579void be_link_status_update(struct be_adapter *adapter, u8 link_status)
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700580{
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700581 struct net_device *netdev = adapter->netdev;
582
Ajit Khapardeb236916a2011-12-30 12:15:40 +0000583 if (!(adapter->flags & BE_FLAGS_LINK_STATUS_INIT)) {
Sathya Perlaea172a02011-08-02 19:57:42 +0000584 netif_carrier_off(netdev);
Ajit Khapardeb236916a2011-12-30 12:15:40 +0000585 adapter->flags |= BE_FLAGS_LINK_STATUS_INIT;
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700586 }
Ajit Khapardeb236916a2011-12-30 12:15:40 +0000587
588 if ((link_status & LINK_STATUS_MASK) == LINK_UP)
589 netif_carrier_on(netdev);
590 else
591 netif_carrier_off(netdev);
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700592}
593
Sathya Perla3c8def92011-06-12 20:01:58 +0000594static void be_tx_stats_update(struct be_tx_obj *txo,
Ajit Khaparde91992e42010-02-19 13:57:12 +0000595 u32 wrb_cnt, u32 copied, u32 gso_segs, bool stopped)
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700596{
Sathya Perla3c8def92011-06-12 20:01:58 +0000597 struct be_tx_stats *stats = tx_stats(txo);
598
Sathya Perlaab1594e2011-07-25 19:10:15 +0000599 u64_stats_update_begin(&stats->sync);
Sathya Perlaac124ff2011-07-25 19:10:14 +0000600 stats->tx_reqs++;
601 stats->tx_wrbs += wrb_cnt;
602 stats->tx_bytes += copied;
603 stats->tx_pkts += (gso_segs ? gso_segs : 1);
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700604 if (stopped)
Sathya Perlaac124ff2011-07-25 19:10:14 +0000605 stats->tx_stops++;
Sathya Perlaab1594e2011-07-25 19:10:15 +0000606 u64_stats_update_end(&stats->sync);
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700607}
608
609/* Determine number of WRB entries needed to xmit data in an skb */
Sathya Perlafe6d2a32010-11-21 23:25:50 +0000610static u32 wrb_cnt_for_skb(struct be_adapter *adapter, struct sk_buff *skb,
611 bool *dummy)
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700612{
David S. Millerebc8d2a2009-06-09 01:01:31 -0700613 int cnt = (skb->len > skb->data_len);
614
615 cnt += skb_shinfo(skb)->nr_frags;
616
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700617 /* to account for hdr wrb */
618 cnt++;
Sathya Perlafe6d2a32010-11-21 23:25:50 +0000619 if (lancer_chip(adapter) || !(cnt & 1)) {
620 *dummy = false;
621 } else {
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700622 /* add a dummy to make it an even num */
623 cnt++;
624 *dummy = true;
Sathya Perlafe6d2a32010-11-21 23:25:50 +0000625 }
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700626 BUG_ON(cnt > BE_MAX_TX_FRAG_COUNT);
627 return cnt;
628}
629
630static inline void wrb_fill(struct be_eth_wrb *wrb, u64 addr, int len)
631{
632 wrb->frag_pa_hi = upper_32_bits(addr);
633 wrb->frag_pa_lo = addr & 0xFFFFFFFF;
634 wrb->frag_len = len & ETH_WRB_FRAG_LEN_MASK;
Somnath Kotur89b1f492012-06-24 19:40:55 +0000635 wrb->rsvd0 = 0;
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700636}
637
Ajit Khaparde1ded1322011-12-09 13:53:17 +0000638static inline u16 be_get_tx_vlan_tag(struct be_adapter *adapter,
639 struct sk_buff *skb)
640{
641 u8 vlan_prio;
642 u16 vlan_tag;
643
644 vlan_tag = vlan_tx_tag_get(skb);
645 vlan_prio = (vlan_tag & VLAN_PRIO_MASK) >> VLAN_PRIO_SHIFT;
646 /* If vlan priority provided by OS is NOT in available bmap */
647 if (!(adapter->vlan_prio_bmap & (1 << vlan_prio)))
648 vlan_tag = (vlan_tag & ~VLAN_PRIO_MASK) |
649 adapter->recommended_prio;
650
651 return vlan_tag;
652}
653
Somnath Koturcc4ce022010-10-21 07:11:14 -0700654static void wrb_fill_hdr(struct be_adapter *adapter, struct be_eth_hdr_wrb *hdr,
Ajit Khapardebc0c3402013-04-24 11:52:50 +0000655 struct sk_buff *skb, u32 wrb_cnt, u32 len, bool skip_hw_vlan)
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700656{
Ajit Khaparde1ded1322011-12-09 13:53:17 +0000657 u16 vlan_tag;
Somnath Koturcc4ce022010-10-21 07:11:14 -0700658
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700659 memset(hdr, 0, sizeof(*hdr));
660
661 AMAP_SET_BITS(struct amap_eth_hdr_wrb, crc, hdr, 1);
662
Ajit Khaparde49e4b8472010-06-14 04:56:07 +0000663 if (skb_is_gso(skb)) {
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700664 AMAP_SET_BITS(struct amap_eth_hdr_wrb, lso, hdr, 1);
665 AMAP_SET_BITS(struct amap_eth_hdr_wrb, lso_mss,
666 hdr, skb_shinfo(skb)->gso_size);
Sathya Perlafe6d2a32010-11-21 23:25:50 +0000667 if (skb_is_gso_v6(skb) && !lancer_chip(adapter))
Ajit Khaparde49e4b8472010-06-14 04:56:07 +0000668 AMAP_SET_BITS(struct amap_eth_hdr_wrb, lso6, hdr, 1);
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700669 } else if (skb->ip_summed == CHECKSUM_PARTIAL) {
670 if (is_tcp_pkt(skb))
671 AMAP_SET_BITS(struct amap_eth_hdr_wrb, tcpcs, hdr, 1);
672 else if (is_udp_pkt(skb))
673 AMAP_SET_BITS(struct amap_eth_hdr_wrb, udpcs, hdr, 1);
674 }
675
Ajit Khaparde4c5102f2011-07-12 22:10:01 -0700676 if (vlan_tx_tag_present(skb)) {
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700677 AMAP_SET_BITS(struct amap_eth_hdr_wrb, vlan, hdr, 1);
Ajit Khaparde1ded1322011-12-09 13:53:17 +0000678 vlan_tag = be_get_tx_vlan_tag(adapter, skb);
Somnath Koturcc4ce022010-10-21 07:11:14 -0700679 AMAP_SET_BITS(struct amap_eth_hdr_wrb, vlan_tag, hdr, vlan_tag);
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700680 }
681
Ajit Khapardebc0c3402013-04-24 11:52:50 +0000682 /* To skip HW VLAN tagging: evt = 1, compl = 0 */
683 AMAP_SET_BITS(struct amap_eth_hdr_wrb, complete, hdr, !skip_hw_vlan);
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700684 AMAP_SET_BITS(struct amap_eth_hdr_wrb, event, hdr, 1);
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700685 AMAP_SET_BITS(struct amap_eth_hdr_wrb, num_wrb, hdr, wrb_cnt);
686 AMAP_SET_BITS(struct amap_eth_hdr_wrb, len, hdr, len);
687}
688
Ivan Vecera2b7bceb2011-02-02 08:05:12 +0000689static void unmap_tx_frag(struct device *dev, struct be_eth_wrb *wrb,
Sathya Perla7101e112010-03-22 20:41:12 +0000690 bool unmap_single)
691{
692 dma_addr_t dma;
693
694 be_dws_le_to_cpu(wrb, sizeof(*wrb));
695
696 dma = (u64)wrb->frag_pa_hi << 32 | (u64)wrb->frag_pa_lo;
FUJITA Tomonorib681ee72010-04-04 21:40:18 +0000697 if (wrb->frag_len) {
Sathya Perla7101e112010-03-22 20:41:12 +0000698 if (unmap_single)
Ivan Vecera2b7bceb2011-02-02 08:05:12 +0000699 dma_unmap_single(dev, dma, wrb->frag_len,
700 DMA_TO_DEVICE);
Sathya Perla7101e112010-03-22 20:41:12 +0000701 else
Ivan Vecera2b7bceb2011-02-02 08:05:12 +0000702 dma_unmap_page(dev, dma, wrb->frag_len, DMA_TO_DEVICE);
Sathya Perla7101e112010-03-22 20:41:12 +0000703 }
704}
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700705
Sathya Perla3c8def92011-06-12 20:01:58 +0000706static int make_tx_wrbs(struct be_adapter *adapter, struct be_queue_info *txq,
Ajit Khapardebc0c3402013-04-24 11:52:50 +0000707 struct sk_buff *skb, u32 wrb_cnt, bool dummy_wrb,
708 bool skip_hw_vlan)
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700709{
Sathya Perla7101e112010-03-22 20:41:12 +0000710 dma_addr_t busaddr;
711 int i, copied = 0;
Ivan Vecera2b7bceb2011-02-02 08:05:12 +0000712 struct device *dev = &adapter->pdev->dev;
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700713 struct sk_buff *first_skb = skb;
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700714 struct be_eth_wrb *wrb;
715 struct be_eth_hdr_wrb *hdr;
Sathya Perla7101e112010-03-22 20:41:12 +0000716 bool map_single = false;
717 u16 map_head;
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700718
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700719 hdr = queue_head_node(txq);
720 queue_head_inc(txq);
Sathya Perla7101e112010-03-22 20:41:12 +0000721 map_head = txq->head;
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700722
David S. Millerebc8d2a2009-06-09 01:01:31 -0700723 if (skb->len > skb->data_len) {
Eric Dumazete743d312010-04-14 15:59:40 -0700724 int len = skb_headlen(skb);
Ivan Vecera2b7bceb2011-02-02 08:05:12 +0000725 busaddr = dma_map_single(dev, skb->data, len, DMA_TO_DEVICE);
726 if (dma_mapping_error(dev, busaddr))
Sathya Perla7101e112010-03-22 20:41:12 +0000727 goto dma_err;
728 map_single = true;
David S. Millerebc8d2a2009-06-09 01:01:31 -0700729 wrb = queue_head_node(txq);
730 wrb_fill(wrb, busaddr, len);
731 be_dws_cpu_to_le(wrb, sizeof(*wrb));
732 queue_head_inc(txq);
733 copied += len;
734 }
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700735
David S. Millerebc8d2a2009-06-09 01:01:31 -0700736 for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
Eric Dumazet9e903e02011-10-18 21:00:24 +0000737 const struct skb_frag_struct *frag =
David S. Millerebc8d2a2009-06-09 01:01:31 -0700738 &skb_shinfo(skb)->frags[i];
Ian Campbellb061b392011-08-29 23:18:23 +0000739 busaddr = skb_frag_dma_map(dev, frag, 0,
Eric Dumazet9e903e02011-10-18 21:00:24 +0000740 skb_frag_size(frag), DMA_TO_DEVICE);
Ivan Vecera2b7bceb2011-02-02 08:05:12 +0000741 if (dma_mapping_error(dev, busaddr))
Sathya Perla7101e112010-03-22 20:41:12 +0000742 goto dma_err;
David S. Millerebc8d2a2009-06-09 01:01:31 -0700743 wrb = queue_head_node(txq);
Eric Dumazet9e903e02011-10-18 21:00:24 +0000744 wrb_fill(wrb, busaddr, skb_frag_size(frag));
David S. Millerebc8d2a2009-06-09 01:01:31 -0700745 be_dws_cpu_to_le(wrb, sizeof(*wrb));
746 queue_head_inc(txq);
Eric Dumazet9e903e02011-10-18 21:00:24 +0000747 copied += skb_frag_size(frag);
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700748 }
749
750 if (dummy_wrb) {
751 wrb = queue_head_node(txq);
752 wrb_fill(wrb, 0, 0);
753 be_dws_cpu_to_le(wrb, sizeof(*wrb));
754 queue_head_inc(txq);
755 }
756
Ajit Khapardebc0c3402013-04-24 11:52:50 +0000757 wrb_fill_hdr(adapter, hdr, first_skb, wrb_cnt, copied, skip_hw_vlan);
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700758 be_dws_cpu_to_le(hdr, sizeof(*hdr));
759
760 return copied;
Sathya Perla7101e112010-03-22 20:41:12 +0000761dma_err:
762 txq->head = map_head;
763 while (copied) {
764 wrb = queue_head_node(txq);
Ivan Vecera2b7bceb2011-02-02 08:05:12 +0000765 unmap_tx_frag(dev, wrb, map_single);
Sathya Perla7101e112010-03-22 20:41:12 +0000766 map_single = false;
767 copied -= wrb->frag_len;
768 queue_head_inc(txq);
769 }
770 return 0;
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700771}
772
Somnath Kotur93040ae2012-06-26 22:32:10 +0000773static struct sk_buff *be_insert_vlan_in_pkt(struct be_adapter *adapter,
Ajit Khapardebc0c3402013-04-24 11:52:50 +0000774 struct sk_buff *skb,
775 bool *skip_hw_vlan)
Somnath Kotur93040ae2012-06-26 22:32:10 +0000776{
777 u16 vlan_tag = 0;
778
779 skb = skb_share_check(skb, GFP_ATOMIC);
780 if (unlikely(!skb))
781 return skb;
782
783 if (vlan_tx_tag_present(skb)) {
784 vlan_tag = be_get_tx_vlan_tag(adapter, skb);
David S. Miller6e0895c2013-04-22 20:32:51 -0400785 skb = __vlan_put_tag(skb, htons(ETH_P_8021Q), vlan_tag);
Ivan Veceraf11a8692013-04-12 16:49:24 +0200786 if (skb)
787 skb->vlan_tci = 0;
Somnath Kotur93040ae2012-06-26 22:32:10 +0000788 }
789
Ajit Khapardebc0c3402013-04-24 11:52:50 +0000790 if (qnq_async_evt_rcvd(adapter) && adapter->pvid) {
791 if (!vlan_tag)
792 vlan_tag = adapter->pvid;
793 if (skip_hw_vlan)
794 *skip_hw_vlan = true;
795 }
796
797 if (vlan_tag) {
David S. Miller58717682013-04-30 03:50:54 -0400798 skb = __vlan_put_tag(skb, htons(ETH_P_8021Q), vlan_tag);
Ajit Khapardebc0c3402013-04-24 11:52:50 +0000799 if (unlikely(!skb))
800 return skb;
801
802 skb->vlan_tci = 0;
803 }
804
805 /* Insert the outer VLAN, if any */
806 if (adapter->qnq_vid) {
807 vlan_tag = adapter->qnq_vid;
David S. Miller58717682013-04-30 03:50:54 -0400808 skb = __vlan_put_tag(skb, htons(ETH_P_8021Q), vlan_tag);
Ajit Khapardebc0c3402013-04-24 11:52:50 +0000809 if (unlikely(!skb))
810 return skb;
811 if (skip_hw_vlan)
812 *skip_hw_vlan = true;
813 }
814
Somnath Kotur93040ae2012-06-26 22:32:10 +0000815 return skb;
816}
817
Ajit Khapardebc0c3402013-04-24 11:52:50 +0000818static bool be_ipv6_exthdr_check(struct sk_buff *skb)
819{
820 struct ethhdr *eh = (struct ethhdr *)skb->data;
821 u16 offset = ETH_HLEN;
822
823 if (eh->h_proto == htons(ETH_P_IPV6)) {
824 struct ipv6hdr *ip6h = (struct ipv6hdr *)(skb->data + offset);
825
826 offset += sizeof(struct ipv6hdr);
827 if (ip6h->nexthdr != NEXTHDR_TCP &&
828 ip6h->nexthdr != NEXTHDR_UDP) {
829 struct ipv6_opt_hdr *ehdr =
830 (struct ipv6_opt_hdr *) (skb->data + offset);
831
832 /* offending pkt: 2nd byte following IPv6 hdr is 0xff */
833 if (ehdr->hdrlen == 0xff)
834 return true;
835 }
836 }
837 return false;
838}
839
840static int be_vlan_tag_tx_chk(struct be_adapter *adapter, struct sk_buff *skb)
841{
842 return vlan_tx_tag_present(skb) || adapter->pvid || adapter->qnq_vid;
843}
844
845static int be_ipv6_tx_stall_chk(struct be_adapter *adapter, struct sk_buff *skb)
846{
847 return BE3_chip(adapter) &&
848 be_ipv6_exthdr_check(skb);
849}
850
Stephen Hemminger613573252009-08-31 19:50:58 +0000851static netdev_tx_t be_xmit(struct sk_buff *skb,
Sathya Perlab31c50a2009-09-17 10:30:13 -0700852 struct net_device *netdev)
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700853{
854 struct be_adapter *adapter = netdev_priv(netdev);
Sathya Perla3c8def92011-06-12 20:01:58 +0000855 struct be_tx_obj *txo = &adapter->tx_obj[skb_get_queue_mapping(skb)];
856 struct be_queue_info *txq = &txo->q;
Somnath Kotur93040ae2012-06-26 22:32:10 +0000857 struct iphdr *ip = NULL;
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700858 u32 wrb_cnt = 0, copied = 0;
Somnath Kotur93040ae2012-06-26 22:32:10 +0000859 u32 start = txq->head, eth_hdr_len;
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700860 bool dummy_wrb, stopped = false;
Ajit Khapardebc0c3402013-04-24 11:52:50 +0000861 bool skip_hw_vlan = false;
Ajit Khaparded2cb6ce2013-04-24 11:53:08 +0000862 struct vlan_ethhdr *veh = (struct vlan_ethhdr *)skb->data;
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700863
Somnath Kotur93040ae2012-06-26 22:32:10 +0000864 eth_hdr_len = ntohs(skb->protocol) == ETH_P_8021Q ?
865 VLAN_ETH_HLEN : ETH_HLEN;
866
Ajit Khaparde1297f9d2013-04-24 11:52:28 +0000867 /* For padded packets, BE HW modifies tot_len field in IP header
868 * incorrecly when VLAN tag is inserted by HW.
Ajit Khaparde1ded1322011-12-09 13:53:17 +0000869 */
Ajit Khaparde1297f9d2013-04-24 11:52:28 +0000870 if (skb->len <= 60 && vlan_tx_tag_present(skb) && is_ipv4_pkt(skb)) {
Somnath Kotur93040ae2012-06-26 22:32:10 +0000871 ip = (struct iphdr *)ip_hdr(skb);
872 pskb_trim(skb, eth_hdr_len + ntohs(ip->tot_len));
873 }
874
Ajit Khaparded2cb6ce2013-04-24 11:53:08 +0000875 /* If vlan tag is already inlined in the packet, skip HW VLAN
876 * tagging in UMC mode
877 */
878 if ((adapter->function_mode & UMC_ENABLED) &&
879 veh->h_vlan_proto == htons(ETH_P_8021Q))
880 skip_hw_vlan = true;
881
Somnath Kotur93040ae2012-06-26 22:32:10 +0000882 /* HW has a bug wherein it will calculate CSUM for VLAN
883 * pkts even though it is disabled.
884 * Manually insert VLAN in pkt.
885 */
886 if (skb->ip_summed != CHECKSUM_PARTIAL &&
Ajit Khaparde1297f9d2013-04-24 11:52:28 +0000887 vlan_tx_tag_present(skb)) {
Ajit Khapardebc0c3402013-04-24 11:52:50 +0000888 skb = be_insert_vlan_in_pkt(adapter, skb, &skip_hw_vlan);
889 if (unlikely(!skb))
890 goto tx_drop;
891 }
892
893 /* HW may lockup when VLAN HW tagging is requested on
894 * certain ipv6 packets. Drop such pkts if the HW workaround to
895 * skip HW tagging is not enabled by FW.
896 */
897 if (unlikely(be_ipv6_tx_stall_chk(adapter, skb) &&
898 (adapter->pvid || adapter->qnq_vid) &&
899 !qnq_async_evt_rcvd(adapter)))
900 goto tx_drop;
901
902 /* Manual VLAN tag insertion to prevent:
903 * ASIC lockup when the ASIC inserts VLAN tag into
904 * certain ipv6 packets. Insert VLAN tags in driver,
905 * and set event, completion, vlan bits accordingly
906 * in the Tx WRB.
907 */
908 if (be_ipv6_tx_stall_chk(adapter, skb) &&
909 be_vlan_tag_tx_chk(adapter, skb)) {
910 skb = be_insert_vlan_in_pkt(adapter, skb, &skip_hw_vlan);
Ajit Khaparde1ded1322011-12-09 13:53:17 +0000911 if (unlikely(!skb))
912 goto tx_drop;
Ajit Khaparde1ded1322011-12-09 13:53:17 +0000913 }
914
Sathya Perlafe6d2a32010-11-21 23:25:50 +0000915 wrb_cnt = wrb_cnt_for_skb(adapter, skb, &dummy_wrb);
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700916
Ajit Khapardebc0c3402013-04-24 11:52:50 +0000917 copied = make_tx_wrbs(adapter, txq, skb, wrb_cnt, dummy_wrb,
918 skip_hw_vlan);
Ajit Khapardec190e3c2009-09-04 03:12:29 +0000919 if (copied) {
Eric Dumazetcd8f76c2012-06-07 22:59:59 +0000920 int gso_segs = skb_shinfo(skb)->gso_segs;
921
Ajit Khapardec190e3c2009-09-04 03:12:29 +0000922 /* record the sent skb in the sent_skb table */
Sathya Perla3c8def92011-06-12 20:01:58 +0000923 BUG_ON(txo->sent_skb_list[start]);
924 txo->sent_skb_list[start] = skb;
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700925
Ajit Khapardec190e3c2009-09-04 03:12:29 +0000926 /* Ensure txq has space for the next skb; Else stop the queue
927 * *BEFORE* ringing the tx doorbell, so that we serialze the
928 * tx compls of the current transmit which'll wake up the queue
929 */
Sathya Perla7101e112010-03-22 20:41:12 +0000930 atomic_add(wrb_cnt, &txq->used);
Ajit Khapardec190e3c2009-09-04 03:12:29 +0000931 if ((BE_MAX_TX_FRAG_COUNT + atomic_read(&txq->used)) >=
932 txq->len) {
Sathya Perla3c8def92011-06-12 20:01:58 +0000933 netif_stop_subqueue(netdev, skb_get_queue_mapping(skb));
Ajit Khapardec190e3c2009-09-04 03:12:29 +0000934 stopped = true;
935 }
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700936
Vasundhara Volam94d73aa2013-04-21 23:28:14 +0000937 be_txq_notify(adapter, txo, wrb_cnt);
Ajit Khapardec190e3c2009-09-04 03:12:29 +0000938
Eric Dumazetcd8f76c2012-06-07 22:59:59 +0000939 be_tx_stats_update(txo, wrb_cnt, copied, gso_segs, stopped);
Ajit Khapardec190e3c2009-09-04 03:12:29 +0000940 } else {
941 txq->head = start;
942 dev_kfree_skb_any(skb);
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700943 }
Ajit Khaparde1ded1322011-12-09 13:53:17 +0000944tx_drop:
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700945 return NETDEV_TX_OK;
946}
947
948static int be_change_mtu(struct net_device *netdev, int new_mtu)
949{
950 struct be_adapter *adapter = netdev_priv(netdev);
951 if (new_mtu < BE_MIN_MTU ||
Ajit Khaparde34a89b82010-02-09 01:32:43 +0000952 new_mtu > (BE_MAX_JUMBO_FRAME_SIZE -
953 (ETH_HLEN + ETH_FCS_LEN))) {
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700954 dev_info(&adapter->pdev->dev,
955 "MTU must be between %d and %d bytes\n",
Ajit Khaparde34a89b82010-02-09 01:32:43 +0000956 BE_MIN_MTU,
957 (BE_MAX_JUMBO_FRAME_SIZE - (ETH_HLEN + ETH_FCS_LEN)));
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700958 return -EINVAL;
959 }
960 dev_info(&adapter->pdev->dev, "MTU changed from %d to %d bytes\n",
961 netdev->mtu, new_mtu);
962 netdev->mtu = new_mtu;
963 return 0;
964}
965
966/*
Ajit Khaparde82903e42010-02-09 01:34:57 +0000967 * A max of 64 (BE_NUM_VLANS_SUPPORTED) vlans can be configured in BE.
968 * If the user configures more, place BE in vlan promiscuous mode.
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700969 */
Sathya Perla10329df2012-06-05 19:37:18 +0000970static int be_vid_config(struct be_adapter *adapter)
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700971{
Sathya Perla10329df2012-06-05 19:37:18 +0000972 u16 vids[BE_NUM_VLANS_SUPPORTED];
973 u16 num = 0, i;
Ajit Khaparde82903e42010-02-09 01:34:57 +0000974 int status = 0;
Ajit Khaparde1da87b72010-07-23 01:51:22 +0000975
Sathya Perlac0e64ef2011-08-02 19:57:43 +0000976 /* No need to further configure vids if in promiscuous mode */
977 if (adapter->promiscuous)
978 return 0;
979
Padmanabh Ratnakar0fc16eb2012-04-25 01:46:06 +0000980 if (adapter->vlans_added > adapter->max_vlans)
981 goto set_vlan_promisc;
982
983 /* Construct VLAN Table to give to HW */
984 for (i = 0; i < VLAN_N_VID; i++)
985 if (adapter->vlan_tag[i])
Sathya Perla10329df2012-06-05 19:37:18 +0000986 vids[num++] = cpu_to_le16(i);
Padmanabh Ratnakar0fc16eb2012-04-25 01:46:06 +0000987
988 status = be_cmd_vlan_config(adapter, adapter->if_handle,
Sathya Perla10329df2012-06-05 19:37:18 +0000989 vids, num, 1, 0);
Padmanabh Ratnakar0fc16eb2012-04-25 01:46:06 +0000990
991 /* Set to VLAN promisc mode as setting VLAN filter failed */
992 if (status) {
993 dev_info(&adapter->pdev->dev, "Exhausted VLAN HW filters.\n");
994 dev_info(&adapter->pdev->dev, "Disabling HW VLAN filtering.\n");
995 goto set_vlan_promisc;
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700996 }
Ajit Khaparde1da87b72010-07-23 01:51:22 +0000997
Sathya Perlab31c50a2009-09-17 10:30:13 -0700998 return status;
Padmanabh Ratnakar0fc16eb2012-04-25 01:46:06 +0000999
1000set_vlan_promisc:
1001 status = be_cmd_vlan_config(adapter, adapter->if_handle,
1002 NULL, 0, 1, 1);
1003 return status;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001004}
1005
Patrick McHardy80d5c362013-04-19 02:04:28 +00001006static int be_vlan_add_vid(struct net_device *netdev, __be16 proto, u16 vid)
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001007{
1008 struct be_adapter *adapter = netdev_priv(netdev);
Ajit Khaparde80817cb2011-12-30 12:15:12 +00001009 int status = 0;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001010
Padmanabh Ratnakara85e9982012-10-20 06:02:40 +00001011 if (!lancer_chip(adapter) && !be_physfn(adapter)) {
Ajit Khaparde80817cb2011-12-30 12:15:12 +00001012 status = -EINVAL;
1013 goto ret;
1014 }
Sarveshwar Bandiba343c72010-03-31 02:56:12 +00001015
Padmanabh Ratnakara85e9982012-10-20 06:02:40 +00001016 /* Packets with VID 0 are always received by Lancer by default */
1017 if (lancer_chip(adapter) && vid == 0)
1018 goto ret;
1019
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001020 adapter->vlan_tag[vid] = 1;
Ajit Khaparde82903e42010-02-09 01:34:57 +00001021 if (adapter->vlans_added <= (adapter->max_vlans + 1))
Sathya Perla10329df2012-06-05 19:37:18 +00001022 status = be_vid_config(adapter);
Jiri Pirko8e586132011-12-08 19:52:37 -05001023
Ajit Khaparde80817cb2011-12-30 12:15:12 +00001024 if (!status)
1025 adapter->vlans_added++;
1026 else
1027 adapter->vlan_tag[vid] = 0;
1028ret:
1029 return status;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001030}
1031
Patrick McHardy80d5c362013-04-19 02:04:28 +00001032static int be_vlan_rem_vid(struct net_device *netdev, __be16 proto, u16 vid)
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001033{
1034 struct be_adapter *adapter = netdev_priv(netdev);
Ajit Khaparde80817cb2011-12-30 12:15:12 +00001035 int status = 0;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001036
Padmanabh Ratnakara85e9982012-10-20 06:02:40 +00001037 if (!lancer_chip(adapter) && !be_physfn(adapter)) {
Ajit Khaparde80817cb2011-12-30 12:15:12 +00001038 status = -EINVAL;
1039 goto ret;
1040 }
Sarveshwar Bandiba343c72010-03-31 02:56:12 +00001041
Padmanabh Ratnakara85e9982012-10-20 06:02:40 +00001042 /* Packets with VID 0 are always received by Lancer by default */
1043 if (lancer_chip(adapter) && vid == 0)
1044 goto ret;
1045
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001046 adapter->vlan_tag[vid] = 0;
Ajit Khaparde82903e42010-02-09 01:34:57 +00001047 if (adapter->vlans_added <= adapter->max_vlans)
Sathya Perla10329df2012-06-05 19:37:18 +00001048 status = be_vid_config(adapter);
Jiri Pirko8e586132011-12-08 19:52:37 -05001049
Ajit Khaparde80817cb2011-12-30 12:15:12 +00001050 if (!status)
1051 adapter->vlans_added--;
1052 else
1053 adapter->vlan_tag[vid] = 1;
1054ret:
1055 return status;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001056}
1057
Sathya Perlaa54769f2011-10-24 02:45:00 +00001058static void be_set_rx_mode(struct net_device *netdev)
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001059{
1060 struct be_adapter *adapter = netdev_priv(netdev);
Padmanabh Ratnakar0fc16eb2012-04-25 01:46:06 +00001061 int status;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001062
1063 if (netdev->flags & IFF_PROMISC) {
Sathya Perla5b8821b2011-08-02 19:57:44 +00001064 be_cmd_rx_filter(adapter, IFF_PROMISC, ON);
Sathya Perla24307ee2009-06-18 00:09:25 +00001065 adapter->promiscuous = true;
1066 goto done;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001067 }
Sathya Perla24307ee2009-06-18 00:09:25 +00001068
Lucas De Marchi25985ed2011-03-30 22:57:33 -03001069 /* BE was previously in promiscuous mode; disable it */
Sathya Perla24307ee2009-06-18 00:09:25 +00001070 if (adapter->promiscuous) {
1071 adapter->promiscuous = false;
Sathya Perla5b8821b2011-08-02 19:57:44 +00001072 be_cmd_rx_filter(adapter, IFF_PROMISC, OFF);
Sathya Perlac0e64ef2011-08-02 19:57:43 +00001073
1074 if (adapter->vlans_added)
Sathya Perla10329df2012-06-05 19:37:18 +00001075 be_vid_config(adapter);
Sathya Perla24307ee2009-06-18 00:09:25 +00001076 }
1077
Sathya Perlae7b909a2009-11-22 22:01:10 +00001078 /* Enable multicast promisc if num configured exceeds what we support */
Jiri Pirko4cd24ea2010-02-08 04:30:35 +00001079 if (netdev->flags & IFF_ALLMULTI ||
Padmanabh Ratnakarabb93952012-10-20 06:01:41 +00001080 netdev_mc_count(netdev) > adapter->max_mcast_mac) {
Sathya Perla5b8821b2011-08-02 19:57:44 +00001081 be_cmd_rx_filter(adapter, IFF_ALLMULTI, ON);
Sathya Perla24307ee2009-06-18 00:09:25 +00001082 goto done;
1083 }
1084
Ajit Khapardefbc13f02012-03-18 06:23:21 +00001085 if (netdev_uc_count(netdev) != adapter->uc_macs) {
1086 struct netdev_hw_addr *ha;
1087 int i = 1; /* First slot is claimed by the Primary MAC */
1088
1089 for (; adapter->uc_macs > 0; adapter->uc_macs--, i++) {
1090 be_cmd_pmac_del(adapter, adapter->if_handle,
1091 adapter->pmac_id[i], 0);
1092 }
1093
1094 if (netdev_uc_count(netdev) > adapter->max_pmac_cnt) {
1095 be_cmd_rx_filter(adapter, IFF_PROMISC, ON);
1096 adapter->promiscuous = true;
1097 goto done;
1098 }
1099
1100 netdev_for_each_uc_addr(ha, adapter->netdev) {
1101 adapter->uc_macs++; /* First slot is for Primary MAC */
1102 be_cmd_pmac_add(adapter, (u8 *)ha->addr,
1103 adapter->if_handle,
1104 &adapter->pmac_id[adapter->uc_macs], 0);
1105 }
1106 }
1107
Padmanabh Ratnakar0fc16eb2012-04-25 01:46:06 +00001108 status = be_cmd_rx_filter(adapter, IFF_MULTICAST, ON);
1109
1110 /* Set to MCAST promisc mode if setting MULTICAST address fails */
1111 if (status) {
1112 dev_info(&adapter->pdev->dev, "Exhausted multicast HW filters.\n");
1113 dev_info(&adapter->pdev->dev, "Disabling HW multicast filtering.\n");
1114 be_cmd_rx_filter(adapter, IFF_ALLMULTI, ON);
1115 }
Sathya Perla24307ee2009-06-18 00:09:25 +00001116done:
1117 return;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001118}
1119
Sarveshwar Bandiba343c72010-03-31 02:56:12 +00001120static int be_set_vf_mac(struct net_device *netdev, int vf, u8 *mac)
1121{
1122 struct be_adapter *adapter = netdev_priv(netdev);
Sathya Perla11ac75e2011-12-13 00:58:50 +00001123 struct be_vf_cfg *vf_cfg = &adapter->vf_cfg[vf];
Sarveshwar Bandiba343c72010-03-31 02:56:12 +00001124 int status;
Padmanabh Ratnakar704e4c82012-10-20 06:02:13 +00001125 bool active_mac = false;
1126 u32 pmac_id;
1127 u8 old_mac[ETH_ALEN];
Sarveshwar Bandiba343c72010-03-31 02:56:12 +00001128
Sathya Perla11ac75e2011-12-13 00:58:50 +00001129 if (!sriov_enabled(adapter))
Sarveshwar Bandiba343c72010-03-31 02:56:12 +00001130 return -EPERM;
1131
Sathya Perla11ac75e2011-12-13 00:58:50 +00001132 if (!is_valid_ether_addr(mac) || vf >= adapter->num_vfs)
Sarveshwar Bandiba343c72010-03-31 02:56:12 +00001133 return -EINVAL;
1134
Padmanabh Ratnakar590c3912011-11-25 05:47:26 +00001135 if (lancer_chip(adapter)) {
Padmanabh Ratnakar704e4c82012-10-20 06:02:13 +00001136 status = be_cmd_get_mac_from_list(adapter, old_mac, &active_mac,
1137 &pmac_id, vf + 1);
1138 if (!status && active_mac)
1139 be_cmd_pmac_del(adapter, vf_cfg->if_handle,
1140 pmac_id, vf + 1);
1141
Padmanabh Ratnakar590c3912011-11-25 05:47:26 +00001142 status = be_cmd_set_mac_list(adapter, mac, 1, vf + 1);
1143 } else {
Sathya Perla11ac75e2011-12-13 00:58:50 +00001144 status = be_cmd_pmac_del(adapter, vf_cfg->if_handle,
1145 vf_cfg->pmac_id, vf + 1);
Sarveshwar Bandiba343c72010-03-31 02:56:12 +00001146
Sathya Perla11ac75e2011-12-13 00:58:50 +00001147 status = be_cmd_pmac_add(adapter, mac, vf_cfg->if_handle,
1148 &vf_cfg->pmac_id, vf + 1);
Padmanabh Ratnakar590c3912011-11-25 05:47:26 +00001149 }
1150
Ajit Khaparde64600ea2010-07-23 01:50:34 +00001151 if (status)
Sarveshwar Bandiba343c72010-03-31 02:56:12 +00001152 dev_err(&adapter->pdev->dev, "MAC %pM set on VF %d Failed\n",
1153 mac, vf);
Ajit Khaparde64600ea2010-07-23 01:50:34 +00001154 else
Sathya Perla11ac75e2011-12-13 00:58:50 +00001155 memcpy(vf_cfg->mac_addr, mac, ETH_ALEN);
Ajit Khaparde64600ea2010-07-23 01:50:34 +00001156
Sarveshwar Bandiba343c72010-03-31 02:56:12 +00001157 return status;
1158}
1159
Ajit Khaparde64600ea2010-07-23 01:50:34 +00001160static int be_get_vf_config(struct net_device *netdev, int vf,
1161 struct ifla_vf_info *vi)
1162{
1163 struct be_adapter *adapter = netdev_priv(netdev);
Sathya Perla11ac75e2011-12-13 00:58:50 +00001164 struct be_vf_cfg *vf_cfg = &adapter->vf_cfg[vf];
Ajit Khaparde64600ea2010-07-23 01:50:34 +00001165
Sathya Perla11ac75e2011-12-13 00:58:50 +00001166 if (!sriov_enabled(adapter))
Ajit Khaparde64600ea2010-07-23 01:50:34 +00001167 return -EPERM;
1168
Sathya Perla11ac75e2011-12-13 00:58:50 +00001169 if (vf >= adapter->num_vfs)
Ajit Khaparde64600ea2010-07-23 01:50:34 +00001170 return -EINVAL;
1171
1172 vi->vf = vf;
Sathya Perla11ac75e2011-12-13 00:58:50 +00001173 vi->tx_rate = vf_cfg->tx_rate;
1174 vi->vlan = vf_cfg->vlan_tag;
Ajit Khaparde64600ea2010-07-23 01:50:34 +00001175 vi->qos = 0;
Sathya Perla11ac75e2011-12-13 00:58:50 +00001176 memcpy(&vi->mac, vf_cfg->mac_addr, ETH_ALEN);
Ajit Khaparde64600ea2010-07-23 01:50:34 +00001177
1178 return 0;
1179}
1180
Ajit Khaparde1da87b72010-07-23 01:51:22 +00001181static int be_set_vf_vlan(struct net_device *netdev,
1182 int vf, u16 vlan, u8 qos)
1183{
1184 struct be_adapter *adapter = netdev_priv(netdev);
1185 int status = 0;
1186
Sathya Perla11ac75e2011-12-13 00:58:50 +00001187 if (!sriov_enabled(adapter))
Ajit Khaparde1da87b72010-07-23 01:51:22 +00001188 return -EPERM;
1189
Sathya Perla11ac75e2011-12-13 00:58:50 +00001190 if (vf >= adapter->num_vfs || vlan > 4095)
Ajit Khaparde1da87b72010-07-23 01:51:22 +00001191 return -EINVAL;
1192
1193 if (vlan) {
Ajit Khapardef1f3ee12012-03-18 06:23:41 +00001194 if (adapter->vf_cfg[vf].vlan_tag != vlan) {
1195 /* If this is new value, program it. Else skip. */
1196 adapter->vf_cfg[vf].vlan_tag = vlan;
1197
1198 status = be_cmd_set_hsw_config(adapter, vlan,
1199 vf + 1, adapter->vf_cfg[vf].if_handle);
1200 }
Ajit Khaparde1da87b72010-07-23 01:51:22 +00001201 } else {
Ajit Khapardef1f3ee12012-03-18 06:23:41 +00001202 /* Reset Transparent Vlan Tagging. */
Sathya Perla11ac75e2011-12-13 00:58:50 +00001203 adapter->vf_cfg[vf].vlan_tag = 0;
Ajit Khapardef1f3ee12012-03-18 06:23:41 +00001204 vlan = adapter->vf_cfg[vf].def_vid;
1205 status = be_cmd_set_hsw_config(adapter, vlan, vf + 1,
1206 adapter->vf_cfg[vf].if_handle);
Ajit Khaparde1da87b72010-07-23 01:51:22 +00001207 }
1208
Ajit Khaparde1da87b72010-07-23 01:51:22 +00001209
1210 if (status)
1211 dev_info(&adapter->pdev->dev,
1212 "VLAN %d config on VF %d failed\n", vlan, vf);
1213 return status;
1214}
1215
Ajit Khapardee1d18732010-07-23 01:52:13 +00001216static int be_set_vf_tx_rate(struct net_device *netdev,
1217 int vf, int rate)
1218{
1219 struct be_adapter *adapter = netdev_priv(netdev);
1220 int status = 0;
1221
Sathya Perla11ac75e2011-12-13 00:58:50 +00001222 if (!sriov_enabled(adapter))
Ajit Khapardee1d18732010-07-23 01:52:13 +00001223 return -EPERM;
1224
Ajit Khaparde94f434c2011-12-30 12:15:30 +00001225 if (vf >= adapter->num_vfs)
Ajit Khapardee1d18732010-07-23 01:52:13 +00001226 return -EINVAL;
1227
Ajit Khaparde94f434c2011-12-30 12:15:30 +00001228 if (rate < 100 || rate > 10000) {
1229 dev_err(&adapter->pdev->dev,
1230 "tx rate must be between 100 and 10000 Mbps\n");
1231 return -EINVAL;
1232 }
Ajit Khapardee1d18732010-07-23 01:52:13 +00001233
Padmanabh Ratnakard5c18472012-10-20 06:01:53 +00001234 if (lancer_chip(adapter))
1235 status = be_cmd_set_profile_config(adapter, rate / 10, vf + 1);
1236 else
1237 status = be_cmd_set_qos(adapter, rate / 10, vf + 1);
Ajit Khapardee1d18732010-07-23 01:52:13 +00001238
1239 if (status)
Ajit Khaparde94f434c2011-12-30 12:15:30 +00001240 dev_err(&adapter->pdev->dev,
Ajit Khapardee1d18732010-07-23 01:52:13 +00001241 "tx rate %d on VF %d failed\n", rate, vf);
Ajit Khaparde94f434c2011-12-30 12:15:30 +00001242 else
1243 adapter->vf_cfg[vf].tx_rate = rate;
Ajit Khapardee1d18732010-07-23 01:52:13 +00001244 return status;
1245}
1246
Sathya Perla39f1d942012-05-08 19:41:24 +00001247static int be_find_vfs(struct be_adapter *adapter, int vf_state)
1248{
1249 struct pci_dev *dev, *pdev = adapter->pdev;
Ivan Vecera2f6a0262012-10-01 01:56:55 +00001250 int vfs = 0, assigned_vfs = 0, pos;
Sathya Perla39f1d942012-05-08 19:41:24 +00001251 u16 offset, stride;
1252
1253 pos = pci_find_ext_capability(pdev, PCI_EXT_CAP_ID_SRIOV);
Sathya Perlad79c0a22012-06-05 19:37:22 +00001254 if (!pos)
1255 return 0;
Sathya Perla39f1d942012-05-08 19:41:24 +00001256 pci_read_config_word(pdev, pos + PCI_SRIOV_VF_OFFSET, &offset);
1257 pci_read_config_word(pdev, pos + PCI_SRIOV_VF_STRIDE, &stride);
1258
1259 dev = pci_get_device(pdev->vendor, PCI_ANY_ID, NULL);
1260 while (dev) {
Ivan Vecera2f6a0262012-10-01 01:56:55 +00001261 if (dev->is_virtfn && pci_physfn(dev) == pdev) {
Sathya Perla39f1d942012-05-08 19:41:24 +00001262 vfs++;
1263 if (dev->dev_flags & PCI_DEV_FLAGS_ASSIGNED)
1264 assigned_vfs++;
1265 }
1266 dev = pci_get_device(pdev->vendor, PCI_ANY_ID, dev);
1267 }
1268 return (vf_state == ASSIGNED) ? assigned_vfs : vfs;
1269}
1270
Sathya Perla10ef9ab2012-02-09 18:05:27 +00001271static void be_eqd_update(struct be_adapter *adapter, struct be_eq_obj *eqo)
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001272{
Sathya Perla10ef9ab2012-02-09 18:05:27 +00001273 struct be_rx_stats *stats = rx_stats(&adapter->rx_obj[eqo->idx]);
Sathya Perla4097f662009-03-24 16:40:13 -07001274 ulong now = jiffies;
Sathya Perlaac124ff2011-07-25 19:10:14 +00001275 ulong delta = now - stats->rx_jiffies;
Sathya Perlaab1594e2011-07-25 19:10:15 +00001276 u64 pkts;
1277 unsigned int start, eqd;
Sathya Perlaac124ff2011-07-25 19:10:14 +00001278
Sathya Perla10ef9ab2012-02-09 18:05:27 +00001279 if (!eqo->enable_aic) {
1280 eqd = eqo->eqd;
1281 goto modify_eqd;
1282 }
1283
1284 if (eqo->idx >= adapter->num_rx_qs)
Sathya Perlaac124ff2011-07-25 19:10:14 +00001285 return;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001286
Sathya Perla10ef9ab2012-02-09 18:05:27 +00001287 stats = rx_stats(&adapter->rx_obj[eqo->idx]);
1288
Sathya Perla4097f662009-03-24 16:40:13 -07001289 /* Wrapped around */
Sathya Perla3abcded2010-10-03 22:12:27 -07001290 if (time_before(now, stats->rx_jiffies)) {
1291 stats->rx_jiffies = now;
Sathya Perla4097f662009-03-24 16:40:13 -07001292 return;
1293 }
1294
Sathya Perlaac124ff2011-07-25 19:10:14 +00001295 /* Update once a second */
1296 if (delta < HZ)
Sathya Perla4097f662009-03-24 16:40:13 -07001297 return;
1298
Sathya Perlaab1594e2011-07-25 19:10:15 +00001299 do {
1300 start = u64_stats_fetch_begin_bh(&stats->sync);
1301 pkts = stats->rx_pkts;
1302 } while (u64_stats_fetch_retry_bh(&stats->sync, start));
1303
Eric Dumazet68c3e5a2011-08-09 06:23:07 +00001304 stats->rx_pps = (unsigned long)(pkts - stats->rx_pkts_prev) / (delta / HZ);
Sathya Perlaab1594e2011-07-25 19:10:15 +00001305 stats->rx_pkts_prev = pkts;
Sathya Perla3abcded2010-10-03 22:12:27 -07001306 stats->rx_jiffies = now;
Sathya Perla10ef9ab2012-02-09 18:05:27 +00001307 eqd = (stats->rx_pps / 110000) << 3;
1308 eqd = min(eqd, eqo->max_eqd);
1309 eqd = max(eqd, eqo->min_eqd);
Sathya Perlaac124ff2011-07-25 19:10:14 +00001310 if (eqd < 10)
1311 eqd = 0;
Sathya Perla10ef9ab2012-02-09 18:05:27 +00001312
1313modify_eqd:
1314 if (eqd != eqo->cur_eqd) {
1315 be_cmd_modify_eqd(adapter, eqo->q.id, eqd);
1316 eqo->cur_eqd = eqd;
Sathya Perlaac124ff2011-07-25 19:10:14 +00001317 }
Sathya Perla4097f662009-03-24 16:40:13 -07001318}
1319
Sathya Perla3abcded2010-10-03 22:12:27 -07001320static void be_rx_stats_update(struct be_rx_obj *rxo,
Sathya Perla2e588f82011-03-11 02:49:26 +00001321 struct be_rx_compl_info *rxcp)
Sathya Perla4097f662009-03-24 16:40:13 -07001322{
Sathya Perlaac124ff2011-07-25 19:10:14 +00001323 struct be_rx_stats *stats = rx_stats(rxo);
Sathya Perla4097f662009-03-24 16:40:13 -07001324
Sathya Perlaab1594e2011-07-25 19:10:15 +00001325 u64_stats_update_begin(&stats->sync);
Sathya Perla3abcded2010-10-03 22:12:27 -07001326 stats->rx_compl++;
Sathya Perla2e588f82011-03-11 02:49:26 +00001327 stats->rx_bytes += rxcp->pkt_size;
Sathya Perla3abcded2010-10-03 22:12:27 -07001328 stats->rx_pkts++;
Sathya Perla2e588f82011-03-11 02:49:26 +00001329 if (rxcp->pkt_type == BE_MULTICAST_PACKET)
Sathya Perla3abcded2010-10-03 22:12:27 -07001330 stats->rx_mcast_pkts++;
Sathya Perla2e588f82011-03-11 02:49:26 +00001331 if (rxcp->err)
Sathya Perlaac124ff2011-07-25 19:10:14 +00001332 stats->rx_compl_err++;
Sathya Perlaab1594e2011-07-25 19:10:15 +00001333 u64_stats_update_end(&stats->sync);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001334}
1335
Sathya Perla2e588f82011-03-11 02:49:26 +00001336static inline bool csum_passed(struct be_rx_compl_info *rxcp)
Ajit Khaparde728a9972009-04-13 15:41:22 -07001337{
Padmanabh Ratnakar19fad862011-03-07 03:08:16 +00001338 /* L4 checksum is not reliable for non TCP/UDP packets.
1339 * Also ignore ipcksm for ipv6 pkts */
Sathya Perla2e588f82011-03-11 02:49:26 +00001340 return (rxcp->tcpf || rxcp->udpf) && rxcp->l4_csum &&
1341 (rxcp->ip_csum || rxcp->ipv6);
Ajit Khaparde728a9972009-04-13 15:41:22 -07001342}
1343
Sathya Perla10ef9ab2012-02-09 18:05:27 +00001344static struct be_rx_page_info *get_rx_page_info(struct be_rx_obj *rxo,
1345 u16 frag_idx)
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001346{
Sathya Perla10ef9ab2012-02-09 18:05:27 +00001347 struct be_adapter *adapter = rxo->adapter;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001348 struct be_rx_page_info *rx_page_info;
Sathya Perla3abcded2010-10-03 22:12:27 -07001349 struct be_queue_info *rxq = &rxo->q;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001350
Sathya Perla3abcded2010-10-03 22:12:27 -07001351 rx_page_info = &rxo->page_info_tbl[frag_idx];
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001352 BUG_ON(!rx_page_info->page);
1353
Ajit Khaparde205859a2010-02-09 01:34:21 +00001354 if (rx_page_info->last_page_user) {
Ivan Vecera2b7bceb2011-02-02 08:05:12 +00001355 dma_unmap_page(&adapter->pdev->dev,
1356 dma_unmap_addr(rx_page_info, bus),
1357 adapter->big_page_size, DMA_FROM_DEVICE);
Ajit Khaparde205859a2010-02-09 01:34:21 +00001358 rx_page_info->last_page_user = false;
1359 }
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001360
1361 atomic_dec(&rxq->used);
1362 return rx_page_info;
1363}
1364
1365/* Throwaway the data in the Rx completion */
Sathya Perla10ef9ab2012-02-09 18:05:27 +00001366static void be_rx_compl_discard(struct be_rx_obj *rxo,
1367 struct be_rx_compl_info *rxcp)
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001368{
Sathya Perla3abcded2010-10-03 22:12:27 -07001369 struct be_queue_info *rxq = &rxo->q;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001370 struct be_rx_page_info *page_info;
Sathya Perla2e588f82011-03-11 02:49:26 +00001371 u16 i, num_rcvd = rxcp->num_rcvd;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001372
Padmanabh Ratnakare80d9da2011-03-07 03:07:58 +00001373 for (i = 0; i < num_rcvd; i++) {
Sathya Perla10ef9ab2012-02-09 18:05:27 +00001374 page_info = get_rx_page_info(rxo, rxcp->rxq_idx);
Padmanabh Ratnakare80d9da2011-03-07 03:07:58 +00001375 put_page(page_info->page);
1376 memset(page_info, 0, sizeof(*page_info));
Sathya Perla2e588f82011-03-11 02:49:26 +00001377 index_inc(&rxcp->rxq_idx, rxq->len);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001378 }
1379}
1380
1381/*
1382 * skb_fill_rx_data forms a complete skb for an ether frame
1383 * indicated by rxcp.
1384 */
Sathya Perla10ef9ab2012-02-09 18:05:27 +00001385static void skb_fill_rx_data(struct be_rx_obj *rxo, struct sk_buff *skb,
1386 struct be_rx_compl_info *rxcp)
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001387{
Sathya Perla3abcded2010-10-03 22:12:27 -07001388 struct be_queue_info *rxq = &rxo->q;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001389 struct be_rx_page_info *page_info;
Sathya Perla2e588f82011-03-11 02:49:26 +00001390 u16 i, j;
1391 u16 hdr_len, curr_frag_len, remaining;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001392 u8 *start;
1393
Sathya Perla10ef9ab2012-02-09 18:05:27 +00001394 page_info = get_rx_page_info(rxo, rxcp->rxq_idx);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001395 start = page_address(page_info->page) + page_info->page_offset;
1396 prefetch(start);
1397
1398 /* Copy data in the first descriptor of this completion */
Sathya Perla2e588f82011-03-11 02:49:26 +00001399 curr_frag_len = min(rxcp->pkt_size, rx_frag_size);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001400
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001401 skb->len = curr_frag_len;
1402 if (curr_frag_len <= BE_HDR_LEN) { /* tiny packet */
Eric Dumazetac1ae5f2012-07-13 03:19:41 +00001403 memcpy(skb->data, start, curr_frag_len);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001404 /* Complete packet has now been moved to data */
1405 put_page(page_info->page);
1406 skb->data_len = 0;
1407 skb->tail += curr_frag_len;
1408 } else {
Eric Dumazetac1ae5f2012-07-13 03:19:41 +00001409 hdr_len = ETH_HLEN;
1410 memcpy(skb->data, start, hdr_len);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001411 skb_shinfo(skb)->nr_frags = 1;
Ian Campbellb061b392011-08-29 23:18:23 +00001412 skb_frag_set_page(skb, 0, page_info->page);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001413 skb_shinfo(skb)->frags[0].page_offset =
1414 page_info->page_offset + hdr_len;
Eric Dumazet9e903e02011-10-18 21:00:24 +00001415 skb_frag_size_set(&skb_shinfo(skb)->frags[0], curr_frag_len - hdr_len);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001416 skb->data_len = curr_frag_len - hdr_len;
Eric Dumazetbdb28a92011-10-13 06:31:02 +00001417 skb->truesize += rx_frag_size;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001418 skb->tail += hdr_len;
1419 }
Ajit Khaparde205859a2010-02-09 01:34:21 +00001420 page_info->page = NULL;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001421
Sathya Perla2e588f82011-03-11 02:49:26 +00001422 if (rxcp->pkt_size <= rx_frag_size) {
1423 BUG_ON(rxcp->num_rcvd != 1);
1424 return;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001425 }
1426
1427 /* More frags present for this completion */
Sathya Perla2e588f82011-03-11 02:49:26 +00001428 index_inc(&rxcp->rxq_idx, rxq->len);
1429 remaining = rxcp->pkt_size - curr_frag_len;
1430 for (i = 1, j = 0; i < rxcp->num_rcvd; i++) {
Sathya Perla10ef9ab2012-02-09 18:05:27 +00001431 page_info = get_rx_page_info(rxo, rxcp->rxq_idx);
Sathya Perla2e588f82011-03-11 02:49:26 +00001432 curr_frag_len = min(remaining, rx_frag_size);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001433
Ajit Khapardebd46cb62009-06-26 02:51:07 +00001434 /* Coalesce all frags from the same physical page in one slot */
1435 if (page_info->page_offset == 0) {
1436 /* Fresh page */
1437 j++;
Ian Campbellb061b392011-08-29 23:18:23 +00001438 skb_frag_set_page(skb, j, page_info->page);
Ajit Khapardebd46cb62009-06-26 02:51:07 +00001439 skb_shinfo(skb)->frags[j].page_offset =
1440 page_info->page_offset;
Eric Dumazet9e903e02011-10-18 21:00:24 +00001441 skb_frag_size_set(&skb_shinfo(skb)->frags[j], 0);
Ajit Khapardebd46cb62009-06-26 02:51:07 +00001442 skb_shinfo(skb)->nr_frags++;
1443 } else {
1444 put_page(page_info->page);
1445 }
1446
Eric Dumazet9e903e02011-10-18 21:00:24 +00001447 skb_frag_size_add(&skb_shinfo(skb)->frags[j], curr_frag_len);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001448 skb->len += curr_frag_len;
1449 skb->data_len += curr_frag_len;
Eric Dumazetbdb28a92011-10-13 06:31:02 +00001450 skb->truesize += rx_frag_size;
Sathya Perla2e588f82011-03-11 02:49:26 +00001451 remaining -= curr_frag_len;
1452 index_inc(&rxcp->rxq_idx, rxq->len);
Ajit Khaparde205859a2010-02-09 01:34:21 +00001453 page_info->page = NULL;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001454 }
Ajit Khapardebd46cb62009-06-26 02:51:07 +00001455 BUG_ON(j > MAX_SKB_FRAGS);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001456}
1457
Ajit Khaparde5be93b92009-07-21 12:36:19 -07001458/* Process the RX completion indicated by rxcp when GRO is disabled */
Sathya Perla10ef9ab2012-02-09 18:05:27 +00001459static void be_rx_compl_process(struct be_rx_obj *rxo,
1460 struct be_rx_compl_info *rxcp)
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001461{
Sathya Perla10ef9ab2012-02-09 18:05:27 +00001462 struct be_adapter *adapter = rxo->adapter;
Michał Mirosław6332c8d2011-04-07 02:43:48 +00001463 struct net_device *netdev = adapter->netdev;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001464 struct sk_buff *skb;
Sathya Perla89420422010-02-17 01:35:26 +00001465
Eric Dumazetbb349bb2012-01-25 03:56:30 +00001466 skb = netdev_alloc_skb_ip_align(netdev, BE_RX_SKB_ALLOC_SIZE);
Sathya Perlaa058a632010-02-17 01:34:22 +00001467 if (unlikely(!skb)) {
Sathya Perlaac124ff2011-07-25 19:10:14 +00001468 rx_stats(rxo)->rx_drops_no_skbs++;
Sathya Perla10ef9ab2012-02-09 18:05:27 +00001469 be_rx_compl_discard(rxo, rxcp);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001470 return;
1471 }
1472
Sathya Perla10ef9ab2012-02-09 18:05:27 +00001473 skb_fill_rx_data(rxo, skb, rxcp);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001474
Michał Mirosław6332c8d2011-04-07 02:43:48 +00001475 if (likely((netdev->features & NETIF_F_RXCSUM) && csum_passed(rxcp)))
Ajit Khaparde728a9972009-04-13 15:41:22 -07001476 skb->ip_summed = CHECKSUM_UNNECESSARY;
Somnath Koturc6ce2f42010-10-25 01:11:58 +00001477 else
1478 skb_checksum_none_assert(skb);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001479
Michał Mirosław6332c8d2011-04-07 02:43:48 +00001480 skb->protocol = eth_type_trans(skb, netdev);
Somnath Koturaaa6dae2012-05-02 03:40:49 +00001481 skb_record_rx_queue(skb, rxo - &adapter->rx_obj[0]);
Sathya Perla10ef9ab2012-02-09 18:05:27 +00001482 if (netdev->features & NETIF_F_RXHASH)
Ajit Khaparde4b972912011-04-06 18:07:43 +00001483 skb->rxhash = rxcp->rss_hash;
1484
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001485
Jiri Pirko343e43c2011-08-25 02:50:51 +00001486 if (rxcp->vlanf)
Patrick McHardy86a9bad2013-04-19 02:04:30 +00001487 __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), rxcp->vlan_tag);
Ajit Khaparde4c5102f2011-07-12 22:10:01 -07001488
1489 netif_receive_skb(skb);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001490}
1491
Ajit Khaparde5be93b92009-07-21 12:36:19 -07001492/* Process the RX completion indicated by rxcp when GRO is enabled */
Sathya Perla10ef9ab2012-02-09 18:05:27 +00001493void be_rx_compl_process_gro(struct be_rx_obj *rxo, struct napi_struct *napi,
1494 struct be_rx_compl_info *rxcp)
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001495{
Sathya Perla10ef9ab2012-02-09 18:05:27 +00001496 struct be_adapter *adapter = rxo->adapter;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001497 struct be_rx_page_info *page_info;
Ajit Khaparde5be93b92009-07-21 12:36:19 -07001498 struct sk_buff *skb = NULL;
Sathya Perla3abcded2010-10-03 22:12:27 -07001499 struct be_queue_info *rxq = &rxo->q;
Sathya Perla2e588f82011-03-11 02:49:26 +00001500 u16 remaining, curr_frag_len;
1501 u16 i, j;
Ajit Khaparde3968fa12011-02-20 11:41:53 +00001502
Sathya Perla10ef9ab2012-02-09 18:05:27 +00001503 skb = napi_get_frags(napi);
Ajit Khaparde5be93b92009-07-21 12:36:19 -07001504 if (!skb) {
Sathya Perla10ef9ab2012-02-09 18:05:27 +00001505 be_rx_compl_discard(rxo, rxcp);
Ajit Khaparde5be93b92009-07-21 12:36:19 -07001506 return;
1507 }
1508
Sathya Perla2e588f82011-03-11 02:49:26 +00001509 remaining = rxcp->pkt_size;
1510 for (i = 0, j = -1; i < rxcp->num_rcvd; i++) {
Sathya Perla10ef9ab2012-02-09 18:05:27 +00001511 page_info = get_rx_page_info(rxo, rxcp->rxq_idx);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001512
1513 curr_frag_len = min(remaining, rx_frag_size);
1514
Ajit Khapardebd46cb62009-06-26 02:51:07 +00001515 /* Coalesce all frags from the same physical page in one slot */
1516 if (i == 0 || page_info->page_offset == 0) {
1517 /* First frag or Fresh page */
1518 j++;
Ian Campbellb061b392011-08-29 23:18:23 +00001519 skb_frag_set_page(skb, j, page_info->page);
Ajit Khaparde5be93b92009-07-21 12:36:19 -07001520 skb_shinfo(skb)->frags[j].page_offset =
1521 page_info->page_offset;
Eric Dumazet9e903e02011-10-18 21:00:24 +00001522 skb_frag_size_set(&skb_shinfo(skb)->frags[j], 0);
Ajit Khapardebd46cb62009-06-26 02:51:07 +00001523 } else {
1524 put_page(page_info->page);
1525 }
Eric Dumazet9e903e02011-10-18 21:00:24 +00001526 skb_frag_size_add(&skb_shinfo(skb)->frags[j], curr_frag_len);
Eric Dumazetbdb28a92011-10-13 06:31:02 +00001527 skb->truesize += rx_frag_size;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001528 remaining -= curr_frag_len;
Sathya Perla2e588f82011-03-11 02:49:26 +00001529 index_inc(&rxcp->rxq_idx, rxq->len);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001530 memset(page_info, 0, sizeof(*page_info));
1531 }
Ajit Khapardebd46cb62009-06-26 02:51:07 +00001532 BUG_ON(j > MAX_SKB_FRAGS);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001533
Ajit Khaparde5be93b92009-07-21 12:36:19 -07001534 skb_shinfo(skb)->nr_frags = j + 1;
Sathya Perla2e588f82011-03-11 02:49:26 +00001535 skb->len = rxcp->pkt_size;
1536 skb->data_len = rxcp->pkt_size;
Ajit Khaparde5be93b92009-07-21 12:36:19 -07001537 skb->ip_summed = CHECKSUM_UNNECESSARY;
Somnath Koturaaa6dae2012-05-02 03:40:49 +00001538 skb_record_rx_queue(skb, rxo - &adapter->rx_obj[0]);
Ajit Khaparde4b972912011-04-06 18:07:43 +00001539 if (adapter->netdev->features & NETIF_F_RXHASH)
1540 skb->rxhash = rxcp->rss_hash;
Ajit Khaparde5be93b92009-07-21 12:36:19 -07001541
Jiri Pirko343e43c2011-08-25 02:50:51 +00001542 if (rxcp->vlanf)
Patrick McHardy86a9bad2013-04-19 02:04:30 +00001543 __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), rxcp->vlan_tag);
Ajit Khaparde4c5102f2011-07-12 22:10:01 -07001544
Sathya Perla10ef9ab2012-02-09 18:05:27 +00001545 napi_gro_frags(napi);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001546}
1547
Sathya Perla10ef9ab2012-02-09 18:05:27 +00001548static void be_parse_rx_compl_v1(struct be_eth_rx_compl *compl,
1549 struct be_rx_compl_info *rxcp)
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001550{
Sathya Perla2e588f82011-03-11 02:49:26 +00001551 rxcp->pkt_size =
1552 AMAP_GET_BITS(struct amap_eth_rx_compl_v1, pktsize, compl);
1553 rxcp->vlanf = AMAP_GET_BITS(struct amap_eth_rx_compl_v1, vtp, compl);
1554 rxcp->err = AMAP_GET_BITS(struct amap_eth_rx_compl_v1, err, compl);
1555 rxcp->tcpf = AMAP_GET_BITS(struct amap_eth_rx_compl_v1, tcpf, compl);
Padmanabh Ratnakar9ecb42f2011-03-15 14:57:09 -07001556 rxcp->udpf = AMAP_GET_BITS(struct amap_eth_rx_compl_v1, udpf, compl);
Sathya Perla2e588f82011-03-11 02:49:26 +00001557 rxcp->ip_csum =
1558 AMAP_GET_BITS(struct amap_eth_rx_compl_v1, ipcksm, compl);
1559 rxcp->l4_csum =
1560 AMAP_GET_BITS(struct amap_eth_rx_compl_v1, l4_cksm, compl);
1561 rxcp->ipv6 =
1562 AMAP_GET_BITS(struct amap_eth_rx_compl_v1, ip_version, compl);
1563 rxcp->rxq_idx =
1564 AMAP_GET_BITS(struct amap_eth_rx_compl_v1, fragndx, compl);
1565 rxcp->num_rcvd =
1566 AMAP_GET_BITS(struct amap_eth_rx_compl_v1, numfrags, compl);
1567 rxcp->pkt_type =
1568 AMAP_GET_BITS(struct amap_eth_rx_compl_v1, cast_enc, compl);
Ajit Khaparde4b972912011-04-06 18:07:43 +00001569 rxcp->rss_hash =
Sarveshwar Bandic2979772012-07-25 21:29:50 +00001570 AMAP_GET_BITS(struct amap_eth_rx_compl_v1, rsshash, compl);
Sathya Perla15d72182011-03-21 20:49:26 +00001571 if (rxcp->vlanf) {
1572 rxcp->vtm = AMAP_GET_BITS(struct amap_eth_rx_compl_v1, vtm,
David S. Miller3c709f82011-05-11 14:26:15 -04001573 compl);
1574 rxcp->vlan_tag = AMAP_GET_BITS(struct amap_eth_rx_compl_v1, vlan_tag,
1575 compl);
Sathya Perla15d72182011-03-21 20:49:26 +00001576 }
Sathya Perla12004ae2011-08-02 19:57:46 +00001577 rxcp->port = AMAP_GET_BITS(struct amap_eth_rx_compl_v1, port, compl);
Sathya Perla2e588f82011-03-11 02:49:26 +00001578}
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001579
Sathya Perla10ef9ab2012-02-09 18:05:27 +00001580static void be_parse_rx_compl_v0(struct be_eth_rx_compl *compl,
1581 struct be_rx_compl_info *rxcp)
Sathya Perla2e588f82011-03-11 02:49:26 +00001582{
1583 rxcp->pkt_size =
1584 AMAP_GET_BITS(struct amap_eth_rx_compl_v0, pktsize, compl);
1585 rxcp->vlanf = AMAP_GET_BITS(struct amap_eth_rx_compl_v0, vtp, compl);
1586 rxcp->err = AMAP_GET_BITS(struct amap_eth_rx_compl_v0, err, compl);
1587 rxcp->tcpf = AMAP_GET_BITS(struct amap_eth_rx_compl_v0, tcpf, compl);
Padmanabh Ratnakar9ecb42f2011-03-15 14:57:09 -07001588 rxcp->udpf = AMAP_GET_BITS(struct amap_eth_rx_compl_v0, udpf, compl);
Sathya Perla2e588f82011-03-11 02:49:26 +00001589 rxcp->ip_csum =
1590 AMAP_GET_BITS(struct amap_eth_rx_compl_v0, ipcksm, compl);
1591 rxcp->l4_csum =
1592 AMAP_GET_BITS(struct amap_eth_rx_compl_v0, l4_cksm, compl);
1593 rxcp->ipv6 =
1594 AMAP_GET_BITS(struct amap_eth_rx_compl_v0, ip_version, compl);
1595 rxcp->rxq_idx =
1596 AMAP_GET_BITS(struct amap_eth_rx_compl_v0, fragndx, compl);
1597 rxcp->num_rcvd =
1598 AMAP_GET_BITS(struct amap_eth_rx_compl_v0, numfrags, compl);
1599 rxcp->pkt_type =
1600 AMAP_GET_BITS(struct amap_eth_rx_compl_v0, cast_enc, compl);
Ajit Khaparde4b972912011-04-06 18:07:43 +00001601 rxcp->rss_hash =
Sarveshwar Bandic2979772012-07-25 21:29:50 +00001602 AMAP_GET_BITS(struct amap_eth_rx_compl_v0, rsshash, compl);
Sathya Perla15d72182011-03-21 20:49:26 +00001603 if (rxcp->vlanf) {
1604 rxcp->vtm = AMAP_GET_BITS(struct amap_eth_rx_compl_v0, vtm,
David S. Miller3c709f82011-05-11 14:26:15 -04001605 compl);
1606 rxcp->vlan_tag = AMAP_GET_BITS(struct amap_eth_rx_compl_v0, vlan_tag,
1607 compl);
Sathya Perla15d72182011-03-21 20:49:26 +00001608 }
Sathya Perla12004ae2011-08-02 19:57:46 +00001609 rxcp->port = AMAP_GET_BITS(struct amap_eth_rx_compl_v0, port, compl);
Sathya Perla2e588f82011-03-11 02:49:26 +00001610}
1611
/* Fetch and parse the next Rx completion from rxo's CQ, or return NULL
 * if none is pending. On success the CQ tail is advanced and the HW
 * descriptor's valid bit is cleared so it will not be re-processed.
 */
static struct be_rx_compl_info *be_rx_compl_get(struct be_rx_obj *rxo)
{
	struct be_eth_rx_compl *compl = queue_tail_node(&rxo->cq);
	struct be_rx_compl_info *rxcp = &rxo->rxcp;
	struct be_adapter *adapter = rxo->adapter;

	/* For checking the valid bit it is Ok to use either definition as the
	 * valid bit is at the same position in both v0 and v1 Rx compl */
	if (compl->dw[offsetof(struct amap_eth_rx_compl_v1, valid) / 32] == 0)
		return NULL;

	/* Read barrier: ensure the valid bit is observed before the rest
	 * of the DMA'ed descriptor is read.
	 */
	rmb();
	be_dws_le_to_cpu(compl, sizeof(*compl));

	/* Descriptor layout differs between BE3-native (v1) and legacy (v0) */
	if (adapter->be3_native)
		be_parse_rx_compl_v1(compl, rxcp);
	else
		be_parse_rx_compl_v0(compl, rxcp);

	if (rxcp->vlanf) {
		/* vlanf could be wrongly set in some cards.
		 * ignore if vtm is not set */
		if ((adapter->function_mode & FLEX10_MODE) && !rxcp->vtm)
			rxcp->vlanf = 0;

		/* Lancer delivers the tag in CPU order; BEx needs a swap */
		if (!lancer_chip(adapter))
			rxcp->vlan_tag = swab16(rxcp->vlan_tag);

		/* Suppress the tag if it is the port-vlan (pvid) and the
		 * vlan is not configured on this interface.
		 */
		if (adapter->pvid == (rxcp->vlan_tag & VLAN_VID_MASK) &&
		    !adapter->vlan_tag[rxcp->vlan_tag])
			rxcp->vlanf = 0;
	}

	/* As the compl has been parsed, reset it; we wont touch it again */
	compl->dw[offsetof(struct amap_eth_rx_compl_v1, valid) / 32] = 0;

	queue_tail_inc(&rxo->cq);
	return rxcp;
}
1651
Eric Dumazet1829b082011-03-01 05:48:12 +00001652static inline struct page *be_alloc_pages(u32 size, gfp_t gfp)
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001653{
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001654 u32 order = get_order(size);
Eric Dumazet1829b082011-03-01 05:48:12 +00001655
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001656 if (order > 0)
Eric Dumazet1829b082011-03-01 05:48:12 +00001657 gfp |= __GFP_COMP;
1658 return alloc_pages(gfp, order);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001659}
1660
1661/*
1662 * Allocate a page, split it to fragments of size rx_frag_size and post as
1663 * receive buffers to BE
1664 */
Eric Dumazet1829b082011-03-01 05:48:12 +00001665static void be_post_rx_frags(struct be_rx_obj *rxo, gfp_t gfp)
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001666{
Sathya Perla3abcded2010-10-03 22:12:27 -07001667 struct be_adapter *adapter = rxo->adapter;
Sathya Perla26d92f92010-01-21 22:52:08 -08001668 struct be_rx_page_info *page_info = NULL, *prev_page_info = NULL;
Sathya Perla3abcded2010-10-03 22:12:27 -07001669 struct be_queue_info *rxq = &rxo->q;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001670 struct page *pagep = NULL;
1671 struct be_eth_rx_d *rxd;
1672 u64 page_dmaaddr = 0, frag_dmaaddr;
1673 u32 posted, page_offset = 0;
1674
Sathya Perla3abcded2010-10-03 22:12:27 -07001675 page_info = &rxo->page_info_tbl[rxq->head];
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001676 for (posted = 0; posted < MAX_RX_POST && !page_info->page; posted++) {
1677 if (!pagep) {
Eric Dumazet1829b082011-03-01 05:48:12 +00001678 pagep = be_alloc_pages(adapter->big_page_size, gfp);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001679 if (unlikely(!pagep)) {
Sathya Perlaac124ff2011-07-25 19:10:14 +00001680 rx_stats(rxo)->rx_post_fail++;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001681 break;
1682 }
Ivan Vecera2b7bceb2011-02-02 08:05:12 +00001683 page_dmaaddr = dma_map_page(&adapter->pdev->dev, pagep,
1684 0, adapter->big_page_size,
1685 DMA_FROM_DEVICE);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001686 page_info->page_offset = 0;
1687 } else {
1688 get_page(pagep);
1689 page_info->page_offset = page_offset + rx_frag_size;
1690 }
1691 page_offset = page_info->page_offset;
1692 page_info->page = pagep;
FUJITA Tomonorifac6da52010-04-01 16:53:22 +00001693 dma_unmap_addr_set(page_info, bus, page_dmaaddr);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001694 frag_dmaaddr = page_dmaaddr + page_info->page_offset;
1695
1696 rxd = queue_head_node(rxq);
1697 rxd->fragpa_lo = cpu_to_le32(frag_dmaaddr & 0xFFFFFFFF);
1698 rxd->fragpa_hi = cpu_to_le32(upper_32_bits(frag_dmaaddr));
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001699
1700 /* Any space left in the current big page for another frag? */
1701 if ((page_offset + rx_frag_size + rx_frag_size) >
1702 adapter->big_page_size) {
1703 pagep = NULL;
1704 page_info->last_page_user = true;
1705 }
Sathya Perla26d92f92010-01-21 22:52:08 -08001706
1707 prev_page_info = page_info;
1708 queue_head_inc(rxq);
Sathya Perla10ef9ab2012-02-09 18:05:27 +00001709 page_info = &rxo->page_info_tbl[rxq->head];
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001710 }
1711 if (pagep)
Sathya Perla26d92f92010-01-21 22:52:08 -08001712 prev_page_info->last_page_user = true;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001713
1714 if (posted) {
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001715 atomic_add(posted, &rxq->used);
Sathya Perla8788fdc2009-07-27 22:52:03 +00001716 be_rxq_notify(adapter, rxq->id, posted);
Sathya Perlaea1dae12009-03-19 23:56:20 -07001717 } else if (atomic_read(&rxq->used) == 0) {
1718 /* Let be_worker replenish when memory is available */
Sathya Perla3abcded2010-10-03 22:12:27 -07001719 rxo->rx_post_starved = true;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001720 }
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001721}
1722
Sathya Perla5fb379e2009-06-18 00:02:59 +00001723static struct be_eth_tx_compl *be_tx_compl_get(struct be_queue_info *tx_cq)
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001724{
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001725 struct be_eth_tx_compl *txcp = queue_tail_node(tx_cq);
1726
1727 if (txcp->dw[offsetof(struct amap_eth_tx_compl, valid) / 32] == 0)
1728 return NULL;
1729
Sathya Perlaf3eb62d2010-06-29 00:11:17 +00001730 rmb();
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001731 be_dws_le_to_cpu(txcp, sizeof(*txcp));
1732
1733 txcp->dw[offsetof(struct amap_eth_tx_compl, valid) / 32] = 0;
1734
1735 queue_tail_inc(tx_cq);
1736 return txcp;
1737}
1738
Sathya Perla3c8def92011-06-12 20:01:58 +00001739static u16 be_tx_compl_process(struct be_adapter *adapter,
1740 struct be_tx_obj *txo, u16 last_index)
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001741{
Sathya Perla3c8def92011-06-12 20:01:58 +00001742 struct be_queue_info *txq = &txo->q;
Alexander Duycka73b7962009-12-02 16:48:18 +00001743 struct be_eth_wrb *wrb;
Sathya Perla3c8def92011-06-12 20:01:58 +00001744 struct sk_buff **sent_skbs = txo->sent_skb_list;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001745 struct sk_buff *sent_skb;
Sathya Perlaec43b1a2010-03-22 20:41:34 +00001746 u16 cur_index, num_wrbs = 1; /* account for hdr wrb */
1747 bool unmap_skb_hdr = true;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001748
Sathya Perlaec43b1a2010-03-22 20:41:34 +00001749 sent_skb = sent_skbs[txq->tail];
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001750 BUG_ON(!sent_skb);
Sathya Perlaec43b1a2010-03-22 20:41:34 +00001751 sent_skbs[txq->tail] = NULL;
1752
1753 /* skip header wrb */
Alexander Duycka73b7962009-12-02 16:48:18 +00001754 queue_tail_inc(txq);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001755
Sathya Perlaec43b1a2010-03-22 20:41:34 +00001756 do {
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001757 cur_index = txq->tail;
Alexander Duycka73b7962009-12-02 16:48:18 +00001758 wrb = queue_tail_node(txq);
Ivan Vecera2b7bceb2011-02-02 08:05:12 +00001759 unmap_tx_frag(&adapter->pdev->dev, wrb,
1760 (unmap_skb_hdr && skb_headlen(sent_skb)));
Sathya Perlaec43b1a2010-03-22 20:41:34 +00001761 unmap_skb_hdr = false;
1762
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001763 num_wrbs++;
1764 queue_tail_inc(txq);
Sathya Perlaec43b1a2010-03-22 20:41:34 +00001765 } while (cur_index != last_index);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001766
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001767 kfree_skb(sent_skb);
Padmanabh Ratnakar4d586b82011-05-10 05:13:57 +00001768 return num_wrbs;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001769}
1770
Sathya Perla10ef9ab2012-02-09 18:05:27 +00001771/* Return the number of events in the event queue */
1772static inline int events_get(struct be_eq_obj *eqo)
Sathya Perla859b1e42009-08-10 03:43:51 +00001773{
1774 struct be_eq_entry *eqe;
Sathya Perla10ef9ab2012-02-09 18:05:27 +00001775 int num = 0;
Sathya Perla859b1e42009-08-10 03:43:51 +00001776
Sathya Perla10ef9ab2012-02-09 18:05:27 +00001777 do {
1778 eqe = queue_tail_node(&eqo->q);
1779 if (eqe->evt == 0)
1780 break;
1781
1782 rmb();
Sathya Perla859b1e42009-08-10 03:43:51 +00001783 eqe->evt = 0;
1784 num++;
Sathya Perla10ef9ab2012-02-09 18:05:27 +00001785 queue_tail_inc(&eqo->q);
1786 } while (true);
Sathya Perla859b1e42009-08-10 03:43:51 +00001787
1788 return num;
1789}
1790
Sathya Perla10ef9ab2012-02-09 18:05:27 +00001791/* Leaves the EQ is disarmed state */
1792static void be_eq_clean(struct be_eq_obj *eqo)
1793{
1794 int num = events_get(eqo);
1795
1796 be_eq_notify(eqo->adapter, eqo->q.id, false, true, num);
1797}
1798
/* Drain rxo's completion queue and free all posted-but-unused rx buffers.
 * Used during queue teardown; on completion the CQ is left unarmed and
 * the rxq is empty with head == tail == 0.
 */
static void be_rx_cq_clean(struct be_rx_obj *rxo)
{
	struct be_rx_page_info *page_info;
	struct be_queue_info *rxq = &rxo->q;
	struct be_queue_info *rx_cq = &rxo->cq;
	struct be_rx_compl_info *rxcp;
	struct be_adapter *adapter = rxo->adapter;
	int flush_wait = 0;
	u16 tail;

	/* Consume pending rx completions.
	 * Wait for the flush completion (identified by zero num_rcvd)
	 * to arrive. Notify CQ even when there are no more CQ entries
	 * for HW to flush partially coalesced CQ entries.
	 * In Lancer, there is no need to wait for flush compl.
	 */
	for (;;) {
		rxcp = be_rx_compl_get(rxo);
		if (rxcp == NULL) {
			if (lancer_chip(adapter))
				break;

			/* Give up after ~10ms or if the HW is in error */
			if (flush_wait++ > 10 || be_hw_error(adapter)) {
				dev_warn(&adapter->pdev->dev,
					 "did not receive flush compl\n");
				break;
			}
			be_cq_notify(adapter, rx_cq->id, true, 0);
			mdelay(1);
		} else {
			be_rx_compl_discard(rxo, rxcp);
			be_cq_notify(adapter, rx_cq->id, false, 1);
			if (rxcp->num_rcvd == 0)
				break;
		}
	}

	/* After cleanup, leave the CQ in unarmed state */
	be_cq_notify(adapter, rx_cq->id, false, 0);

	/* Then free posted rx buffers that were not used */
	tail = (rxq->head + rxq->len - atomic_read(&rxq->used)) % rxq->len;
	for (; atomic_read(&rxq->used) > 0; index_inc(&tail, rxq->len)) {
		page_info = get_rx_page_info(rxo, tail);
		put_page(page_info->page);
		memset(page_info, 0, sizeof(*page_info));
	}
	BUG_ON(atomic_read(&rxq->used));
	rxq->tail = rxq->head = 0;
}
1849
Sathya Perla0ae57bb2012-02-23 18:50:14 +00001850static void be_tx_compl_clean(struct be_adapter *adapter)
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001851{
Sathya Perla0ae57bb2012-02-23 18:50:14 +00001852 struct be_tx_obj *txo;
1853 struct be_queue_info *txq;
Sathya Perlaa8e91792009-08-10 03:42:43 +00001854 struct be_eth_tx_compl *txcp;
Padmanabh Ratnakar4d586b82011-05-10 05:13:57 +00001855 u16 end_idx, cmpl = 0, timeo = 0, num_wrbs = 0;
Sathya Perlab03388d2010-02-18 00:37:17 +00001856 struct sk_buff *sent_skb;
1857 bool dummy_wrb;
Sathya Perla0ae57bb2012-02-23 18:50:14 +00001858 int i, pending_txqs;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001859
Sathya Perlaa8e91792009-08-10 03:42:43 +00001860 /* Wait for a max of 200ms for all the tx-completions to arrive. */
1861 do {
Sathya Perla0ae57bb2012-02-23 18:50:14 +00001862 pending_txqs = adapter->num_tx_qs;
1863
1864 for_all_tx_queues(adapter, txo, i) {
1865 txq = &txo->q;
1866 while ((txcp = be_tx_compl_get(&txo->cq))) {
1867 end_idx =
1868 AMAP_GET_BITS(struct amap_eth_tx_compl,
1869 wrb_index, txcp);
1870 num_wrbs += be_tx_compl_process(adapter, txo,
1871 end_idx);
1872 cmpl++;
1873 }
1874 if (cmpl) {
1875 be_cq_notify(adapter, txo->cq.id, false, cmpl);
1876 atomic_sub(num_wrbs, &txq->used);
1877 cmpl = 0;
1878 num_wrbs = 0;
1879 }
1880 if (atomic_read(&txq->used) == 0)
1881 pending_txqs--;
Sathya Perlaa8e91792009-08-10 03:42:43 +00001882 }
1883
Sathya Perla0ae57bb2012-02-23 18:50:14 +00001884 if (pending_txqs == 0 || ++timeo > 200)
Sathya Perlaa8e91792009-08-10 03:42:43 +00001885 break;
1886
1887 mdelay(1);
1888 } while (true);
1889
Sathya Perla0ae57bb2012-02-23 18:50:14 +00001890 for_all_tx_queues(adapter, txo, i) {
1891 txq = &txo->q;
1892 if (atomic_read(&txq->used))
1893 dev_err(&adapter->pdev->dev, "%d pending tx-compls\n",
1894 atomic_read(&txq->used));
Sathya Perlab03388d2010-02-18 00:37:17 +00001895
Sathya Perla0ae57bb2012-02-23 18:50:14 +00001896 /* free posted tx for which compls will never arrive */
1897 while (atomic_read(&txq->used)) {
1898 sent_skb = txo->sent_skb_list[txq->tail];
1899 end_idx = txq->tail;
1900 num_wrbs = wrb_cnt_for_skb(adapter, sent_skb,
1901 &dummy_wrb);
1902 index_adv(&end_idx, num_wrbs - 1, txq->len);
1903 num_wrbs = be_tx_compl_process(adapter, txo, end_idx);
1904 atomic_sub(num_wrbs, &txq->used);
1905 }
Sathya Perlab03388d2010-02-18 00:37:17 +00001906 }
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001907}
1908
Sathya Perla10ef9ab2012-02-09 18:05:27 +00001909static void be_evt_queues_destroy(struct be_adapter *adapter)
1910{
1911 struct be_eq_obj *eqo;
1912 int i;
1913
1914 for_all_evt_queues(adapter, eqo, i) {
Padmanabh Ratnakar19d59aa2012-07-12 03:57:21 +00001915 if (eqo->q.created) {
1916 be_eq_clean(eqo);
Sathya Perla10ef9ab2012-02-09 18:05:27 +00001917 be_cmd_q_destroy(adapter, &eqo->q, QTYPE_EQ);
Padmanabh Ratnakar19d59aa2012-07-12 03:57:21 +00001918 }
Sathya Perla10ef9ab2012-02-09 18:05:27 +00001919 be_queue_free(adapter, &eqo->q);
1920 }
1921}
1922
1923static int be_evt_queues_create(struct be_adapter *adapter)
1924{
1925 struct be_queue_info *eq;
1926 struct be_eq_obj *eqo;
1927 int i, rc;
1928
1929 adapter->num_evt_qs = num_irqs(adapter);
1930
1931 for_all_evt_queues(adapter, eqo, i) {
1932 eqo->adapter = adapter;
1933 eqo->tx_budget = BE_TX_BUDGET;
1934 eqo->idx = i;
1935 eqo->max_eqd = BE_MAX_EQD;
1936 eqo->enable_aic = true;
1937
1938 eq = &eqo->q;
1939 rc = be_queue_alloc(adapter, eq, EVNT_Q_LEN,
1940 sizeof(struct be_eq_entry));
1941 if (rc)
1942 return rc;
1943
1944 rc = be_cmd_eq_create(adapter, eq, eqo->cur_eqd);
1945 if (rc)
1946 return rc;
1947 }
Sathya Perla1cfafab2012-02-23 18:50:15 +00001948 return 0;
Sathya Perla10ef9ab2012-02-09 18:05:27 +00001949}
1950
Sathya Perla5fb379e2009-06-18 00:02:59 +00001951static void be_mcc_queues_destroy(struct be_adapter *adapter)
1952{
1953 struct be_queue_info *q;
Sathya Perla5fb379e2009-06-18 00:02:59 +00001954
Sathya Perla8788fdc2009-07-27 22:52:03 +00001955 q = &adapter->mcc_obj.q;
Sathya Perla5fb379e2009-06-18 00:02:59 +00001956 if (q->created)
Sathya Perla8788fdc2009-07-27 22:52:03 +00001957 be_cmd_q_destroy(adapter, q, QTYPE_MCCQ);
Sathya Perla5fb379e2009-06-18 00:02:59 +00001958 be_queue_free(adapter, q);
1959
Sathya Perla8788fdc2009-07-27 22:52:03 +00001960 q = &adapter->mcc_obj.cq;
Sathya Perla5fb379e2009-06-18 00:02:59 +00001961 if (q->created)
Sathya Perla8788fdc2009-07-27 22:52:03 +00001962 be_cmd_q_destroy(adapter, q, QTYPE_CQ);
Sathya Perla5fb379e2009-06-18 00:02:59 +00001963 be_queue_free(adapter, q);
1964}
1965
/* Must be called only after TX qs are created as MCC shares TX EQ */
static int be_mcc_queues_create(struct be_adapter *adapter)
{
	struct be_queue_info *q, *cq;

	/* Allocate host memory for the MCC completion queue */
	cq = &adapter->mcc_obj.cq;
	if (be_queue_alloc(adapter, cq, MCC_CQ_LEN,
			sizeof(struct be_mcc_compl)))
		goto err;

	/* Use the default EQ for MCC completions */
	if (be_cmd_cq_create(adapter, cq, &mcc_eqo(adapter)->q, true, 0))
		goto mcc_cq_free;

	/* Allocate the MCC queue itself and create it in hw, bound to
	 * the CQ created above
	 */
	q = &adapter->mcc_obj.q;
	if (be_queue_alloc(adapter, q, MCC_Q_LEN, sizeof(struct be_mcc_wrb)))
		goto mcc_cq_destroy;

	if (be_cmd_mccq_create(adapter, q, cq))
		goto mcc_q_free;

	return 0;

	/* Unwind in reverse order of creation */
mcc_q_free:
	be_queue_free(adapter, q);
mcc_cq_destroy:
	be_cmd_q_destroy(adapter, cq, QTYPE_CQ);
mcc_cq_free:
	be_queue_free(adapter, cq);
err:
	return -1;
}
1998
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001999static void be_tx_queues_destroy(struct be_adapter *adapter)
2000{
2001 struct be_queue_info *q;
Sathya Perla3c8def92011-06-12 20:01:58 +00002002 struct be_tx_obj *txo;
2003 u8 i;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002004
Sathya Perla3c8def92011-06-12 20:01:58 +00002005 for_all_tx_queues(adapter, txo, i) {
2006 q = &txo->q;
2007 if (q->created)
2008 be_cmd_q_destroy(adapter, q, QTYPE_TXQ);
2009 be_queue_free(adapter, q);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002010
Sathya Perla3c8def92011-06-12 20:01:58 +00002011 q = &txo->cq;
2012 if (q->created)
2013 be_cmd_q_destroy(adapter, q, QTYPE_CQ);
2014 be_queue_free(adapter, q);
2015 }
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002016}
2017
Sathya Perladafc0fe2011-10-24 02:45:02 +00002018static int be_num_txqs_want(struct be_adapter *adapter)
2019{
Padmanabh Ratnakarabb93952012-10-20 06:01:41 +00002020 if ((!lancer_chip(adapter) && sriov_want(adapter)) ||
2021 be_is_mc(adapter) ||
2022 (!lancer_chip(adapter) && !be_physfn(adapter)) ||
Sathya Perlaca34fe32012-11-06 17:48:56 +00002023 BE2_chip(adapter))
Sathya Perladafc0fe2011-10-24 02:45:02 +00002024 return 1;
2025 else
Padmanabh Ratnakarabb93952012-10-20 06:01:41 +00002026 return adapter->max_tx_queues;
Sathya Perladafc0fe2011-10-24 02:45:02 +00002027}
2028
static int be_tx_cqs_create(struct be_adapter *adapter)
{
	struct be_queue_info *cq, *eq;
	int status;
	struct be_tx_obj *txo;
	u8 i;

	adapter->num_tx_qs = be_num_txqs_want(adapter);
	/* Publish the real TX queue count to the net stack;
	 * netif_set_real_num_tx_queues() must run under rtnl_lock
	 */
	if (adapter->num_tx_qs != MAX_TX_QS) {
		rtnl_lock();
		netif_set_real_num_tx_queues(adapter->netdev,
			adapter->num_tx_qs);
		rtnl_unlock();
	}

	for_all_tx_queues(adapter, txo, i) {
		cq = &txo->cq;
		status = be_queue_alloc(adapter, cq, TX_CQ_LEN,
					sizeof(struct be_eth_tx_compl));
		if (status)
			return status;

		/* If num_evt_qs is less than num_tx_qs, then more than
		 * one txq share an eq
		 */
		eq = &adapter->eq_obj[i % adapter->num_evt_qs].q;
		status = be_cmd_cq_create(adapter, cq, eq, false, 3);
		if (status)
			return status;
	}
	return 0;
}
2061
Sathya Perla10ef9ab2012-02-09 18:05:27 +00002062static int be_tx_qs_create(struct be_adapter *adapter)
2063{
2064 struct be_tx_obj *txo;
2065 int i, status;
2066
2067 for_all_tx_queues(adapter, txo, i) {
2068 status = be_queue_alloc(adapter, &txo->q, TX_Q_LEN,
2069 sizeof(struct be_eth_wrb));
2070 if (status)
2071 return status;
2072
Vasundhara Volam94d73aa2013-04-21 23:28:14 +00002073 status = be_cmd_txq_create(adapter, txo);
Sathya Perla10ef9ab2012-02-09 18:05:27 +00002074 if (status)
2075 return status;
2076 }
2077
Sathya Perlad3791422012-09-28 04:39:44 +00002078 dev_info(&adapter->pdev->dev, "created %d TX queue(s)\n",
2079 adapter->num_tx_qs);
Sathya Perla10ef9ab2012-02-09 18:05:27 +00002080 return 0;
2081}
2082
2083static void be_rx_cqs_destroy(struct be_adapter *adapter)
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002084{
2085 struct be_queue_info *q;
Sathya Perla3abcded2010-10-03 22:12:27 -07002086 struct be_rx_obj *rxo;
2087 int i;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002088
Sathya Perla3abcded2010-10-03 22:12:27 -07002089 for_all_rx_queues(adapter, rxo, i) {
Sathya Perla3abcded2010-10-03 22:12:27 -07002090 q = &rxo->cq;
2091 if (q->created)
2092 be_cmd_q_destroy(adapter, q, QTYPE_CQ);
2093 be_queue_free(adapter, q);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002094 }
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002095}
2096
static int be_rx_cqs_create(struct be_adapter *adapter)
{
	struct be_queue_info *eq, *cq;
	struct be_rx_obj *rxo;
	int rc, i;

	/* We'll create as many RSS rings as there are irqs.
	 * But when there's only one irq there's no use creating RSS rings
	 */
	adapter->num_rx_qs = (num_irqs(adapter) > 1) ?
				num_irqs(adapter) + 1 : 1;
	if (adapter->num_rx_qs != MAX_RX_QS) {
		/* netif_set_real_num_rx_queues() must run under rtnl_lock */
		rtnl_lock();
		netif_set_real_num_rx_queues(adapter->netdev,
				adapter->num_rx_qs);
		rtnl_unlock();
	}

	adapter->big_page_size = (1 << get_order(rx_frag_size)) * PAGE_SIZE;
	for_all_rx_queues(adapter, rxo, i) {
		rxo->adapter = adapter;
		cq = &rxo->cq;
		rc = be_queue_alloc(adapter, cq, RX_CQ_LEN,
				sizeof(struct be_eth_rx_compl));
		if (rc)
			return rc;

		/* RX CQs are distributed round-robin across the EQs */
		eq = &adapter->eq_obj[i % adapter->num_evt_qs].q;
		rc = be_cmd_cq_create(adapter, cq, eq, false, 3);
		if (rc)
			return rc;
	}

	dev_info(&adapter->pdev->dev,
		"created %d RSS queue(s) and 1 default RX queue\n",
		adapter->num_rx_qs - 1);
	return 0;
}
2135
static irqreturn_t be_intx(int irq, void *dev)
{
	struct be_eq_obj *eqo = dev;
	struct be_adapter *adapter = eqo->adapter;
	int num_evts = 0;

	/* IRQ is not expected when NAPI is scheduled as the EQ
	 * will not be armed.
	 * But, this can happen on Lancer INTx where it takes
	 * a while to de-assert INTx or in BE2 where occasionally
	 * an interrupt may be raised even when EQ is unarmed.
	 * If NAPI is already scheduled, then counting & notifying
	 * events will orphan them.
	 */
	if (napi_schedule_prep(&eqo->napi)) {
		num_evts = events_get(eqo);
		__napi_schedule(&eqo->napi);
		if (num_evts)
			eqo->spurious_intr = 0;
	}
	be_eq_notify(adapter, eqo->q.id, false, true, num_evts);

	/* Return IRQ_HANDLED only for the first spurious intr
	 * after a valid intr to stop the kernel from branding
	 * this irq as a bad one!
	 */
	if (num_evts || eqo->spurious_intr++ == 0)
		return IRQ_HANDLED;
	else
		return IRQ_NONE;
}
2167
Sathya Perla10ef9ab2012-02-09 18:05:27 +00002168static irqreturn_t be_msix(int irq, void *dev)
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002169{
Sathya Perla10ef9ab2012-02-09 18:05:27 +00002170 struct be_eq_obj *eqo = dev;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002171
Sathya Perla0b545a62012-11-23 00:27:18 +00002172 be_eq_notify(eqo->adapter, eqo->q.id, false, true, 0);
2173 napi_schedule(&eqo->napi);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002174 return IRQ_HANDLED;
2175}
2176
Sathya Perla2e588f82011-03-11 02:49:26 +00002177static inline bool do_gro(struct be_rx_compl_info *rxcp)
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002178{
Sathya Perla2e588f82011-03-11 02:49:26 +00002179 return (rxcp->tcpf && !rxcp->err) ? true : false;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002180}
2181
/* Reap up to @budget RX completions on @rxo; returns the number actually
 * processed. Acks the reaped completions to hw and refills the RX ring
 * when it runs low.
 */
static int be_process_rx(struct be_rx_obj *rxo, struct napi_struct *napi,
			int budget)
{
	struct be_adapter *adapter = rxo->adapter;
	struct be_queue_info *rx_cq = &rxo->cq;
	struct be_rx_compl_info *rxcp;
	u32 work_done;

	for (work_done = 0; work_done < budget; work_done++) {
		rxcp = be_rx_compl_get(rxo);
		if (!rxcp)
			break;

		/* Is it a flush compl that has no data */
		if (unlikely(rxcp->num_rcvd == 0))
			goto loop_continue;

		/* Discard compl with partial DMA Lancer B0 */
		if (unlikely(!rxcp->pkt_size)) {
			be_rx_compl_discard(rxo, rxcp);
			goto loop_continue;
		}

		/* On BE drop pkts that arrive due to imperfect filtering in
		 * promiscuous mode on some skews
		 */
		if (unlikely(rxcp->port != adapter->port_num &&
				!lancer_chip(adapter))) {
			be_rx_compl_discard(rxo, rxcp);
			goto loop_continue;
		}

		if (do_gro(rxcp))
			be_rx_compl_process_gro(rxo, napi, rxcp);
		else
			be_rx_compl_process(rxo, rxcp);
loop_continue:
		/* Stats are updated for discarded completions too */
		be_rx_stats_update(rxo, rxcp);
	}

	if (work_done) {
		be_cq_notify(adapter, rx_cq->id, true, work_done);

		/* Replenish RX frags when the ring falls below watermark */
		if (atomic_read(&rxo->q.used) < RX_FRAGS_REFILL_WM)
			be_post_rx_frags(rxo, GFP_ATOMIC);
	}

	return work_done;
}
2231
/* Reap up to @budget TX completions on @txo. Returns true when fewer than
 * @budget completions were found, i.e. TX work is done for this poll.
 */
static bool be_process_tx(struct be_adapter *adapter, struct be_tx_obj *txo,
			int budget, int idx)
{
	struct be_eth_tx_compl *txcp;
	int num_wrbs = 0, work_done;

	for (work_done = 0; work_done < budget; work_done++) {
		txcp = be_tx_compl_get(&txo->cq);
		if (!txcp)
			break;
		num_wrbs += be_tx_compl_process(adapter, txo,
				AMAP_GET_BITS(struct amap_eth_tx_compl,
					wrb_index, txcp));
	}

	if (work_done) {
		be_cq_notify(adapter, txo->cq.id, true, work_done);
		atomic_sub(num_wrbs, &txo->q.used);

		/* As Tx wrbs have been freed up, wake up netdev queue
		 * if it was stopped due to lack of tx wrbs. */
		if (__netif_subqueue_stopped(adapter->netdev, idx) &&
			atomic_read(&txo->q.used) < txo->q.len / 2) {
			netif_wake_subqueue(adapter->netdev, idx);
		}

		u64_stats_update_begin(&tx_stats(txo)->sync_compl);
		tx_stats(txo)->tx_compl += work_done;
		u64_stats_update_end(&tx_stats(txo)->sync_compl);
	}
	return (work_done < budget); /* Done */
}
Sathya Perla3c8def92011-06-12 20:01:58 +00002264
/* NAPI poll handler: services all TX and RX queues mapped to this EQ */
int be_poll(struct napi_struct *napi, int budget)
{
	struct be_eq_obj *eqo = container_of(napi, struct be_eq_obj, napi);
	struct be_adapter *adapter = eqo->adapter;
	int max_work = 0, work, i, num_evts;
	bool tx_done;

	/* Count the events behind this poll; they are acked to the hw
	 * via be_eq_notify() below
	 */
	num_evts = events_get(eqo);

	/* Process all TXQs serviced by this EQ */
	for (i = eqo->idx; i < adapter->num_tx_qs; i += adapter->num_evt_qs) {
		tx_done = be_process_tx(adapter, &adapter->tx_obj[i],
					eqo->tx_budget, i);
		if (!tx_done)
			max_work = budget;	/* TX unfinished: keep polling */
	}

	/* This loop will iterate twice for EQ0 in which
	 * completions of the last RXQ (default one) are also processed
	 * For other EQs the loop iterates only once
	 */
	for (i = eqo->idx; i < adapter->num_rx_qs; i += adapter->num_evt_qs) {
		work = be_process_rx(&adapter->rx_obj[i], napi, budget);
		max_work = max(work, max_work);
	}

	if (is_mcc_eqo(eqo))
		be_process_mcc(adapter);

	if (max_work < budget) {
		/* All done: exit polling mode and re-arm the EQ */
		napi_complete(napi);
		be_eq_notify(adapter, eqo->q.id, true, false, num_evts);
	} else {
		/* As we'll continue in polling mode, count and clear events */
		be_eq_notify(adapter, eqo->q.id, false, false, num_evts);
	}
	return max_work;
}
2303
Padmanabh Ratnakarf67ef7b2012-07-12 03:57:09 +00002304void be_detect_error(struct be_adapter *adapter)
Ajit Khaparde7c185272010-07-29 06:16:33 +00002305{
Padmanabh Ratnakare1cfb672011-11-03 01:50:08 +00002306 u32 ue_lo = 0, ue_hi = 0, ue_lo_mask = 0, ue_hi_mask = 0;
2307 u32 sliport_status = 0, sliport_err1 = 0, sliport_err2 = 0;
Ajit Khaparde7c185272010-07-29 06:16:33 +00002308 u32 i;
2309
Sathya Perlad23e9462012-12-17 19:38:51 +00002310 if (be_hw_error(adapter))
Sathya Perla72f02482011-11-10 19:17:58 +00002311 return;
2312
Padmanabh Ratnakare1cfb672011-11-03 01:50:08 +00002313 if (lancer_chip(adapter)) {
2314 sliport_status = ioread32(adapter->db + SLIPORT_STATUS_OFFSET);
2315 if (sliport_status & SLIPORT_STATUS_ERR_MASK) {
2316 sliport_err1 = ioread32(adapter->db +
2317 SLIPORT_ERROR1_OFFSET);
2318 sliport_err2 = ioread32(adapter->db +
2319 SLIPORT_ERROR2_OFFSET);
2320 }
2321 } else {
2322 pci_read_config_dword(adapter->pdev,
2323 PCICFG_UE_STATUS_LOW, &ue_lo);
2324 pci_read_config_dword(adapter->pdev,
2325 PCICFG_UE_STATUS_HIGH, &ue_hi);
2326 pci_read_config_dword(adapter->pdev,
2327 PCICFG_UE_STATUS_LOW_MASK, &ue_lo_mask);
2328 pci_read_config_dword(adapter->pdev,
2329 PCICFG_UE_STATUS_HI_MASK, &ue_hi_mask);
Ajit Khaparde7c185272010-07-29 06:16:33 +00002330
Padmanabh Ratnakarf67ef7b2012-07-12 03:57:09 +00002331 ue_lo = (ue_lo & ~ue_lo_mask);
2332 ue_hi = (ue_hi & ~ue_hi_mask);
Padmanabh Ratnakare1cfb672011-11-03 01:50:08 +00002333 }
Ajit Khaparde7c185272010-07-29 06:16:33 +00002334
Ajit Khaparde1451ae62012-10-08 18:18:21 +00002335 /* On certain platforms BE hardware can indicate spurious UEs.
2336 * Allow the h/w to stop working completely in case of a real UE.
2337 * Hence not setting the hw_error for UE detection.
2338 */
2339 if (sliport_status & SLIPORT_STATUS_ERR_MASK) {
Padmanabh Ratnakarf67ef7b2012-07-12 03:57:09 +00002340 adapter->hw_error = true;
Sathya Perla434b3642011-11-10 19:17:59 +00002341 dev_err(&adapter->pdev->dev,
Padmanabh Ratnakarf67ef7b2012-07-12 03:57:09 +00002342 "Error detected in the card\n");
2343 }
2344
2345 if (sliport_status & SLIPORT_STATUS_ERR_MASK) {
2346 dev_err(&adapter->pdev->dev,
2347 "ERR: sliport status 0x%x\n", sliport_status);
2348 dev_err(&adapter->pdev->dev,
2349 "ERR: sliport error1 0x%x\n", sliport_err1);
2350 dev_err(&adapter->pdev->dev,
2351 "ERR: sliport error2 0x%x\n", sliport_err2);
Ajit Khaparded053de92010-09-03 06:23:30 +00002352 }
2353
Padmanabh Ratnakare1cfb672011-11-03 01:50:08 +00002354 if (ue_lo) {
2355 for (i = 0; ue_lo; ue_lo >>= 1, i++) {
2356 if (ue_lo & 1)
Ajit Khaparde7c185272010-07-29 06:16:33 +00002357 dev_err(&adapter->pdev->dev,
2358 "UE: %s bit set\n", ue_status_low_desc[i]);
2359 }
2360 }
Padmanabh Ratnakarf67ef7b2012-07-12 03:57:09 +00002361
Padmanabh Ratnakare1cfb672011-11-03 01:50:08 +00002362 if (ue_hi) {
2363 for (i = 0; ue_hi; ue_hi >>= 1, i++) {
2364 if (ue_hi & 1)
Ajit Khaparde7c185272010-07-29 06:16:33 +00002365 dev_err(&adapter->pdev->dev,
2366 "UE: %s bit set\n", ue_status_hi_desc[i]);
2367 }
2368 }
2369
2370}
2371
Sathya Perla8d56ff12009-11-22 22:02:26 +00002372static void be_msix_disable(struct be_adapter *adapter)
2373{
Sathya Perlaac6a0c42011-03-21 20:49:25 +00002374 if (msix_enabled(adapter)) {
Sathya Perla8d56ff12009-11-22 22:02:26 +00002375 pci_disable_msix(adapter->pdev);
Sathya Perlaac6a0c42011-03-21 20:49:25 +00002376 adapter->num_msix_vec = 0;
Sathya Perla3abcded2010-10-03 22:12:27 -07002377 }
2378}
2379
Sathya Perla10ef9ab2012-02-09 18:05:27 +00002380static uint be_num_rss_want(struct be_adapter *adapter)
2381{
Yuval Mintz30e80b52012-07-01 03:19:00 +00002382 u32 num = 0;
Padmanabh Ratnakarabb93952012-10-20 06:01:41 +00002383
Sathya Perla10ef9ab2012-02-09 18:05:27 +00002384 if ((adapter->function_caps & BE_FUNCTION_CAPS_RSS) &&
Padmanabh Ratnakarabb93952012-10-20 06:01:41 +00002385 (lancer_chip(adapter) ||
2386 (!sriov_want(adapter) && be_physfn(adapter)))) {
2387 num = adapter->max_rss_queues;
Yuval Mintz30e80b52012-07-01 03:19:00 +00002388 num = min_t(u32, num, (u32)netif_get_num_default_rss_queues());
2389 }
2390 return num;
Sathya Perla10ef9ab2012-02-09 18:05:27 +00002391}
2392
/* Enable as many MSI-x vectors as needed; on RoCE-capable adapters the
 * vectors are shared between the NIC and RoCE. Returns 0 on success or
 * when falling back to INTx on a PF; returns the pci_enable_msix() error
 * on a VF (VFs have no INTx fallback).
 */
static int be_msix_enable(struct be_adapter *adapter)
{
#define BE_MIN_MSIX_VECTORS 1
	int i, status, num_vec, num_roce_vec = 0;
	struct device *dev = &adapter->pdev->dev;

	/* If RSS queues are not used, need a vec for default RX Q */
	num_vec = min(be_num_rss_want(adapter), num_online_cpus());
	if (be_roce_supported(adapter)) {
		num_roce_vec = min_t(u32, MAX_ROCE_MSIX_VECTORS,
					(num_online_cpus() + 1));
		num_roce_vec = min(num_roce_vec, MAX_ROCE_EQS);
		num_vec += num_roce_vec;
		num_vec = min(num_vec, MAX_MSIX_VECTORS);
	}
	num_vec = max(num_vec, BE_MIN_MSIX_VECTORS);

	for (i = 0; i < num_vec; i++)
		adapter->msix_entries[i].entry = i;

	/* pci_enable_msix() returns a positive count of available vectors
	 * when fewer than requested could be enabled; retry with that count
	 */
	status = pci_enable_msix(adapter->pdev, adapter->msix_entries, num_vec);
	if (status == 0) {
		goto done;
	} else if (status >= BE_MIN_MSIX_VECTORS) {
		num_vec = status;
		status = pci_enable_msix(adapter->pdev, adapter->msix_entries,
					num_vec);
		if (!status)
			goto done;
	}

	dev_warn(dev, "MSIx enable failed\n");
	/* INTx is not supported in VFs, so fail probe if enable_msix fails */
	if (!be_physfn(adapter))
		return status;
	return 0;
done:
	/* Split the enabled vectors between the NIC and RoCE */
	if (be_roce_supported(adapter)) {
		if (num_vec > num_roce_vec) {
			adapter->num_msix_vec = num_vec - num_roce_vec;
			adapter->num_msix_roce_vec =
				num_vec - adapter->num_msix_vec;
		} else {
			adapter->num_msix_vec = num_vec;
			adapter->num_msix_roce_vec = 0;
		}
	} else
		adapter->num_msix_vec = num_vec;
	dev_info(dev, "enabled %d MSI-x vector(s)\n", adapter->num_msix_vec);
	return 0;
}
2444
Sathya Perlafe6d2a32010-11-21 23:25:50 +00002445static inline int be_msix_vec_get(struct be_adapter *adapter,
Sathya Perla10ef9ab2012-02-09 18:05:27 +00002446 struct be_eq_obj *eqo)
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002447{
Sathya Perla10ef9ab2012-02-09 18:05:27 +00002448 return adapter->msix_entries[eqo->idx].vector;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002449}
2450
/* Request one IRQ per event queue. On failure, free the IRQs already
 * requested (in reverse order) and disable MSI-x altogether.
 */
static int be_msix_register(struct be_adapter *adapter)
{
	struct net_device *netdev = adapter->netdev;
	struct be_eq_obj *eqo;
	int status, i, vec;

	for_all_evt_queues(adapter, eqo, i) {
		sprintf(eqo->desc, "%s-q%d", netdev->name, i);
		vec = be_msix_vec_get(adapter, eqo);
		status = request_irq(vec, be_msix, 0, eqo->desc, eqo);
		if (status)
			goto err_msix;
	}

	return 0;
err_msix:
	/* Unwind: i currently indexes the EQ whose request_irq failed */
	for (i--, eqo = &adapter->eq_obj[i]; i >= 0; i--, eqo--)
		free_irq(be_msix_vec_get(adapter, eqo), eqo);
	dev_warn(&adapter->pdev->dev, "MSIX Request IRQ failed - err %d\n",
		status);
	be_msix_disable(adapter);
	return status;
}
2474
2475static int be_irq_register(struct be_adapter *adapter)
2476{
2477 struct net_device *netdev = adapter->netdev;
2478 int status;
2479
Sathya Perlaac6a0c42011-03-21 20:49:25 +00002480 if (msix_enabled(adapter)) {
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002481 status = be_msix_register(adapter);
2482 if (status == 0)
2483 goto done;
Sarveshwar Bandiba343c72010-03-31 02:56:12 +00002484 /* INTx is not supported for VF */
2485 if (!be_physfn(adapter))
2486 return status;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002487 }
2488
Sathya Perlae49cc342012-11-27 19:50:02 +00002489 /* INTx: only the first EQ is used */
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002490 netdev->irq = adapter->pdev->irq;
2491 status = request_irq(netdev->irq, be_intx, IRQF_SHARED, netdev->name,
Sathya Perlae49cc342012-11-27 19:50:02 +00002492 &adapter->eq_obj[0]);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002493 if (status) {
2494 dev_err(&adapter->pdev->dev,
2495 "INTx request IRQ failed - err %d\n", status);
2496 return status;
2497 }
2498done:
2499 adapter->isr_registered = true;
2500 return 0;
2501}
2502
2503static void be_irq_unregister(struct be_adapter *adapter)
2504{
2505 struct net_device *netdev = adapter->netdev;
Sathya Perla10ef9ab2012-02-09 18:05:27 +00002506 struct be_eq_obj *eqo;
Sathya Perla3abcded2010-10-03 22:12:27 -07002507 int i;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002508
2509 if (!adapter->isr_registered)
2510 return;
2511
2512 /* INTx */
Sathya Perlaac6a0c42011-03-21 20:49:25 +00002513 if (!msix_enabled(adapter)) {
Sathya Perlae49cc342012-11-27 19:50:02 +00002514 free_irq(netdev->irq, &adapter->eq_obj[0]);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002515 goto done;
2516 }
2517
2518 /* MSIx */
Sathya Perla10ef9ab2012-02-09 18:05:27 +00002519 for_all_evt_queues(adapter, eqo, i)
2520 free_irq(be_msix_vec_get(adapter, eqo), eqo);
Sathya Perla3abcded2010-10-03 22:12:27 -07002521
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002522done:
2523 adapter->isr_registered = false;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002524}
2525
Sathya Perla10ef9ab2012-02-09 18:05:27 +00002526static void be_rx_qs_destroy(struct be_adapter *adapter)
Sathya Perla482c9e72011-06-29 23:33:17 +00002527{
2528 struct be_queue_info *q;
2529 struct be_rx_obj *rxo;
2530 int i;
2531
2532 for_all_rx_queues(adapter, rxo, i) {
2533 q = &rxo->q;
2534 if (q->created) {
2535 be_cmd_rxq_destroy(adapter, q);
Sathya Perla10ef9ab2012-02-09 18:05:27 +00002536 be_rx_cq_clean(rxo);
Sathya Perla482c9e72011-06-29 23:33:17 +00002537 }
Sathya Perla10ef9ab2012-02-09 18:05:27 +00002538 be_queue_free(adapter, q);
Sathya Perla482c9e72011-06-29 23:33:17 +00002539 }
2540}
2541
/* ndo_stop handler: quiesces the interface. The teardown order matters:
 * NAPI off -> async MCC off -> drain TX -> stop TX -> destroy RX qs ->
 * quiesce EQs -> unregister IRQs.
 */
static int be_close(struct net_device *netdev)
{
	struct be_adapter *adapter = netdev_priv(netdev);
	struct be_eq_obj *eqo;
	int i;

	be_roce_dev_close(adapter);

	/* Stop NAPI first so no further completions are processed */
	if (adapter->flags & BE_FLAGS_NAPI_ENABLED) {
		for_all_evt_queues(adapter, eqo, i)
			napi_disable(&eqo->napi);
		adapter->flags &= ~BE_FLAGS_NAPI_ENABLED;
	}

	be_async_mcc_disable(adapter);

	/* Wait for all pending tx completions to arrive so that
	 * all tx skbs are freed.
	 */
	be_tx_compl_clean(adapter);
	netif_tx_disable(netdev);

	be_rx_qs_destroy(adapter);

	/* Let in-flight interrupts finish, then drain each EQ */
	for_all_evt_queues(adapter, eqo, i) {
		if (msix_enabled(adapter))
			synchronize_irq(be_msix_vec_get(adapter, eqo));
		else
			synchronize_irq(netdev->irq);
		be_eq_clean(eqo);
	}

	be_irq_unregister(adapter);

	return 0;
}
2578
/* Allocates and creates the RX rings (default + RSS rings), programs the
 * RSS indirection table/flags when multiple rings exist, and posts the
 * initial RX buffers.
 */
static int be_rx_qs_create(struct be_adapter *adapter)
{
	struct be_rx_obj *rxo;
	int rc, i, j;
	u8 rsstable[128];	/* RSS indirection table: 128 slots */

	for_all_rx_queues(adapter, rxo, i) {
		rc = be_queue_alloc(adapter, &rxo->q, RX_Q_LEN,
				sizeof(struct be_eth_rx_d));
		if (rc)
			return rc;
	}

	/* The FW would like the default RXQ to be created first */
	rxo = default_rxo(adapter);
	rc = be_cmd_rxq_create(adapter, &rxo->q, rxo->cq.id, rx_frag_size,
			adapter->if_handle, false, &rxo->rss_id);
	if (rc)
		return rc;

	for_all_rss_queues(adapter, rxo, i) {
		rc = be_cmd_rxq_create(adapter, &rxo->q, rxo->cq.id,
				rx_frag_size, adapter->if_handle,
				true, &rxo->rss_id);
		if (rc)
			return rc;
	}

	if (be_multi_rxq(adapter)) {
		/* Stripe the rss_ids of all RSS rings across the table */
		for (j = 0; j < 128; j += adapter->num_rx_qs - 1) {
			for_all_rss_queues(adapter, rxo, i) {
				if ((j + i) >= 128)
					break;
				rsstable[j + i] = rxo->rss_id;
			}
		}
		adapter->rss_flags = RSS_ENABLE_TCP_IPV4 | RSS_ENABLE_IPV4 |
					RSS_ENABLE_TCP_IPV6 | RSS_ENABLE_IPV6;

		/* UDP RSS is enabled only on non-BEx chips */
		if (!BEx_chip(adapter))
			adapter->rss_flags |= RSS_ENABLE_UDP_IPV4 |
						RSS_ENABLE_UDP_IPV6;

		rc = be_cmd_rss_config(adapter, rsstable, adapter->rss_flags,
					128);
		if (rc) {
			adapter->rss_flags = 0;
			return rc;
		}
	}

	/* First time posting */
	for_all_rx_queues(adapter, rxo, i)
		be_post_rx_frags(rxo, GFP_KERNEL);
	return 0;
}
2635
/* ndo_open handler: bring the interface up.
 * Creates RX queues, registers IRQs, arms all TX/RX completion queues,
 * enables NAPI, arms the event queues and finally starts the TX queues.
 * Returns 0 on success or -EIO; on failure be_close() unwinds whatever
 * was brought up so far.
 */
static int be_open(struct net_device *netdev)
{
	struct be_adapter *adapter = netdev_priv(netdev);
	struct be_eq_obj *eqo;
	struct be_rx_obj *rxo;
	struct be_tx_obj *txo;
	u8 link_status;
	int status, i;

	status = be_rx_qs_create(adapter);
	if (status)
		goto err;

	status = be_irq_register(adapter);
	if (status)
		goto err;

	/* Arm the CQs so completions generate events on the EQs */
	for_all_rx_queues(adapter, rxo, i)
		be_cq_notify(adapter, rxo->cq.id, true, 0);

	for_all_tx_queues(adapter, txo, i)
		be_cq_notify(adapter, txo->cq.id, true, 0);

	be_async_mcc_enable(adapter);

	/* Enable NAPI before arming the EQs so early events get serviced */
	for_all_evt_queues(adapter, eqo, i) {
		napi_enable(&eqo->napi);
		be_eq_notify(adapter, eqo->q.id, true, false, 0);
	}
	adapter->flags |= BE_FLAGS_NAPI_ENABLED;

	/* Link query failure is non-fatal: carrier state is just not updated */
	status = be_cmd_link_status_query(adapter, NULL, &link_status, 0);
	if (!status)
		be_link_status_update(adapter, link_status);

	netif_tx_start_all_queues(netdev);
	be_roce_dev_open(adapter);
	return 0;
err:
	be_close(adapter->netdev);
	return -EIO;
}
2678
Ajit Khaparde71d8d1b2009-12-03 06:16:59 +00002679static int be_setup_wol(struct be_adapter *adapter, bool enable)
2680{
2681 struct be_dma_mem cmd;
2682 int status = 0;
2683 u8 mac[ETH_ALEN];
2684
2685 memset(mac, 0, ETH_ALEN);
2686
2687 cmd.size = sizeof(struct be_cmd_req_acpi_wol_magic_config);
Ivan Vecera2b7bceb2011-02-02 08:05:12 +00002688 cmd.va = dma_alloc_coherent(&adapter->pdev->dev, cmd.size, &cmd.dma,
Joe Perches1f9061d22013-03-15 07:23:58 +00002689 GFP_KERNEL | __GFP_ZERO);
Ajit Khaparde71d8d1b2009-12-03 06:16:59 +00002690 if (cmd.va == NULL)
2691 return -1;
Ajit Khaparde71d8d1b2009-12-03 06:16:59 +00002692
2693 if (enable) {
2694 status = pci_write_config_dword(adapter->pdev,
2695 PCICFG_PM_CONTROL_OFFSET, PCICFG_PM_CONTROL_MASK);
2696 if (status) {
2697 dev_err(&adapter->pdev->dev,
Frans Pop2381a552010-03-24 07:57:36 +00002698 "Could not enable Wake-on-lan\n");
Ivan Vecera2b7bceb2011-02-02 08:05:12 +00002699 dma_free_coherent(&adapter->pdev->dev, cmd.size, cmd.va,
2700 cmd.dma);
Ajit Khaparde71d8d1b2009-12-03 06:16:59 +00002701 return status;
2702 }
2703 status = be_cmd_enable_magic_wol(adapter,
2704 adapter->netdev->dev_addr, &cmd);
2705 pci_enable_wake(adapter->pdev, PCI_D3hot, 1);
2706 pci_enable_wake(adapter->pdev, PCI_D3cold, 1);
2707 } else {
2708 status = be_cmd_enable_magic_wol(adapter, mac, &cmd);
2709 pci_enable_wake(adapter->pdev, PCI_D3hot, 0);
2710 pci_enable_wake(adapter->pdev, PCI_D3cold, 0);
2711 }
2712
Ivan Vecera2b7bceb2011-02-02 08:05:12 +00002713 dma_free_coherent(&adapter->pdev->dev, cmd.size, cmd.va, cmd.dma);
Ajit Khaparde71d8d1b2009-12-03 06:16:59 +00002714 return status;
2715}
2716
Ajit Khaparde6d87f5c2010-08-25 00:32:33 +00002717/*
2718 * Generate a seed MAC address from the PF MAC Address using jhash.
2719 * MAC Address for VFs are assigned incrementally starting from the seed.
2720 * These addresses are programmed in the ASIC by the PF and the VF driver
2721 * queries for the MAC address during its probe.
2722 */
/* Program a MAC address for every VF, derived from the PF seed MAC
 * (see be_vf_eth_addr_generate above) and incremented per VF.
 * On Lancer the MAC is pushed via set_mac_list; on BE3 via pmac_add.
 * NOTE(review): on per-VF failure the loop continues and only logs an
 * error, so the returned status reflects the LAST VF only — earlier
 * failures can be masked by a later success.
 */
static int be_vf_eth_addr_config(struct be_adapter *adapter)
{
	u32 vf;
	int status = 0;
	u8 mac[ETH_ALEN];
	struct be_vf_cfg *vf_cfg;

	be_vf_eth_addr_generate(adapter, mac);

	for_all_vfs(adapter, vf_cfg, vf) {
		if (lancer_chip(adapter)) {
			status = be_cmd_set_mac_list(adapter, mac, 1, vf + 1);
		} else {
			status = be_cmd_pmac_add(adapter, mac,
						 vf_cfg->if_handle,
						 &vf_cfg->pmac_id, vf + 1);
		}

		if (status)
			dev_err(&adapter->pdev->dev,
				"Mac address assignment failed for VF %d\n", vf);
		else
			memcpy(vf_cfg->mac_addr, mac, ETH_ALEN);

		/* Next VF gets seed MAC + 1 in the last octet */
		mac[5] += 1;
	}
	return status;
}
2751
Sathya Perla4c876612013-02-03 20:30:11 +00002752static int be_vfs_mac_query(struct be_adapter *adapter)
2753{
2754 int status, vf;
2755 u8 mac[ETH_ALEN];
2756 struct be_vf_cfg *vf_cfg;
2757 bool active;
2758
2759 for_all_vfs(adapter, vf_cfg, vf) {
2760 be_cmd_get_mac_from_list(adapter, mac, &active,
2761 &vf_cfg->pmac_id, 0);
2762
2763 status = be_cmd_mac_addr_query(adapter, mac, false,
2764 vf_cfg->if_handle, 0);
2765 if (status)
2766 return status;
2767 memcpy(vf_cfg->mac_addr, mac, ETH_ALEN);
2768 }
2769 return 0;
2770}
2771
/* Undo be_vf_setup(): disable PCI SR-IOV and release per-VF FW resources.
 * If any VF is still assigned to a guest VM, SR-IOV is deliberately left
 * enabled (disabling it would yank the device from under the guest) and
 * only the host-side bookkeeping is freed.
 */
static void be_vf_clear(struct be_adapter *adapter)
{
	struct be_vf_cfg *vf_cfg;
	u32 vf;

	if (be_find_vfs(adapter, ASSIGNED)) {
		dev_warn(&adapter->pdev->dev,
			 "VFs are assigned to VMs: not disabling VFs\n");
		goto done;
	}

	pci_disable_sriov(adapter->pdev);

	for_all_vfs(adapter, vf_cfg, vf) {
		/* Release the VF MAC the same way it was programmed:
		 * mac-list on Lancer, pmac entry on BE3.
		 */
		if (lancer_chip(adapter))
			be_cmd_set_mac_list(adapter, NULL, 0, vf + 1);
		else
			be_cmd_pmac_del(adapter, vf_cfg->if_handle,
					vf_cfg->pmac_id, vf + 1);

		be_cmd_if_destroy(adapter, vf_cfg->if_handle, vf + 1);
	}
done:
	kfree(adapter->vf_cfg);
	adapter->num_vfs = 0;
}
2798
/* Tear down everything be_setup() created: worker task, VFs, extra
 * unicast MACs, the FW interface, all queues, and MSI-X vectors.
 * Also used as the error-unwind path from be_setup(), so it must
 * tolerate a partially initialized adapter.
 */
static int be_clear(struct be_adapter *adapter)
{
	int i = 1;	/* pmac_id[0] is the primary MAC — skip it below */

	if (adapter->flags & BE_FLAGS_WORKER_SCHEDULED) {
		cancel_delayed_work_sync(&adapter->work);
		adapter->flags &= ~BE_FLAGS_WORKER_SCHEDULED;
	}

	if (sriov_enabled(adapter))
		be_vf_clear(adapter);

	/* Delete only the additional unicast MAC entries */
	for (; adapter->uc_macs > 0; adapter->uc_macs--, i++)
		be_cmd_pmac_del(adapter, adapter->if_handle,
				adapter->pmac_id[i], 0);

	be_cmd_if_destroy(adapter, adapter->if_handle, 0);

	/* Destroy queues in reverse order of creation */
	be_mcc_queues_destroy(adapter);
	be_rx_cqs_destroy(adapter);
	be_tx_queues_destroy(adapter);
	be_evt_queues_destroy(adapter);

	kfree(adapter->pmac_id);
	adapter->pmac_id = NULL;

	be_msix_disable(adapter);
	return 0;
}
2828
Sathya Perla4c876612013-02-03 20:30:11 +00002829static int be_vfs_if_create(struct be_adapter *adapter)
Padmanabh Ratnakarabb93952012-10-20 06:01:41 +00002830{
Sathya Perla4c876612013-02-03 20:30:11 +00002831 struct be_vf_cfg *vf_cfg;
2832 u32 cap_flags, en_flags, vf;
Padmanabh Ratnakarabb93952012-10-20 06:01:41 +00002833 int status;
2834
Sathya Perla4c876612013-02-03 20:30:11 +00002835 cap_flags = BE_IF_FLAGS_UNTAGGED | BE_IF_FLAGS_BROADCAST |
2836 BE_IF_FLAGS_MULTICAST;
Padmanabh Ratnakarabb93952012-10-20 06:01:41 +00002837
Sathya Perla4c876612013-02-03 20:30:11 +00002838 for_all_vfs(adapter, vf_cfg, vf) {
2839 if (!BE3_chip(adapter))
Vasundhara Volama05f99d2013-04-21 23:28:17 +00002840 be_cmd_get_profile_config(adapter, &cap_flags,
2841 NULL, vf + 1);
Sathya Perla4c876612013-02-03 20:30:11 +00002842
2843 /* If a FW profile exists, then cap_flags are updated */
2844 en_flags = cap_flags & (BE_IF_FLAGS_UNTAGGED |
2845 BE_IF_FLAGS_BROADCAST | BE_IF_FLAGS_MULTICAST);
2846 status = be_cmd_if_create(adapter, cap_flags, en_flags,
2847 &vf_cfg->if_handle, vf + 1);
2848 if (status)
2849 goto err;
2850 }
2851err:
2852 return status;
Padmanabh Ratnakarabb93952012-10-20 06:01:41 +00002853}
2854
Sathya Perla39f1d942012-05-08 19:41:24 +00002855static int be_vf_setup_init(struct be_adapter *adapter)
Sathya Perla30128032011-11-10 19:17:57 +00002856{
Sathya Perla11ac75e2011-12-13 00:58:50 +00002857 struct be_vf_cfg *vf_cfg;
Sathya Perla30128032011-11-10 19:17:57 +00002858 int vf;
2859
Sathya Perla39f1d942012-05-08 19:41:24 +00002860 adapter->vf_cfg = kcalloc(adapter->num_vfs, sizeof(*vf_cfg),
2861 GFP_KERNEL);
2862 if (!adapter->vf_cfg)
2863 return -ENOMEM;
2864
Sathya Perla11ac75e2011-12-13 00:58:50 +00002865 for_all_vfs(adapter, vf_cfg, vf) {
2866 vf_cfg->if_handle = -1;
2867 vf_cfg->pmac_id = -1;
Sathya Perla30128032011-11-10 19:17:57 +00002868 }
Sathya Perla39f1d942012-05-08 19:41:24 +00002869 return 0;
Sathya Perla30128032011-11-10 19:17:57 +00002870}
2871
/* Bring up SR-IOV: decide the VF count, create (or re-discover) per-VF
 * FW interfaces and MACs, configure QoS/VLAN state, and finally enable
 * SR-IOV in PCI config space.
 * Two modes:
 *  - old_vfs != 0: VFs survived a PF driver reload; re-query existing
 *    if_handles and MACs instead of creating them, and do NOT call
 *    pci_enable_sriov() again.
 *  - fresh setup: honor the num_vfs module param (capped at the device
 *    limit), create interfaces and assign MACs, then enable SR-IOV.
 * Returns 0 on success; on error, be_vf_clear() unwinds partial state.
 */
static int be_vf_setup(struct be_adapter *adapter)
{
	struct be_vf_cfg *vf_cfg;
	u16 def_vlan, lnk_speed;
	int status, old_vfs, vf;
	struct device *dev = &adapter->pdev->dev;

	old_vfs = be_find_vfs(adapter, ENABLED);
	if (old_vfs) {
		dev_info(dev, "%d VFs are already enabled\n", old_vfs);
		if (old_vfs != num_vfs)
			dev_warn(dev, "Ignoring num_vfs=%d setting\n", num_vfs);
		adapter->num_vfs = old_vfs;
	} else {
		if (num_vfs > adapter->dev_num_vfs)
			dev_info(dev, "Device supports %d VFs and not %d\n",
				 adapter->dev_num_vfs, num_vfs);
		adapter->num_vfs = min_t(u16, num_vfs, adapter->dev_num_vfs);
		if (!adapter->num_vfs)
			return 0;	/* SR-IOV not requested: nothing to do */
	}

	status = be_vf_setup_init(adapter);
	if (status)
		goto err;

	/* Interfaces: re-discover if VFs pre-exist, else create them */
	if (old_vfs) {
		for_all_vfs(adapter, vf_cfg, vf) {
			status = be_cmd_get_if_id(adapter, vf_cfg, vf);
			if (status)
				goto err;
		}
	} else {
		status = be_vfs_if_create(adapter);
		if (status)
			goto err;
	}

	/* MACs: query existing ones, else generate and program new ones */
	if (old_vfs) {
		status = be_vfs_mac_query(adapter);
		if (status)
			goto err;
	} else {
		status = be_vf_eth_addr_config(adapter);
		if (status)
			goto err;
	}

	for_all_vfs(adapter, vf_cfg, vf) {
		/* BE3 FW, by default, caps VF TX-rate to 100mbps.
		 * Allow full available bandwidth
		 */
		if (BE3_chip(adapter) && !old_vfs)
			be_cmd_set_qos(adapter, 1000, vf+1);

		/* Non-fatal: tx_rate just stays unset on failure */
		status = be_cmd_link_status_query(adapter, &lnk_speed,
						  NULL, vf + 1);
		if (!status)
			vf_cfg->tx_rate = lnk_speed;

		status = be_cmd_get_hsw_config(adapter, &def_vlan,
					       vf + 1, vf_cfg->if_handle);
		if (status)
			goto err;
		vf_cfg->def_vid = def_vlan;

		be_cmd_enable_vf(adapter, vf + 1);
	}

	/* Enable SR-IOV last, after all per-VF FW state is in place */
	if (!old_vfs) {
		status = pci_enable_sriov(adapter->pdev, adapter->num_vfs);
		if (status) {
			dev_err(dev, "SRIOV enable failed\n");
			adapter->num_vfs = 0;
			goto err;
		}
	}
	return 0;
err:
	dev_err(dev, "VF setup failed\n");
	be_vf_clear(adapter);
	return status;
}
2955
Sathya Perla30128032011-11-10 19:17:57 +00002956static void be_setup_init(struct be_adapter *adapter)
2957{
2958 adapter->vlan_prio_bmap = 0xff;
Ajit Khaparde42f11cf2012-04-21 18:53:22 +00002959 adapter->phy.link_speed = -1;
Sathya Perla30128032011-11-10 19:17:57 +00002960 adapter->if_handle = -1;
2961 adapter->be3_native = false;
2962 adapter->promiscuous = false;
Padmanabh Ratnakarf25b1192012-10-20 06:02:52 +00002963 if (be_physfn(adapter))
2964 adapter->cmd_privileges = MAX_PRIVILEGES;
2965 else
2966 adapter->cmd_privileges = MIN_PRIVILEGES;
Sathya Perla30128032011-11-10 19:17:57 +00002967}
2968
/* Determine the MAC address to use for @if_handle and whether it is
 * already "active" (programmed in FW, so no pmac_add is needed).
 * Resolution order:
 *  1. If the netdev already has a permanent MAC (e.g. set by the user or
 *     a previous setup), reuse it; it is active only for BE3 VFs.
 *  2. Lancer: pull the MAC from the FW MAC list, and if active, query
 *     the concrete address for that pmac_id.
 *  3. BE3 PF: query the factory-permanent MAC (not yet active).
 *  4. BE3 VF: query the soft MAC the PF assigned (already active).
 * Returns 0 or a FW error; *active_mac and *pmac_id are outputs.
 */
static int be_get_mac_addr(struct be_adapter *adapter, u8 *mac, u32 if_handle,
			   bool *active_mac, u32 *pmac_id)
{
	int status = 0;

	if (!is_zero_ether_addr(adapter->netdev->perm_addr)) {
		memcpy(mac, adapter->netdev->dev_addr, ETH_ALEN);
		if (!lancer_chip(adapter) && !be_physfn(adapter))
			*active_mac = true;
		else
			*active_mac = false;

		return status;
	}

	if (lancer_chip(adapter)) {
		status = be_cmd_get_mac_from_list(adapter, mac,
						  active_mac, pmac_id, 0);
		if (*active_mac) {
			status = be_cmd_mac_addr_query(adapter, mac, false,
						       if_handle, *pmac_id);
		}
	} else if (be_physfn(adapter)) {
		/* For BE3, for PF get permanent MAC */
		status = be_cmd_mac_addr_query(adapter, mac, true, 0, 0);
		*active_mac = false;
	} else {
		/* For BE3, for VF get soft MAC assigned by PF*/
		status = be_cmd_mac_addr_query(adapter, mac, false,
					       if_handle, 0);
		*active_mac = true;
	}
	return status;
}
3003
/* Populate adapter->max_* resource limits and if_cap_flags.
 * On Lancer/Skyhawk the limits come from the FW function profile
 * (be_cmd_get_func_config), clamped to driver maxima; on BE2/BE3 they
 * fall back to fixed chip defaults (with the TXQ count optionally taken
 * from the BE3 PF profile). Also reads the SR-IOV TotalVFs value from
 * PCI config space into adapter->dev_num_vfs.
 */
static void be_get_resources(struct be_adapter *adapter)
{
	u16 dev_num_vfs;
	int pos, status;
	bool profile_present = false;
	u16 txq_count = 0;

	if (!BEx_chip(adapter)) {
		status = be_cmd_get_func_config(adapter);
		if (!status)
			profile_present = true;
	} else if (BE3_chip(adapter) && be_physfn(adapter)) {
		be_cmd_get_profile_config(adapter, NULL, &txq_count, 0);
	}

	if (profile_present) {
		/* Sanity fixes for Lancer */
		adapter->max_pmac_cnt = min_t(u16, adapter->max_pmac_cnt,
					      BE_UC_PMAC_COUNT);
		adapter->max_vlans = min_t(u16, adapter->max_vlans,
					   BE_NUM_VLANS_SUPPORTED);
		adapter->max_mcast_mac = min_t(u16, adapter->max_mcast_mac,
					       BE_MAX_MC);
		adapter->max_tx_queues = min_t(u16, adapter->max_tx_queues,
					       MAX_TX_QS);
		adapter->max_rss_queues = min_t(u16, adapter->max_rss_queues,
						BE3_MAX_RSS_QS);
		adapter->max_event_queues = min_t(u16,
						  adapter->max_event_queues,
						  BE3_MAX_RSS_QS);

		/* Reserve one RXQ as the non-RSS default queue */
		if (adapter->max_rss_queues &&
		    adapter->max_rss_queues == adapter->max_rx_queues)
			adapter->max_rss_queues -= 1;

		/* Each RSS queue needs its own event queue */
		if (adapter->max_event_queues < adapter->max_rss_queues)
			adapter->max_rss_queues = adapter->max_event_queues;

	} else {
		if (be_physfn(adapter))
			adapter->max_pmac_cnt = BE_UC_PMAC_COUNT;
		else
			adapter->max_pmac_cnt = BE_VF_UC_PMAC_COUNT;

		/* In Flex10 mode the VLAN table is shared 8 ways */
		if (adapter->function_mode & FLEX10_MODE)
			adapter->max_vlans = BE_NUM_VLANS_SUPPORTED/8;
		else
			adapter->max_vlans = BE_NUM_VLANS_SUPPORTED;

		adapter->max_mcast_mac = BE_MAX_MC;
		adapter->max_tx_queues = txq_count ? txq_count : MAX_TX_QS;
		adapter->max_tx_queues = min_t(u16, adapter->max_tx_queues,
					       MAX_TX_QS);
		adapter->max_rss_queues = (adapter->be3_native) ?
					   BE3_MAX_RSS_QS : BE2_MAX_RSS_QS;
		adapter->max_event_queues = BE3_MAX_RSS_QS;

		adapter->if_cap_flags = BE_IF_FLAGS_UNTAGGED |
					BE_IF_FLAGS_BROADCAST |
					BE_IF_FLAGS_MULTICAST |
					BE_IF_FLAGS_PASS_L3L4_ERRORS |
					BE_IF_FLAGS_MCAST_PROMISCUOUS |
					BE_IF_FLAGS_VLAN_PROMISCUOUS |
					BE_IF_FLAGS_PROMISCUOUS;

		if (adapter->function_caps & BE_FUNCTION_CAPS_RSS)
			adapter->if_cap_flags |= BE_IF_FLAGS_RSS;
	}

	/* How many VFs the device can expose, per the SR-IOV capability */
	pos = pci_find_ext_capability(adapter->pdev, PCI_EXT_CAP_ID_SRIOV);
	if (pos) {
		pci_read_config_word(adapter->pdev, pos + PCI_SRIOV_TOTAL_VF,
				     &dev_num_vfs);
		if (BE3_chip(adapter))
			dev_num_vfs = min_t(u16, dev_num_vfs, MAX_VFS);
		adapter->dev_num_vfs = dev_num_vfs;
	}
}
3082
/* Query per-function resource limits from FW.
 * Reads the FW config (port number, function mode/caps, ASIC rev),
 * derives the adapter->max_* limits via be_get_resources(), and
 * allocates the pmac_id table with one extra slot reserved for the
 * primary MAC. Returns 0, a FW status, or -ENOMEM.
 */
static int be_get_config(struct be_adapter *adapter)
{
	int status;

	status = be_cmd_query_fw_cfg(adapter, &adapter->port_num,
				     &adapter->function_mode,
				     &adapter->function_caps,
				     &adapter->asic_rev);
	if (status)
		goto err;

	be_get_resources(adapter);

	/* primary mac needs 1 pmac entry */
	adapter->pmac_id = kcalloc(adapter->max_pmac_cnt + 1,
				   sizeof(u32), GFP_KERNEL);
	if (!adapter->pmac_id) {
		status = -ENOMEM;
		goto err;
	}

err:
	return status;
}
3108
/* One-time adapter initialization (probe/resume path).
 * Sequence: reset soft state, query FW config/limits, enable MSI-X,
 * create EQ/CQ/MCC queues, create the FW interface, resolve and program
 * the primary MAC, create TX queues, restore VLAN/RX-mode/flow-control
 * settings, optionally bring up SR-IOV, and start the periodic worker.
 * On any failure be_clear() unwinds everything created so far.
 */
static int be_setup(struct be_adapter *adapter)
{
	struct device *dev = &adapter->pdev->dev;
	u32 en_flags;
	u32 tx_fc, rx_fc;
	int status;
	u8 mac[ETH_ALEN];
	bool active_mac;

	be_setup_init(adapter);

	if (!lancer_chip(adapter))
		be_cmd_req_native_mode(adapter);

	status = be_get_config(adapter);
	if (status)
		goto err;

	status = be_msix_enable(adapter);
	if (status)
		goto err;

	status = be_evt_queues_create(adapter);
	if (status)
		goto err;

	status = be_tx_cqs_create(adapter);
	if (status)
		goto err;

	status = be_rx_cqs_create(adapter);
	if (status)
		goto err;

	/* MCC queues must exist before further mailbox-less FW commands */
	status = be_mcc_queues_create(adapter);
	if (status)
		goto err;

	be_cmd_get_fn_privileges(adapter, &adapter->cmd_privileges, 0);
	/* In UMC mode FW does not return right privileges.
	 * Override with correct privilege equivalent to PF.
	 */
	if (be_is_mc(adapter))
		adapter->cmd_privileges = MAX_PRIVILEGES;

	en_flags = BE_IF_FLAGS_UNTAGGED | BE_IF_FLAGS_BROADCAST |
		   BE_IF_FLAGS_MULTICAST | BE_IF_FLAGS_PASS_L3L4_ERRORS;

	if (adapter->function_caps & BE_FUNCTION_CAPS_RSS)
		en_flags |= BE_IF_FLAGS_RSS;

	/* Never enable flags the function is not capable of */
	en_flags = en_flags & adapter->if_cap_flags;

	status = be_cmd_if_create(adapter, adapter->if_cap_flags, en_flags,
				  &adapter->if_handle, 0);
	if (status != 0)
		goto err;

	memset(mac, 0, ETH_ALEN);
	active_mac = false;
	status = be_get_mac_addr(adapter, mac, adapter->if_handle,
				 &active_mac, &adapter->pmac_id[0]);
	if (status != 0)
		goto err;

	/* Program the MAC only if FW does not already have it active */
	if (!active_mac) {
		status = be_cmd_pmac_add(adapter, mac, adapter->if_handle,
					 &adapter->pmac_id[0], 0);
		if (status != 0)
			goto err;
	}

	if (is_zero_ether_addr(adapter->netdev->dev_addr)) {
		memcpy(adapter->netdev->dev_addr, mac, ETH_ALEN);
		memcpy(adapter->netdev->perm_addr, mac, ETH_ALEN);
	}

	status = be_tx_qs_create(adapter);
	if (status)
		goto err;

	be_cmd_get_fw_ver(adapter, adapter->fw_ver, NULL);

	/* Re-program VLAN filters that survived a reset/reload */
	if (adapter->vlans_added)
		be_vid_config(adapter);

	be_set_rx_mode(adapter->netdev);

	be_cmd_get_flow_control(adapter, &tx_fc, &rx_fc);

	/* Sync FW flow-control with the driver's desired settings */
	if (rx_fc != adapter->rx_fc || tx_fc != adapter->tx_fc)
		be_cmd_set_flow_control(adapter, adapter->tx_fc,
					adapter->rx_fc);

	if (be_physfn(adapter)) {
		if (adapter->dev_num_vfs)
			be_vf_setup(adapter);
		else
			dev_warn(dev, "device doesn't support SRIOV\n");
	}

	status = be_cmd_get_phy_info(adapter);
	if (!status && be_pause_supported(adapter))
		adapter->phy.fc_autoneg = 1;

	schedule_delayed_work(&adapter->work, msecs_to_jiffies(1000));
	adapter->flags |= BE_FLAGS_WORKER_SCHEDULED;
	return 0;
err:
	be_clear(adapter);
	return status;
}
3221
Ivan Vecera66268732011-12-08 01:31:21 +00003222#ifdef CONFIG_NET_POLL_CONTROLLER
3223static void be_netpoll(struct net_device *netdev)
3224{
3225 struct be_adapter *adapter = netdev_priv(netdev);
Sathya Perla10ef9ab2012-02-09 18:05:27 +00003226 struct be_eq_obj *eqo;
Ivan Vecera66268732011-12-08 01:31:21 +00003227 int i;
3228
Sathya Perlae49cc342012-11-27 19:50:02 +00003229 for_all_evt_queues(adapter, eqo, i) {
3230 be_eq_notify(eqo->adapter, eqo->q.id, false, true, 0);
3231 napi_schedule(&eqo->napi);
3232 }
Sathya Perla10ef9ab2012-02-09 18:05:27 +00003233
3234 return;
Ivan Vecera66268732011-12-08 01:31:21 +00003235}
3236#endif
3237
#define FW_FILE_HDR_SIGN "ServerEngines Corp. "
/* 32-byte marker that identifies a flash-section directory inside a UFI
 * firmware image; compared wholesale via memcmp in get_fsec_info().
 */
char flash_cookie[2][16] = {"*** SE FLAS", "H DIRECTORY *** "};
3240
/* Decide whether the redboot (boot code) image needs to be reflashed.
 * Compares the CRC currently stored in flash against the last 4 bytes
 * of the new image in the firmware file. Returns true only when they
 * differ; if the flash CRC cannot be read, the update is skipped
 * (returns false) and an error is logged.
 */
static bool be_flash_redboot(struct be_adapter *adapter,
			const u8 *p, u32 img_start, int image_size,
			int hdr_size)
{
	u32 crc_offset;
	u8 flashed_crc[4];
	int status;

	/* The image's CRC occupies its final 4 bytes within the file */
	crc_offset = hdr_size + img_start + image_size - 4;

	p += crc_offset;

	status = be_cmd_get_flash_crc(adapter, flashed_crc,
			(image_size - 4));
	if (status) {
		dev_err(&adapter->pdev->dev,
		"could not get crc from flash, not flashing redboot\n");
		return false;
	}

	/*update redboot only if crc does not match*/
	if (!memcmp(flashed_crc, p, 4))
		return false;
	else
		return true;
}
3267
Sathya Perla306f1342011-08-02 19:57:45 +00003268static bool phy_flashing_required(struct be_adapter *adapter)
3269{
Ajit Khaparde42f11cf2012-04-21 18:53:22 +00003270 return (adapter->phy.phy_type == TN_8022 &&
3271 adapter->phy.interface_type == PHY_TYPE_BASET_10GB);
Sathya Perla306f1342011-08-02 19:57:45 +00003272}
3273
Padmanabh Ratnakarc165541e2012-04-25 01:47:15 +00003274static bool is_comp_in_ufi(struct be_adapter *adapter,
3275 struct flash_section_info *fsec, int type)
3276{
3277 int i = 0, img_type = 0;
3278 struct flash_section_info_g2 *fsec_g2 = NULL;
3279
Sathya Perlaca34fe32012-11-06 17:48:56 +00003280 if (BE2_chip(adapter))
Padmanabh Ratnakarc165541e2012-04-25 01:47:15 +00003281 fsec_g2 = (struct flash_section_info_g2 *)fsec;
3282
3283 for (i = 0; i < MAX_FLASH_COMP; i++) {
3284 if (fsec_g2)
3285 img_type = le32_to_cpu(fsec_g2->fsec_entry[i].type);
3286 else
3287 img_type = le32_to_cpu(fsec->fsec_entry[i].type);
3288
3289 if (img_type == type)
3290 return true;
3291 }
3292 return false;
3293
3294}
3295
3296struct flash_section_info *get_fsec_info(struct be_adapter *adapter,
3297 int header_size,
3298 const struct firmware *fw)
3299{
3300 struct flash_section_info *fsec = NULL;
3301 const u8 *p = fw->data;
3302
3303 p += header_size;
3304 while (p < (fw->data + fw->size)) {
3305 fsec = (struct flash_section_info *)p;
3306 if (!memcmp(flash_cookie, fsec->cookie, sizeof(flash_cookie)))
3307 return fsec;
3308 p += 32;
3309 }
3310 return NULL;
3311}
3312
/* Write one firmware component of @img_size bytes to flash in 32KB
 * chunks. Every chunk except the last uses a SAVE operation (staged by
 * FW); the final chunk issues the actual FLASH operation that commits
 * the image. For PHY firmware, an ILLEGAL_IOCTL_REQ response aborts the
 * write but is not treated as an error (the loop breaks and 0 is
 * returned). Returns 0 or the FW error status.
 */
static int be_flash(struct be_adapter *adapter, const u8 *img,
		struct be_dma_mem *flash_cmd, int optype, int img_size)
{
	u32 total_bytes = 0, flash_op, num_bytes = 0;
	int status = 0;
	struct be_cmd_write_flashrom *req = flash_cmd->va;

	total_bytes = img_size;
	while (total_bytes) {
		/* FW accepts at most 32KB of payload per command */
		num_bytes = min_t(u32, 32*1024, total_bytes);

		total_bytes -= num_bytes;

		/* Last chunk commits (FLASH); earlier chunks stage (SAVE) */
		if (!total_bytes) {
			if (optype == OPTYPE_PHY_FW)
				flash_op = FLASHROM_OPER_PHY_FLASH;
			else
				flash_op = FLASHROM_OPER_FLASH;
		} else {
			if (optype == OPTYPE_PHY_FW)
				flash_op = FLASHROM_OPER_PHY_SAVE;
			else
				flash_op = FLASHROM_OPER_SAVE;
		}

		memcpy(req->data_buf, img, num_bytes);
		img += num_bytes;
		status = be_cmd_write_flashrom(adapter, flash_cmd, optype,
				flash_op, num_bytes);
		if (status) {
			if (status == ILLEGAL_IOCTL_REQ &&
			    optype == OPTYPE_PHY_FW)
				break;
			dev_err(&adapter->pdev->dev,
				"cmd to write to flash rom failed.\n");
			return status;
		}
	}
	return 0;
}
3353
Vasundhara Volam0ad31572013-04-21 23:28:16 +00003354/* For BE2, BE3 and BE3-R */
Sathya Perlaca34fe32012-11-06 17:48:56 +00003355static int be_flash_BEx(struct be_adapter *adapter,
Padmanabh Ratnakarc165541e2012-04-25 01:47:15 +00003356 const struct firmware *fw,
3357 struct be_dma_mem *flash_cmd,
3358 int num_of_images)
Ajit Khaparde3f0d4562010-02-09 01:30:35 +00003359
Ajit Khaparde84517482009-09-04 03:12:16 +00003360{
Ajit Khaparde3f0d4562010-02-09 01:30:35 +00003361 int status = 0, i, filehdr_size = 0;
Padmanabh Ratnakarc165541e2012-04-25 01:47:15 +00003362 int img_hdrs_size = (num_of_images * sizeof(struct image_hdr));
Ajit Khaparde84517482009-09-04 03:12:16 +00003363 const u8 *p = fw->data;
Joe Perches215faf92010-12-21 02:16:10 -08003364 const struct flash_comp *pflashcomp;
Padmanabh Ratnakar773a2d72012-10-20 06:04:16 +00003365 int num_comp, redboot;
Padmanabh Ratnakarc165541e2012-04-25 01:47:15 +00003366 struct flash_section_info *fsec = NULL;
Ajit Khaparde84517482009-09-04 03:12:16 +00003367
Padmanabh Ratnakarc165541e2012-04-25 01:47:15 +00003368 struct flash_comp gen3_flash_types[] = {
3369 { FLASH_iSCSI_PRIMARY_IMAGE_START_g3, OPTYPE_ISCSI_ACTIVE,
3370 FLASH_IMAGE_MAX_SIZE_g3, IMAGE_FIRMWARE_iSCSI},
3371 { FLASH_REDBOOT_START_g3, OPTYPE_REDBOOT,
3372 FLASH_REDBOOT_IMAGE_MAX_SIZE_g3, IMAGE_BOOT_CODE},
3373 { FLASH_iSCSI_BIOS_START_g3, OPTYPE_BIOS,
3374 FLASH_BIOS_IMAGE_MAX_SIZE_g3, IMAGE_OPTION_ROM_ISCSI},
3375 { FLASH_PXE_BIOS_START_g3, OPTYPE_PXE_BIOS,
3376 FLASH_BIOS_IMAGE_MAX_SIZE_g3, IMAGE_OPTION_ROM_PXE},
3377 { FLASH_FCoE_BIOS_START_g3, OPTYPE_FCOE_BIOS,
3378 FLASH_BIOS_IMAGE_MAX_SIZE_g3, IMAGE_OPTION_ROM_FCoE},
3379 { FLASH_iSCSI_BACKUP_IMAGE_START_g3, OPTYPE_ISCSI_BACKUP,
3380 FLASH_IMAGE_MAX_SIZE_g3, IMAGE_FIRMWARE_BACKUP_iSCSI},
3381 { FLASH_FCoE_PRIMARY_IMAGE_START_g3, OPTYPE_FCOE_FW_ACTIVE,
3382 FLASH_IMAGE_MAX_SIZE_g3, IMAGE_FIRMWARE_FCoE},
3383 { FLASH_FCoE_BACKUP_IMAGE_START_g3, OPTYPE_FCOE_FW_BACKUP,
3384 FLASH_IMAGE_MAX_SIZE_g3, IMAGE_FIRMWARE_BACKUP_FCoE},
3385 { FLASH_NCSI_START_g3, OPTYPE_NCSI_FW,
3386 FLASH_NCSI_IMAGE_MAX_SIZE_g3, IMAGE_NCSI},
3387 { FLASH_PHY_FW_START_g3, OPTYPE_PHY_FW,
3388 FLASH_PHY_FW_IMAGE_MAX_SIZE_g3, IMAGE_FIRMWARE_PHY}
Ajit Khaparde3f0d4562010-02-09 01:30:35 +00003389 };
Padmanabh Ratnakarc165541e2012-04-25 01:47:15 +00003390
3391 struct flash_comp gen2_flash_types[] = {
3392 { FLASH_iSCSI_PRIMARY_IMAGE_START_g2, OPTYPE_ISCSI_ACTIVE,
3393 FLASH_IMAGE_MAX_SIZE_g2, IMAGE_FIRMWARE_iSCSI},
3394 { FLASH_REDBOOT_START_g2, OPTYPE_REDBOOT,
3395 FLASH_REDBOOT_IMAGE_MAX_SIZE_g2, IMAGE_BOOT_CODE},
3396 { FLASH_iSCSI_BIOS_START_g2, OPTYPE_BIOS,
3397 FLASH_BIOS_IMAGE_MAX_SIZE_g2, IMAGE_OPTION_ROM_ISCSI},
3398 { FLASH_PXE_BIOS_START_g2, OPTYPE_PXE_BIOS,
3399 FLASH_BIOS_IMAGE_MAX_SIZE_g2, IMAGE_OPTION_ROM_PXE},
3400 { FLASH_FCoE_BIOS_START_g2, OPTYPE_FCOE_BIOS,
3401 FLASH_BIOS_IMAGE_MAX_SIZE_g2, IMAGE_OPTION_ROM_FCoE},
3402 { FLASH_iSCSI_BACKUP_IMAGE_START_g2, OPTYPE_ISCSI_BACKUP,
3403 FLASH_IMAGE_MAX_SIZE_g2, IMAGE_FIRMWARE_BACKUP_iSCSI},
3404 { FLASH_FCoE_PRIMARY_IMAGE_START_g2, OPTYPE_FCOE_FW_ACTIVE,
3405 FLASH_IMAGE_MAX_SIZE_g2, IMAGE_FIRMWARE_FCoE},
3406 { FLASH_FCoE_BACKUP_IMAGE_START_g2, OPTYPE_FCOE_FW_BACKUP,
3407 FLASH_IMAGE_MAX_SIZE_g2, IMAGE_FIRMWARE_BACKUP_FCoE}
Ajit Khaparde3f0d4562010-02-09 01:30:35 +00003408 };
3409
Sathya Perlaca34fe32012-11-06 17:48:56 +00003410 if (BE3_chip(adapter)) {
Ajit Khaparde3f0d4562010-02-09 01:30:35 +00003411 pflashcomp = gen3_flash_types;
3412 filehdr_size = sizeof(struct flash_file_hdr_g3);
Joe Perches215faf92010-12-21 02:16:10 -08003413 num_comp = ARRAY_SIZE(gen3_flash_types);
Ajit Khaparde3f0d4562010-02-09 01:30:35 +00003414 } else {
3415 pflashcomp = gen2_flash_types;
3416 filehdr_size = sizeof(struct flash_file_hdr_g2);
Joe Perches215faf92010-12-21 02:16:10 -08003417 num_comp = ARRAY_SIZE(gen2_flash_types);
Ajit Khaparde84517482009-09-04 03:12:16 +00003418 }
Sathya Perlaca34fe32012-11-06 17:48:56 +00003419
Padmanabh Ratnakarc165541e2012-04-25 01:47:15 +00003420 /* Get flash section info*/
3421 fsec = get_fsec_info(adapter, filehdr_size + img_hdrs_size, fw);
3422 if (!fsec) {
3423 dev_err(&adapter->pdev->dev,
3424 "Invalid Cookie. UFI corrupted ?\n");
3425 return -1;
3426 }
Sarveshwar Bandi9fe96932010-03-02 22:37:28 +00003427 for (i = 0; i < num_comp; i++) {
Padmanabh Ratnakarc165541e2012-04-25 01:47:15 +00003428 if (!is_comp_in_ufi(adapter, fsec, pflashcomp[i].img_type))
Sarveshwar Bandi9fe96932010-03-02 22:37:28 +00003429 continue;
Padmanabh Ratnakarc165541e2012-04-25 01:47:15 +00003430
3431 if ((pflashcomp[i].optype == OPTYPE_NCSI_FW) &&
3432 memcmp(adapter->fw_ver, "3.102.148.0", 11) < 0)
3433 continue;
3434
Padmanabh Ratnakar773a2d72012-10-20 06:04:16 +00003435 if (pflashcomp[i].optype == OPTYPE_PHY_FW &&
3436 !phy_flashing_required(adapter))
3437 continue;
3438
3439 if (pflashcomp[i].optype == OPTYPE_REDBOOT) {
3440 redboot = be_flash_redboot(adapter, fw->data,
3441 pflashcomp[i].offset, pflashcomp[i].size,
3442 filehdr_size + img_hdrs_size);
3443 if (!redboot)
Sathya Perla306f1342011-08-02 19:57:45 +00003444 continue;
3445 }
Padmanabh Ratnakarc165541e2012-04-25 01:47:15 +00003446
Ajit Khaparde3f0d4562010-02-09 01:30:35 +00003447 p = fw->data;
Padmanabh Ratnakarc165541e2012-04-25 01:47:15 +00003448 p += filehdr_size + pflashcomp[i].offset + img_hdrs_size;
Sathya Perla306f1342011-08-02 19:57:45 +00003449 if (p + pflashcomp[i].size > fw->data + fw->size)
3450 return -1;
Padmanabh Ratnakar773a2d72012-10-20 06:04:16 +00003451
3452 status = be_flash(adapter, p, flash_cmd, pflashcomp[i].optype,
3453 pflashcomp[i].size);
3454 if (status) {
3455 dev_err(&adapter->pdev->dev,
3456 "Flashing section type %d failed.\n",
3457 pflashcomp[i].img_type);
3458 return status;
Ajit Khaparde84517482009-09-04 03:12:16 +00003459 }
Ajit Khaparde84517482009-09-04 03:12:16 +00003460 }
Ajit Khaparde84517482009-09-04 03:12:16 +00003461 return 0;
3462}
3463
Padmanabh Ratnakar773a2d72012-10-20 06:04:16 +00003464static int be_flash_skyhawk(struct be_adapter *adapter,
3465 const struct firmware *fw,
3466 struct be_dma_mem *flash_cmd, int num_of_images)
Ajit Khaparde3f0d4562010-02-09 01:30:35 +00003467{
Padmanabh Ratnakar773a2d72012-10-20 06:04:16 +00003468 int status = 0, i, filehdr_size = 0;
3469 int img_offset, img_size, img_optype, redboot;
3470 int img_hdrs_size = num_of_images * sizeof(struct image_hdr);
3471 const u8 *p = fw->data;
3472 struct flash_section_info *fsec = NULL;
3473
3474 filehdr_size = sizeof(struct flash_file_hdr_g3);
3475 fsec = get_fsec_info(adapter, filehdr_size + img_hdrs_size, fw);
3476 if (!fsec) {
3477 dev_err(&adapter->pdev->dev,
3478 "Invalid Cookie. UFI corrupted ?\n");
3479 return -1;
3480 }
3481
3482 for (i = 0; i < le32_to_cpu(fsec->fsec_hdr.num_images); i++) {
3483 img_offset = le32_to_cpu(fsec->fsec_entry[i].offset);
3484 img_size = le32_to_cpu(fsec->fsec_entry[i].pad_size);
3485
3486 switch (le32_to_cpu(fsec->fsec_entry[i].type)) {
3487 case IMAGE_FIRMWARE_iSCSI:
3488 img_optype = OPTYPE_ISCSI_ACTIVE;
3489 break;
3490 case IMAGE_BOOT_CODE:
3491 img_optype = OPTYPE_REDBOOT;
3492 break;
3493 case IMAGE_OPTION_ROM_ISCSI:
3494 img_optype = OPTYPE_BIOS;
3495 break;
3496 case IMAGE_OPTION_ROM_PXE:
3497 img_optype = OPTYPE_PXE_BIOS;
3498 break;
3499 case IMAGE_OPTION_ROM_FCoE:
3500 img_optype = OPTYPE_FCOE_BIOS;
3501 break;
3502 case IMAGE_FIRMWARE_BACKUP_iSCSI:
3503 img_optype = OPTYPE_ISCSI_BACKUP;
3504 break;
3505 case IMAGE_NCSI:
3506 img_optype = OPTYPE_NCSI_FW;
3507 break;
3508 default:
3509 continue;
3510 }
3511
3512 if (img_optype == OPTYPE_REDBOOT) {
3513 redboot = be_flash_redboot(adapter, fw->data,
3514 img_offset, img_size,
3515 filehdr_size + img_hdrs_size);
3516 if (!redboot)
3517 continue;
3518 }
3519
3520 p = fw->data;
3521 p += filehdr_size + img_offset + img_hdrs_size;
3522 if (p + img_size > fw->data + fw->size)
3523 return -1;
3524
3525 status = be_flash(adapter, p, flash_cmd, img_optype, img_size);
3526 if (status) {
3527 dev_err(&adapter->pdev->dev,
3528 "Flashing section type %d failed.\n",
3529 fsec->fsec_entry[i].type);
3530 return status;
3531 }
3532 }
3533 return 0;
Ajit Khaparde3f0d4562010-02-09 01:30:35 +00003534}
3535
Padmanabh Ratnakarf67ef7b2012-07-12 03:57:09 +00003536static int lancer_wait_idle(struct be_adapter *adapter)
3537{
3538#define SLIPORT_IDLE_TIMEOUT 30
3539 u32 reg_val;
3540 int status = 0, i;
3541
3542 for (i = 0; i < SLIPORT_IDLE_TIMEOUT; i++) {
3543 reg_val = ioread32(adapter->db + PHYSDEV_CONTROL_OFFSET);
3544 if ((reg_val & PHYSDEV_CONTROL_INP_MASK) == 0)
3545 break;
3546
3547 ssleep(1);
3548 }
3549
3550 if (i == SLIPORT_IDLE_TIMEOUT)
3551 status = -1;
3552
3553 return status;
3554}
3555
3556static int lancer_fw_reset(struct be_adapter *adapter)
3557{
3558 int status = 0;
3559
3560 status = lancer_wait_idle(adapter);
3561 if (status)
3562 return status;
3563
3564 iowrite32(PHYSDEV_CONTROL_FW_RESET_MASK, adapter->db +
3565 PHYSDEV_CONTROL_OFFSET);
3566
3567 return status;
3568}
3569
/* Download @fw to a Lancer adapter via WRITE_OBJECT commands.
 * The image is streamed in 32KB chunks through a single DMA buffer,
 * committed with a zero-length write, and (when the f/w asks for it)
 * activated with a firmware reset.  Returns 0 on success.
 */
static int lancer_fw_download(struct be_adapter *adapter,
				const struct firmware *fw)
{
#define LANCER_FW_DOWNLOAD_CHUNK (32 * 1024)
#define LANCER_FW_DOWNLOAD_LOCATION "/prg"
	struct be_dma_mem flash_cmd;
	const u8 *data_ptr = NULL;
	u8 *dest_image_ptr = NULL;
	size_t image_size = 0;
	u32 chunk_size = 0;
	u32 data_written = 0;
	u32 offset = 0;
	int status = 0;
	u8 add_status = 0;
	u8 change_status;

	/* The WRITE_OBJECT command requires a 4-byte-aligned length */
	if (!IS_ALIGNED(fw->size, sizeof(u32))) {
		dev_err(&adapter->pdev->dev,
			"FW Image not properly aligned. "
			"Length must be 4 byte aligned.\n");
		status = -EINVAL;
		goto lancer_fw_exit;
	}

	/* One DMA buffer holds the command header plus one image chunk */
	flash_cmd.size = sizeof(struct lancer_cmd_req_write_object)
				+ LANCER_FW_DOWNLOAD_CHUNK;
	flash_cmd.va = dma_alloc_coherent(&adapter->pdev->dev, flash_cmd.size,
					  &flash_cmd.dma, GFP_KERNEL);
	if (!flash_cmd.va) {
		status = -ENOMEM;
		goto lancer_fw_exit;
	}

	dest_image_ptr = flash_cmd.va +
				sizeof(struct lancer_cmd_req_write_object);
	image_size = fw->size;
	data_ptr = fw->data;

	/* Stream the image; advance by what the f/w reports as written,
	 * which may be less than the chunk we submitted.
	 */
	while (image_size) {
		chunk_size = min_t(u32, image_size, LANCER_FW_DOWNLOAD_CHUNK);

		/* Copy the image chunk content. */
		memcpy(dest_image_ptr, data_ptr, chunk_size);

		status = lancer_cmd_write_object(adapter, &flash_cmd,
						 chunk_size, offset,
						 LANCER_FW_DOWNLOAD_LOCATION,
						 &data_written, &change_status,
						 &add_status);
		if (status)
			break;

		offset += data_written;
		data_ptr += data_written;
		image_size -= data_written;
	}

	if (!status) {
		/* Commit the FW written */
		status = lancer_cmd_write_object(adapter, &flash_cmd,
						 0, offset,
						 LANCER_FW_DOWNLOAD_LOCATION,
						 &data_written, &change_status,
						 &add_status);
	}

	dma_free_coherent(&adapter->pdev->dev, flash_cmd.size, flash_cmd.va,
			  flash_cmd.dma);
	if (status) {
		dev_err(&adapter->pdev->dev,
			"Firmware load error. "
			"Status code: 0x%x Additional Status: 0x%x\n",
			status, add_status);
		goto lancer_fw_exit;
	}

	/* The f/w tells us whether the new image needs an adapter reset,
	 * a full reboot, or nothing at all to become active.
	 */
	if (change_status == LANCER_FW_RESET_NEEDED) {
		status = lancer_fw_reset(adapter);
		if (status) {
			dev_err(&adapter->pdev->dev,
				"Adapter busy for FW reset.\n"
				"New FW will not be active.\n");
			goto lancer_fw_exit;
		}
	} else if (change_status != LANCER_NO_RESET_NEEDED) {
		dev_err(&adapter->pdev->dev,
			"System reboot required for new FW"
			" to be active\n");
	}

	dev_info(&adapter->pdev->dev, "Firmware flashed successfully\n");
lancer_fw_exit:
	return status;
}
3664
Sathya Perlaca34fe32012-11-06 17:48:56 +00003665#define UFI_TYPE2 2
3666#define UFI_TYPE3 3
Vasundhara Volam0ad31572013-04-21 23:28:16 +00003667#define UFI_TYPE3R 10
Sathya Perlaca34fe32012-11-06 17:48:56 +00003668#define UFI_TYPE4 4
3669static int be_get_ufi_type(struct be_adapter *adapter,
Vasundhara Volam0ad31572013-04-21 23:28:16 +00003670 struct flash_file_hdr_g3 *fhdr)
Padmanabh Ratnakar773a2d72012-10-20 06:04:16 +00003671{
3672 if (fhdr == NULL)
3673 goto be_get_ufi_exit;
3674
Sathya Perlaca34fe32012-11-06 17:48:56 +00003675 if (skyhawk_chip(adapter) && fhdr->build[0] == '4')
3676 return UFI_TYPE4;
Vasundhara Volam0ad31572013-04-21 23:28:16 +00003677 else if (BE3_chip(adapter) && fhdr->build[0] == '3') {
3678 if (fhdr->asic_type_rev == 0x10)
3679 return UFI_TYPE3R;
3680 else
3681 return UFI_TYPE3;
3682 } else if (BE2_chip(adapter) && fhdr->build[0] == '2')
Sathya Perlaca34fe32012-11-06 17:48:56 +00003683 return UFI_TYPE2;
Padmanabh Ratnakar773a2d72012-10-20 06:04:16 +00003684
3685be_get_ufi_exit:
3686 dev_err(&adapter->pdev->dev,
3687 "UFI and Interface are not compatible for flashing\n");
3688 return -1;
3689}
3690
/* Flash @fw onto a non-Lancer adapter.  The UFI type is derived from
 * the file header and dispatched to the matching per-chip flash
 * routine.  Returns 0 on success, -ENOMEM, -1, or a command status.
 */
static int be_fw_download(struct be_adapter *adapter, const struct firmware* fw)
{
	struct flash_file_hdr_g3 *fhdr3;
	struct image_hdr *img_hdr_ptr = NULL;
	struct be_dma_mem flash_cmd;
	const u8 *p;
	int status = 0, i = 0, num_imgs = 0, ufi_type = 0;

	/* Shared DMA buffer for all flashrom write commands */
	flash_cmd.size = sizeof(struct be_cmd_write_flashrom);
	flash_cmd.va = dma_alloc_coherent(&adapter->pdev->dev, flash_cmd.size,
					  &flash_cmd.dma, GFP_KERNEL);
	if (!flash_cmd.va) {
		status = -ENOMEM;
		goto be_fw_exit;
	}

	p = fw->data;
	fhdr3 = (struct flash_file_hdr_g3 *)p;

	ufi_type = be_get_ufi_type(adapter, fhdr3);

	/* Flash only the image-header entry with imageid == 1; the other
	 * entries are alternate images not meant for this adapter.
	 */
	num_imgs = le32_to_cpu(fhdr3->num_imgs);
	for (i = 0; i < num_imgs; i++) {
		img_hdr_ptr = (struct image_hdr *)(fw->data +
				(sizeof(struct flash_file_hdr_g3) +
				 i * sizeof(struct image_hdr)));
		if (le32_to_cpu(img_hdr_ptr->imageid) == 1) {
			switch (ufi_type) {
			case UFI_TYPE4:
				status = be_flash_skyhawk(adapter, fw,
							&flash_cmd, num_imgs);
				break;
			case UFI_TYPE3R:
				status = be_flash_BEx(adapter, fw, &flash_cmd,
						      num_imgs);
				break;
			case UFI_TYPE3:
				/* Do not flash this ufi on BE3-R cards */
				if (adapter->asic_rev < 0x10)
					status = be_flash_BEx(adapter, fw,
							      &flash_cmd,
							      num_imgs);
				else {
					status = -1;
					dev_err(&adapter->pdev->dev,
						"Can't load BE3 UFI on BE3R\n");
				}
			}
		}
	}

	/* BE2 (UFI_TYPE2) images carry no image-header table, so they are
	 * flashed here with num_of_images == 0 instead of inside the loop.
	 */
	if (ufi_type == UFI_TYPE2)
		status = be_flash_BEx(adapter, fw, &flash_cmd, 0);
	else if (ufi_type == -1)
		status = -1;

	dma_free_coherent(&adapter->pdev->dev, flash_cmd.size, flash_cmd.va,
			  flash_cmd.dma);
	if (status) {
		dev_err(&adapter->pdev->dev, "Firmware load error\n");
		goto be_fw_exit;
	}

	dev_info(&adapter->pdev->dev, "Firmware flashed successfully\n");

be_fw_exit:
	return status;
}
3759
3760int be_load_fw(struct be_adapter *adapter, u8 *fw_file)
3761{
3762 const struct firmware *fw;
3763 int status;
3764
3765 if (!netif_running(adapter->netdev)) {
3766 dev_err(&adapter->pdev->dev,
3767 "Firmware load not allowed (interface is down)\n");
3768 return -1;
3769 }
3770
3771 status = request_firmware(&fw, fw_file, &adapter->pdev->dev);
3772 if (status)
3773 goto fw_exit;
3774
3775 dev_info(&adapter->pdev->dev, "Flashing firmware file %s\n", fw_file);
3776
3777 if (lancer_chip(adapter))
3778 status = lancer_fw_download(adapter, fw);
3779 else
3780 status = be_fw_download(adapter, fw);
3781
Ajit Khaparde84517482009-09-04 03:12:16 +00003782fw_exit:
3783 release_firmware(fw);
3784 return status;
3785}
3786
/* Netdevice callbacks for the be2net interface; installed on the
 * net_device in be_netdev_init().  Includes SR-IOV VF management hooks
 * (ndo_set_vf_*) and optional netpoll support.
 */
static const struct net_device_ops be_netdev_ops = {
	.ndo_open = be_open,
	.ndo_stop = be_close,
	.ndo_start_xmit = be_xmit,
	.ndo_set_rx_mode = be_set_rx_mode,
	.ndo_set_mac_address = be_mac_addr_set,
	.ndo_change_mtu = be_change_mtu,
	.ndo_get_stats64 = be_get_stats64,
	.ndo_validate_addr = eth_validate_addr,
	.ndo_vlan_rx_add_vid = be_vlan_add_vid,
	.ndo_vlan_rx_kill_vid = be_vlan_rem_vid,
	.ndo_set_vf_mac = be_set_vf_mac,
	.ndo_set_vf_vlan = be_set_vf_vlan,
	.ndo_set_vf_tx_rate = be_set_vf_tx_rate,
	.ndo_get_vf_config = be_get_vf_config,
#ifdef CONFIG_NET_POLL_CONTROLLER
	.ndo_poll_controller = be_netpoll,
#endif
};
3806
/* Initialize the net_device: advertise offload features, install the
 * netdev/ethtool ops, and register one NAPI context per event queue.
 */
static void be_netdev_init(struct net_device *netdev)
{
	struct be_adapter *adapter = netdev_priv(netdev);
	struct be_eq_obj *eqo;
	int i;

	/* User-togglable offloads: scatter-gather, TSO, checksum offload,
	 * RX checksum and VLAN tag insertion.
	 */
	netdev->hw_features |= NETIF_F_SG | NETIF_F_TSO | NETIF_F_TSO6 |
		NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM | NETIF_F_RXCSUM |
		NETIF_F_HW_VLAN_CTAG_TX;
	if (be_multi_rxq(adapter))
		netdev->hw_features |= NETIF_F_RXHASH;

	/* VLAN RX stripping/filtering is always on, hence in features
	 * but not hw_features.
	 */
	netdev->features |= netdev->hw_features |
		NETIF_F_HW_VLAN_CTAG_RX | NETIF_F_HW_VLAN_CTAG_FILTER;

	netdev->vlan_features |= NETIF_F_SG | NETIF_F_TSO | NETIF_F_TSO6 |
		NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM;

	netdev->priv_flags |= IFF_UNICAST_FLT;

	netdev->flags |= IFF_MULTICAST;

	netif_set_gso_max_size(netdev, 65535 - ETH_HLEN);

	netdev->netdev_ops = &be_netdev_ops;

	SET_ETHTOOL_OPS(netdev, &be_ethtool_ops);

	for_all_evt_queues(adapter, eqo, i)
		netif_napi_add(netdev, &eqo->napi, be_poll, BE_NAPI_WEIGHT);
}
3838
3839static void be_unmap_pci_bars(struct be_adapter *adapter)
3840{
Sathya Perlac5b3ad42013-03-05 22:23:20 +00003841 if (adapter->csr)
3842 pci_iounmap(adapter->pdev, adapter->csr);
Sathya Perla8788fdc2009-07-27 22:52:03 +00003843 if (adapter->db)
Sathya Perlace66f782012-11-06 17:48:58 +00003844 pci_iounmap(adapter->pdev, adapter->db);
Parav Pandit045508a2012-03-26 14:27:13 +00003845}
3846
/* BAR number holding the doorbell registers: BAR 0 on Lancer chips and
 * on virtual functions, BAR 4 otherwise.
 */
static int db_bar(struct be_adapter *adapter)
{
	return (lancer_chip(adapter) || !be_physfn(adapter)) ? 0 : 4;
}
3854
3855static int be_roce_map_pci_bars(struct be_adapter *adapter)
Parav Pandit045508a2012-03-26 14:27:13 +00003856{
Sathya Perladbf0f2a2012-11-06 17:49:00 +00003857 if (skyhawk_chip(adapter)) {
Sathya Perlace66f782012-11-06 17:48:58 +00003858 adapter->roce_db.size = 4096;
3859 adapter->roce_db.io_addr = pci_resource_start(adapter->pdev,
3860 db_bar(adapter));
3861 adapter->roce_db.total_size = pci_resource_len(adapter->pdev,
3862 db_bar(adapter));
3863 }
Parav Pandit045508a2012-03-26 14:27:13 +00003864 return 0;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07003865}
3866
/* Map the PCI BARs the driver uses: the CSR BAR (BE2/BE3 physical
 * functions only) and the doorbell BAR, then record the RoCE doorbell
 * window.  Returns 0 on success or -ENOMEM, unmapping anything already
 * mapped on failure.
 */
static int be_map_pci_bars(struct be_adapter *adapter)
{
	u8 __iomem *addr;
	u32 sli_intf;

	/* Derive the interface type from the SLI interface register */
	pci_read_config_dword(adapter->pdev, SLI_INTF_REG_OFFSET, &sli_intf);
	adapter->if_type = (sli_intf & SLI_INTF_IF_TYPE_MASK) >>
				SLI_INTF_IF_TYPE_SHIFT;

	if (BEx_chip(adapter) && be_physfn(adapter)) {
		adapter->csr = pci_iomap(adapter->pdev, 2, 0);
		if (adapter->csr == NULL)
			return -ENOMEM;
	}

	addr = pci_iomap(adapter->pdev, db_bar(adapter), 0);
	if (addr == NULL)
		goto pci_map_err;
	adapter->db = addr;

	be_roce_map_pci_bars(adapter);
	return 0;

pci_map_err:
	be_unmap_pci_bars(adapter);
	return -ENOMEM;
}
3894
Sathya Perla6b7c5b92009-03-11 23:32:03 -07003895static void be_ctrl_cleanup(struct be_adapter *adapter)
3896{
Sathya Perla8788fdc2009-07-27 22:52:03 +00003897 struct be_dma_mem *mem = &adapter->mbox_mem_alloced;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07003898
3899 be_unmap_pci_bars(adapter);
3900
3901 if (mem->va)
Ivan Vecera2b7bceb2011-02-02 08:05:12 +00003902 dma_free_coherent(&adapter->pdev->dev, mem->size, mem->va,
3903 mem->dma);
Sathya Perlae7b909a2009-11-22 22:01:10 +00003904
Sathya Perla5b8821b2011-08-02 19:57:44 +00003905 mem = &adapter->rx_filter;
Sathya Perlae7b909a2009-11-22 22:01:10 +00003906 if (mem->va)
Ivan Vecera2b7bceb2011-02-02 08:05:12 +00003907 dma_free_coherent(&adapter->pdev->dev, mem->size, mem->va,
3908 mem->dma);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07003909}
3910
/* Set up the control path: map PCI BARs, allocate the 16-byte-aligned
 * mailbox and the rx-filter DMA buffers, and initialize the mailbox/MCC
 * locks.  Uses goto-based unwinding; returns 0 or a negative errno.
 */
static int be_ctrl_init(struct be_adapter *adapter)
{
	struct be_dma_mem *mbox_mem_alloc = &adapter->mbox_mem_alloced;
	struct be_dma_mem *mbox_mem_align = &adapter->mbox_mem;
	struct be_dma_mem *rx_filter = &adapter->rx_filter;
	u32 sli_intf;
	int status;

	/* Derive SLI family and VF status from the SLI interface register */
	pci_read_config_dword(adapter->pdev, SLI_INTF_REG_OFFSET, &sli_intf);
	adapter->sli_family = (sli_intf & SLI_INTF_FAMILY_MASK) >>
				SLI_INTF_FAMILY_SHIFT;
	adapter->virtfn = (sli_intf & SLI_INTF_FT_MASK) ? 1 : 0;

	status = be_map_pci_bars(adapter);
	if (status)
		goto done;

	/* Over-allocate by 16 bytes so the mailbox itself can be aligned
	 * to the 16-byte boundary the hardware requires.
	 */
	mbox_mem_alloc->size = sizeof(struct be_mcc_mailbox) + 16;
	mbox_mem_alloc->va = dma_alloc_coherent(&adapter->pdev->dev,
						mbox_mem_alloc->size,
						&mbox_mem_alloc->dma,
						GFP_KERNEL);
	if (!mbox_mem_alloc->va) {
		status = -ENOMEM;
		goto unmap_pci_bars;
	}
	mbox_mem_align->size = sizeof(struct be_mcc_mailbox);
	mbox_mem_align->va = PTR_ALIGN(mbox_mem_alloc->va, 16);
	mbox_mem_align->dma = PTR_ALIGN(mbox_mem_alloc->dma, 16);
	memset(mbox_mem_align->va, 0, sizeof(struct be_mcc_mailbox));

	rx_filter->size = sizeof(struct be_cmd_req_rx_filter);
	rx_filter->va = dma_alloc_coherent(&adapter->pdev->dev, rx_filter->size,
					   &rx_filter->dma,
					   GFP_KERNEL | __GFP_ZERO);
	if (rx_filter->va == NULL) {
		status = -ENOMEM;
		goto free_mbox;
	}

	mutex_init(&adapter->mbox_lock);
	spin_lock_init(&adapter->mcc_lock);
	spin_lock_init(&adapter->mcc_cq_lock);

	init_completion(&adapter->flash_compl);
	/* Saved config space is restored on EEH/error recovery */
	pci_save_state(adapter->pdev);
	return 0;

free_mbox:
	dma_free_coherent(&adapter->pdev->dev, mbox_mem_alloc->size,
			  mbox_mem_alloc->va, mbox_mem_alloc->dma);

unmap_pci_bars:
	be_unmap_pci_bars(adapter);

done:
	return status;
}
3969
3970static void be_stats_cleanup(struct be_adapter *adapter)
3971{
Sathya Perla3abcded2010-10-03 22:12:27 -07003972 struct be_dma_mem *cmd = &adapter->stats_cmd;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07003973
3974 if (cmd->va)
Ivan Vecera2b7bceb2011-02-02 08:05:12 +00003975 dma_free_coherent(&adapter->pdev->dev, cmd->size,
3976 cmd->va, cmd->dma);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07003977}
3978
3979static int be_stats_init(struct be_adapter *adapter)
3980{
Sathya Perla3abcded2010-10-03 22:12:27 -07003981 struct be_dma_mem *cmd = &adapter->stats_cmd;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07003982
Sathya Perlaca34fe32012-11-06 17:48:56 +00003983 if (lancer_chip(adapter))
3984 cmd->size = sizeof(struct lancer_cmd_req_pport_stats);
3985 else if (BE2_chip(adapter))
Ajit Khaparde89a88ab2011-05-16 07:36:18 +00003986 cmd->size = sizeof(struct be_cmd_req_get_stats_v0);
Sathya Perlaca34fe32012-11-06 17:48:56 +00003987 else
3988 /* BE3 and Skyhawk */
3989 cmd->size = sizeof(struct be_cmd_req_get_stats_v1);
3990
Ivan Vecera2b7bceb2011-02-02 08:05:12 +00003991 cmd->va = dma_alloc_coherent(&adapter->pdev->dev, cmd->size, &cmd->dma,
Joe Perches1f9061d22013-03-15 07:23:58 +00003992 GFP_KERNEL | __GFP_ZERO);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07003993 if (cmd->va == NULL)
3994 return -1;
3995 return 0;
3996}
3997
/* PCI remove callback: tear the device down in the reverse order of
 * probe.  The ordering matters — RoCE and interrupts go first, the
 * error-recovery worker must be cancelled before the netdev is gone,
 * and the f/w is told we are done before control structures are freed.
 */
static void be_remove(struct pci_dev *pdev)
{
	struct be_adapter *adapter = pci_get_drvdata(pdev);

	if (!adapter)
		return;

	be_roce_dev_remove(adapter);
	be_intr_set(adapter, false);

	cancel_delayed_work_sync(&adapter->func_recovery_work);

	unregister_netdev(adapter->netdev);

	be_clear(adapter);

	/* tell fw we're done with firing cmds */
	be_cmd_fw_clean(adapter);

	be_stats_cleanup(adapter);

	be_ctrl_cleanup(adapter);

	pci_disable_pcie_error_reporting(pdev);

	pci_set_drvdata(pdev, NULL);
	pci_release_regions(pdev);
	pci_disable_device(pdev);

	free_netdev(adapter->netdev);
}
4029
Ajit Khaparde4762f6c2012-03-18 06:23:11 +00004030bool be_is_wol_supported(struct be_adapter *adapter)
4031{
4032 return ((adapter->wol_cap & BE_WOL_CAP) &&
4033 !be_is_wol_excluded(adapter)) ? true : false;
4034}
4035
/* Query the f/w FAT (failure analysis trace) configuration and return
 * the UART trace level of module 0, or 0 when it cannot be read
 * (Lancer, allocation failure, or command failure).
 */
u32 be_get_fw_log_level(struct be_adapter *adapter)
{
	struct be_dma_mem extfat_cmd;
	struct be_fat_conf_params *cfgs;
	int status;
	u32 level = 0;
	int j;

	/* Lancer does not support this command */
	if (lancer_chip(adapter))
		return 0;

	memset(&extfat_cmd, 0, sizeof(struct be_dma_mem));
	extfat_cmd.size = sizeof(struct be_cmd_resp_get_ext_fat_caps);
	extfat_cmd.va = pci_alloc_consistent(adapter->pdev, extfat_cmd.size,
					     &extfat_cmd.dma);

	if (!extfat_cmd.va) {
		dev_err(&adapter->pdev->dev, "%s: Memory allocation failure\n",
			__func__);
		goto err;
	}

	status = be_cmd_get_ext_fat_capabilites(adapter, &extfat_cmd);
	if (!status) {
		/* Config params follow the response header in the buffer */
		cfgs = (struct be_fat_conf_params *)(extfat_cmd.va +
						sizeof(struct be_cmd_resp_hdr));
		/* Pick the debug level of module 0's UART trace mode */
		for (j = 0; j < le32_to_cpu(cfgs->module[0].num_modes); j++) {
			if (cfgs->module[0].trace_lvl[j].mode == MODE_UART)
				level = cfgs->module[0].trace_lvl[j].dbg_lvl;
		}
	}
	pci_free_consistent(adapter->pdev, extfat_cmd.size, extfat_cmd.va,
			    extfat_cmd.dma);
err:
	return level;
}
Padmanabh Ratnakarabb93952012-10-20 06:01:41 +00004072
Sathya Perla39f1d942012-05-08 19:41:24 +00004073static int be_get_initial_config(struct be_adapter *adapter)
Sathya Perla6b7c5b92009-03-11 23:32:03 -07004074{
Sathya Perla6b7c5b92009-03-11 23:32:03 -07004075 int status;
Somnath Kotur941a77d2012-05-17 22:59:03 +00004076 u32 level;
Sathya Perla43a04fdc2009-10-14 20:21:17 +00004077
Ajit Khaparde9e1453c2011-02-20 11:42:22 +00004078 status = be_cmd_get_cntl_attributes(adapter);
4079 if (status)
4080 return status;
4081
Ajit Khaparde4762f6c2012-03-18 06:23:11 +00004082 status = be_cmd_get_acpi_wol_cap(adapter);
4083 if (status) {
4084 /* in case of a failure to get wol capabillities
4085 * check the exclusion list to determine WOL capability */
4086 if (!be_is_wol_excluded(adapter))
4087 adapter->wol_cap |= BE_WOL_CAP;
4088 }
4089
4090 if (be_is_wol_supported(adapter))
4091 adapter->wol = true;
4092
Padmanabh Ratnakar7aeb2152012-07-12 03:56:46 +00004093 /* Must be a power of 2 or else MODULO will BUG_ON */
4094 adapter->be_get_temp_freq = 64;
4095
Somnath Kotur941a77d2012-05-17 22:59:03 +00004096 level = be_get_fw_log_level(adapter);
4097 adapter->msg_enable = level <= FW_LOG_LEVEL_DEFAULT ? NETIF_MSG_HW : 0;
4098
Sathya Perla2243e2e2009-11-22 22:02:03 +00004099 return 0;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07004100}
4101
/* Attempt to recover a Lancer-chip function after a SLIPORT error:
 * wait for the FW to reach the ready state again, then tear down and
 * fully re-create the adapter state.  Returns 0 on successful recovery.
 */
static int lancer_recover_func(struct be_adapter *adapter)
{
	int status;

	status = lancer_test_and_set_rdy_state(adapter);
	if (status)
		goto err;

	if (netif_running(adapter->netdev))
		be_close(adapter->netdev);

	be_clear(adapter);

	/* Clear the sticky error flags before re-initializing so that
	 * the FW cmds issued by be_setup() are not short-circuited.
	 */
	adapter->hw_error = false;
	adapter->fw_timeout = false;

	status = be_setup(adapter);
	if (status)
		goto err;

	if (netif_running(adapter->netdev)) {
		status = be_open(adapter->netdev);
		if (status)
			goto err;
	}

	dev_err(&adapter->pdev->dev,
		"Adapter SLIPORT recovery succeeded\n");
	return 0;
err:
	/* NOTE(review): the failure message is logged only when an EEH
	 * error is also pending — confirm this condition is not inverted.
	 */
	if (adapter->eeh_error)
		dev_err(&adapter->pdev->dev,
			"Adapter SLIPORT recovery failed\n");

	return status;
}
4138
4139static void be_func_recovery_task(struct work_struct *work)
4140{
4141 struct be_adapter *adapter =
4142 container_of(work, struct be_adapter, func_recovery_work.work);
4143 int status;
4144
4145 be_detect_error(adapter);
4146
4147 if (adapter->hw_error && lancer_chip(adapter)) {
4148
4149 if (adapter->eeh_error)
4150 goto out;
4151
4152 rtnl_lock();
4153 netif_device_detach(adapter->netdev);
4154 rtnl_unlock();
4155
4156 status = lancer_recover_func(adapter);
4157
4158 if (!status)
4159 netif_device_attach(adapter->netdev);
4160 }
4161
4162out:
4163 schedule_delayed_work(&adapter->func_recovery_work,
4164 msecs_to_jiffies(1000));
Padmanabh Ratnakard8110f62011-11-25 05:48:23 +00004165}
4166
/* Periodic (1s) housekeeping: reap MCC completions, refresh HW stats,
 * poll die temperature, replenish starved RX rings and adapt EQ delays.
 */
static void be_worker(struct work_struct *work)
{
	struct be_adapter *adapter =
		container_of(work, struct be_adapter, work.work);
	struct be_rx_obj *rxo;
	struct be_eq_obj *eqo;
	int i;

	/* when interrupts are not yet enabled, just reap any pending
	 * mcc completions */
	if (!netif_running(adapter->netdev)) {
		local_bh_disable();
		be_process_mcc(adapter);
		local_bh_enable();
		goto reschedule;
	}

	/* Fire a new stats request only if the previous one completed */
	if (!adapter->stats_cmd_sent) {
		if (lancer_chip(adapter))
			lancer_cmd_get_pport_stats(adapter,
						&adapter->stats_cmd);
		else
			be_cmd_get_stats(adapter, &adapter->stats_cmd);
	}

	/* be_get_temp_freq is set to a power of 2 in
	 * be_get_initial_config(); MODULO requires that
	 */
	if (MODULO(adapter->work_counter, adapter->be_get_temp_freq) == 0)
		be_cmd_get_die_temperature(adapter);

	/* Re-post buffers on any RX queue that ran dry */
	for_all_rx_queues(adapter, rxo, i) {
		if (rxo->rx_post_starved) {
			rxo->rx_post_starved = false;
			be_post_rx_frags(rxo, GFP_KERNEL);
		}
	}

	for_all_evt_queues(adapter, eqo, i)
		be_eqd_update(adapter, eqo);

reschedule:
	adapter->work_counter++;
	schedule_delayed_work(&adapter->work, msecs_to_jiffies(1000));
}
4209
Sathya Perla39f1d942012-05-08 19:41:24 +00004210static bool be_reset_required(struct be_adapter *adapter)
4211{
Sathya Perlad79c0a22012-06-05 19:37:22 +00004212 return be_find_vfs(adapter, ENABLED) > 0 ? false : true;
Sathya Perla39f1d942012-05-08 19:41:24 +00004213}
4214
Sathya Perlad3791422012-09-28 04:39:44 +00004215static char *mc_name(struct be_adapter *adapter)
4216{
4217 if (adapter->function_mode & FLEX10_MODE)
4218 return "FLEX10";
4219 else if (adapter->function_mode & VNIC_MODE)
4220 return "vNIC";
4221 else if (adapter->function_mode & UMC_ENABLED)
4222 return "UMC";
4223 else
4224 return "";
4225}
4226
/* "PF" for the physical function, "VF" for a virtual function */
static inline char *func_name(struct be_adapter *adapter)
{
	if (be_physfn(adapter))
		return "PF";
	return "VF";
}
4231
/* PCI probe: enable the device, map/claim its resources, bring the FW
 * to a known state, create the netdev and register it with the stack.
 * Errors unwind in reverse order via the goto labels at the bottom.
 */
static int be_probe(struct pci_dev *pdev, const struct pci_device_id *pdev_id)
{
	int status = 0;
	struct be_adapter *adapter;
	struct net_device *netdev;
	char port_name;

	status = pci_enable_device(pdev);
	if (status)
		goto do_none;

	status = pci_request_regions(pdev, DRV_NAME);
	if (status)
		goto disable_dev;
	pci_set_master(pdev);

	/* Allocate the netdev with room for the max supported queues;
	 * the adapter private area lives inside the netdev.
	 */
	netdev = alloc_etherdev_mqs(sizeof(*adapter), MAX_TX_QS, MAX_RX_QS);
	if (netdev == NULL) {
		status = -ENOMEM;
		goto rel_reg;
	}
	adapter = netdev_priv(netdev);
	adapter->pdev = pdev;
	pci_set_drvdata(pdev, adapter);
	adapter->netdev = netdev;
	SET_NETDEV_DEV(netdev, &pdev->dev);

	/* Prefer 64-bit DMA; fall back to a 32-bit mask if unavailable */
	status = dma_set_mask(&pdev->dev, DMA_BIT_MASK(64));
	if (!status) {
		status = dma_set_coherent_mask(&pdev->dev, DMA_BIT_MASK(64));
		if (status < 0) {
			dev_err(&pdev->dev, "dma_set_coherent_mask failed\n");
			goto free_netdev;
		}
		netdev->features |= NETIF_F_HIGHDMA;
	} else {
		status = dma_set_mask(&pdev->dev, DMA_BIT_MASK(32));
		if (status) {
			dev_err(&pdev->dev, "Could not set PCI DMA Mask\n");
			goto free_netdev;
		}
	}

	/* AER is best-effort; probe continues even if it is unavailable */
	status = pci_enable_pcie_error_reporting(pdev);
	if (status)
		dev_err(&pdev->dev, "Could not use PCIe error reporting\n");

	status = be_ctrl_init(adapter);
	if (status)
		goto free_netdev;

	/* sync up with fw's ready state */
	if (be_physfn(adapter)) {
		status = be_fw_wait_ready(adapter);
		if (status)
			goto ctrl_clean;
	}

	/* FLR the function unless VFs are already enabled (see
	 * be_reset_required())
	 */
	if (be_reset_required(adapter)) {
		status = be_cmd_reset_function(adapter);
		if (status)
			goto ctrl_clean;

		/* Wait for interrupts to quiesce after an FLR */
		msleep(100);
	}

	/* Allow interrupts for other ULPs running on NIC function */
	be_intr_set(adapter, true);

	/* tell fw we're ready to fire cmds */
	status = be_cmd_fw_init(adapter);
	if (status)
		goto ctrl_clean;

	status = be_stats_init(adapter);
	if (status)
		goto ctrl_clean;

	status = be_get_initial_config(adapter);
	if (status)
		goto stats_clean;

	INIT_DELAYED_WORK(&adapter->work, be_worker);
	INIT_DELAYED_WORK(&adapter->func_recovery_work, be_func_recovery_task);
	adapter->rx_fc = adapter->tx_fc = true;

	status = be_setup(adapter);
	if (status)
		goto stats_clean;

	be_netdev_init(netdev);
	status = register_netdev(netdev);
	if (status != 0)
		goto unsetup;

	/* Advertise the device to the RoCE (RDMA) driver, if present */
	be_roce_dev_add(adapter);

	schedule_delayed_work(&adapter->func_recovery_work,
			      msecs_to_jiffies(1000));

	be_cmd_query_port_name(adapter, &port_name);

	dev_info(&pdev->dev, "%s: %s %s port %c\n", nic_name(pdev),
		 func_name(adapter), mc_name(adapter), port_name);

	return 0;

unsetup:
	be_clear(adapter);
stats_clean:
	be_stats_cleanup(adapter);
ctrl_clean:
	be_ctrl_cleanup(adapter);
free_netdev:
	free_netdev(netdev);
	pci_set_drvdata(pdev, NULL);
rel_reg:
	pci_release_regions(pdev);
disable_dev:
	pci_disable_device(pdev);
do_none:
	dev_err(&pdev->dev, "%s initialization failed\n", nic_name(pdev));
	return status;
}
4357
/* Legacy PM suspend: arm WOL if enabled, stop the recovery worker,
 * quiesce and tear down the interface, then power the device down.
 */
static int be_suspend(struct pci_dev *pdev, pm_message_t state)
{
	struct be_adapter *adapter = pci_get_drvdata(pdev);
	struct net_device *netdev = adapter->netdev;

	if (adapter->wol)
		be_setup_wol(adapter, true);

	/* Stop the recovery worker before tearing the device down */
	cancel_delayed_work_sync(&adapter->func_recovery_work);

	netif_device_detach(netdev);
	if (netif_running(netdev)) {
		rtnl_lock();
		be_close(netdev);
		rtnl_unlock();
	}
	be_clear(adapter);

	pci_save_state(pdev);
	pci_disable_device(pdev);
	pci_set_power_state(pdev, pci_choose_state(pdev, state));
	return 0;
}
4381
/* Legacy PM resume: re-enable the device, re-initialize the FW and the
 * adapter state, restart the recovery worker and disarm WOL.
 */
static int be_resume(struct pci_dev *pdev)
{
	int status = 0;
	struct be_adapter *adapter = pci_get_drvdata(pdev);
	struct net_device *netdev = adapter->netdev;

	netif_device_detach(netdev);

	status = pci_enable_device(pdev);
	if (status)
		return status;

	pci_set_power_state(pdev, 0);
	pci_restore_state(pdev);

	/* tell fw we're ready to fire cmds */
	status = be_cmd_fw_init(adapter);
	if (status)
		return status;

	/* NOTE(review): be_setup()'s return status is ignored here,
	 * unlike in be_probe()/be_eeh_resume() — confirm intentional.
	 */
	be_setup(adapter);
	if (netif_running(netdev)) {
		rtnl_lock();
		be_open(netdev);
		rtnl_unlock();
	}

	schedule_delayed_work(&adapter->func_recovery_work,
			      msecs_to_jiffies(1000));
	netif_device_attach(netdev);

	if (adapter->wol)
		be_setup_wol(adapter, false);

	return 0;
}
4418
/*
 * An FLR will stop BE from DMAing any data.
 */
static void be_shutdown(struct pci_dev *pdev)
{
	struct be_adapter *adapter = pci_get_drvdata(pdev);

	/* Probe may have failed before drvdata was set */
	if (!adapter)
		return;

	/* Stop both periodic workers before resetting the function */
	cancel_delayed_work_sync(&adapter->work);
	cancel_delayed_work_sync(&adapter->func_recovery_work);

	netif_device_detach(adapter->netdev);

	/* Function-level reset halts all DMA from the device */
	be_cmd_reset_function(adapter);

	pci_disable_device(pdev);
}
4438
/* EEH callback: a PCI channel error was detected.  Quiesce the device
 * and tell the EEH core whether to attempt a slot reset or give up.
 */
static pci_ers_result_t be_eeh_err_detected(struct pci_dev *pdev,
				pci_channel_state_t state)
{
	struct be_adapter *adapter = pci_get_drvdata(pdev);
	struct net_device *netdev = adapter->netdev;

	dev_err(&adapter->pdev->dev, "EEH error detected\n");

	/* Flag the EEH error so the recovery worker stands down */
	adapter->eeh_error = true;

	cancel_delayed_work_sync(&adapter->func_recovery_work);

	rtnl_lock();
	netif_device_detach(netdev);
	rtnl_unlock();

	if (netif_running(netdev)) {
		rtnl_lock();
		be_close(netdev);
		rtnl_unlock();
	}
	be_clear(adapter);

	/* Permanent failure: no reset will help, disconnect the device */
	if (state == pci_channel_io_perm_failure)
		return PCI_ERS_RESULT_DISCONNECT;

	pci_disable_device(pdev);

	/* The error could cause the FW to trigger a flash debug dump.
	 * Resetting the card while flash dump is in progress
	 * can cause it not to recover; wait for it to finish.
	 * Wait only for first function as it is needed only once per
	 * adapter.
	 */
	if (pdev->devfn == 0)
		ssleep(30);

	return PCI_ERS_RESULT_NEED_RESET;
}
4478
/* EEH callback: the slot has been reset.  Re-enable the device, wait
 * for the FW to become ready again and report whether recovery can
 * proceed to be_eeh_resume().
 */
static pci_ers_result_t be_eeh_reset(struct pci_dev *pdev)
{
	struct be_adapter *adapter = pci_get_drvdata(pdev);
	int status;

	dev_info(&adapter->pdev->dev, "EEH reset\n");
	/* Clear sticky HW/FW error state left from the EEH event */
	be_clear_all_error(adapter);

	status = pci_enable_device(pdev);
	if (status)
		return PCI_ERS_RESULT_DISCONNECT;

	pci_set_master(pdev);
	pci_set_power_state(pdev, 0);
	pci_restore_state(pdev);

	/* Check if card is ok and fw is ready */
	dev_info(&adapter->pdev->dev,
		 "Waiting for FW to be ready after EEH reset\n");
	status = be_fw_wait_ready(adapter);
	if (status)
		return PCI_ERS_RESULT_DISCONNECT;

	/* Clear any AER uncorrectable-error status left on the device */
	pci_cleanup_aer_uncorrect_error_status(pdev);
	return PCI_ERS_RESULT_RECOVERED;
}
4505
/* EEH callback: traffic may resume.  Reset and re-initialize the
 * function, reopen the interface and restart the recovery worker.
 */
static void be_eeh_resume(struct pci_dev *pdev)
{
	int status = 0;
	struct be_adapter *adapter = pci_get_drvdata(pdev);
	struct net_device *netdev = adapter->netdev;

	dev_info(&adapter->pdev->dev, "EEH resume\n");

	pci_save_state(pdev);

	status = be_cmd_reset_function(adapter);
	if (status)
		goto err;

	/* tell fw we're ready to fire cmds */
	status = be_cmd_fw_init(adapter);
	if (status)
		goto err;

	status = be_setup(adapter);
	if (status)
		goto err;

	if (netif_running(netdev)) {
		status = be_open(netdev);
		if (status)
			goto err;
	}

	schedule_delayed_work(&adapter->func_recovery_work,
			      msecs_to_jiffies(1000));
	netif_device_attach(netdev);
	return;
err:
	dev_err(&adapter->pdev->dev, "EEH resume failed\n");
}
4542
/* PCI error-recovery (EEH/AER) callbacks; invoked by the PCI core in
 * the order: error_detected -> slot_reset -> resume.
 */
static const struct pci_error_handlers be_eeh_handlers = {
	.error_detected = be_eeh_err_detected,
	.slot_reset = be_eeh_reset,
	.resume = be_eeh_resume,
};
4548
/* PCI driver entry points for this NIC driver */
static struct pci_driver be_driver = {
	.name = DRV_NAME,
	.id_table = be_dev_ids,
	.probe = be_probe,
	.remove = be_remove,
	.suspend = be_suspend,
	.resume = be_resume,
	.shutdown = be_shutdown,
	.err_handler = &be_eeh_handlers
};
4559
4560static int __init be_init_module(void)
4561{
Joe Perches8e95a202009-12-03 07:58:21 +00004562 if (rx_frag_size != 8192 && rx_frag_size != 4096 &&
4563 rx_frag_size != 2048) {
Sathya Perla6b7c5b92009-03-11 23:32:03 -07004564 printk(KERN_WARNING DRV_NAME
4565 " : Module param rx_frag_size must be 2048/4096/8192."
4566 " Using 2048\n");
4567 rx_frag_size = 2048;
4568 }
Sathya Perla6b7c5b92009-03-11 23:32:03 -07004569
Sathya Perla6b7c5b92009-03-11 23:32:03 -07004570 return pci_register_driver(&be_driver);
4571}
4572module_init(be_init_module);
4573
/* Module unload: detach the driver from the PCI core */
static void __exit be_exit_module(void)
{
	pci_unregister_driver(&be_driver);
}
module_exit(be_exit_module);