blob: 21808680b91fb3b8e238b9be5d3bcf34096eeacc [file] [log] [blame]
/*
 * Copyright (C) 2005 - 2013 Emulex
 * All rights reserved.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License version 2
 * as published by the Free Software Foundation. The full GNU General
 * Public License is included in this distribution in the file called COPYING.
 *
 * Contact Information:
 * linux-drivers@emulex.com
 *
 * Emulex
 * 3333 Susan Street
 * Costa Mesa, CA 92626
 */
17
#include <linux/prefetch.h>
#include <linux/module.h>
#include "be.h"
#include "be_cmds.h"
#include <asm/div64.h>
#include <linux/aer.h>
Sathya Perla6b7c5b92009-03-11 23:32:03 -070024
25MODULE_VERSION(DRV_VER);
26MODULE_DEVICE_TABLE(pci, be_dev_ids);
27MODULE_DESCRIPTION(DRV_DESC " " DRV_VER);
Sarveshwar Bandi00d3d512013-01-28 04:17:01 +000028MODULE_AUTHOR("Emulex Corporation");
Sathya Perla6b7c5b92009-03-11 23:32:03 -070029MODULE_LICENSE("GPL");
30
Sarveshwar Bandiba343c72010-03-31 02:56:12 +000031static unsigned int num_vfs;
Sarveshwar Bandiba343c72010-03-31 02:56:12 +000032module_param(num_vfs, uint, S_IRUGO);
Sarveshwar Bandiba343c72010-03-31 02:56:12 +000033MODULE_PARM_DESC(num_vfs, "Number of PCI VFs to initialize");
Sathya Perla6b7c5b92009-03-11 23:32:03 -070034
Sathya Perla11ac75e2011-12-13 00:58:50 +000035static ushort rx_frag_size = 2048;
36module_param(rx_frag_size, ushort, S_IRUGO);
37MODULE_PARM_DESC(rx_frag_size, "Size of a fragment that holds rcvd data.");
38
/* PCI IDs claimed by this driver — presumably the BE2/BE3 and OneConnect
 * (Lancer/Skyhawk) device variants; exact chip mapping of each OC_DEVICE_ID
 * is defined in be.h (TODO confirm against be.h).
 */
static DEFINE_PCI_DEVICE_TABLE(be_dev_ids) = {
	{ PCI_DEVICE(BE_VENDOR_ID, BE_DEVICE_ID1) },
	{ PCI_DEVICE(BE_VENDOR_ID, BE_DEVICE_ID2) },
	{ PCI_DEVICE(BE_VENDOR_ID, OC_DEVICE_ID1) },
	{ PCI_DEVICE(BE_VENDOR_ID, OC_DEVICE_ID2) },
	{ PCI_DEVICE(EMULEX_VENDOR_ID, OC_DEVICE_ID3)},
	{ PCI_DEVICE(EMULEX_VENDOR_ID, OC_DEVICE_ID4)},
	{ PCI_DEVICE(EMULEX_VENDOR_ID, OC_DEVICE_ID5)},
	{ PCI_DEVICE(EMULEX_VENDOR_ID, OC_DEVICE_ID6)},
	{ 0 }			/* terminating entry */
};
MODULE_DEVICE_TABLE(pci, be_dev_ids);
/* UE Status Low CSR */
/* Human-readable name for each bit of the UE (Unrecoverable Error) status
 * "low" CSR; array index == bit position. NOTE(review): trailing spaces in
 * some names are kept byte-for-byte — they appear verbatim in log output.
 */
static const char * const ue_status_low_desc[] = {
	"CEV",
	"CTX",
	"DBUF",
	"ERX",
	"Host",
	"MPU",
	"NDMA",
	"PTC ",
	"RDMA ",
	"RXF ",
	"RXIPS ",
	"RXULP0 ",
	"RXULP1 ",
	"RXULP2 ",
	"TIM ",
	"TPOST ",
	"TPRE ",
	"TXIPS ",
	"TXULP0 ",
	"TXULP1 ",
	"UC ",
	"WDMA ",
	"TXULP2 ",
	"HOST1 ",
	"P0_OB_LINK ",
	"P1_OB_LINK ",
	"HOST_GPIO ",
	"MBOX ",
	"AXGMAC0",
	"AXGMAC1",
	"JTAG",
	"MPU_INTPEND"
};
/* UE Status High CSR */
/* Human-readable name for each bit of the UE status "high" CSR; array
 * index == bit position. Bits 24-31 are unnamed ("Unknown").
 */
static const char * const ue_status_hi_desc[] = {
	"LPCMEMHOST",
	"MGMT_MAC",
	"PCS0ONLINE",
	"MPU_IRAM",
	"PCS1ONLINE",
	"PCTL0",
	"PCTL1",
	"PMEM",
	"RR",
	"TXPB",
	"RXPP",
	"XAUI",
	"TXP",
	"ARM",
	"IPC",
	"HOST2",
	"HOST3",
	"HOST4",
	"HOST5",
	"HOST6",
	"HOST7",
	"HOST8",
	"HOST9",
	"NETC",
	"Unknown",
	"Unknown",
	"Unknown",
	"Unknown",
	"Unknown",
	"Unknown",
	"Unknown",
	"Unknown"
};
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700121
Sathya Perla752961a2011-10-24 02:45:03 +0000122/* Is BE in a multi-channel mode */
123static inline bool be_is_mc(struct be_adapter *adapter) {
124 return (adapter->function_mode & FLEX10_MODE ||
125 adapter->function_mode & VNIC_MODE ||
126 adapter->function_mode & UMC_ENABLED);
127}
128
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700129static void be_queue_free(struct be_adapter *adapter, struct be_queue_info *q)
130{
131 struct be_dma_mem *mem = &q->dma_mem;
Sathya Perla1cfafab2012-02-23 18:50:15 +0000132 if (mem->va) {
Ivan Vecera2b7bceb2011-02-02 08:05:12 +0000133 dma_free_coherent(&adapter->pdev->dev, mem->size, mem->va,
134 mem->dma);
Sathya Perla1cfafab2012-02-23 18:50:15 +0000135 mem->va = NULL;
136 }
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700137}
138
139static int be_queue_alloc(struct be_adapter *adapter, struct be_queue_info *q,
140 u16 len, u16 entry_size)
141{
142 struct be_dma_mem *mem = &q->dma_mem;
143
144 memset(q, 0, sizeof(*q));
145 q->len = len;
146 q->entry_size = entry_size;
147 mem->size = len * entry_size;
Ivan Vecera2b7bceb2011-02-02 08:05:12 +0000148 mem->va = dma_alloc_coherent(&adapter->pdev->dev, mem->size, &mem->dma,
Joe Perches1f9061d22013-03-15 07:23:58 +0000149 GFP_KERNEL | __GFP_ZERO);
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700150 if (!mem->va)
Sathya Perla10ef9ab2012-02-09 18:05:27 +0000151 return -ENOMEM;
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700152 return 0;
153}
154
Somnath Kotur68c45a22013-03-14 02:42:07 +0000155static void be_reg_intr_set(struct be_adapter *adapter, bool enable)
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700156{
Sathya Perladb3ea782011-08-22 19:41:52 +0000157 u32 reg, enabled;
Sathya Perla5f0b8492009-07-27 22:52:56 +0000158
Sathya Perladb3ea782011-08-22 19:41:52 +0000159 pci_read_config_dword(adapter->pdev, PCICFG_MEMBAR_CTRL_INT_CTRL_OFFSET,
160 &reg);
161 enabled = reg & MEMBAR_CTRL_INT_CTRL_HOSTINTR_MASK;
162
Sathya Perla5f0b8492009-07-27 22:52:56 +0000163 if (!enabled && enable)
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700164 reg |= MEMBAR_CTRL_INT_CTRL_HOSTINTR_MASK;
Sathya Perla5f0b8492009-07-27 22:52:56 +0000165 else if (enabled && !enable)
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700166 reg &= ~MEMBAR_CTRL_INT_CTRL_HOSTINTR_MASK;
Sathya Perla5f0b8492009-07-27 22:52:56 +0000167 else
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700168 return;
Sathya Perla5f0b8492009-07-27 22:52:56 +0000169
Sathya Perladb3ea782011-08-22 19:41:52 +0000170 pci_write_config_dword(adapter->pdev,
171 PCICFG_MEMBAR_CTRL_INT_CTRL_OFFSET, reg);
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700172}
173
Somnath Kotur68c45a22013-03-14 02:42:07 +0000174static void be_intr_set(struct be_adapter *adapter, bool enable)
175{
176 int status = 0;
177
178 /* On lancer interrupts can't be controlled via this register */
179 if (lancer_chip(adapter))
180 return;
181
182 if (adapter->eeh_error)
183 return;
184
185 status = be_cmd_intr_set(adapter, enable);
186 if (status)
187 be_reg_intr_set(adapter, enable);
188}
189
Sathya Perla8788fdc2009-07-27 22:52:03 +0000190static void be_rxq_notify(struct be_adapter *adapter, u16 qid, u16 posted)
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700191{
192 u32 val = 0;
193 val |= qid & DB_RQ_RING_ID_MASK;
194 val |= posted << DB_RQ_NUM_POSTED_SHIFT;
Sathya Perlaf3eb62d2010-06-29 00:11:17 +0000195
196 wmb();
Sathya Perla8788fdc2009-07-27 22:52:03 +0000197 iowrite32(val, adapter->db + DB_RQ_OFFSET);
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700198}
199
Vasundhara Volam94d73aa2013-04-21 23:28:14 +0000200static void be_txq_notify(struct be_adapter *adapter, struct be_tx_obj *txo,
201 u16 posted)
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700202{
203 u32 val = 0;
Vasundhara Volam94d73aa2013-04-21 23:28:14 +0000204 val |= txo->q.id & DB_TXULP_RING_ID_MASK;
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700205 val |= (posted & DB_TXULP_NUM_POSTED_MASK) << DB_TXULP_NUM_POSTED_SHIFT;
Sathya Perlaf3eb62d2010-06-29 00:11:17 +0000206
207 wmb();
Vasundhara Volam94d73aa2013-04-21 23:28:14 +0000208 iowrite32(val, adapter->db + txo->db_offset);
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700209}
210
/* Ring the event-queue doorbell: optionally re-arm the EQ, clear the
 * interrupt, and report the number of events processed (num_popped).
 * Skipped entirely after an EEH (PCI) error has been detected.
 */
static void be_eq_notify(struct be_adapter *adapter, u16 qid,
		bool arm, bool clear_int, u16 num_popped)
{
	u32 val = 0;
	val |= qid & DB_EQ_RING_ID_MASK;
	/* high bits of the ring id go into a separate "ext" field */
	val |= ((qid & DB_EQ_RING_ID_EXT_MASK) <<
			DB_EQ_RING_ID_EXT_MASK_SHIFT);

	if (adapter->eeh_error)
		return;

	if (arm)
		val |= 1 << DB_EQ_REARM_SHIFT;
	if (clear_int)
		val |= 1 << DB_EQ_CLR_SHIFT;
	/* this doorbell targets an event queue, not a completion queue */
	val |= 1 << DB_EQ_EVNT_SHIFT;
	val |= num_popped << DB_EQ_NUM_POPPED_SHIFT;
	iowrite32(val, adapter->db + DB_EQ_OFFSET);
}
230
/* Ring the completion-queue doorbell: optionally re-arm the CQ and report
 * the number of CQ entries consumed (num_popped). Non-static — presumably
 * called from other files of this driver; verify against be.h.
 * Skipped entirely after an EEH (PCI) error has been detected.
 */
void be_cq_notify(struct be_adapter *adapter, u16 qid, bool arm, u16 num_popped)
{
	u32 val = 0;
	val |= qid & DB_CQ_RING_ID_MASK;
	/* high bits of the ring id go into a separate "ext" field */
	val |= ((qid & DB_CQ_RING_ID_EXT_MASK) <<
			DB_CQ_RING_ID_EXT_MASK_SHIFT);

	if (adapter->eeh_error)
		return;

	if (arm)
		val |= 1 << DB_CQ_REARM_SHIFT;
	val |= num_popped << DB_CQ_NUM_POPPED_SHIFT;
	iowrite32(val, adapter->db + DB_CQ_OFFSET);
}
246
/* .ndo_set_mac_address handler.
 * Flow differs by function type:
 *  - BE VF: the PF has already programmed the MAC; only accept the address
 *    if it matches what FW reports, then update netdev->dev_addr.
 *  - Lancer VF: query the currently active MAC (and its pmac_id) first so
 *    it can be deleted after the new one is added.
 *  - Otherwise: add the new pmac, then delete the old one on success.
 * Returns 0 on success or a negative errno/FW status on failure.
 */
static int be_mac_addr_set(struct net_device *netdev, void *p)
{
	struct be_adapter *adapter = netdev_priv(netdev);
	struct sockaddr *addr = p;
	int status = 0;
	u8 current_mac[ETH_ALEN];
	u32 pmac_id = adapter->pmac_id[0];	/* old pmac, deleted on success */
	bool active_mac = true;

	if (!is_valid_ether_addr(addr->sa_data))
		return -EADDRNOTAVAIL;

	/* For BE VF, MAC address is already activated by PF.
	 * Hence only operation left is updating netdev->devaddr.
	 * Update it if user is passing the same MAC which was used
	 * during configuring VF MAC from PF(Hypervisor).
	 */
	if (!lancer_chip(adapter) && !be_physfn(adapter)) {
		status = be_cmd_mac_addr_query(adapter, current_mac,
				false, adapter->if_handle, 0);
		if (!status && !memcmp(current_mac, addr->sa_data, ETH_ALEN))
			goto done;
		else
			goto err;
	}

	/* no-op if the requested MAC is already the current one */
	if (!memcmp(addr->sa_data, netdev->dev_addr, ETH_ALEN))
		goto done;

	/* For Lancer check if any MAC is active.
	 * If active, get its mac id.
	 */
	if (lancer_chip(adapter) && !be_physfn(adapter))
		be_cmd_get_mac_from_list(adapter, current_mac, &active_mac,
					 &pmac_id, 0);

	status = be_cmd_pmac_add(adapter, (u8 *)addr->sa_data,
				 adapter->if_handle,
				 &adapter->pmac_id[0], 0);

	if (status)
		goto err;

	/* new MAC is in place; remove the previously active one */
	if (active_mac)
		be_cmd_pmac_del(adapter, adapter->if_handle,
				pmac_id, 0);
done:
	memcpy(netdev->dev_addr, addr->sa_data, netdev->addr_len);
	return 0;
err:
	dev_err(&adapter->pdev->dev, "MAC %pM set Failed\n", addr->sa_data);
	return status;
}
300
Sathya Perlaca34fe32012-11-06 17:48:56 +0000301/* BE2 supports only v0 cmd */
302static void *hw_stats_from_cmd(struct be_adapter *adapter)
303{
304 if (BE2_chip(adapter)) {
305 struct be_cmd_resp_get_stats_v0 *cmd = adapter->stats_cmd.va;
306
307 return &cmd->hw_stats;
308 } else {
309 struct be_cmd_resp_get_stats_v1 *cmd = adapter->stats_cmd.va;
310
311 return &cmd->hw_stats;
312 }
313}
314
315/* BE2 supports only v0 cmd */
316static void *be_erx_stats_from_cmd(struct be_adapter *adapter)
317{
318 if (BE2_chip(adapter)) {
319 struct be_hw_stats_v0 *hw_stats = hw_stats_from_cmd(adapter);
320
321 return &hw_stats->erx;
322 } else {
323 struct be_hw_stats_v1 *hw_stats = hw_stats_from_cmd(adapter);
324
325 return &hw_stats->erx;
326 }
327}
328
/* Copy the v0 (BE2) FW stats response into the driver's software stats
 * (adapter->drv_stats), byte-swapping the whole response from LE first.
 * Per-port counters are taken from the port matching adapter->port_num.
 */
static void populate_be_v0_stats(struct be_adapter *adapter)
{
	struct be_hw_stats_v0 *hw_stats = hw_stats_from_cmd(adapter);
	struct be_pmem_stats *pmem_sts = &hw_stats->pmem;
	struct be_rxf_stats_v0 *rxf_stats = &hw_stats->rxf;
	struct be_port_rxf_stats_v0 *port_stats =
		&rxf_stats->port[adapter->port_num];
	struct be_drv_stats *drvs = &adapter->drv_stats;

	be_dws_le_to_cpu(hw_stats, sizeof(*hw_stats));
	drvs->rx_pause_frames = port_stats->rx_pause_frames;
	drvs->rx_crc_errors = port_stats->rx_crc_errors;
	drvs->rx_control_frames = port_stats->rx_control_frames;
	drvs->rx_in_range_errors = port_stats->rx_in_range_errors;
	drvs->rx_frame_too_long = port_stats->rx_frame_too_long;
	drvs->rx_dropped_runt = port_stats->rx_dropped_runt;
	drvs->rx_ip_checksum_errs = port_stats->rx_ip_checksum_errs;
	drvs->rx_tcp_checksum_errs = port_stats->rx_tcp_checksum_errs;
	drvs->rx_udp_checksum_errs = port_stats->rx_udp_checksum_errs;
	drvs->rxpp_fifo_overflow_drop = port_stats->rx_fifo_overflow;
	drvs->rx_dropped_tcp_length = port_stats->rx_dropped_tcp_length;
	drvs->rx_dropped_too_small = port_stats->rx_dropped_too_small;
	drvs->rx_dropped_too_short = port_stats->rx_dropped_too_short;
	drvs->rx_out_range_errors = port_stats->rx_out_range_errors;
	drvs->rx_input_fifo_overflow_drop = port_stats->rx_input_fifo_overflow;
	drvs->rx_dropped_header_too_small =
		port_stats->rx_dropped_header_too_small;
	/* v0 splits address/vlan mismatch drops; fold them together */
	drvs->rx_address_mismatch_drops =
		port_stats->rx_address_mismatch_drops +
		port_stats->rx_vlan_mismatch_drops;
	drvs->rx_alignment_symbol_errors =
		port_stats->rx_alignment_symbol_errors;

	drvs->tx_pauseframes = port_stats->tx_pauseframes;
	drvs->tx_controlframes = port_stats->tx_controlframes;

	/* v0 keeps jabber counters per-port at the rxf level */
	if (adapter->port_num)
		drvs->jabber_events = rxf_stats->port1_jabber_events;
	else
		drvs->jabber_events = rxf_stats->port0_jabber_events;
	drvs->rx_drops_no_pbuf = rxf_stats->rx_drops_no_pbuf;
	drvs->rx_drops_no_erx_descr = rxf_stats->rx_drops_no_erx_descr;
	drvs->forwarded_packets = rxf_stats->forwarded_packets;
	drvs->rx_drops_mtu = rxf_stats->rx_drops_mtu;
	drvs->rx_drops_no_tpre_descr = rxf_stats->rx_drops_no_tpre_descr;
	drvs->rx_drops_too_many_frags = rxf_stats->rx_drops_too_many_frags;
	adapter->drv_stats.eth_red_drops = pmem_sts->eth_red_drops;
}
377
/* Copy the v1 (BE3/Skyhawk) FW stats response into the driver's software
 * stats (adapter->drv_stats), byte-swapping the whole response from LE
 * first. Per-port counters come from the port matching adapter->port_num.
 */
static void populate_be_v1_stats(struct be_adapter *adapter)
{
	struct be_hw_stats_v1 *hw_stats = hw_stats_from_cmd(adapter);
	struct be_pmem_stats *pmem_sts = &hw_stats->pmem;
	struct be_rxf_stats_v1 *rxf_stats = &hw_stats->rxf;
	struct be_port_rxf_stats_v1 *port_stats =
		&rxf_stats->port[adapter->port_num];
	struct be_drv_stats *drvs = &adapter->drv_stats;

	be_dws_le_to_cpu(hw_stats, sizeof(*hw_stats));
	/* counters only present in the v1 layout */
	drvs->pmem_fifo_overflow_drop = port_stats->pmem_fifo_overflow_drop;
	drvs->rx_priority_pause_frames = port_stats->rx_priority_pause_frames;
	drvs->rx_pause_frames = port_stats->rx_pause_frames;
	drvs->rx_crc_errors = port_stats->rx_crc_errors;
	drvs->rx_control_frames = port_stats->rx_control_frames;
	drvs->rx_in_range_errors = port_stats->rx_in_range_errors;
	drvs->rx_frame_too_long = port_stats->rx_frame_too_long;
	drvs->rx_dropped_runt = port_stats->rx_dropped_runt;
	drvs->rx_ip_checksum_errs = port_stats->rx_ip_checksum_errs;
	drvs->rx_tcp_checksum_errs = port_stats->rx_tcp_checksum_errs;
	drvs->rx_udp_checksum_errs = port_stats->rx_udp_checksum_errs;
	drvs->rx_dropped_tcp_length = port_stats->rx_dropped_tcp_length;
	drvs->rx_dropped_too_small = port_stats->rx_dropped_too_small;
	drvs->rx_dropped_too_short = port_stats->rx_dropped_too_short;
	drvs->rx_out_range_errors = port_stats->rx_out_range_errors;
	drvs->rx_dropped_header_too_small =
		port_stats->rx_dropped_header_too_small;
	drvs->rx_input_fifo_overflow_drop =
		port_stats->rx_input_fifo_overflow_drop;
	/* unlike v0, v1 reports a single combined mismatch-drop counter */
	drvs->rx_address_mismatch_drops = port_stats->rx_address_mismatch_drops;
	drvs->rx_alignment_symbol_errors =
		port_stats->rx_alignment_symbol_errors;
	drvs->rxpp_fifo_overflow_drop = port_stats->rxpp_fifo_overflow_drop;
	drvs->tx_pauseframes = port_stats->tx_pauseframes;
	drvs->tx_controlframes = port_stats->tx_controlframes;
	drvs->jabber_events = port_stats->jabber_events;
	drvs->rx_drops_no_pbuf = rxf_stats->rx_drops_no_pbuf;
	drvs->rx_drops_no_erx_descr = rxf_stats->rx_drops_no_erx_descr;
	drvs->forwarded_packets = rxf_stats->forwarded_packets;
	drvs->rx_drops_mtu = rxf_stats->rx_drops_mtu;
	drvs->rx_drops_no_tpre_descr = rxf_stats->rx_drops_no_tpre_descr;
	drvs->rx_drops_too_many_frags = rxf_stats->rx_drops_too_many_frags;
	adapter->drv_stats.eth_red_drops = pmem_sts->eth_red_drops;
}
422
/* Copy the Lancer per-physical-port (pport) stats response into the
 * driver's software stats, byte-swapping the response from LE first.
 * Lancer counters are 64-bit pairs; only the low words (_lo) are folded
 * into the 32-bit driver counters here.
 */
static void populate_lancer_stats(struct be_adapter *adapter)
{
	struct be_drv_stats *drvs = &adapter->drv_stats;
	struct lancer_pport_stats *pport_stats =
					pport_stats_from_cmd(adapter);

	be_dws_le_to_cpu(pport_stats, sizeof(*pport_stats));
	drvs->rx_pause_frames = pport_stats->rx_pause_frames_lo;
	drvs->rx_crc_errors = pport_stats->rx_crc_errors_lo;
	drvs->rx_control_frames = pport_stats->rx_control_frames_lo;
	drvs->rx_in_range_errors = pport_stats->rx_in_range_errors;
	drvs->rx_frame_too_long = pport_stats->rx_frames_too_long_lo;
	drvs->rx_dropped_runt = pport_stats->rx_dropped_runt;
	drvs->rx_ip_checksum_errs = pport_stats->rx_ip_checksum_errors;
	drvs->rx_tcp_checksum_errs = pport_stats->rx_tcp_checksum_errors;
	drvs->rx_udp_checksum_errs = pport_stats->rx_udp_checksum_errors;
	drvs->rx_dropped_tcp_length =
				pport_stats->rx_dropped_invalid_tcp_length;
	drvs->rx_dropped_too_small = pport_stats->rx_dropped_too_small;
	drvs->rx_dropped_too_short = pport_stats->rx_dropped_too_short;
	drvs->rx_out_range_errors = pport_stats->rx_out_of_range_errors;
	drvs->rx_dropped_header_too_small =
				pport_stats->rx_dropped_header_too_small;
	/* Lancer has one fifo-overflow counter; it feeds two drv counters */
	drvs->rx_input_fifo_overflow_drop = pport_stats->rx_fifo_overflow;
	drvs->rx_address_mismatch_drops =
					pport_stats->rx_address_mismatch_drops +
					pport_stats->rx_vlan_mismatch_drops;
	drvs->rx_alignment_symbol_errors = pport_stats->rx_symbol_errors_lo;
	drvs->rxpp_fifo_overflow_drop = pport_stats->rx_fifo_overflow;
	drvs->tx_pauseframes = pport_stats->tx_pause_frames_lo;
	drvs->tx_controlframes = pport_stats->tx_control_frames_lo;
	drvs->jabber_events = pport_stats->rx_jabbers;
	drvs->forwarded_packets = pport_stats->num_forwards_lo;
	drvs->rx_drops_mtu = pport_stats->rx_drops_mtu_lo;
	drvs->rx_drops_too_many_frags =
				pport_stats->rx_drops_too_many_frags_lo;
}
Ajit Khaparde89a88ab2011-05-16 07:36:18 +0000461
Sathya Perla09c1c682011-08-22 19:41:53 +0000462static void accumulate_16bit_val(u32 *acc, u16 val)
463{
464#define lo(x) (x & 0xFFFF)
465#define hi(x) (x & 0xFFFF0000)
466 bool wrapped = val < lo(*acc);
467 u32 newacc = hi(*acc) + val;
468
469 if (wrapped)
470 newacc += 65536;
471 ACCESS_ONCE(*acc) = newacc;
472}
473
/* Parse the most recent FW stats response into adapter->drv_stats,
 * dispatching on chip family (Lancer vs BE2 vs BE3/Skyhawk), and fold the
 * per-RX-queue 16-bit erx drop counters into 32-bit software accumulators.
 */
void be_parse_stats(struct be_adapter *adapter)
{
	struct be_erx_stats_v1 *erx = be_erx_stats_from_cmd(adapter);
	struct be_rx_obj *rxo;
	int i;

	if (lancer_chip(adapter)) {
		populate_lancer_stats(adapter);
	} else {
		if (BE2_chip(adapter))
			populate_be_v0_stats(adapter);
		else
			/* for BE3 and Skyhawk */
			populate_be_v1_stats(adapter);

		/* as erx_v1 is longer than v0, ok to use v1 for v0 access */
		for_all_rx_queues(adapter, rxo, i) {
			/* below erx HW counter can actually wrap around after
			 * 65535. Driver accumulates a 32-bit value
			 */
			accumulate_16bit_val(&rx_stats(rxo)->rx_drops_no_frags,
					     (u16)erx->rx_drops_no_fragments \
					     [rxo->q.id]);
		}
	}
}
500
/* .ndo_get_stats64 handler: aggregate per-RX/TX-queue software counters
 * (read consistently via u64_stats seqcount begin/retry loops, needed for
 * 64-bit counters on 32-bit hosts) plus the FW-derived error counters in
 * adapter->drv_stats into *stats. Returns the stats pointer it was given.
 */
static struct rtnl_link_stats64 *be_get_stats64(struct net_device *netdev,
					struct rtnl_link_stats64 *stats)
{
	struct be_adapter *adapter = netdev_priv(netdev);
	struct be_drv_stats *drvs = &adapter->drv_stats;
	struct be_rx_obj *rxo;
	struct be_tx_obj *txo;
	u64 pkts, bytes;
	unsigned int start;
	int i;

	for_all_rx_queues(adapter, rxo, i) {
		const struct be_rx_stats *rx_stats = rx_stats(rxo);
		/* retry until we get a consistent 64-bit snapshot */
		do {
			start = u64_stats_fetch_begin_bh(&rx_stats->sync);
			pkts = rx_stats(rxo)->rx_pkts;
			bytes = rx_stats(rxo)->rx_bytes;
		} while (u64_stats_fetch_retry_bh(&rx_stats->sync, start));
		stats->rx_packets += pkts;
		stats->rx_bytes += bytes;
		stats->multicast += rx_stats(rxo)->rx_mcast_pkts;
		stats->rx_dropped += rx_stats(rxo)->rx_drops_no_skbs +
					rx_stats(rxo)->rx_drops_no_frags;
	}

	for_all_tx_queues(adapter, txo, i) {
		const struct be_tx_stats *tx_stats = tx_stats(txo);
		/* retry until we get a consistent 64-bit snapshot */
		do {
			start = u64_stats_fetch_begin_bh(&tx_stats->sync);
			pkts = tx_stats(txo)->tx_pkts;
			bytes = tx_stats(txo)->tx_bytes;
		} while (u64_stats_fetch_retry_bh(&tx_stats->sync, start));
		stats->tx_packets += pkts;
		stats->tx_bytes += bytes;
	}

	/* bad pkts received */
	stats->rx_errors = drvs->rx_crc_errors +
		drvs->rx_alignment_symbol_errors +
		drvs->rx_in_range_errors +
		drvs->rx_out_range_errors +
		drvs->rx_frame_too_long +
		drvs->rx_dropped_too_small +
		drvs->rx_dropped_too_short +
		drvs->rx_dropped_header_too_small +
		drvs->rx_dropped_tcp_length +
		drvs->rx_dropped_runt;

	/* detailed rx errors */
	stats->rx_length_errors = drvs->rx_in_range_errors +
		drvs->rx_out_range_errors +
		drvs->rx_frame_too_long;

	stats->rx_crc_errors = drvs->rx_crc_errors;

	/* frame alignment errors */
	stats->rx_frame_errors = drvs->rx_alignment_symbol_errors;

	/* receiver fifo overrun */
	/* drops_no_pbuf is no per i/f, it's per BE card */
	stats->rx_fifo_errors = drvs->rxpp_fifo_overflow_drop +
				drvs->rx_input_fifo_overflow_drop +
				drvs->rx_drops_no_pbuf;
	return stats;
}
566
Ajit Khapardeb236916a2011-12-30 12:15:40 +0000567void be_link_status_update(struct be_adapter *adapter, u8 link_status)
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700568{
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700569 struct net_device *netdev = adapter->netdev;
570
Ajit Khapardeb236916a2011-12-30 12:15:40 +0000571 if (!(adapter->flags & BE_FLAGS_LINK_STATUS_INIT)) {
Sathya Perlaea172a02011-08-02 19:57:42 +0000572 netif_carrier_off(netdev);
Ajit Khapardeb236916a2011-12-30 12:15:40 +0000573 adapter->flags |= BE_FLAGS_LINK_STATUS_INIT;
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700574 }
Ajit Khapardeb236916a2011-12-30 12:15:40 +0000575
576 if ((link_status & LINK_STATUS_MASK) == LINK_UP)
577 netif_carrier_on(netdev);
578 else
579 netif_carrier_off(netdev);
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700580}
581
Sathya Perla3c8def92011-06-12 20:01:58 +0000582static void be_tx_stats_update(struct be_tx_obj *txo,
Ajit Khaparde91992e42010-02-19 13:57:12 +0000583 u32 wrb_cnt, u32 copied, u32 gso_segs, bool stopped)
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700584{
Sathya Perla3c8def92011-06-12 20:01:58 +0000585 struct be_tx_stats *stats = tx_stats(txo);
586
Sathya Perlaab1594e2011-07-25 19:10:15 +0000587 u64_stats_update_begin(&stats->sync);
Sathya Perlaac124ff2011-07-25 19:10:14 +0000588 stats->tx_reqs++;
589 stats->tx_wrbs += wrb_cnt;
590 stats->tx_bytes += copied;
591 stats->tx_pkts += (gso_segs ? gso_segs : 1);
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700592 if (stopped)
Sathya Perlaac124ff2011-07-25 19:10:14 +0000593 stats->tx_stops++;
Sathya Perlaab1594e2011-07-25 19:10:15 +0000594 u64_stats_update_end(&stats->sync);
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700595}
596
597/* Determine number of WRB entries needed to xmit data in an skb */
Sathya Perlafe6d2a32010-11-21 23:25:50 +0000598static u32 wrb_cnt_for_skb(struct be_adapter *adapter, struct sk_buff *skb,
599 bool *dummy)
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700600{
David S. Millerebc8d2a2009-06-09 01:01:31 -0700601 int cnt = (skb->len > skb->data_len);
602
603 cnt += skb_shinfo(skb)->nr_frags;
604
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700605 /* to account for hdr wrb */
606 cnt++;
Sathya Perlafe6d2a32010-11-21 23:25:50 +0000607 if (lancer_chip(adapter) || !(cnt & 1)) {
608 *dummy = false;
609 } else {
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700610 /* add a dummy to make it an even num */
611 cnt++;
612 *dummy = true;
Sathya Perlafe6d2a32010-11-21 23:25:50 +0000613 }
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700614 BUG_ON(cnt > BE_MAX_TX_FRAG_COUNT);
615 return cnt;
616}
617
618static inline void wrb_fill(struct be_eth_wrb *wrb, u64 addr, int len)
619{
620 wrb->frag_pa_hi = upper_32_bits(addr);
621 wrb->frag_pa_lo = addr & 0xFFFFFFFF;
622 wrb->frag_len = len & ETH_WRB_FRAG_LEN_MASK;
Somnath Kotur89b1f492012-06-24 19:40:55 +0000623 wrb->rsvd0 = 0;
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700624}
625
Ajit Khaparde1ded1322011-12-09 13:53:17 +0000626static inline u16 be_get_tx_vlan_tag(struct be_adapter *adapter,
627 struct sk_buff *skb)
628{
629 u8 vlan_prio;
630 u16 vlan_tag;
631
632 vlan_tag = vlan_tx_tag_get(skb);
633 vlan_prio = (vlan_tag & VLAN_PRIO_MASK) >> VLAN_PRIO_SHIFT;
634 /* If vlan priority provided by OS is NOT in available bmap */
635 if (!(adapter->vlan_prio_bmap & (1 << vlan_prio)))
636 vlan_tag = (vlan_tag & ~VLAN_PRIO_MASK) |
637 adapter->recommended_prio;
638
639 return vlan_tag;
640}
641
Somnath Kotur93040ae2012-06-26 22:32:10 +0000642static int be_vlan_tag_chk(struct be_adapter *adapter, struct sk_buff *skb)
643{
644 return vlan_tx_tag_present(skb) || adapter->pvid;
645}
646
/* Populate the TX header WRB for one packet: CRC, LSO/checksum-offload
 * flags, VLAN insertion, the number of WRBs that follow (wrb_cnt) and the
 * total payload length (len).
 */
static void wrb_fill_hdr(struct be_adapter *adapter, struct be_eth_hdr_wrb *hdr,
		struct sk_buff *skb, u32 wrb_cnt, u32 len)
{
	u16 vlan_tag;

	memset(hdr, 0, sizeof(*hdr));

	AMAP_SET_BITS(struct amap_eth_hdr_wrb, crc, hdr, 1);

	if (skb_is_gso(skb)) {
		AMAP_SET_BITS(struct amap_eth_hdr_wrb, lso, hdr, 1);
		AMAP_SET_BITS(struct amap_eth_hdr_wrb, lso_mss,
			hdr, skb_shinfo(skb)->gso_size);
		/* Lancer presumably doesn't need/support the lso6 bit —
		 * TODO confirm against HW spec */
		if (skb_is_gso_v6(skb) && !lancer_chip(adapter))
			AMAP_SET_BITS(struct amap_eth_hdr_wrb, lso6, hdr, 1);
	} else if (skb->ip_summed == CHECKSUM_PARTIAL) {
		/* stack asked for checksum offload: set TCP or UDP csum bit */
		if (is_tcp_pkt(skb))
			AMAP_SET_BITS(struct amap_eth_hdr_wrb, tcpcs, hdr, 1);
		else if (is_udp_pkt(skb))
			AMAP_SET_BITS(struct amap_eth_hdr_wrb, udpcs, hdr, 1);
	}

	if (vlan_tx_tag_present(skb)) {
		AMAP_SET_BITS(struct amap_eth_hdr_wrb, vlan, hdr, 1);
		/* tag may have its priority bits rewritten; see helper */
		vlan_tag = be_get_tx_vlan_tag(adapter, skb);
		AMAP_SET_BITS(struct amap_eth_hdr_wrb, vlan_tag, hdr, vlan_tag);
	}

	AMAP_SET_BITS(struct amap_eth_hdr_wrb, event, hdr, 1);
	AMAP_SET_BITS(struct amap_eth_hdr_wrb, complete, hdr, 1);
	AMAP_SET_BITS(struct amap_eth_hdr_wrb, num_wrb, hdr, wrb_cnt);
	AMAP_SET_BITS(struct amap_eth_hdr_wrb, len, hdr, len);
}
680
Ivan Vecera2b7bceb2011-02-02 08:05:12 +0000681static void unmap_tx_frag(struct device *dev, struct be_eth_wrb *wrb,
Sathya Perla7101e112010-03-22 20:41:12 +0000682 bool unmap_single)
683{
684 dma_addr_t dma;
685
686 be_dws_le_to_cpu(wrb, sizeof(*wrb));
687
688 dma = (u64)wrb->frag_pa_hi << 32 | (u64)wrb->frag_pa_lo;
FUJITA Tomonorib681ee72010-04-04 21:40:18 +0000689 if (wrb->frag_len) {
Sathya Perla7101e112010-03-22 20:41:12 +0000690 if (unmap_single)
Ivan Vecera2b7bceb2011-02-02 08:05:12 +0000691 dma_unmap_single(dev, dma, wrb->frag_len,
692 DMA_TO_DEVICE);
Sathya Perla7101e112010-03-22 20:41:12 +0000693 else
Ivan Vecera2b7bceb2011-02-02 08:05:12 +0000694 dma_unmap_page(dev, dma, wrb->frag_len, DMA_TO_DEVICE);
Sathya Perla7101e112010-03-22 20:41:12 +0000695 }
696}
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700697
/* Build the TX WRBs for @skb in @txq: one header WRB, one data WRB per
 * DMA-mapped fragment (linear head followed by page frags), and optionally
 * a zero-length dummy WRB when @dummy_wrb is set.
 * Returns the number of data bytes mapped, or 0 if any DMA mapping failed,
 * in which case all mappings made so far are undone and the queue head is
 * restored to where it was on entry.
 */
static int make_tx_wrbs(struct be_adapter *adapter, struct be_queue_info *txq,
		struct sk_buff *skb, u32 wrb_cnt, bool dummy_wrb)
{
	dma_addr_t busaddr;
	int i, copied = 0;
	struct device *dev = &adapter->pdev->dev;
	struct sk_buff *first_skb = skb;
	struct be_eth_wrb *wrb;
	struct be_eth_hdr_wrb *hdr;
	bool map_single = false;
	u16 map_head;

	hdr = queue_head_node(txq);
	queue_head_inc(txq);
	/* First data WRB slot; rollback point for the error path */
	map_head = txq->head;

	/* Map the linear portion of the skb, if any */
	if (skb->len > skb->data_len) {
		int len = skb_headlen(skb);
		busaddr = dma_map_single(dev, skb->data, len, DMA_TO_DEVICE);
		if (dma_mapping_error(dev, busaddr))
			goto dma_err;
		map_single = true;
		wrb = queue_head_node(txq);
		wrb_fill(wrb, busaddr, len);
		be_dws_cpu_to_le(wrb, sizeof(*wrb));
		queue_head_inc(txq);
		copied += len;
	}

	/* Map each page fragment into its own WRB */
	for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
		const struct skb_frag_struct *frag =
			&skb_shinfo(skb)->frags[i];
		busaddr = skb_frag_dma_map(dev, frag, 0,
					skb_frag_size(frag), DMA_TO_DEVICE);
		if (dma_mapping_error(dev, busaddr))
			goto dma_err;
		wrb = queue_head_node(txq);
		wrb_fill(wrb, busaddr, skb_frag_size(frag));
		be_dws_cpu_to_le(wrb, sizeof(*wrb));
		queue_head_inc(txq);
		copied += skb_frag_size(frag);
	}

	if (dummy_wrb) {
		wrb = queue_head_node(txq);
		wrb_fill(wrb, 0, 0);
		be_dws_cpu_to_le(wrb, sizeof(*wrb));
		queue_head_inc(txq);
	}

	wrb_fill_hdr(adapter, hdr, first_skb, wrb_cnt, copied);
	be_dws_cpu_to_le(hdr, sizeof(*hdr));

	return copied;
dma_err:
	/* Unwind from map_head: unmap every WRB filled so far. Only the
	 * first WRB can be a dma_map_single() mapping, hence map_single
	 * is cleared after the first iteration.
	 */
	txq->head = map_head;
	while (copied) {
		wrb = queue_head_node(txq);
		unmap_tx_frag(dev, wrb, map_single);
		map_single = false;
		copied -= wrb->frag_len;
		queue_head_inc(txq);
	}
	return 0;
}
763
Somnath Kotur93040ae2012-06-26 22:32:10 +0000764static struct sk_buff *be_insert_vlan_in_pkt(struct be_adapter *adapter,
765 struct sk_buff *skb)
766{
767 u16 vlan_tag = 0;
768
769 skb = skb_share_check(skb, GFP_ATOMIC);
770 if (unlikely(!skb))
771 return skb;
772
773 if (vlan_tx_tag_present(skb)) {
774 vlan_tag = be_get_tx_vlan_tag(adapter, skb);
Patrick McHardy86a9bad2013-04-19 02:04:30 +0000775 __vlan_put_tag(skb, htons(ETH_P_8021Q), vlan_tag);
Somnath Kotur93040ae2012-06-26 22:32:10 +0000776 skb->vlan_tci = 0;
777 }
778
779 return skb;
780}
781
/* ndo_start_xmit handler: apply HW workarounds, map the skb into TX WRBs,
 * ring the TX doorbell and update stats. Always returns NETDEV_TX_OK;
 * un-transmittable skbs are dropped.
 */
static netdev_tx_t be_xmit(struct sk_buff *skb,
			struct net_device *netdev)
{
	struct be_adapter *adapter = netdev_priv(netdev);
	struct be_tx_obj *txo = &adapter->tx_obj[skb_get_queue_mapping(skb)];
	struct be_queue_info *txq = &txo->q;
	struct iphdr *ip = NULL;
	u32 wrb_cnt = 0, copied = 0;
	u32 start = txq->head, eth_hdr_len;
	bool dummy_wrb, stopped = false;

	eth_hdr_len = ntohs(skb->protocol) == ETH_P_8021Q ?
		VLAN_ETH_HLEN : ETH_HLEN;

	/* HW has a bug which considers padding bytes as legal
	 * and modifies the IPv4 hdr's 'tot_len' field
	 */
	if (skb->len <= 60 && be_vlan_tag_chk(adapter, skb) &&
			is_ipv4_pkt(skb)) {
		/* Trim off the pad bytes so HW sees exactly tot_len */
		ip = (struct iphdr *)ip_hdr(skb);
		pskb_trim(skb, eth_hdr_len + ntohs(ip->tot_len));
	}

	/* HW has a bug wherein it will calculate CSUM for VLAN
	 * pkts even though it is disabled.
	 * Manually insert VLAN in pkt.
	 */
	if (skb->ip_summed != CHECKSUM_PARTIAL &&
			be_vlan_tag_chk(adapter, skb)) {
		skb = be_insert_vlan_in_pkt(adapter, skb);
		if (unlikely(!skb))
			goto tx_drop;
	}

	wrb_cnt = wrb_cnt_for_skb(adapter, skb, &dummy_wrb);

	copied = make_tx_wrbs(adapter, txq, skb, wrb_cnt, dummy_wrb);
	if (copied) {
		int gso_segs = skb_shinfo(skb)->gso_segs;

		/* record the sent skb in the sent_skb table */
		BUG_ON(txo->sent_skb_list[start]);
		txo->sent_skb_list[start] = skb;

		/* Ensure txq has space for the next skb; Else stop the queue
		 * *BEFORE* ringing the tx doorbell, so that we serialze the
		 * tx compls of the current transmit which'll wake up the queue
		 */
		atomic_add(wrb_cnt, &txq->used);
		if ((BE_MAX_TX_FRAG_COUNT + atomic_read(&txq->used)) >=
								txq->len) {
			netif_stop_subqueue(netdev, skb_get_queue_mapping(skb));
			stopped = true;
		}

		be_txq_notify(adapter, txo, wrb_cnt);

		be_tx_stats_update(txo, wrb_cnt, copied, gso_segs, stopped);
	} else {
		/* DMA mapping failed: reclaim the WRB slots and drop */
		txq->head = start;
		dev_kfree_skb_any(skb);
	}
tx_drop:
	return NETDEV_TX_OK;
}
847
848static int be_change_mtu(struct net_device *netdev, int new_mtu)
849{
850 struct be_adapter *adapter = netdev_priv(netdev);
851 if (new_mtu < BE_MIN_MTU ||
Ajit Khaparde34a89b82010-02-09 01:32:43 +0000852 new_mtu > (BE_MAX_JUMBO_FRAME_SIZE -
853 (ETH_HLEN + ETH_FCS_LEN))) {
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700854 dev_info(&adapter->pdev->dev,
855 "MTU must be between %d and %d bytes\n",
Ajit Khaparde34a89b82010-02-09 01:32:43 +0000856 BE_MIN_MTU,
857 (BE_MAX_JUMBO_FRAME_SIZE - (ETH_HLEN + ETH_FCS_LEN)));
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700858 return -EINVAL;
859 }
860 dev_info(&adapter->pdev->dev, "MTU changed from %d to %d bytes\n",
861 netdev->mtu, new_mtu);
862 netdev->mtu = new_mtu;
863 return 0;
864}
865
/*
 * A max of 64 (BE_NUM_VLANS_SUPPORTED) vlans can be configured in BE.
 * If the user configures more, place BE in vlan promiscuous mode.
 * Returns 0 on success or the status of the last FW command on failure.
 */
static int be_vid_config(struct be_adapter *adapter)
{
	u16 vids[BE_NUM_VLANS_SUPPORTED];
	u16 num = 0, i;
	int status = 0;

	/* No need to further configure vids if in promiscuous mode */
	if (adapter->promiscuous)
		return 0;

	if (adapter->vlans_added > adapter->max_vlans)
		goto set_vlan_promisc;

	/* Construct VLAN Table to give to HW */
	for (i = 0; i < VLAN_N_VID; i++)
		if (adapter->vlan_tag[i])
			vids[num++] = cpu_to_le16(i);

	status = be_cmd_vlan_config(adapter, adapter->if_handle,
				vids, num, 1, 0);

	/* Set to VLAN promisc mode as setting VLAN filter failed */
	if (status) {
		dev_info(&adapter->pdev->dev, "Exhausted VLAN HW filters.\n");
		dev_info(&adapter->pdev->dev, "Disabling HW VLAN filtering.\n");
		goto set_vlan_promisc;
	}

	return status;

set_vlan_promisc:
	/* NULL vid table with the promisc flag set */
	status = be_cmd_vlan_config(adapter, adapter->if_handle,
			NULL, 0, 1, 1);
	return status;
}
905
/* ndo_vlan_rx_add_vid handler: add @vid to the HW VLAN filter table. */
static int be_vlan_add_vid(struct net_device *netdev, __be16 proto, u16 vid)
{
	struct be_adapter *adapter = netdev_priv(netdev);
	int status = 0;

	/* Only the PF (or any function on Lancer) may change VLAN filters */
	if (!lancer_chip(adapter) && !be_physfn(adapter)) {
		status = -EINVAL;
		goto ret;
	}

	/* Packets with VID 0 are always received by Lancer by default */
	if (lancer_chip(adapter) && vid == 0)
		goto ret;

	adapter->vlan_tag[vid] = 1;
	/* NOTE(review): the "+ 1" is asymmetric with be_vlan_rem_vid()'s
	 * "<= max_vlans" check — confirm this off-by-one is intentional.
	 */
	if (adapter->vlans_added <= (adapter->max_vlans + 1))
		status = be_vid_config(adapter);

	if (!status)
		adapter->vlans_added++;
	else
		adapter->vlan_tag[vid] = 0; /* roll back SW state on failure */
ret:
	return status;
}
931
Patrick McHardy80d5c362013-04-19 02:04:28 +0000932static int be_vlan_rem_vid(struct net_device *netdev, __be16 proto, u16 vid)
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700933{
934 struct be_adapter *adapter = netdev_priv(netdev);
Ajit Khaparde80817cb2011-12-30 12:15:12 +0000935 int status = 0;
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700936
Padmanabh Ratnakara85e9982012-10-20 06:02:40 +0000937 if (!lancer_chip(adapter) && !be_physfn(adapter)) {
Ajit Khaparde80817cb2011-12-30 12:15:12 +0000938 status = -EINVAL;
939 goto ret;
940 }
Sarveshwar Bandiba343c72010-03-31 02:56:12 +0000941
Padmanabh Ratnakara85e9982012-10-20 06:02:40 +0000942 /* Packets with VID 0 are always received by Lancer by default */
943 if (lancer_chip(adapter) && vid == 0)
944 goto ret;
945
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700946 adapter->vlan_tag[vid] = 0;
Ajit Khaparde82903e42010-02-09 01:34:57 +0000947 if (adapter->vlans_added <= adapter->max_vlans)
Sathya Perla10329df2012-06-05 19:37:18 +0000948 status = be_vid_config(adapter);
Jiri Pirko8e586132011-12-08 19:52:37 -0500949
Ajit Khaparde80817cb2011-12-30 12:15:12 +0000950 if (!status)
951 adapter->vlans_added--;
952 else
953 adapter->vlan_tag[vid] = 1;
954ret:
955 return status;
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700956}
957
/* ndo_set_rx_mode handler: program promiscuous, multicast and unicast
 * RX filtering to match the netdev's current flags and address lists.
 */
static void be_set_rx_mode(struct net_device *netdev)
{
	struct be_adapter *adapter = netdev_priv(netdev);
	int status;

	if (netdev->flags & IFF_PROMISC) {
		be_cmd_rx_filter(adapter, IFF_PROMISC, ON);
		adapter->promiscuous = true;
		goto done;
	}

	/* BE was previously in promiscuous mode; disable it */
	if (adapter->promiscuous) {
		adapter->promiscuous = false;
		be_cmd_rx_filter(adapter, IFF_PROMISC, OFF);

		/* Re-program the VLAN filters lost while in promisc mode */
		if (adapter->vlans_added)
			be_vid_config(adapter);
	}

	/* Enable multicast promisc if num configured exceeds what we support */
	if (netdev->flags & IFF_ALLMULTI ||
	    netdev_mc_count(netdev) > adapter->max_mcast_mac) {
		be_cmd_rx_filter(adapter, IFF_ALLMULTI, ON);
		goto done;
	}

	/* Re-sync the HW unicast MAC filter list with the netdev's list */
	if (netdev_uc_count(netdev) != adapter->uc_macs) {
		struct netdev_hw_addr *ha;
		int i = 1; /* First slot is claimed by the Primary MAC */

		/* Delete all previously programmed secondary UC MACs */
		for (; adapter->uc_macs > 0; adapter->uc_macs--, i++) {
			be_cmd_pmac_del(adapter, adapter->if_handle,
					adapter->pmac_id[i], 0);
		}

		/* More UC addresses than HW filter slots: go fully promisc */
		if (netdev_uc_count(netdev) > adapter->max_pmac_cnt) {
			be_cmd_rx_filter(adapter, IFF_PROMISC, ON);
			adapter->promiscuous = true;
			goto done;
		}

		netdev_for_each_uc_addr(ha, adapter->netdev) {
			adapter->uc_macs++; /* First slot is for Primary MAC */
			be_cmd_pmac_add(adapter, (u8 *)ha->addr,
					adapter->if_handle,
					&adapter->pmac_id[adapter->uc_macs], 0);
		}
	}

	status = be_cmd_rx_filter(adapter, IFF_MULTICAST, ON);

	/* Set to MCAST promisc mode if setting MULTICAST address fails */
	if (status) {
		dev_info(&adapter->pdev->dev, "Exhausted multicast HW filters.\n");
		dev_info(&adapter->pdev->dev, "Disabling HW multicast filtering.\n");
		be_cmd_rx_filter(adapter, IFF_ALLMULTI, ON);
	}
done:
	return;
}
1019
/* ndo_set_vf_mac handler: program @mac as the MAC address of VF @vf.
 * Returns 0 on success, -EPERM if SR-IOV is disabled, -EINVAL for a bad
 * MAC or VF index, or the FW command status on failure.
 */
static int be_set_vf_mac(struct net_device *netdev, int vf, u8 *mac)
{
	struct be_adapter *adapter = netdev_priv(netdev);
	struct be_vf_cfg *vf_cfg = &adapter->vf_cfg[vf];
	int status;
	bool active_mac = false;
	u32 pmac_id;
	u8 old_mac[ETH_ALEN];

	if (!sriov_enabled(adapter))
		return -EPERM;

	if (!is_valid_ether_addr(mac) || vf >= adapter->num_vfs)
		return -EINVAL;

	if (lancer_chip(adapter)) {
		/* Lancer: delete the currently active MAC (if any), then
		 * program the new one via the MAC-list interface.
		 */
		status = be_cmd_get_mac_from_list(adapter, old_mac, &active_mac,
						&pmac_id, vf + 1);
		if (!status && active_mac)
			be_cmd_pmac_del(adapter, vf_cfg->if_handle,
					pmac_id, vf + 1);

		status = be_cmd_set_mac_list(adapter, mac, 1, vf + 1);
	} else {
		/* NOTE(review): the pmac_del status is overwritten by
		 * pmac_add below, so a delete failure is silently ignored.
		 */
		status = be_cmd_pmac_del(adapter, vf_cfg->if_handle,
				vf_cfg->pmac_id, vf + 1);

		status = be_cmd_pmac_add(adapter, mac, vf_cfg->if_handle,
					&vf_cfg->pmac_id, vf + 1);
	}

	if (status)
		dev_err(&adapter->pdev->dev, "MAC %pM set on VF %d Failed\n",
				mac, vf);
	else
		memcpy(vf_cfg->mac_addr, mac, ETH_ALEN);

	return status;
}
1059
Ajit Khaparde64600ea2010-07-23 01:50:34 +00001060static int be_get_vf_config(struct net_device *netdev, int vf,
1061 struct ifla_vf_info *vi)
1062{
1063 struct be_adapter *adapter = netdev_priv(netdev);
Sathya Perla11ac75e2011-12-13 00:58:50 +00001064 struct be_vf_cfg *vf_cfg = &adapter->vf_cfg[vf];
Ajit Khaparde64600ea2010-07-23 01:50:34 +00001065
Sathya Perla11ac75e2011-12-13 00:58:50 +00001066 if (!sriov_enabled(adapter))
Ajit Khaparde64600ea2010-07-23 01:50:34 +00001067 return -EPERM;
1068
Sathya Perla11ac75e2011-12-13 00:58:50 +00001069 if (vf >= adapter->num_vfs)
Ajit Khaparde64600ea2010-07-23 01:50:34 +00001070 return -EINVAL;
1071
1072 vi->vf = vf;
Sathya Perla11ac75e2011-12-13 00:58:50 +00001073 vi->tx_rate = vf_cfg->tx_rate;
1074 vi->vlan = vf_cfg->vlan_tag;
Ajit Khaparde64600ea2010-07-23 01:50:34 +00001075 vi->qos = 0;
Sathya Perla11ac75e2011-12-13 00:58:50 +00001076 memcpy(&vi->mac, vf_cfg->mac_addr, ETH_ALEN);
Ajit Khaparde64600ea2010-07-23 01:50:34 +00001077
1078 return 0;
1079}
1080
Ajit Khaparde1da87b72010-07-23 01:51:22 +00001081static int be_set_vf_vlan(struct net_device *netdev,
1082 int vf, u16 vlan, u8 qos)
1083{
1084 struct be_adapter *adapter = netdev_priv(netdev);
1085 int status = 0;
1086
Sathya Perla11ac75e2011-12-13 00:58:50 +00001087 if (!sriov_enabled(adapter))
Ajit Khaparde1da87b72010-07-23 01:51:22 +00001088 return -EPERM;
1089
Sathya Perla11ac75e2011-12-13 00:58:50 +00001090 if (vf >= adapter->num_vfs || vlan > 4095)
Ajit Khaparde1da87b72010-07-23 01:51:22 +00001091 return -EINVAL;
1092
1093 if (vlan) {
Ajit Khapardef1f3ee12012-03-18 06:23:41 +00001094 if (adapter->vf_cfg[vf].vlan_tag != vlan) {
1095 /* If this is new value, program it. Else skip. */
1096 adapter->vf_cfg[vf].vlan_tag = vlan;
1097
1098 status = be_cmd_set_hsw_config(adapter, vlan,
1099 vf + 1, adapter->vf_cfg[vf].if_handle);
1100 }
Ajit Khaparde1da87b72010-07-23 01:51:22 +00001101 } else {
Ajit Khapardef1f3ee12012-03-18 06:23:41 +00001102 /* Reset Transparent Vlan Tagging. */
Sathya Perla11ac75e2011-12-13 00:58:50 +00001103 adapter->vf_cfg[vf].vlan_tag = 0;
Ajit Khapardef1f3ee12012-03-18 06:23:41 +00001104 vlan = adapter->vf_cfg[vf].def_vid;
1105 status = be_cmd_set_hsw_config(adapter, vlan, vf + 1,
1106 adapter->vf_cfg[vf].if_handle);
Ajit Khaparde1da87b72010-07-23 01:51:22 +00001107 }
1108
Ajit Khaparde1da87b72010-07-23 01:51:22 +00001109
1110 if (status)
1111 dev_info(&adapter->pdev->dev,
1112 "VLAN %d config on VF %d failed\n", vlan, vf);
1113 return status;
1114}
1115
Ajit Khapardee1d18732010-07-23 01:52:13 +00001116static int be_set_vf_tx_rate(struct net_device *netdev,
1117 int vf, int rate)
1118{
1119 struct be_adapter *adapter = netdev_priv(netdev);
1120 int status = 0;
1121
Sathya Perla11ac75e2011-12-13 00:58:50 +00001122 if (!sriov_enabled(adapter))
Ajit Khapardee1d18732010-07-23 01:52:13 +00001123 return -EPERM;
1124
Ajit Khaparde94f434c2011-12-30 12:15:30 +00001125 if (vf >= adapter->num_vfs)
Ajit Khapardee1d18732010-07-23 01:52:13 +00001126 return -EINVAL;
1127
Ajit Khaparde94f434c2011-12-30 12:15:30 +00001128 if (rate < 100 || rate > 10000) {
1129 dev_err(&adapter->pdev->dev,
1130 "tx rate must be between 100 and 10000 Mbps\n");
1131 return -EINVAL;
1132 }
Ajit Khapardee1d18732010-07-23 01:52:13 +00001133
Padmanabh Ratnakard5c18472012-10-20 06:01:53 +00001134 if (lancer_chip(adapter))
1135 status = be_cmd_set_profile_config(adapter, rate / 10, vf + 1);
1136 else
1137 status = be_cmd_set_qos(adapter, rate / 10, vf + 1);
Ajit Khapardee1d18732010-07-23 01:52:13 +00001138
1139 if (status)
Ajit Khaparde94f434c2011-12-30 12:15:30 +00001140 dev_err(&adapter->pdev->dev,
Ajit Khapardee1d18732010-07-23 01:52:13 +00001141 "tx rate %d on VF %d failed\n", rate, vf);
Ajit Khaparde94f434c2011-12-30 12:15:30 +00001142 else
1143 adapter->vf_cfg[vf].tx_rate = rate;
Ajit Khapardee1d18732010-07-23 01:52:13 +00001144 return status;
1145}
1146
/* Count this adapter's VFs by walking the PCI device list.
 * Returns the number of VFs whose physfn is this PF; when
 * @vf_state == ASSIGNED only VFs currently assigned to a guest are
 * counted. Returns 0 if the device has no SR-IOV capability.
 */
static int be_find_vfs(struct be_adapter *adapter, int vf_state)
{
	struct pci_dev *dev, *pdev = adapter->pdev;
	int vfs = 0, assigned_vfs = 0, pos;
	u16 offset, stride;

	pos = pci_find_ext_capability(pdev, PCI_EXT_CAP_ID_SRIOV);
	if (!pos)
		return 0;
	/* NOTE(review): offset/stride are read but not used below */
	pci_read_config_word(pdev, pos + PCI_SRIOV_VF_OFFSET, &offset);
	pci_read_config_word(pdev, pos + PCI_SRIOV_VF_STRIDE, &stride);

	/* pci_get_device() releases the ref on the previous device, so
	 * this loop needs no explicit pci_dev_put().
	 */
	dev = pci_get_device(pdev->vendor, PCI_ANY_ID, NULL);
	while (dev) {
		if (dev->is_virtfn && pci_physfn(dev) == pdev) {
			vfs++;
			if (dev->dev_flags & PCI_DEV_FLAGS_ASSIGNED)
				assigned_vfs++;
		}
		dev = pci_get_device(pdev->vendor, PCI_ANY_ID, dev);
	}
	return (vf_state == ASSIGNED) ? assigned_vfs : vfs;
}
1170
/* Adaptive interrupt coalescing: recompute and program the EQ delay for
 * @eqo from the RX packet rate observed over (at least) the last second.
 * With AIC disabled, the static eqo->eqd value is programmed instead.
 */
static void be_eqd_update(struct be_adapter *adapter, struct be_eq_obj *eqo)
{
	struct be_rx_stats *stats = rx_stats(&adapter->rx_obj[eqo->idx]);
	ulong now = jiffies;
	ulong delta = now - stats->rx_jiffies;
	u64 pkts;
	unsigned int start, eqd;

	if (!eqo->enable_aic) {
		eqd = eqo->eqd;
		goto modify_eqd;
	}

	/* EQs beyond the RX queues have no RX stats to adapt from */
	if (eqo->idx >= adapter->num_rx_qs)
		return;

	stats = rx_stats(&adapter->rx_obj[eqo->idx]);

	/* Wrapped around */
	if (time_before(now, stats->rx_jiffies)) {
		stats->rx_jiffies = now;
		return;
	}

	/* Update once a second */
	if (delta < HZ)
		return;

	/* Snapshot the packet counter consistently w.r.t. the writer */
	do {
		start = u64_stats_fetch_begin_bh(&stats->sync);
		pkts = stats->rx_pkts;
	} while (u64_stats_fetch_retry_bh(&stats->sync, start));

	stats->rx_pps = (unsigned long)(pkts - stats->rx_pkts_prev) / (delta / HZ);
	stats->rx_pkts_prev = pkts;
	stats->rx_jiffies = now;
	/* Scale pps to an EQ delay, clamped to [min_eqd, max_eqd] */
	eqd = (stats->rx_pps / 110000) << 3;
	eqd = min(eqd, eqo->max_eqd);
	eqd = max(eqd, eqo->min_eqd);
	if (eqd < 10)
		eqd = 0;

modify_eqd:
	/* Issue the FW command only when the value actually changes */
	if (eqd != eqo->cur_eqd) {
		be_cmd_modify_eqd(adapter, eqo->q.id, eqd);
		eqo->cur_eqd = eqd;
	}
}
1219
Sathya Perla3abcded2010-10-03 22:12:27 -07001220static void be_rx_stats_update(struct be_rx_obj *rxo,
Sathya Perla2e588f82011-03-11 02:49:26 +00001221 struct be_rx_compl_info *rxcp)
Sathya Perla4097f662009-03-24 16:40:13 -07001222{
Sathya Perlaac124ff2011-07-25 19:10:14 +00001223 struct be_rx_stats *stats = rx_stats(rxo);
Sathya Perla4097f662009-03-24 16:40:13 -07001224
Sathya Perlaab1594e2011-07-25 19:10:15 +00001225 u64_stats_update_begin(&stats->sync);
Sathya Perla3abcded2010-10-03 22:12:27 -07001226 stats->rx_compl++;
Sathya Perla2e588f82011-03-11 02:49:26 +00001227 stats->rx_bytes += rxcp->pkt_size;
Sathya Perla3abcded2010-10-03 22:12:27 -07001228 stats->rx_pkts++;
Sathya Perla2e588f82011-03-11 02:49:26 +00001229 if (rxcp->pkt_type == BE_MULTICAST_PACKET)
Sathya Perla3abcded2010-10-03 22:12:27 -07001230 stats->rx_mcast_pkts++;
Sathya Perla2e588f82011-03-11 02:49:26 +00001231 if (rxcp->err)
Sathya Perlaac124ff2011-07-25 19:10:14 +00001232 stats->rx_compl_err++;
Sathya Perlaab1594e2011-07-25 19:10:15 +00001233 u64_stats_update_end(&stats->sync);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001234}
1235
Sathya Perla2e588f82011-03-11 02:49:26 +00001236static inline bool csum_passed(struct be_rx_compl_info *rxcp)
Ajit Khaparde728a9972009-04-13 15:41:22 -07001237{
Padmanabh Ratnakar19fad862011-03-07 03:08:16 +00001238 /* L4 checksum is not reliable for non TCP/UDP packets.
1239 * Also ignore ipcksm for ipv6 pkts */
Sathya Perla2e588f82011-03-11 02:49:26 +00001240 return (rxcp->tcpf || rxcp->udpf) && rxcp->l4_csum &&
1241 (rxcp->ip_csum || rxcp->ipv6);
Ajit Khaparde728a9972009-04-13 15:41:22 -07001242}
1243
Sathya Perla10ef9ab2012-02-09 18:05:27 +00001244static struct be_rx_page_info *get_rx_page_info(struct be_rx_obj *rxo,
1245 u16 frag_idx)
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001246{
Sathya Perla10ef9ab2012-02-09 18:05:27 +00001247 struct be_adapter *adapter = rxo->adapter;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001248 struct be_rx_page_info *rx_page_info;
Sathya Perla3abcded2010-10-03 22:12:27 -07001249 struct be_queue_info *rxq = &rxo->q;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001250
Sathya Perla3abcded2010-10-03 22:12:27 -07001251 rx_page_info = &rxo->page_info_tbl[frag_idx];
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001252 BUG_ON(!rx_page_info->page);
1253
Ajit Khaparde205859a2010-02-09 01:34:21 +00001254 if (rx_page_info->last_page_user) {
Ivan Vecera2b7bceb2011-02-02 08:05:12 +00001255 dma_unmap_page(&adapter->pdev->dev,
1256 dma_unmap_addr(rx_page_info, bus),
1257 adapter->big_page_size, DMA_FROM_DEVICE);
Ajit Khaparde205859a2010-02-09 01:34:21 +00001258 rx_page_info->last_page_user = false;
1259 }
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001260
1261 atomic_dec(&rxq->used);
1262 return rx_page_info;
1263}
1264
1265/* Throwaway the data in the Rx completion */
Sathya Perla10ef9ab2012-02-09 18:05:27 +00001266static void be_rx_compl_discard(struct be_rx_obj *rxo,
1267 struct be_rx_compl_info *rxcp)
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001268{
Sathya Perla3abcded2010-10-03 22:12:27 -07001269 struct be_queue_info *rxq = &rxo->q;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001270 struct be_rx_page_info *page_info;
Sathya Perla2e588f82011-03-11 02:49:26 +00001271 u16 i, num_rcvd = rxcp->num_rcvd;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001272
Padmanabh Ratnakare80d9da2011-03-07 03:07:58 +00001273 for (i = 0; i < num_rcvd; i++) {
Sathya Perla10ef9ab2012-02-09 18:05:27 +00001274 page_info = get_rx_page_info(rxo, rxcp->rxq_idx);
Padmanabh Ratnakare80d9da2011-03-07 03:07:58 +00001275 put_page(page_info->page);
1276 memset(page_info, 0, sizeof(*page_info));
Sathya Perla2e588f82011-03-11 02:49:26 +00001277 index_inc(&rxcp->rxq_idx, rxq->len);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001278 }
1279}
1280
1281/*
1282 * skb_fill_rx_data forms a complete skb for an ether frame
1283 * indicated by rxcp.
1284 */
static void skb_fill_rx_data(struct be_rx_obj *rxo, struct sk_buff *skb,
			struct be_rx_compl_info *rxcp)
{
	struct be_queue_info *rxq = &rxo->q;
	struct be_rx_page_info *page_info;
	u16 i, j;
	u16 hdr_len, curr_frag_len, remaining;
	u8 *start;

	page_info = get_rx_page_info(rxo, rxcp->rxq_idx);
	start = page_address(page_info->page) + page_info->page_offset;
	prefetch(start);

	/* Copy data in the first descriptor of this completion */
	curr_frag_len = min(rxcp->pkt_size, rx_frag_size);

	skb->len = curr_frag_len;
	if (curr_frag_len <= BE_HDR_LEN) { /* tiny packet */
		memcpy(skb->data, start, curr_frag_len);
		/* Complete packet has now been moved to data */
		put_page(page_info->page);
		skb->data_len = 0;
		skb->tail += curr_frag_len;
	} else {
		/* Pull only the ethernet header into the linear area;
		 * the rest of the first rx frag is attached as frags[0]
		 */
		hdr_len = ETH_HLEN;
		memcpy(skb->data, start, hdr_len);
		skb_shinfo(skb)->nr_frags = 1;
		skb_frag_set_page(skb, 0, page_info->page);
		skb_shinfo(skb)->frags[0].page_offset =
					page_info->page_offset + hdr_len;
		skb_frag_size_set(&skb_shinfo(skb)->frags[0], curr_frag_len - hdr_len);
		skb->data_len = curr_frag_len - hdr_len;
		skb->truesize += rx_frag_size;
		skb->tail += hdr_len;
	}
	/* Page ownership (if not already released) has passed to the skb */
	page_info->page = NULL;

	if (rxcp->pkt_size <= rx_frag_size) {
		/* A packet that fits in one frag must use one rx descriptor */
		BUG_ON(rxcp->num_rcvd != 1);
		return;
	}

	/* More frags present for this completion */
	index_inc(&rxcp->rxq_idx, rxq->len);
	remaining = rxcp->pkt_size - curr_frag_len;
	for (i = 1, j = 0; i < rxcp->num_rcvd; i++) {
		page_info = get_rx_page_info(rxo, rxcp->rxq_idx);
		curr_frag_len = min(remaining, rx_frag_size);

		/* Coalesce all frags from the same physical page in one slot */
		if (page_info->page_offset == 0) {
			/* Fresh page */
			j++;
			skb_frag_set_page(skb, j, page_info->page);
			skb_shinfo(skb)->frags[j].page_offset =
							page_info->page_offset;
			skb_frag_size_set(&skb_shinfo(skb)->frags[j], 0);
			skb_shinfo(skb)->nr_frags++;
		} else {
			/* Same page as frags[j]; drop the extra reference */
			put_page(page_info->page);
		}

		skb_frag_size_add(&skb_shinfo(skb)->frags[j], curr_frag_len);
		skb->len += curr_frag_len;
		skb->data_len += curr_frag_len;
		skb->truesize += rx_frag_size;
		remaining -= curr_frag_len;
		index_inc(&rxcp->rxq_idx, rxq->len);
		page_info->page = NULL;
	}
	BUG_ON(j > MAX_SKB_FRAGS);
}
1357
Ajit Khaparde5be93b92009-07-21 12:36:19 -07001358/* Process the RX completion indicated by rxcp when GRO is disabled */
static void be_rx_compl_process(struct be_rx_obj *rxo,
			struct be_rx_compl_info *rxcp)
{
	struct be_adapter *adapter = rxo->adapter;
	struct net_device *netdev = adapter->netdev;
	struct sk_buff *skb;

	skb = netdev_alloc_skb_ip_align(netdev, BE_RX_SKB_ALLOC_SIZE);
	if (unlikely(!skb)) {
		/* No skb memory: drop the packet and recycle its rx frags */
		rx_stats(rxo)->rx_drops_no_skbs++;
		be_rx_compl_discard(rxo, rxcp);
		return;
	}

	skb_fill_rx_data(rxo, skb, rxcp);

	/* Trust the hw checksum only if the netdev has RXCSUM enabled */
	if (likely((netdev->features & NETIF_F_RXCSUM) && csum_passed(rxcp)))
		skb->ip_summed = CHECKSUM_UNNECESSARY;
	else
		skb_checksum_none_assert(skb);

	skb->protocol = eth_type_trans(skb, netdev);
	skb_record_rx_queue(skb, rxo - &adapter->rx_obj[0]);
	if (netdev->features & NETIF_F_RXHASH)
		skb->rxhash = rxcp->rss_hash;


	if (rxcp->vlanf)
		__vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), rxcp->vlan_tag);

	netif_receive_skb(skb);
}
1391
Ajit Khaparde5be93b92009-07-21 12:36:19 -07001392/* Process the RX completion indicated by rxcp when GRO is enabled */
void be_rx_compl_process_gro(struct be_rx_obj *rxo, struct napi_struct *napi,
			struct be_rx_compl_info *rxcp)
{
	struct be_adapter *adapter = rxo->adapter;
	struct be_rx_page_info *page_info;
	struct sk_buff *skb = NULL;
	struct be_queue_info *rxq = &rxo->q;
	u16 remaining, curr_frag_len;
	u16 i, j;

	skb = napi_get_frags(napi);
	if (!skb) {
		be_rx_compl_discard(rxo, rxcp);
		return;
	}

	remaining = rxcp->pkt_size;
	/* j starts at (u16)-1 and wraps to 0 on the first iteration, since
	 * the i == 0 case below always takes the "fresh page" branch
	 */
	for (i = 0, j = -1; i < rxcp->num_rcvd; i++) {
		page_info = get_rx_page_info(rxo, rxcp->rxq_idx);

		curr_frag_len = min(remaining, rx_frag_size);

		/* Coalesce all frags from the same physical page in one slot */
		if (i == 0 || page_info->page_offset == 0) {
			/* First frag or Fresh page */
			j++;
			skb_frag_set_page(skb, j, page_info->page);
			skb_shinfo(skb)->frags[j].page_offset =
							page_info->page_offset;
			skb_frag_size_set(&skb_shinfo(skb)->frags[j], 0);
		} else {
			/* Same page as frags[j]; drop the extra reference */
			put_page(page_info->page);
		}
		skb_frag_size_add(&skb_shinfo(skb)->frags[j], curr_frag_len);
		skb->truesize += rx_frag_size;
		remaining -= curr_frag_len;
		index_inc(&rxcp->rxq_idx, rxq->len);
		memset(page_info, 0, sizeof(*page_info));
	}
	BUG_ON(j > MAX_SKB_FRAGS);

	skb_shinfo(skb)->nr_frags = j + 1;
	skb->len = rxcp->pkt_size;
	skb->data_len = rxcp->pkt_size;
	skb->ip_summed = CHECKSUM_UNNECESSARY;
	skb_record_rx_queue(skb, rxo - &adapter->rx_obj[0]);
	if (adapter->netdev->features & NETIF_F_RXHASH)
		skb->rxhash = rxcp->rss_hash;

	if (rxcp->vlanf)
		__vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), rxcp->vlan_tag);

	napi_gro_frags(napi);
}
1447
/* Extract the fields of a v1 RX completion (used when adapter->be3_native)
 * into the hw-version-independent be_rx_compl_info
 */
static void be_parse_rx_compl_v1(struct be_eth_rx_compl *compl,
				 struct be_rx_compl_info *rxcp)
{
	rxcp->pkt_size =
		AMAP_GET_BITS(struct amap_eth_rx_compl_v1, pktsize, compl);
	rxcp->vlanf = AMAP_GET_BITS(struct amap_eth_rx_compl_v1, vtp, compl);
	rxcp->err = AMAP_GET_BITS(struct amap_eth_rx_compl_v1, err, compl);
	rxcp->tcpf = AMAP_GET_BITS(struct amap_eth_rx_compl_v1, tcpf, compl);
	rxcp->udpf = AMAP_GET_BITS(struct amap_eth_rx_compl_v1, udpf, compl);
	rxcp->ip_csum =
		AMAP_GET_BITS(struct amap_eth_rx_compl_v1, ipcksm, compl);
	rxcp->l4_csum =
		AMAP_GET_BITS(struct amap_eth_rx_compl_v1, l4_cksm, compl);
	rxcp->ipv6 =
		AMAP_GET_BITS(struct amap_eth_rx_compl_v1, ip_version, compl);
	rxcp->rxq_idx =
		AMAP_GET_BITS(struct amap_eth_rx_compl_v1, fragndx, compl);
	rxcp->num_rcvd =
		AMAP_GET_BITS(struct amap_eth_rx_compl_v1, numfrags, compl);
	rxcp->pkt_type =
		AMAP_GET_BITS(struct amap_eth_rx_compl_v1, cast_enc, compl);
	rxcp->rss_hash =
		AMAP_GET_BITS(struct amap_eth_rx_compl_v1, rsshash, compl);
	/* vlan fields are valid only when the vtp bit is set */
	if (rxcp->vlanf) {
		rxcp->vtm = AMAP_GET_BITS(struct amap_eth_rx_compl_v1, vtm,
					  compl);
		rxcp->vlan_tag = AMAP_GET_BITS(struct amap_eth_rx_compl_v1, vlan_tag,
					       compl);
	}
	rxcp->port = AMAP_GET_BITS(struct amap_eth_rx_compl_v1, port, compl);
}
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001479
/* Extract the fields of a v0 RX completion (used when not be3_native)
 * into the hw-version-independent be_rx_compl_info
 */
static void be_parse_rx_compl_v0(struct be_eth_rx_compl *compl,
				 struct be_rx_compl_info *rxcp)
{
	rxcp->pkt_size =
		AMAP_GET_BITS(struct amap_eth_rx_compl_v0, pktsize, compl);
	rxcp->vlanf = AMAP_GET_BITS(struct amap_eth_rx_compl_v0, vtp, compl);
	rxcp->err = AMAP_GET_BITS(struct amap_eth_rx_compl_v0, err, compl);
	rxcp->tcpf = AMAP_GET_BITS(struct amap_eth_rx_compl_v0, tcpf, compl);
	rxcp->udpf = AMAP_GET_BITS(struct amap_eth_rx_compl_v0, udpf, compl);
	rxcp->ip_csum =
		AMAP_GET_BITS(struct amap_eth_rx_compl_v0, ipcksm, compl);
	rxcp->l4_csum =
		AMAP_GET_BITS(struct amap_eth_rx_compl_v0, l4_cksm, compl);
	rxcp->ipv6 =
		AMAP_GET_BITS(struct amap_eth_rx_compl_v0, ip_version, compl);
	rxcp->rxq_idx =
		AMAP_GET_BITS(struct amap_eth_rx_compl_v0, fragndx, compl);
	rxcp->num_rcvd =
		AMAP_GET_BITS(struct amap_eth_rx_compl_v0, numfrags, compl);
	rxcp->pkt_type =
		AMAP_GET_BITS(struct amap_eth_rx_compl_v0, cast_enc, compl);
	rxcp->rss_hash =
		AMAP_GET_BITS(struct amap_eth_rx_compl_v0, rsshash, compl);
	/* vlan fields are valid only when the vtp bit is set */
	if (rxcp->vlanf) {
		rxcp->vtm = AMAP_GET_BITS(struct amap_eth_rx_compl_v0, vtm,
					  compl);
		rxcp->vlan_tag = AMAP_GET_BITS(struct amap_eth_rx_compl_v0, vlan_tag,
					       compl);
	}
	rxcp->port = AMAP_GET_BITS(struct amap_eth_rx_compl_v0, port, compl);
}
1511
/* Return the next valid RX completion from the CQ (parsed into rxo->rxcp),
 * or NULL if none is pending. Consumes the CQ entry.
 */
static struct be_rx_compl_info *be_rx_compl_get(struct be_rx_obj *rxo)
{
	struct be_eth_rx_compl *compl = queue_tail_node(&rxo->cq);
	struct be_rx_compl_info *rxcp = &rxo->rxcp;
	struct be_adapter *adapter = rxo->adapter;

	/* For checking the valid bit it is Ok to use either definition as the
	 * valid bit is at the same position in both v0 and v1 Rx compl */
	if (compl->dw[offsetof(struct amap_eth_rx_compl_v1, valid) / 32] == 0)
		return NULL;

	/* Do not read the rest of the compl before the valid bit is seen */
	rmb();
	be_dws_le_to_cpu(compl, sizeof(*compl));

	if (adapter->be3_native)
		be_parse_rx_compl_v1(compl, rxcp);
	else
		be_parse_rx_compl_v0(compl, rxcp);

	if (rxcp->vlanf) {
		/* vlanf could be wrongly set in some cards.
		 * ignore if vtm is not set */
		if ((adapter->function_mode & FLEX10_MODE) && !rxcp->vtm)
			rxcp->vlanf = 0;

		if (!lancer_chip(adapter))
			rxcp->vlan_tag = swab16(rxcp->vlan_tag);

		/* Hide the vlan tag if it matches the pvid and is not one of
		 * the vlans configured on the interface
		 */
		if (adapter->pvid == (rxcp->vlan_tag & VLAN_VID_MASK) &&
		    !adapter->vlan_tag[rxcp->vlan_tag])
			rxcp->vlanf = 0;
	}

	/* As the compl has been parsed, reset it; we wont touch it again */
	compl->dw[offsetof(struct amap_eth_rx_compl_v1, valid) / 32] = 0;

	queue_tail_inc(&rxo->cq);
	return rxcp;
}
1551
Eric Dumazet1829b082011-03-01 05:48:12 +00001552static inline struct page *be_alloc_pages(u32 size, gfp_t gfp)
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001553{
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001554 u32 order = get_order(size);
Eric Dumazet1829b082011-03-01 05:48:12 +00001555
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001556 if (order > 0)
Eric Dumazet1829b082011-03-01 05:48:12 +00001557 gfp |= __GFP_COMP;
1558 return alloc_pages(gfp, order);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001559}
1560
1561/*
1562 * Allocate a page, split it to fragments of size rx_frag_size and post as
1563 * receive buffers to BE
1564 */
static void be_post_rx_frags(struct be_rx_obj *rxo, gfp_t gfp)
{
	struct be_adapter *adapter = rxo->adapter;
	struct be_rx_page_info *page_info = NULL, *prev_page_info = NULL;
	struct be_queue_info *rxq = &rxo->q;
	struct page *pagep = NULL;
	struct be_eth_rx_d *rxd;
	u64 page_dmaaddr = 0, frag_dmaaddr;
	u32 posted, page_offset = 0;

	page_info = &rxo->page_info_tbl[rxq->head];
	/* Post up to MAX_RX_POST buffers, stopping at the first slot that
	 * still holds a page (i.e. is still owned by the hw)
	 */
	for (posted = 0; posted < MAX_RX_POST && !page_info->page; posted++) {
		if (!pagep) {
			pagep = be_alloc_pages(adapter->big_page_size, gfp);
			if (unlikely(!pagep)) {
				rx_stats(rxo)->rx_post_fail++;
				break;
			}
			page_dmaaddr = dma_map_page(&adapter->pdev->dev, pagep,
						    0, adapter->big_page_size,
						    DMA_FROM_DEVICE);
			page_info->page_offset = 0;
		} else {
			/* Carve the next rx_frag_size chunk out of the same
			 * big page; each frag holds its own page reference
			 */
			get_page(pagep);
			page_info->page_offset = page_offset + rx_frag_size;
		}
		page_offset = page_info->page_offset;
		page_info->page = pagep;
		dma_unmap_addr_set(page_info, bus, page_dmaaddr);
		frag_dmaaddr = page_dmaaddr + page_info->page_offset;

		/* Fill the rx descriptor with the frag's DMA address */
		rxd = queue_head_node(rxq);
		rxd->fragpa_lo = cpu_to_le32(frag_dmaaddr & 0xFFFFFFFF);
		rxd->fragpa_hi = cpu_to_le32(upper_32_bits(frag_dmaaddr));

		/* Any space left in the current big page for another frag? */
		if ((page_offset + rx_frag_size + rx_frag_size) >
					adapter->big_page_size) {
			pagep = NULL;
			page_info->last_page_user = true;
		}

		prev_page_info = page_info;
		queue_head_inc(rxq);
		page_info = &rxo->page_info_tbl[rxq->head];
	}
	if (pagep)
		prev_page_info->last_page_user = true;

	if (posted) {
		atomic_add(posted, &rxq->used);
		/* Tell the hw how many new rx buffers were posted */
		be_rxq_notify(adapter, rxq->id, posted);
	} else if (atomic_read(&rxq->used) == 0) {
		/* Let be_worker replenish when memory is available */
		rxo->rx_post_starved = true;
	}
}
1622
/* Return the next valid TX completion from tx_cq, or NULL if none is
 * pending. Consumes the CQ entry.
 */
static struct be_eth_tx_compl *be_tx_compl_get(struct be_queue_info *tx_cq)
{
	struct be_eth_tx_compl *txcp = queue_tail_node(tx_cq);

	if (txcp->dw[offsetof(struct amap_eth_tx_compl, valid) / 32] == 0)
		return NULL;

	/* Do not read the rest of the compl before the valid bit is seen */
	rmb();
	be_dws_le_to_cpu(txcp, sizeof(*txcp));

	/* Clear the valid bit so this entry is not processed again */
	txcp->dw[offsetof(struct amap_eth_tx_compl, valid) / 32] = 0;

	queue_tail_inc(tx_cq);
	return txcp;
}
1638
/* Reclaim one transmitted skb: unmap its wrbs (from txq->tail through
 * last_index) and free the skb. Returns the number of wrbs reclaimed,
 * including the header wrb.
 */
static u16 be_tx_compl_process(struct be_adapter *adapter,
		struct be_tx_obj *txo, u16 last_index)
{
	struct be_queue_info *txq = &txo->q;
	struct be_eth_wrb *wrb;
	struct sk_buff **sent_skbs = txo->sent_skb_list;
	struct sk_buff *sent_skb;
	u16 cur_index, num_wrbs = 1; /* account for hdr wrb */
	bool unmap_skb_hdr = true;

	sent_skb = sent_skbs[txq->tail];
	BUG_ON(!sent_skb);
	sent_skbs[txq->tail] = NULL;

	/* skip header wrb */
	queue_tail_inc(txq);

	do {
		cur_index = txq->tail;
		wrb = queue_tail_node(txq);
		/* Unmap the skb header only once, together with the first
		 * data wrb (and only if there is linear header data)
		 */
		unmap_tx_frag(&adapter->pdev->dev, wrb,
			      (unmap_skb_hdr && skb_headlen(sent_skb)));
		unmap_skb_hdr = false;

		num_wrbs++;
		queue_tail_inc(txq);
	} while (cur_index != last_index);

	kfree_skb(sent_skb);
	return num_wrbs;
}
1670
/* Return the number of events in the event queue; each counted entry's
 * evt word is cleared so it is not counted again
 */
static inline int events_get(struct be_eq_obj *eqo)
{
	struct be_eq_entry *eqe;
	int num = 0;

	do {
		eqe = queue_tail_node(&eqo->q);
		/* A zero evt word marks the end of posted events */
		if (eqe->evt == 0)
			break;

		/* Do not consume the entry before the non-zero evt is seen */
		rmb();
		eqe->evt = 0;
		num++;
		queue_tail_inc(&eqo->q);
	} while (true);

	return num;
}
1690
Sathya Perla10ef9ab2012-02-09 18:05:27 +00001691/* Leaves the EQ is disarmed state */
1692static void be_eq_clean(struct be_eq_obj *eqo)
1693{
1694 int num = events_get(eqo);
1695
1696 be_eq_notify(eqo->adapter, eqo->q.id, false, true, num);
1697}
1698
/* Drain the RX CQ (waiting for the hw flush completion where applicable)
 * and free all RX buffers still posted on the RX queue
 */
static void be_rx_cq_clean(struct be_rx_obj *rxo)
{
	struct be_rx_page_info *page_info;
	struct be_queue_info *rxq = &rxo->q;
	struct be_queue_info *rx_cq = &rxo->cq;
	struct be_rx_compl_info *rxcp;
	struct be_adapter *adapter = rxo->adapter;
	int flush_wait = 0;
	u16 tail;

	/* Consume pending rx completions.
	 * Wait for the flush completion (identified by zero num_rcvd)
	 * to arrive. Notify CQ even when there are no more CQ entries
	 * for HW to flush partially coalesced CQ entries.
	 * In Lancer, there is no need to wait for flush compl.
	 */
	for (;;) {
		rxcp = be_rx_compl_get(rxo);
		if (rxcp == NULL) {
			if (lancer_chip(adapter))
				break;

			/* Give up after ~10ms or on a detected hw error */
			if (flush_wait++ > 10 || be_hw_error(adapter)) {
				dev_warn(&adapter->pdev->dev,
					"did not receive flush compl\n");
				break;
			}
			be_cq_notify(adapter, rx_cq->id, true, 0);
			mdelay(1);
		} else {
			be_rx_compl_discard(rxo, rxcp);
			be_cq_notify(adapter, rx_cq->id, true, 1);
			if (rxcp->num_rcvd == 0)
				break;
		}
	}

	/* After cleanup, leave the CQ in unarmed state */
	be_cq_notify(adapter, rx_cq->id, false, 0);

	/* Then free posted rx buffers that were not used */
	tail = (rxq->head + rxq->len - atomic_read(&rxq->used)) % rxq->len;
	for (; atomic_read(&rxq->used) > 0; index_inc(&tail, rxq->len)) {
		page_info = get_rx_page_info(rxo, tail);
		put_page(page_info->page);
		memset(page_info, 0, sizeof(*page_info));
	}
	BUG_ON(atomic_read(&rxq->used));
	rxq->tail = rxq->head = 0;
}
1749
/* Reap TX completions on all TX queues, waiting up to ~200ms for them to
 * arrive; afterwards, forcibly reclaim any posted wrbs whose completions
 * never came
 */
static void be_tx_compl_clean(struct be_adapter *adapter)
{
	struct be_tx_obj *txo;
	struct be_queue_info *txq;
	struct be_eth_tx_compl *txcp;
	u16 end_idx, cmpl = 0, timeo = 0, num_wrbs = 0;
	struct sk_buff *sent_skb;
	bool dummy_wrb;
	int i, pending_txqs;

	/* Wait for a max of 200ms for all the tx-completions to arrive. */
	do {
		pending_txqs = adapter->num_tx_qs;

		for_all_tx_queues(adapter, txo, i) {
			txq = &txo->q;
			while ((txcp = be_tx_compl_get(&txo->cq))) {
				end_idx =
					AMAP_GET_BITS(struct amap_eth_tx_compl,
						      wrb_index, txcp);
				num_wrbs += be_tx_compl_process(adapter, txo,
								end_idx);
				cmpl++;
			}
			if (cmpl) {
				/* Ack the reaped compls and release wrbs */
				be_cq_notify(adapter, txo->cq.id, false, cmpl);
				atomic_sub(num_wrbs, &txq->used);
				cmpl = 0;
				num_wrbs = 0;
			}
			if (atomic_read(&txq->used) == 0)
				pending_txqs--;
		}

		if (pending_txqs == 0 || ++timeo > 200)
			break;

		mdelay(1);
	} while (true);

	for_all_tx_queues(adapter, txo, i) {
		txq = &txo->q;
		if (atomic_read(&txq->used))
			dev_err(&adapter->pdev->dev, "%d pending tx-compls\n",
				atomic_read(&txq->used));

		/* free posted tx for which compls will never arrive */
		while (atomic_read(&txq->used)) {
			sent_skb = txo->sent_skb_list[txq->tail];
			end_idx = txq->tail;
			/* Compute the last wrb index of this skb so the
			 * normal reclaim path can be reused
			 */
			num_wrbs = wrb_cnt_for_skb(adapter, sent_skb,
						   &dummy_wrb);
			index_adv(&end_idx, num_wrbs - 1, txq->len);
			num_wrbs = be_tx_compl_process(adapter, txo, end_idx);
			atomic_sub(num_wrbs, &txq->used);
		}
	}
}
1808
Sathya Perla10ef9ab2012-02-09 18:05:27 +00001809static void be_evt_queues_destroy(struct be_adapter *adapter)
1810{
1811 struct be_eq_obj *eqo;
1812 int i;
1813
1814 for_all_evt_queues(adapter, eqo, i) {
Padmanabh Ratnakar19d59aa2012-07-12 03:57:21 +00001815 if (eqo->q.created) {
1816 be_eq_clean(eqo);
Sathya Perla10ef9ab2012-02-09 18:05:27 +00001817 be_cmd_q_destroy(adapter, &eqo->q, QTYPE_EQ);
Padmanabh Ratnakar19d59aa2012-07-12 03:57:21 +00001818 }
Sathya Perla10ef9ab2012-02-09 18:05:27 +00001819 be_queue_free(adapter, &eqo->q);
1820 }
1821}
1822
/* Allocate and create one event queue per interrupt in use.
 * Returns 0 on success or the first non-zero status; queues created before
 * a failure are not unwound here.
 */
static int be_evt_queues_create(struct be_adapter *adapter)
{
	struct be_queue_info *eq;
	struct be_eq_obj *eqo;
	int i, rc;

	adapter->num_evt_qs = num_irqs(adapter);

	for_all_evt_queues(adapter, eqo, i) {
		eqo->adapter = adapter;
		eqo->tx_budget = BE_TX_BUDGET;
		eqo->idx = i;
		eqo->max_eqd = BE_MAX_EQD;
		eqo->enable_aic = true;

		eq = &eqo->q;
		rc = be_queue_alloc(adapter, eq, EVNT_Q_LEN,
					sizeof(struct be_eq_entry));
		if (rc)
			return rc;

		rc = be_cmd_eq_create(adapter, eq, eqo->cur_eqd);
		if (rc)
			return rc;
	}
	return 0;
}
1850
Sathya Perla5fb379e2009-06-18 00:02:59 +00001851static void be_mcc_queues_destroy(struct be_adapter *adapter)
1852{
1853 struct be_queue_info *q;
Sathya Perla5fb379e2009-06-18 00:02:59 +00001854
Sathya Perla8788fdc2009-07-27 22:52:03 +00001855 q = &adapter->mcc_obj.q;
Sathya Perla5fb379e2009-06-18 00:02:59 +00001856 if (q->created)
Sathya Perla8788fdc2009-07-27 22:52:03 +00001857 be_cmd_q_destroy(adapter, q, QTYPE_MCCQ);
Sathya Perla5fb379e2009-06-18 00:02:59 +00001858 be_queue_free(adapter, q);
1859
Sathya Perla8788fdc2009-07-27 22:52:03 +00001860 q = &adapter->mcc_obj.cq;
Sathya Perla5fb379e2009-06-18 00:02:59 +00001861 if (q->created)
Sathya Perla8788fdc2009-07-27 22:52:03 +00001862 be_cmd_q_destroy(adapter, q, QTYPE_CQ);
Sathya Perla5fb379e2009-06-18 00:02:59 +00001863 be_queue_free(adapter, q);
1864}
1865
1866/* Must be called only after TX qs are created as MCC shares TX EQ */
/* Create the MCC completion queue and then the MCC queue itself.
 * Returns 0 on success, -1 on failure (partially-created queues are
 * unwound via the goto chain below).
 */
static int be_mcc_queues_create(struct be_adapter *adapter)
{
	struct be_queue_info *q, *cq;

	cq = &adapter->mcc_obj.cq;
	if (be_queue_alloc(adapter, cq, MCC_CQ_LEN,
			sizeof(struct be_mcc_compl)))
		goto err;

	/* Use the default EQ for MCC completions */
	if (be_cmd_cq_create(adapter, cq, &mcc_eqo(adapter)->q, true, 0))
		goto mcc_cq_free;

	q = &adapter->mcc_obj.q;
	if (be_queue_alloc(adapter, q, MCC_Q_LEN, sizeof(struct be_mcc_wrb)))
		goto mcc_cq_destroy;

	if (be_cmd_mccq_create(adapter, q, cq))
		goto mcc_q_free;

	return 0;

	/* Error unwind, in reverse order of creation */
mcc_q_free:
	be_queue_free(adapter, q);
mcc_cq_destroy:
	be_cmd_q_destroy(adapter, cq, QTYPE_CQ);
mcc_cq_free:
	be_queue_free(adapter, cq);
err:
	return -1;
}
1898
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001899static void be_tx_queues_destroy(struct be_adapter *adapter)
1900{
1901 struct be_queue_info *q;
Sathya Perla3c8def92011-06-12 20:01:58 +00001902 struct be_tx_obj *txo;
1903 u8 i;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001904
Sathya Perla3c8def92011-06-12 20:01:58 +00001905 for_all_tx_queues(adapter, txo, i) {
1906 q = &txo->q;
1907 if (q->created)
1908 be_cmd_q_destroy(adapter, q, QTYPE_TXQ);
1909 be_queue_free(adapter, q);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001910
Sathya Perla3c8def92011-06-12 20:01:58 +00001911 q = &txo->cq;
1912 if (q->created)
1913 be_cmd_q_destroy(adapter, q, QTYPE_CQ);
1914 be_queue_free(adapter, q);
1915 }
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001916}
1917
Sathya Perladafc0fe2011-10-24 02:45:02 +00001918static int be_num_txqs_want(struct be_adapter *adapter)
1919{
Padmanabh Ratnakarabb93952012-10-20 06:01:41 +00001920 if ((!lancer_chip(adapter) && sriov_want(adapter)) ||
1921 be_is_mc(adapter) ||
1922 (!lancer_chip(adapter) && !be_physfn(adapter)) ||
Sathya Perlaca34fe32012-11-06 17:48:56 +00001923 BE2_chip(adapter))
Sathya Perladafc0fe2011-10-24 02:45:02 +00001924 return 1;
1925 else
Padmanabh Ratnakarabb93952012-10-20 06:01:41 +00001926 return adapter->max_tx_queues;
Sathya Perladafc0fe2011-10-24 02:45:02 +00001927}
1928
/* Decide the number of TX queues, publish it to the net stack, and create
 * one completion queue per TX queue. Returns 0 on success or the first
 * non-zero status.
 */
static int be_tx_cqs_create(struct be_adapter *adapter)
{
	struct be_queue_info *cq, *eq;
	int status;
	struct be_tx_obj *txo;
	u8 i;

	adapter->num_tx_qs = be_num_txqs_want(adapter);
	if (adapter->num_tx_qs != MAX_TX_QS) {
		/* rtnl lock is required by netif_set_real_num_tx_queues() */
		rtnl_lock();
		netif_set_real_num_tx_queues(adapter->netdev,
			adapter->num_tx_qs);
		rtnl_unlock();
	}

	for_all_tx_queues(adapter, txo, i) {
		cq = &txo->cq;
		status = be_queue_alloc(adapter, cq, TX_CQ_LEN,
					sizeof(struct be_eth_tx_compl));
		if (status)
			return status;

		/* If num_evt_qs is less than num_tx_qs, then more than
		 * one txq share an eq
		 */
		eq = &adapter->eq_obj[i % adapter->num_evt_qs].q;
		status = be_cmd_cq_create(adapter, cq, eq, false, 3);
		if (status)
			return status;
	}
	return 0;
}
1961
Sathya Perla10ef9ab2012-02-09 18:05:27 +00001962static int be_tx_qs_create(struct be_adapter *adapter)
1963{
1964 struct be_tx_obj *txo;
1965 int i, status;
1966
1967 for_all_tx_queues(adapter, txo, i) {
1968 status = be_queue_alloc(adapter, &txo->q, TX_Q_LEN,
1969 sizeof(struct be_eth_wrb));
1970 if (status)
1971 return status;
1972
Vasundhara Volam94d73aa2013-04-21 23:28:14 +00001973 status = be_cmd_txq_create(adapter, txo);
Sathya Perla10ef9ab2012-02-09 18:05:27 +00001974 if (status)
1975 return status;
1976 }
1977
Sathya Perlad3791422012-09-28 04:39:44 +00001978 dev_info(&adapter->pdev->dev, "created %d TX queue(s)\n",
1979 adapter->num_tx_qs);
Sathya Perla10ef9ab2012-02-09 18:05:27 +00001980 return 0;
1981}
1982
1983static void be_rx_cqs_destroy(struct be_adapter *adapter)
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001984{
1985 struct be_queue_info *q;
Sathya Perla3abcded2010-10-03 22:12:27 -07001986 struct be_rx_obj *rxo;
1987 int i;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001988
Sathya Perla3abcded2010-10-03 22:12:27 -07001989 for_all_rx_queues(adapter, rxo, i) {
Sathya Perla3abcded2010-10-03 22:12:27 -07001990 q = &rxo->cq;
1991 if (q->created)
1992 be_cmd_q_destroy(adapter, q, QTYPE_CQ);
1993 be_queue_free(adapter, q);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001994 }
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001995}
1996
Sathya Perla10ef9ab2012-02-09 18:05:27 +00001997static int be_rx_cqs_create(struct be_adapter *adapter)
Sathya Perlaac6a0c42011-03-21 20:49:25 +00001998{
Sathya Perla10ef9ab2012-02-09 18:05:27 +00001999 struct be_queue_info *eq, *cq;
Sathya Perla3abcded2010-10-03 22:12:27 -07002000 struct be_rx_obj *rxo;
2001 int rc, i;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002002
Sathya Perla10ef9ab2012-02-09 18:05:27 +00002003 /* We'll create as many RSS rings as there are irqs.
2004 * But when there's only one irq there's no use creating RSS rings
2005 */
2006 adapter->num_rx_qs = (num_irqs(adapter) > 1) ?
2007 num_irqs(adapter) + 1 : 1;
Sathya Perla7f640062012-06-05 19:37:20 +00002008 if (adapter->num_rx_qs != MAX_RX_QS) {
2009 rtnl_lock();
2010 netif_set_real_num_rx_queues(adapter->netdev,
2011 adapter->num_rx_qs);
2012 rtnl_unlock();
2013 }
Sathya Perlaac6a0c42011-03-21 20:49:25 +00002014
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002015 adapter->big_page_size = (1 << get_order(rx_frag_size)) * PAGE_SIZE;
Sathya Perla3abcded2010-10-03 22:12:27 -07002016 for_all_rx_queues(adapter, rxo, i) {
2017 rxo->adapter = adapter;
Sathya Perla3abcded2010-10-03 22:12:27 -07002018 cq = &rxo->cq;
2019 rc = be_queue_alloc(adapter, cq, RX_CQ_LEN,
2020 sizeof(struct be_eth_rx_compl));
2021 if (rc)
Sathya Perla10ef9ab2012-02-09 18:05:27 +00002022 return rc;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002023
Sathya Perla10ef9ab2012-02-09 18:05:27 +00002024 eq = &adapter->eq_obj[i % adapter->num_evt_qs].q;
2025 rc = be_cmd_cq_create(adapter, cq, eq, false, 3);
Sathya Perla3abcded2010-10-03 22:12:27 -07002026 if (rc)
Sathya Perla10ef9ab2012-02-09 18:05:27 +00002027 return rc;
Sathya Perla3abcded2010-10-03 22:12:27 -07002028 }
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002029
Sathya Perlad3791422012-09-28 04:39:44 +00002030 dev_info(&adapter->pdev->dev,
2031 "created %d RSS queue(s) and 1 default RX queue\n",
2032 adapter->num_rx_qs - 1);
Sathya Perla10ef9ab2012-02-09 18:05:27 +00002033 return 0;
Sathya Perlab628bde2009-08-17 00:58:26 +00002034}
2035
/* Legacy (INTx) interrupt handler; only EQ0 is serviced in this mode. */
static irqreturn_t be_intx(int irq, void *dev)
{
	struct be_eq_obj *eqo = dev;
	struct be_adapter *adapter = eqo->adapter;
	int num_evts = 0;

	/* IRQ is not expected when NAPI is scheduled as the EQ
	 * will not be armed.
	 * But, this can happen on Lancer INTx where it takes
	 * a while to de-assert INTx or in BE2 where occasionally
	 * an interrupt may be raised even when EQ is unarmed.
	 * If NAPI is already scheduled, then counting & notifying
	 * events will orphan them.
	 */
	if (napi_schedule_prep(&eqo->napi)) {
		num_evts = events_get(eqo);
		__napi_schedule(&eqo->napi);
		/* A real event arrived: reset the spurious-intr counter */
		if (num_evts)
			eqo->spurious_intr = 0;
	}
	/* Ack the counted events without re-arming the EQ (NAPI will re-arm) */
	be_eq_notify(adapter, eqo->q.id, false, true, num_evts);

	/* Return IRQ_HANDLED only for the first spurious intr
	 * after a valid intr to stop the kernel from branding
	 * this irq as a bad one!
	 */
	if (num_evts || eqo->spurious_intr++ == 0)
		return IRQ_HANDLED;
	else
		return IRQ_NONE;
}
2067
Sathya Perla10ef9ab2012-02-09 18:05:27 +00002068static irqreturn_t be_msix(int irq, void *dev)
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002069{
Sathya Perla10ef9ab2012-02-09 18:05:27 +00002070 struct be_eq_obj *eqo = dev;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002071
Sathya Perla0b545a62012-11-23 00:27:18 +00002072 be_eq_notify(eqo->adapter, eqo->q.id, false, true, 0);
2073 napi_schedule(&eqo->napi);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002074 return IRQ_HANDLED;
2075}
2076
Sathya Perla2e588f82011-03-11 02:49:26 +00002077static inline bool do_gro(struct be_rx_compl_info *rxcp)
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002078{
Sathya Perla2e588f82011-03-11 02:49:26 +00002079 return (rxcp->tcpf && !rxcp->err) ? true : false;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002080}
2081
/* Reap up to @budget RX completions from @rxo's CQ, handing good packets
 * to the stack (via GRO when applicable) and discarding bad ones.
 * Re-arms the CQ and replenishes RX buffers when work was done.
 * Returns the number of completions processed.
 */
static int be_process_rx(struct be_rx_obj *rxo, struct napi_struct *napi,
			int budget)
{
	struct be_adapter *adapter = rxo->adapter;
	struct be_queue_info *rx_cq = &rxo->cq;
	struct be_rx_compl_info *rxcp;
	u32 work_done;

	for (work_done = 0; work_done < budget; work_done++) {
		rxcp = be_rx_compl_get(rxo);
		if (!rxcp)
			break;

		/* Is it a flush compl that has no data */
		if (unlikely(rxcp->num_rcvd == 0))
			goto loop_continue;

		/* Discard compl with partial DMA Lancer B0 */
		if (unlikely(!rxcp->pkt_size)) {
			be_rx_compl_discard(rxo, rxcp);
			goto loop_continue;
		}

		/* On BE drop pkts that arrive due to imperfect filtering in
		 * promiscuous mode on some skews
		 */
		if (unlikely(rxcp->port != adapter->port_num &&
				!lancer_chip(adapter))) {
			be_rx_compl_discard(rxo, rxcp);
			goto loop_continue;
		}

		if (do_gro(rxcp))
			be_rx_compl_process_gro(rxo, napi, rxcp);
		else
			be_rx_compl_process(rxo, rxcp);
loop_continue:
		/* Stats are updated for discarded completions too */
		be_rx_stats_update(rxo, rxcp);
	}

	if (work_done) {
		/* Ack the processed entries and re-arm the CQ */
		be_cq_notify(adapter, rx_cq->id, true, work_done);

		/* Refill the RXQ once it drains below the watermark */
		if (atomic_read(&rxo->q.used) < RX_FRAGS_REFILL_WM)
			be_post_rx_frags(rxo, GFP_ATOMIC);
	}

	return work_done;
}
2131
/* Reap up to @budget TX completions from @txo's CQ, freeing the
 * corresponding WRBs/skbs and waking the netdev subqueue @idx if it was
 * stopped for lack of WRBs. Returns true when the CQ was drained within
 * the budget (i.e. TX work is done).
 */
static bool be_process_tx(struct be_adapter *adapter, struct be_tx_obj *txo,
			int budget, int idx)
{
	struct be_eth_tx_compl *txcp;
	int num_wrbs = 0, work_done;

	for (work_done = 0; work_done < budget; work_done++) {
		txcp = be_tx_compl_get(&txo->cq);
		if (!txcp)
			break;
		num_wrbs += be_tx_compl_process(adapter, txo,
				AMAP_GET_BITS(struct amap_eth_tx_compl,
					wrb_index, txcp));
	}

	if (work_done) {
		/* Ack the processed entries and re-arm the CQ */
		be_cq_notify(adapter, txo->cq.id, true, work_done);
		atomic_sub(num_wrbs, &txo->q.used);

		/* As Tx wrbs have been freed up, wake up netdev queue
		 * if it was stopped due to lack of tx wrbs. */
		if (__netif_subqueue_stopped(adapter->netdev, idx) &&
			atomic_read(&txo->q.used) < txo->q.len / 2) {
			netif_wake_subqueue(adapter->netdev, idx);
		}

		u64_stats_update_begin(&tx_stats(txo)->sync_compl);
		tx_stats(txo)->tx_compl += work_done;
		u64_stats_update_end(&tx_stats(txo)->sync_compl);
	}
	return (work_done < budget); /* Done */
}
Sathya Perla3c8def92011-06-12 20:01:58 +00002164
/* NAPI poll handler for one EQ: services all TXQs and RXQs mapped to this
 * EQ, processes MCC completions on the MCC EQ, and re-arms the EQ only
 * when all work fit within @budget. Returns the amount of work reported
 * to the NAPI core.
 */
int be_poll(struct napi_struct *napi, int budget)
{
	struct be_eq_obj *eqo = container_of(napi, struct be_eq_obj, napi);
	struct be_adapter *adapter = eqo->adapter;
	int max_work = 0, work, i, num_evts;
	bool tx_done;

	/* Count events now; they are acked in the be_eq_notify() below */
	num_evts = events_get(eqo);

	/* Process all TXQs serviced by this EQ */
	for (i = eqo->idx; i < adapter->num_tx_qs; i += adapter->num_evt_qs) {
		tx_done = be_process_tx(adapter, &adapter->tx_obj[i],
					eqo->tx_budget, i);
		/* Unfinished TX forces another poll round */
		if (!tx_done)
			max_work = budget;
	}

	/* This loop will iterate twice for EQ0 in which
	 * completions of the last RXQ (default one) are also processed
	 * For other EQs the loop iterates only once
	 */
	for (i = eqo->idx; i < adapter->num_rx_qs; i += adapter->num_evt_qs) {
		work = be_process_rx(&adapter->rx_obj[i], napi, budget);
		max_work = max(work, max_work);
	}

	if (is_mcc_eqo(eqo))
		be_process_mcc(adapter);

	if (max_work < budget) {
		/* All done: leave NAPI and re-arm the EQ */
		napi_complete(napi);
		be_eq_notify(adapter, eqo->q.id, true, false, num_evts);
	} else {
		/* As we'll continue in polling mode, count and clear events */
		be_eq_notify(adapter, eqo->q.id, false, false, num_evts);
	}
	return max_work;
}
2203
/* Check the adapter for unrecoverable hardware errors.
 * On Lancer the SLIPORT status registers are read; on BEx chips the
 * PCI-config UE (unrecoverable error) status words are read and masked
 * with the f/w-provided masks. SLIPORT errors latch adapter->hw_error;
 * UE bits are only logged (see comment below for why).
 */
void be_detect_error(struct be_adapter *adapter)
{
	u32 ue_lo = 0, ue_hi = 0, ue_lo_mask = 0, ue_hi_mask = 0;
	u32 sliport_status = 0, sliport_err1 = 0, sliport_err2 = 0;
	u32 i;

	/* Error already latched earlier: nothing more to do */
	if (be_hw_error(adapter))
		return;

	if (lancer_chip(adapter)) {
		sliport_status = ioread32(adapter->db + SLIPORT_STATUS_OFFSET);
		if (sliport_status & SLIPORT_STATUS_ERR_MASK) {
			sliport_err1 = ioread32(adapter->db +
					SLIPORT_ERROR1_OFFSET);
			sliport_err2 = ioread32(adapter->db +
					SLIPORT_ERROR2_OFFSET);
		}
	} else {
		pci_read_config_dword(adapter->pdev,
				PCICFG_UE_STATUS_LOW, &ue_lo);
		pci_read_config_dword(adapter->pdev,
				PCICFG_UE_STATUS_HIGH, &ue_hi);
		pci_read_config_dword(adapter->pdev,
				PCICFG_UE_STATUS_LOW_MASK, &ue_lo_mask);
		pci_read_config_dword(adapter->pdev,
				PCICFG_UE_STATUS_HI_MASK, &ue_hi_mask);

		/* Mask off UE bits the f/w marks as ignorable */
		ue_lo = (ue_lo & ~ue_lo_mask);
		ue_hi = (ue_hi & ~ue_hi_mask);
	}

	/* On certain platforms BE hardware can indicate spurious UEs.
	 * Allow the h/w to stop working completely in case of a real UE.
	 * Hence not setting the hw_error for UE detection.
	 */
	if (sliport_status & SLIPORT_STATUS_ERR_MASK) {
		adapter->hw_error = true;
		dev_err(&adapter->pdev->dev,
			"Error detected in the card\n");
	}

	if (sliport_status & SLIPORT_STATUS_ERR_MASK) {
		dev_err(&adapter->pdev->dev,
			"ERR: sliport status 0x%x\n", sliport_status);
		dev_err(&adapter->pdev->dev,
			"ERR: sliport error1 0x%x\n", sliport_err1);
		dev_err(&adapter->pdev->dev,
			"ERR: sliport error2 0x%x\n", sliport_err2);
	}

	/* Log the description of every set UE bit */
	if (ue_lo) {
		for (i = 0; ue_lo; ue_lo >>= 1, i++) {
			if (ue_lo & 1)
				dev_err(&adapter->pdev->dev,
				"UE: %s bit set\n", ue_status_low_desc[i]);
		}
	}

	if (ue_hi) {
		for (i = 0; ue_hi; ue_hi >>= 1, i++) {
			if (ue_hi & 1)
				dev_err(&adapter->pdev->dev,
				"UE: %s bit set\n", ue_status_hi_desc[i]);
		}
	}

}
2271
Sathya Perla8d56ff12009-11-22 22:02:26 +00002272static void be_msix_disable(struct be_adapter *adapter)
2273{
Sathya Perlaac6a0c42011-03-21 20:49:25 +00002274 if (msix_enabled(adapter)) {
Sathya Perla8d56ff12009-11-22 22:02:26 +00002275 pci_disable_msix(adapter->pdev);
Sathya Perlaac6a0c42011-03-21 20:49:25 +00002276 adapter->num_msix_vec = 0;
Sathya Perla3abcded2010-10-03 22:12:27 -07002277 }
2278}
2279
Sathya Perla10ef9ab2012-02-09 18:05:27 +00002280static uint be_num_rss_want(struct be_adapter *adapter)
2281{
Yuval Mintz30e80b52012-07-01 03:19:00 +00002282 u32 num = 0;
Padmanabh Ratnakarabb93952012-10-20 06:01:41 +00002283
Sathya Perla10ef9ab2012-02-09 18:05:27 +00002284 if ((adapter->function_caps & BE_FUNCTION_CAPS_RSS) &&
Padmanabh Ratnakarabb93952012-10-20 06:01:41 +00002285 (lancer_chip(adapter) ||
2286 (!sriov_want(adapter) && be_physfn(adapter)))) {
2287 num = adapter->max_rss_queues;
Yuval Mintz30e80b52012-07-01 03:19:00 +00002288 num = min_t(u32, num, (u32)netif_get_num_default_rss_queues());
2289 }
2290 return num;
Sathya Perla10ef9ab2012-02-09 18:05:27 +00002291}
2292
/* Try to enable MSI-x with enough vectors for the RSS rings and (when
 * supported) the RoCE EQs. If the full request fails but the h/w offers a
 * smaller count, retry with that count. On success the vectors are split
 * between NIC (num_msix_vec) and RoCE (num_msix_roce_vec); on failure the
 * driver falls back to INTx (no vectors recorded).
 */
static void be_msix_enable(struct be_adapter *adapter)
{
#define BE_MIN_MSIX_VECTORS 1
	int i, status, num_vec, num_roce_vec = 0;
	struct device *dev = &adapter->pdev->dev;

	/* If RSS queues are not used, need a vec for default RX Q */
	num_vec = min(be_num_rss_want(adapter), num_online_cpus());
	if (be_roce_supported(adapter)) {
		num_roce_vec = min_t(u32, MAX_ROCE_MSIX_VECTORS,
					(num_online_cpus() + 1));
		num_roce_vec = min(num_roce_vec, MAX_ROCE_EQS);
		num_vec += num_roce_vec;
		num_vec = min(num_vec, MAX_MSIX_VECTORS);
	}
	num_vec = max(num_vec, BE_MIN_MSIX_VECTORS);

	for (i = 0; i < num_vec; i++)
		adapter->msix_entries[i].entry = i;

	status = pci_enable_msix(adapter->pdev, adapter->msix_entries, num_vec);
	if (status == 0) {
		goto done;
	} else if (status >= BE_MIN_MSIX_VECTORS) {
		/* A positive return is the number of vectors available:
		 * retry with exactly that many.
		 */
		num_vec = status;
		if (pci_enable_msix(adapter->pdev, adapter->msix_entries,
				num_vec) == 0)
			goto done;
	}

	dev_warn(dev, "MSIx enable failed\n");
	return;
done:
	/* Split the granted vectors between NIC and RoCE; if too few were
	 * granted, give them all to the NIC.
	 */
	if (be_roce_supported(adapter)) {
		if (num_vec > num_roce_vec) {
			adapter->num_msix_vec = num_vec - num_roce_vec;
			adapter->num_msix_roce_vec =
				num_vec - adapter->num_msix_vec;
		} else {
			adapter->num_msix_vec = num_vec;
			adapter->num_msix_roce_vec = 0;
		}
	} else
		adapter->num_msix_vec = num_vec;
	dev_info(dev, "enabled %d MSI-x vector(s)\n", adapter->num_msix_vec);
	return;
}
2340
Sathya Perlafe6d2a32010-11-21 23:25:50 +00002341static inline int be_msix_vec_get(struct be_adapter *adapter,
Sathya Perla10ef9ab2012-02-09 18:05:27 +00002342 struct be_eq_obj *eqo)
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002343{
Sathya Perla10ef9ab2012-02-09 18:05:27 +00002344 return adapter->msix_entries[eqo->idx].vector;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002345}
2346
2347static int be_msix_register(struct be_adapter *adapter)
2348{
Sathya Perla10ef9ab2012-02-09 18:05:27 +00002349 struct net_device *netdev = adapter->netdev;
2350 struct be_eq_obj *eqo;
2351 int status, i, vec;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002352
Sathya Perla10ef9ab2012-02-09 18:05:27 +00002353 for_all_evt_queues(adapter, eqo, i) {
2354 sprintf(eqo->desc, "%s-q%d", netdev->name, i);
2355 vec = be_msix_vec_get(adapter, eqo);
2356 status = request_irq(vec, be_msix, 0, eqo->desc, eqo);
Sathya Perla3abcded2010-10-03 22:12:27 -07002357 if (status)
2358 goto err_msix;
2359 }
Sathya Perlab628bde2009-08-17 00:58:26 +00002360
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002361 return 0;
Sathya Perla3abcded2010-10-03 22:12:27 -07002362err_msix:
Sathya Perla10ef9ab2012-02-09 18:05:27 +00002363 for (i--, eqo = &adapter->eq_obj[i]; i >= 0; i--, eqo--)
2364 free_irq(be_msix_vec_get(adapter, eqo), eqo);
2365 dev_warn(&adapter->pdev->dev, "MSIX Request IRQ failed - err %d\n",
2366 status);
Sathya Perlaac6a0c42011-03-21 20:49:25 +00002367 be_msix_disable(adapter);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002368 return status;
2369}
2370
2371static int be_irq_register(struct be_adapter *adapter)
2372{
2373 struct net_device *netdev = adapter->netdev;
2374 int status;
2375
Sathya Perlaac6a0c42011-03-21 20:49:25 +00002376 if (msix_enabled(adapter)) {
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002377 status = be_msix_register(adapter);
2378 if (status == 0)
2379 goto done;
Sarveshwar Bandiba343c72010-03-31 02:56:12 +00002380 /* INTx is not supported for VF */
2381 if (!be_physfn(adapter))
2382 return status;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002383 }
2384
Sathya Perlae49cc342012-11-27 19:50:02 +00002385 /* INTx: only the first EQ is used */
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002386 netdev->irq = adapter->pdev->irq;
2387 status = request_irq(netdev->irq, be_intx, IRQF_SHARED, netdev->name,
Sathya Perlae49cc342012-11-27 19:50:02 +00002388 &adapter->eq_obj[0]);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002389 if (status) {
2390 dev_err(&adapter->pdev->dev,
2391 "INTx request IRQ failed - err %d\n", status);
2392 return status;
2393 }
2394done:
2395 adapter->isr_registered = true;
2396 return 0;
2397}
2398
2399static void be_irq_unregister(struct be_adapter *adapter)
2400{
2401 struct net_device *netdev = adapter->netdev;
Sathya Perla10ef9ab2012-02-09 18:05:27 +00002402 struct be_eq_obj *eqo;
Sathya Perla3abcded2010-10-03 22:12:27 -07002403 int i;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002404
2405 if (!adapter->isr_registered)
2406 return;
2407
2408 /* INTx */
Sathya Perlaac6a0c42011-03-21 20:49:25 +00002409 if (!msix_enabled(adapter)) {
Sathya Perlae49cc342012-11-27 19:50:02 +00002410 free_irq(netdev->irq, &adapter->eq_obj[0]);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002411 goto done;
2412 }
2413
2414 /* MSIx */
Sathya Perla10ef9ab2012-02-09 18:05:27 +00002415 for_all_evt_queues(adapter, eqo, i)
2416 free_irq(be_msix_vec_get(adapter, eqo), eqo);
Sathya Perla3abcded2010-10-03 22:12:27 -07002417
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002418done:
2419 adapter->isr_registered = false;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002420}
2421
/* Destroy every RX queue: ask the f/w to tear down the RXQ, wait for DMA
 * to drain, flush its CQ of stale completions, then free the ring memory.
 */
static void be_rx_qs_destroy(struct be_adapter *adapter)
{
	struct be_queue_info *q;
	struct be_rx_obj *rxo;
	int i;

	for_all_rx_queues(adapter, rxo, i) {
		q = &rxo->q;
		if (q->created) {
			be_cmd_rxq_destroy(adapter, q);
			/* After the rxq is invalidated, wait for a grace time
			 * of 1ms for all dma to end and the flush compl to
			 * arrive
			 */
			mdelay(1);
			be_rx_cq_clean(rxo);
		}
		be_queue_free(adapter, q);
	}
}
2442
/* ndo_stop handler: quiesce the device in a strict order — stop NAPI and
 * async MCC first, drain TX, destroy RX queues, then synchronize and clean
 * each EQ before releasing the interrupt handlers.
 */
static int be_close(struct net_device *netdev)
{
	struct be_adapter *adapter = netdev_priv(netdev);
	struct be_eq_obj *eqo;
	int i;

	be_roce_dev_close(adapter);

	for_all_evt_queues(adapter, eqo, i)
		napi_disable(&eqo->napi);

	be_async_mcc_disable(adapter);

	/* Wait for all pending tx completions to arrive so that
	 * all tx skbs are freed.
	 */
	be_tx_compl_clean(adapter);

	be_rx_qs_destroy(adapter);

	for_all_evt_queues(adapter, eqo, i) {
		/* Ensure no handler is still running for this EQ's irq
		 * before cleaning the EQ.
		 */
		if (msix_enabled(adapter))
			synchronize_irq(be_msix_vec_get(adapter, eqo));
		else
			synchronize_irq(netdev->irq);
		be_eq_clean(eqo);
	}

	be_irq_unregister(adapter);

	return 0;
}
2475
Sathya Perla10ef9ab2012-02-09 18:05:27 +00002476static int be_rx_qs_create(struct be_adapter *adapter)
Sathya Perla482c9e72011-06-29 23:33:17 +00002477{
2478 struct be_rx_obj *rxo;
Padmanabh Ratnakare9008ee2011-11-25 05:48:53 +00002479 int rc, i, j;
2480 u8 rsstable[128];
Sathya Perla482c9e72011-06-29 23:33:17 +00002481
2482 for_all_rx_queues(adapter, rxo, i) {
Sathya Perla10ef9ab2012-02-09 18:05:27 +00002483 rc = be_queue_alloc(adapter, &rxo->q, RX_Q_LEN,
2484 sizeof(struct be_eth_rx_d));
2485 if (rc)
2486 return rc;
2487 }
2488
2489 /* The FW would like the default RXQ to be created first */
2490 rxo = default_rxo(adapter);
2491 rc = be_cmd_rxq_create(adapter, &rxo->q, rxo->cq.id, rx_frag_size,
2492 adapter->if_handle, false, &rxo->rss_id);
2493 if (rc)
2494 return rc;
2495
2496 for_all_rss_queues(adapter, rxo, i) {
Sathya Perla482c9e72011-06-29 23:33:17 +00002497 rc = be_cmd_rxq_create(adapter, &rxo->q, rxo->cq.id,
Sathya Perla10ef9ab2012-02-09 18:05:27 +00002498 rx_frag_size, adapter->if_handle,
2499 true, &rxo->rss_id);
Sathya Perla482c9e72011-06-29 23:33:17 +00002500 if (rc)
2501 return rc;
2502 }
2503
2504 if (be_multi_rxq(adapter)) {
Padmanabh Ratnakare9008ee2011-11-25 05:48:53 +00002505 for (j = 0; j < 128; j += adapter->num_rx_qs - 1) {
2506 for_all_rss_queues(adapter, rxo, i) {
2507 if ((j + i) >= 128)
2508 break;
2509 rsstable[j + i] = rxo->rss_id;
2510 }
2511 }
2512 rc = be_cmd_rss_config(adapter, rsstable, 128);
Sathya Perla482c9e72011-06-29 23:33:17 +00002513 if (rc)
2514 return rc;
2515 }
2516
2517 /* First time posting */
Sathya Perla10ef9ab2012-02-09 18:05:27 +00002518 for_all_rx_queues(adapter, rxo, i)
Sathya Perla482c9e72011-06-29 23:33:17 +00002519 be_post_rx_frags(rxo, GFP_KERNEL);
Sathya Perla889cd4b2010-05-30 23:33:45 +00002520 return 0;
2521}
2522
/* ndo_open handler: create RX queues, register interrupts, arm all CQs,
 * enable async MCC and NAPI/EQs, then report the initial link state.
 * On any failure the partial bring-up is undone via be_close().
 */
static int be_open(struct net_device *netdev)
{
	struct be_adapter *adapter = netdev_priv(netdev);
	struct be_eq_obj *eqo;
	struct be_rx_obj *rxo;
	struct be_tx_obj *txo;
	u8 link_status;
	int status, i;

	status = be_rx_qs_create(adapter);
	if (status)
		goto err;

	be_irq_register(adapter);

	/* Arm all RX and TX completion queues */
	for_all_rx_queues(adapter, rxo, i)
		be_cq_notify(adapter, rxo->cq.id, true, 0);

	for_all_tx_queues(adapter, txo, i)
		be_cq_notify(adapter, txo->cq.id, true, 0);

	be_async_mcc_enable(adapter);

	/* Enable NAPI and arm the event queues */
	for_all_evt_queues(adapter, eqo, i) {
		napi_enable(&eqo->napi);
		be_eq_notify(adapter, eqo->q.id, true, false, 0);
	}

	status = be_cmd_link_status_query(adapter, NULL, &link_status, 0);
	if (!status)
		be_link_status_update(adapter, link_status);

	be_roce_dev_open(adapter);
	return 0;
err:
	be_close(adapter->netdev);
	return -EIO;
}
2561
Ajit Khaparde71d8d1b2009-12-03 06:16:59 +00002562static int be_setup_wol(struct be_adapter *adapter, bool enable)
2563{
2564 struct be_dma_mem cmd;
2565 int status = 0;
2566 u8 mac[ETH_ALEN];
2567
2568 memset(mac, 0, ETH_ALEN);
2569
2570 cmd.size = sizeof(struct be_cmd_req_acpi_wol_magic_config);
Ivan Vecera2b7bceb2011-02-02 08:05:12 +00002571 cmd.va = dma_alloc_coherent(&adapter->pdev->dev, cmd.size, &cmd.dma,
Joe Perches1f9061d22013-03-15 07:23:58 +00002572 GFP_KERNEL | __GFP_ZERO);
Ajit Khaparde71d8d1b2009-12-03 06:16:59 +00002573 if (cmd.va == NULL)
2574 return -1;
Ajit Khaparde71d8d1b2009-12-03 06:16:59 +00002575
2576 if (enable) {
2577 status = pci_write_config_dword(adapter->pdev,
2578 PCICFG_PM_CONTROL_OFFSET, PCICFG_PM_CONTROL_MASK);
2579 if (status) {
2580 dev_err(&adapter->pdev->dev,
Frans Pop2381a552010-03-24 07:57:36 +00002581 "Could not enable Wake-on-lan\n");
Ivan Vecera2b7bceb2011-02-02 08:05:12 +00002582 dma_free_coherent(&adapter->pdev->dev, cmd.size, cmd.va,
2583 cmd.dma);
Ajit Khaparde71d8d1b2009-12-03 06:16:59 +00002584 return status;
2585 }
2586 status = be_cmd_enable_magic_wol(adapter,
2587 adapter->netdev->dev_addr, &cmd);
2588 pci_enable_wake(adapter->pdev, PCI_D3hot, 1);
2589 pci_enable_wake(adapter->pdev, PCI_D3cold, 1);
2590 } else {
2591 status = be_cmd_enable_magic_wol(adapter, mac, &cmd);
2592 pci_enable_wake(adapter->pdev, PCI_D3hot, 0);
2593 pci_enable_wake(adapter->pdev, PCI_D3cold, 0);
2594 }
2595
Ivan Vecera2b7bceb2011-02-02 08:05:12 +00002596 dma_free_coherent(&adapter->pdev->dev, cmd.size, cmd.va, cmd.dma);
Ajit Khaparde71d8d1b2009-12-03 06:16:59 +00002597 return status;
2598}
2599
/*
 * Generate a seed MAC address from the PF MAC Address using jhash.
 * MAC Address for VFs are assigned incrementally starting from the seed.
 * These addresses are programmed in the ASIC by the PF and the VF driver
 * queries for the MAC address during its probe.
 */
static int be_vf_eth_addr_config(struct be_adapter *adapter)
{
	u32 vf;
	int status = 0;
	u8 mac[ETH_ALEN];
	struct be_vf_cfg *vf_cfg;

	be_vf_eth_addr_generate(adapter, mac);

	for_all_vfs(adapter, vf_cfg, vf) {
		if (lancer_chip(adapter)) {
			/* Lancer: program the MAC via the MAC-list command */
			status = be_cmd_set_mac_list(adapter, mac, 1, vf + 1);
		} else {
			/* BEx: add the MAC as a pmac on the VF's interface */
			status = be_cmd_pmac_add(adapter, mac,
						 vf_cfg->if_handle,
						 &vf_cfg->pmac_id, vf + 1);
		}

		if (status)
			dev_err(&adapter->pdev->dev,
			"Mac address assignment failed for VF %d\n", vf);
		else
			memcpy(vf_cfg->mac_addr, mac, ETH_ALEN);

		/* Next VF gets the next consecutive address */
		mac[5] += 1;
	}
	/* NOTE(review): only the last VF's status is returned; earlier
	 * failures are logged but not propagated — confirm this is intended.
	 */
	return status;
}
2634
Sathya Perla4c876612013-02-03 20:30:11 +00002635static int be_vfs_mac_query(struct be_adapter *adapter)
2636{
2637 int status, vf;
2638 u8 mac[ETH_ALEN];
2639 struct be_vf_cfg *vf_cfg;
2640 bool active;
2641
2642 for_all_vfs(adapter, vf_cfg, vf) {
2643 be_cmd_get_mac_from_list(adapter, mac, &active,
2644 &vf_cfg->pmac_id, 0);
2645
2646 status = be_cmd_mac_addr_query(adapter, mac, false,
2647 vf_cfg->if_handle, 0);
2648 if (status)
2649 return status;
2650 memcpy(vf_cfg->mac_addr, mac, ETH_ALEN);
2651 }
2652 return 0;
2653}
2654
Sathya Perlaf9449ab2011-10-24 02:45:01 +00002655static void be_vf_clear(struct be_adapter *adapter)
Ajit Khaparde6d87f5c2010-08-25 00:32:33 +00002656{
Sathya Perla11ac75e2011-12-13 00:58:50 +00002657 struct be_vf_cfg *vf_cfg;
Ajit Khaparde6d87f5c2010-08-25 00:32:33 +00002658 u32 vf;
2659
Sathya Perla39f1d942012-05-08 19:41:24 +00002660 if (be_find_vfs(adapter, ASSIGNED)) {
Sathya Perla4c876612013-02-03 20:30:11 +00002661 dev_warn(&adapter->pdev->dev,
2662 "VFs are assigned to VMs: not disabling VFs\n");
Sathya Perla39f1d942012-05-08 19:41:24 +00002663 goto done;
2664 }
2665
Sathya Perla11ac75e2011-12-13 00:58:50 +00002666 for_all_vfs(adapter, vf_cfg, vf) {
Padmanabh Ratnakar590c3912011-11-25 05:47:26 +00002667 if (lancer_chip(adapter))
2668 be_cmd_set_mac_list(adapter, NULL, 0, vf + 1);
2669 else
Sathya Perla11ac75e2011-12-13 00:58:50 +00002670 be_cmd_pmac_del(adapter, vf_cfg->if_handle,
2671 vf_cfg->pmac_id, vf + 1);
Sathya Perlaf9449ab2011-10-24 02:45:01 +00002672
Sathya Perla11ac75e2011-12-13 00:58:50 +00002673 be_cmd_if_destroy(adapter, vf_cfg->if_handle, vf + 1);
2674 }
Sathya Perla39f1d942012-05-08 19:41:24 +00002675 pci_disable_sriov(adapter->pdev);
2676done:
2677 kfree(adapter->vf_cfg);
2678 adapter->num_vfs = 0;
Ajit Khaparde6d87f5c2010-08-25 00:32:33 +00002679}
2680
Sathya Perlaa54769f2011-10-24 02:45:00 +00002681static int be_clear(struct be_adapter *adapter)
2682{
Ajit Khapardefbc13f02012-03-18 06:23:21 +00002683 int i = 1;
2684
Sathya Perla191eb752012-02-23 18:50:13 +00002685 if (adapter->flags & BE_FLAGS_WORKER_SCHEDULED) {
2686 cancel_delayed_work_sync(&adapter->work);
2687 adapter->flags &= ~BE_FLAGS_WORKER_SCHEDULED;
2688 }
2689
Sathya Perla11ac75e2011-12-13 00:58:50 +00002690 if (sriov_enabled(adapter))
Sathya Perlaf9449ab2011-10-24 02:45:01 +00002691 be_vf_clear(adapter);
2692
Ajit Khapardefbc13f02012-03-18 06:23:21 +00002693 for (; adapter->uc_macs > 0; adapter->uc_macs--, i++)
2694 be_cmd_pmac_del(adapter, adapter->if_handle,
2695 adapter->pmac_id[i], 0);
2696
Sathya Perlaf9449ab2011-10-24 02:45:01 +00002697 be_cmd_if_destroy(adapter, adapter->if_handle, 0);
Sathya Perlaa54769f2011-10-24 02:45:00 +00002698
2699 be_mcc_queues_destroy(adapter);
Sathya Perla10ef9ab2012-02-09 18:05:27 +00002700 be_rx_cqs_destroy(adapter);
Sathya Perlaa54769f2011-10-24 02:45:00 +00002701 be_tx_queues_destroy(adapter);
Sathya Perla10ef9ab2012-02-09 18:05:27 +00002702 be_evt_queues_destroy(adapter);
Sathya Perlaa54769f2011-10-24 02:45:00 +00002703
Padmanabh Ratnakarabb93952012-10-20 06:01:41 +00002704 kfree(adapter->pmac_id);
2705 adapter->pmac_id = NULL;
2706
Sathya Perla10ef9ab2012-02-09 18:05:27 +00002707 be_msix_disable(adapter);
Sathya Perlaa54769f2011-10-24 02:45:00 +00002708 return 0;
2709}
2710
Sathya Perla4c876612013-02-03 20:30:11 +00002711static int be_vfs_if_create(struct be_adapter *adapter)
Padmanabh Ratnakarabb93952012-10-20 06:01:41 +00002712{
Sathya Perla4c876612013-02-03 20:30:11 +00002713 struct be_vf_cfg *vf_cfg;
2714 u32 cap_flags, en_flags, vf;
Padmanabh Ratnakarabb93952012-10-20 06:01:41 +00002715 int status;
2716
Sathya Perla4c876612013-02-03 20:30:11 +00002717 cap_flags = BE_IF_FLAGS_UNTAGGED | BE_IF_FLAGS_BROADCAST |
2718 BE_IF_FLAGS_MULTICAST;
Padmanabh Ratnakarabb93952012-10-20 06:01:41 +00002719
Sathya Perla4c876612013-02-03 20:30:11 +00002720 for_all_vfs(adapter, vf_cfg, vf) {
2721 if (!BE3_chip(adapter))
Vasundhara Volama05f99d2013-04-21 23:28:17 +00002722 be_cmd_get_profile_config(adapter, &cap_flags,
2723 NULL, vf + 1);
Sathya Perla4c876612013-02-03 20:30:11 +00002724
2725 /* If a FW profile exists, then cap_flags are updated */
2726 en_flags = cap_flags & (BE_IF_FLAGS_UNTAGGED |
2727 BE_IF_FLAGS_BROADCAST | BE_IF_FLAGS_MULTICAST);
2728 status = be_cmd_if_create(adapter, cap_flags, en_flags,
2729 &vf_cfg->if_handle, vf + 1);
2730 if (status)
2731 goto err;
2732 }
2733err:
2734 return status;
Padmanabh Ratnakarabb93952012-10-20 06:01:41 +00002735}
2736
Sathya Perla39f1d942012-05-08 19:41:24 +00002737static int be_vf_setup_init(struct be_adapter *adapter)
Sathya Perla30128032011-11-10 19:17:57 +00002738{
Sathya Perla11ac75e2011-12-13 00:58:50 +00002739 struct be_vf_cfg *vf_cfg;
Sathya Perla30128032011-11-10 19:17:57 +00002740 int vf;
2741
Sathya Perla39f1d942012-05-08 19:41:24 +00002742 adapter->vf_cfg = kcalloc(adapter->num_vfs, sizeof(*vf_cfg),
2743 GFP_KERNEL);
2744 if (!adapter->vf_cfg)
2745 return -ENOMEM;
2746
Sathya Perla11ac75e2011-12-13 00:58:50 +00002747 for_all_vfs(adapter, vf_cfg, vf) {
2748 vf_cfg->if_handle = -1;
2749 vf_cfg->pmac_id = -1;
Sathya Perla30128032011-11-10 19:17:57 +00002750 }
Sathya Perla39f1d942012-05-08 19:41:24 +00002751 return 0;
Sathya Perla30128032011-11-10 19:17:57 +00002752}
2753
Sathya Perlaf9449ab2011-10-24 02:45:01 +00002754static int be_vf_setup(struct be_adapter *adapter)
2755{
Sathya Perla11ac75e2011-12-13 00:58:50 +00002756 struct be_vf_cfg *vf_cfg;
Ajit Khapardef1f3ee12012-03-18 06:23:41 +00002757 u16 def_vlan, lnk_speed;
Sathya Perla4c876612013-02-03 20:30:11 +00002758 int status, old_vfs, vf;
2759 struct device *dev = &adapter->pdev->dev;
Sathya Perlaf9449ab2011-10-24 02:45:01 +00002760
Sathya Perla4c876612013-02-03 20:30:11 +00002761 old_vfs = be_find_vfs(adapter, ENABLED);
2762 if (old_vfs) {
2763 dev_info(dev, "%d VFs are already enabled\n", old_vfs);
2764 if (old_vfs != num_vfs)
2765 dev_warn(dev, "Ignoring num_vfs=%d setting\n", num_vfs);
2766 adapter->num_vfs = old_vfs;
Sathya Perla39f1d942012-05-08 19:41:24 +00002767 } else {
Sathya Perla4c876612013-02-03 20:30:11 +00002768 if (num_vfs > adapter->dev_num_vfs)
2769 dev_info(dev, "Device supports %d VFs and not %d\n",
2770 adapter->dev_num_vfs, num_vfs);
2771 adapter->num_vfs = min_t(u16, num_vfs, adapter->dev_num_vfs);
2772
2773 status = pci_enable_sriov(adapter->pdev, num_vfs);
2774 if (status) {
2775 dev_err(dev, "SRIOV enable failed\n");
2776 adapter->num_vfs = 0;
2777 return 0;
2778 }
Sathya Perla39f1d942012-05-08 19:41:24 +00002779 }
2780
2781 status = be_vf_setup_init(adapter);
2782 if (status)
2783 goto err;
Sathya Perla30128032011-11-10 19:17:57 +00002784
Sathya Perla4c876612013-02-03 20:30:11 +00002785 if (old_vfs) {
2786 for_all_vfs(adapter, vf_cfg, vf) {
2787 status = be_cmd_get_if_id(adapter, vf_cfg, vf);
2788 if (status)
2789 goto err;
2790 }
2791 } else {
2792 status = be_vfs_if_create(adapter);
Sathya Perlaf9449ab2011-10-24 02:45:01 +00002793 if (status)
2794 goto err;
Sathya Perlaf9449ab2011-10-24 02:45:01 +00002795 }
2796
Sathya Perla4c876612013-02-03 20:30:11 +00002797 if (old_vfs) {
2798 status = be_vfs_mac_query(adapter);
2799 if (status)
2800 goto err;
2801 } else {
Sathya Perla39f1d942012-05-08 19:41:24 +00002802 status = be_vf_eth_addr_config(adapter);
2803 if (status)
2804 goto err;
2805 }
Sathya Perlaf9449ab2011-10-24 02:45:01 +00002806
Sathya Perla11ac75e2011-12-13 00:58:50 +00002807 for_all_vfs(adapter, vf_cfg, vf) {
Sathya Perla4c876612013-02-03 20:30:11 +00002808 /* BE3 FW, by default, caps VF TX-rate to 100mbps.
2809 * Allow full available bandwidth
2810 */
2811 if (BE3_chip(adapter) && !old_vfs)
2812 be_cmd_set_qos(adapter, 1000, vf+1);
2813
2814 status = be_cmd_link_status_query(adapter, &lnk_speed,
2815 NULL, vf + 1);
2816 if (!status)
2817 vf_cfg->tx_rate = lnk_speed;
Ajit Khapardef1f3ee12012-03-18 06:23:41 +00002818
2819 status = be_cmd_get_hsw_config(adapter, &def_vlan,
Sathya Perla4c876612013-02-03 20:30:11 +00002820 vf + 1, vf_cfg->if_handle);
Ajit Khapardef1f3ee12012-03-18 06:23:41 +00002821 if (status)
2822 goto err;
2823 vf_cfg->def_vid = def_vlan;
Padmanabh Ratnakardcf7ebb2012-10-20 06:03:49 +00002824
2825 be_cmd_enable_vf(adapter, vf + 1);
Sathya Perlaf9449ab2011-10-24 02:45:01 +00002826 }
2827 return 0;
2828err:
Sathya Perla4c876612013-02-03 20:30:11 +00002829 dev_err(dev, "VF setup failed\n");
2830 be_vf_clear(adapter);
Sathya Perlaf9449ab2011-10-24 02:45:01 +00002831 return status;
2832}
2833
Sathya Perla30128032011-11-10 19:17:57 +00002834static void be_setup_init(struct be_adapter *adapter)
2835{
2836 adapter->vlan_prio_bmap = 0xff;
Ajit Khaparde42f11cf2012-04-21 18:53:22 +00002837 adapter->phy.link_speed = -1;
Sathya Perla30128032011-11-10 19:17:57 +00002838 adapter->if_handle = -1;
2839 adapter->be3_native = false;
2840 adapter->promiscuous = false;
Padmanabh Ratnakarf25b1192012-10-20 06:02:52 +00002841 if (be_physfn(adapter))
2842 adapter->cmd_privileges = MAX_PRIVILEGES;
2843 else
2844 adapter->cmd_privileges = MIN_PRIVILEGES;
Sathya Perla30128032011-11-10 19:17:57 +00002845}
2846
Padmanabh Ratnakar1578e772012-06-07 04:37:08 +00002847static int be_get_mac_addr(struct be_adapter *adapter, u8 *mac, u32 if_handle,
2848 bool *active_mac, u32 *pmac_id)
Padmanabh Ratnakar590c3912011-11-25 05:47:26 +00002849{
Padmanabh Ratnakar1578e772012-06-07 04:37:08 +00002850 int status = 0;
Padmanabh Ratnakare5e1ee82012-02-03 09:50:17 +00002851
Padmanabh Ratnakar1578e772012-06-07 04:37:08 +00002852 if (!is_zero_ether_addr(adapter->netdev->perm_addr)) {
2853 memcpy(mac, adapter->netdev->dev_addr, ETH_ALEN);
2854 if (!lancer_chip(adapter) && !be_physfn(adapter))
2855 *active_mac = true;
2856 else
2857 *active_mac = false;
Padmanabh Ratnakare5e1ee82012-02-03 09:50:17 +00002858
Padmanabh Ratnakar1578e772012-06-07 04:37:08 +00002859 return status;
Padmanabh Ratnakare5e1ee82012-02-03 09:50:17 +00002860 }
Padmanabh Ratnakar1578e772012-06-07 04:37:08 +00002861
2862 if (lancer_chip(adapter)) {
2863 status = be_cmd_get_mac_from_list(adapter, mac,
2864 active_mac, pmac_id, 0);
2865 if (*active_mac) {
Sathya Perla5ee49792012-09-28 04:39:41 +00002866 status = be_cmd_mac_addr_query(adapter, mac, false,
2867 if_handle, *pmac_id);
Padmanabh Ratnakar1578e772012-06-07 04:37:08 +00002868 }
2869 } else if (be_physfn(adapter)) {
2870 /* For BE3, for PF get permanent MAC */
Sathya Perla5ee49792012-09-28 04:39:41 +00002871 status = be_cmd_mac_addr_query(adapter, mac, true, 0, 0);
Padmanabh Ratnakar1578e772012-06-07 04:37:08 +00002872 *active_mac = false;
2873 } else {
2874 /* For BE3, for VF get soft MAC assigned by PF*/
Sathya Perla5ee49792012-09-28 04:39:41 +00002875 status = be_cmd_mac_addr_query(adapter, mac, false,
Padmanabh Ratnakar1578e772012-06-07 04:37:08 +00002876 if_handle, 0);
2877 *active_mac = true;
2878 }
Padmanabh Ratnakar590c3912011-11-25 05:47:26 +00002879 return status;
2880}
2881
Padmanabh Ratnakarabb93952012-10-20 06:01:41 +00002882static void be_get_resources(struct be_adapter *adapter)
2883{
Sathya Perla4c876612013-02-03 20:30:11 +00002884 u16 dev_num_vfs;
2885 int pos, status;
Padmanabh Ratnakarabb93952012-10-20 06:01:41 +00002886 bool profile_present = false;
Vasundhara Volama05f99d2013-04-21 23:28:17 +00002887 u16 txq_count = 0;
Padmanabh Ratnakarabb93952012-10-20 06:01:41 +00002888
Sathya Perla4c876612013-02-03 20:30:11 +00002889 if (!BEx_chip(adapter)) {
Padmanabh Ratnakarabb93952012-10-20 06:01:41 +00002890 status = be_cmd_get_func_config(adapter);
Padmanabh Ratnakarabb93952012-10-20 06:01:41 +00002891 if (!status)
2892 profile_present = true;
Vasundhara Volama05f99d2013-04-21 23:28:17 +00002893 } else if (BE3_chip(adapter) && be_physfn(adapter)) {
2894 be_cmd_get_profile_config(adapter, NULL, &txq_count, 0);
Padmanabh Ratnakarabb93952012-10-20 06:01:41 +00002895 }
2896
2897 if (profile_present) {
2898 /* Sanity fixes for Lancer */
2899 adapter->max_pmac_cnt = min_t(u16, adapter->max_pmac_cnt,
2900 BE_UC_PMAC_COUNT);
2901 adapter->max_vlans = min_t(u16, adapter->max_vlans,
2902 BE_NUM_VLANS_SUPPORTED);
2903 adapter->max_mcast_mac = min_t(u16, adapter->max_mcast_mac,
2904 BE_MAX_MC);
2905 adapter->max_tx_queues = min_t(u16, adapter->max_tx_queues,
2906 MAX_TX_QS);
2907 adapter->max_rss_queues = min_t(u16, adapter->max_rss_queues,
2908 BE3_MAX_RSS_QS);
2909 adapter->max_event_queues = min_t(u16,
2910 adapter->max_event_queues,
2911 BE3_MAX_RSS_QS);
2912
2913 if (adapter->max_rss_queues &&
2914 adapter->max_rss_queues == adapter->max_rx_queues)
2915 adapter->max_rss_queues -= 1;
2916
2917 if (adapter->max_event_queues < adapter->max_rss_queues)
2918 adapter->max_rss_queues = adapter->max_event_queues;
2919
2920 } else {
2921 if (be_physfn(adapter))
2922 adapter->max_pmac_cnt = BE_UC_PMAC_COUNT;
2923 else
2924 adapter->max_pmac_cnt = BE_VF_UC_PMAC_COUNT;
2925
2926 if (adapter->function_mode & FLEX10_MODE)
2927 adapter->max_vlans = BE_NUM_VLANS_SUPPORTED/8;
2928 else
2929 adapter->max_vlans = BE_NUM_VLANS_SUPPORTED;
2930
2931 adapter->max_mcast_mac = BE_MAX_MC;
Vasundhara Volama05f99d2013-04-21 23:28:17 +00002932 adapter->max_tx_queues = txq_count ? txq_count : MAX_TX_QS;
2933 adapter->max_tx_queues = min_t(u16, adapter->max_tx_queues,
2934 MAX_TX_QS);
Padmanabh Ratnakarabb93952012-10-20 06:01:41 +00002935 adapter->max_rss_queues = (adapter->be3_native) ?
2936 BE3_MAX_RSS_QS : BE2_MAX_RSS_QS;
2937 adapter->max_event_queues = BE3_MAX_RSS_QS;
2938
2939 adapter->if_cap_flags = BE_IF_FLAGS_UNTAGGED |
2940 BE_IF_FLAGS_BROADCAST |
2941 BE_IF_FLAGS_MULTICAST |
2942 BE_IF_FLAGS_PASS_L3L4_ERRORS |
2943 BE_IF_FLAGS_MCAST_PROMISCUOUS |
2944 BE_IF_FLAGS_VLAN_PROMISCUOUS |
2945 BE_IF_FLAGS_PROMISCUOUS;
2946
2947 if (adapter->function_caps & BE_FUNCTION_CAPS_RSS)
2948 adapter->if_cap_flags |= BE_IF_FLAGS_RSS;
2949 }
Sathya Perla4c876612013-02-03 20:30:11 +00002950
2951 pos = pci_find_ext_capability(adapter->pdev, PCI_EXT_CAP_ID_SRIOV);
2952 if (pos) {
2953 pci_read_config_word(adapter->pdev, pos + PCI_SRIOV_TOTAL_VF,
2954 &dev_num_vfs);
2955 if (BE3_chip(adapter))
2956 dev_num_vfs = min_t(u16, dev_num_vfs, MAX_VFS);
2957 adapter->dev_num_vfs = dev_num_vfs;
2958 }
Padmanabh Ratnakarabb93952012-10-20 06:01:41 +00002959}
2960
Sathya Perla39f1d942012-05-08 19:41:24 +00002961/* Routine to query per function resource limits */
2962static int be_get_config(struct be_adapter *adapter)
2963{
Sathya Perla4c876612013-02-03 20:30:11 +00002964 int status;
Sathya Perla39f1d942012-05-08 19:41:24 +00002965
Padmanabh Ratnakarabb93952012-10-20 06:01:41 +00002966 status = be_cmd_query_fw_cfg(adapter, &adapter->port_num,
2967 &adapter->function_mode,
Vasundhara Volam0ad31572013-04-21 23:28:16 +00002968 &adapter->function_caps,
2969 &adapter->asic_rev);
Padmanabh Ratnakarabb93952012-10-20 06:01:41 +00002970 if (status)
2971 goto err;
2972
2973 be_get_resources(adapter);
2974
2975 /* primary mac needs 1 pmac entry */
2976 adapter->pmac_id = kcalloc(adapter->max_pmac_cnt + 1,
2977 sizeof(u32), GFP_KERNEL);
2978 if (!adapter->pmac_id) {
2979 status = -ENOMEM;
2980 goto err;
2981 }
2982
Padmanabh Ratnakarabb93952012-10-20 06:01:41 +00002983err:
2984 return status;
Sathya Perla39f1d942012-05-08 19:41:24 +00002985}
2986
Sathya Perla5fb379e2009-06-18 00:02:59 +00002987static int be_setup(struct be_adapter *adapter)
2988{
Sathya Perla39f1d942012-05-08 19:41:24 +00002989 struct device *dev = &adapter->pdev->dev;
Padmanabh Ratnakarabb93952012-10-20 06:01:41 +00002990 u32 en_flags;
Sathya Perlaa54769f2011-10-24 02:45:00 +00002991 u32 tx_fc, rx_fc;
Sathya Perla10ef9ab2012-02-09 18:05:27 +00002992 int status;
Sarveshwar Bandiba343c72010-03-31 02:56:12 +00002993 u8 mac[ETH_ALEN];
Padmanabh Ratnakar1578e772012-06-07 04:37:08 +00002994 bool active_mac;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002995
Sathya Perla30128032011-11-10 19:17:57 +00002996 be_setup_init(adapter);
Sathya Perlaf9449ab2011-10-24 02:45:01 +00002997
Padmanabh Ratnakarabb93952012-10-20 06:01:41 +00002998 if (!lancer_chip(adapter))
2999 be_cmd_req_native_mode(adapter);
Sathya Perla39f1d942012-05-08 19:41:24 +00003000
Padmanabh Ratnakarabb93952012-10-20 06:01:41 +00003001 status = be_get_config(adapter);
3002 if (status)
3003 goto err;
Sathya Perla2dc1deb2011-07-19 19:52:33 +00003004
Sathya Perla10ef9ab2012-02-09 18:05:27 +00003005 be_msix_enable(adapter);
3006
3007 status = be_evt_queues_create(adapter);
3008 if (status)
Sathya Perlaa54769f2011-10-24 02:45:00 +00003009 goto err;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07003010
Sathya Perla10ef9ab2012-02-09 18:05:27 +00003011 status = be_tx_cqs_create(adapter);
3012 if (status)
3013 goto err;
3014
3015 status = be_rx_cqs_create(adapter);
3016 if (status)
Sathya Perlaa54769f2011-10-24 02:45:00 +00003017 goto err;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07003018
Sathya Perla5fb379e2009-06-18 00:02:59 +00003019 status = be_mcc_queues_create(adapter);
Sathya Perla10ef9ab2012-02-09 18:05:27 +00003020 if (status)
Sathya Perlaa54769f2011-10-24 02:45:00 +00003021 goto err;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07003022
Padmanabh Ratnakarf25b1192012-10-20 06:02:52 +00003023 be_cmd_get_fn_privileges(adapter, &adapter->cmd_privileges, 0);
3024 /* In UMC mode FW does not return right privileges.
3025 * Override with correct privilege equivalent to PF.
3026 */
3027 if (be_is_mc(adapter))
3028 adapter->cmd_privileges = MAX_PRIVILEGES;
3029
Sathya Perlaf9449ab2011-10-24 02:45:01 +00003030 en_flags = BE_IF_FLAGS_UNTAGGED | BE_IF_FLAGS_BROADCAST |
3031 BE_IF_FLAGS_MULTICAST | BE_IF_FLAGS_PASS_L3L4_ERRORS;
Padmanabh Ratnakar5d5adb92011-11-16 02:03:32 +00003032
Padmanabh Ratnakarabb93952012-10-20 06:01:41 +00003033 if (adapter->function_caps & BE_FUNCTION_CAPS_RSS)
Sathya Perlaf9449ab2011-10-24 02:45:01 +00003034 en_flags |= BE_IF_FLAGS_RSS;
Padmanabh Ratnakar1578e772012-06-07 04:37:08 +00003035
Padmanabh Ratnakarabb93952012-10-20 06:01:41 +00003036 en_flags = en_flags & adapter->if_cap_flags;
Padmanabh Ratnakar0b13fb42012-07-18 02:51:58 +00003037
Padmanabh Ratnakarabb93952012-10-20 06:01:41 +00003038 status = be_cmd_if_create(adapter, adapter->if_cap_flags, en_flags,
Padmanabh Ratnakar1578e772012-06-07 04:37:08 +00003039 &adapter->if_handle, 0);
Sathya Perlaf9449ab2011-10-24 02:45:01 +00003040 if (status != 0)
3041 goto err;
3042
Padmanabh Ratnakar1578e772012-06-07 04:37:08 +00003043 memset(mac, 0, ETH_ALEN);
3044 active_mac = false;
3045 status = be_get_mac_addr(adapter, mac, adapter->if_handle,
3046 &active_mac, &adapter->pmac_id[0]);
3047 if (status != 0)
3048 goto err;
3049
3050 if (!active_mac) {
3051 status = be_cmd_pmac_add(adapter, mac, adapter->if_handle,
3052 &adapter->pmac_id[0], 0);
3053 if (status != 0)
3054 goto err;
3055 }
3056
3057 if (is_zero_ether_addr(adapter->netdev->dev_addr)) {
3058 memcpy(adapter->netdev->dev_addr, mac, ETH_ALEN);
3059 memcpy(adapter->netdev->perm_addr, mac, ETH_ALEN);
Sathya Perlaf9449ab2011-10-24 02:45:01 +00003060 }
Ajit Khaparde0dffc832009-11-29 17:57:46 +00003061
Sathya Perla10ef9ab2012-02-09 18:05:27 +00003062 status = be_tx_qs_create(adapter);
3063 if (status)
3064 goto err;
3065
Sathya Perla04b71172011-09-27 13:30:27 -04003066 be_cmd_get_fw_ver(adapter, adapter->fw_ver, NULL);
Somnath Kotur5a56eb12011-09-30 07:24:28 +00003067
Sathya Perla1d1e9a42012-06-05 19:37:17 +00003068 if (adapter->vlans_added)
Sathya Perla10329df2012-06-05 19:37:18 +00003069 be_vid_config(adapter);
Sathya Perlaa54769f2011-10-24 02:45:00 +00003070
3071 be_set_rx_mode(adapter->netdev);
3072
Ajit Khapardeddc3f5c2012-04-26 15:42:31 +00003073 be_cmd_get_flow_control(adapter, &tx_fc, &rx_fc);
Padmanabh Ratnakar590c3912011-11-25 05:47:26 +00003074
Ajit Khapardeddc3f5c2012-04-26 15:42:31 +00003075 if (rx_fc != adapter->rx_fc || tx_fc != adapter->tx_fc)
3076 be_cmd_set_flow_control(adapter, adapter->tx_fc,
Sathya Perlaa54769f2011-10-24 02:45:00 +00003077 adapter->rx_fc);
Sathya Perlaa54769f2011-10-24 02:45:00 +00003078
Sathya Perla39f1d942012-05-08 19:41:24 +00003079 if (be_physfn(adapter) && num_vfs) {
3080 if (adapter->dev_num_vfs)
3081 be_vf_setup(adapter);
3082 else
3083 dev_warn(dev, "device doesn't support SRIOV\n");
Sathya Perlaf9449ab2011-10-24 02:45:01 +00003084 }
3085
Padmanabh Ratnakarf25b1192012-10-20 06:02:52 +00003086 status = be_cmd_get_phy_info(adapter);
3087 if (!status && be_pause_supported(adapter))
Ajit Khaparde42f11cf2012-04-21 18:53:22 +00003088 adapter->phy.fc_autoneg = 1;
3089
Sathya Perla191eb752012-02-23 18:50:13 +00003090 schedule_delayed_work(&adapter->work, msecs_to_jiffies(1000));
3091 adapter->flags |= BE_FLAGS_WORKER_SCHEDULED;
Sathya Perlaf9449ab2011-10-24 02:45:01 +00003092 return 0;
Sathya Perlaa54769f2011-10-24 02:45:00 +00003093err:
3094 be_clear(adapter);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07003095 return status;
3096}
3097
Ivan Vecera66268732011-12-08 01:31:21 +00003098#ifdef CONFIG_NET_POLL_CONTROLLER
3099static void be_netpoll(struct net_device *netdev)
3100{
3101 struct be_adapter *adapter = netdev_priv(netdev);
Sathya Perla10ef9ab2012-02-09 18:05:27 +00003102 struct be_eq_obj *eqo;
Ivan Vecera66268732011-12-08 01:31:21 +00003103 int i;
3104
Sathya Perlae49cc342012-11-27 19:50:02 +00003105 for_all_evt_queues(adapter, eqo, i) {
3106 be_eq_notify(eqo->adapter, eqo->q.id, false, true, 0);
3107 napi_schedule(&eqo->napi);
3108 }
Sathya Perla10ef9ab2012-02-09 18:05:27 +00003109
3110 return;
Ivan Vecera66268732011-12-08 01:31:21 +00003111}
3112#endif
3113
Ajit Khaparde84517482009-09-04 03:12:16 +00003114#define FW_FILE_HDR_SIGN "ServerEngines Corp. "
Padmanabh Ratnakarc165541e2012-04-25 01:47:15 +00003115char flash_cookie[2][16] = {"*** SE FLAS", "H DIRECTORY *** "};
3116
Sarveshwar Bandifa9a6fe2009-11-20 14:23:47 -08003117static bool be_flash_redboot(struct be_adapter *adapter,
Ajit Khaparde3f0d4562010-02-09 01:30:35 +00003118 const u8 *p, u32 img_start, int image_size,
3119 int hdr_size)
Sarveshwar Bandifa9a6fe2009-11-20 14:23:47 -08003120{
3121 u32 crc_offset;
3122 u8 flashed_crc[4];
3123 int status;
Ajit Khaparde3f0d4562010-02-09 01:30:35 +00003124
3125 crc_offset = hdr_size + img_start + image_size - 4;
3126
Sarveshwar Bandifa9a6fe2009-11-20 14:23:47 -08003127 p += crc_offset;
Ajit Khaparde3f0d4562010-02-09 01:30:35 +00003128
3129 status = be_cmd_get_flash_crc(adapter, flashed_crc,
Ajit Khapardef510fc62010-03-31 01:47:45 +00003130 (image_size - 4));
Sarveshwar Bandifa9a6fe2009-11-20 14:23:47 -08003131 if (status) {
3132 dev_err(&adapter->pdev->dev,
3133 "could not get crc from flash, not flashing redboot\n");
3134 return false;
3135 }
3136
3137 /*update redboot only if crc does not match*/
3138 if (!memcmp(flashed_crc, p, 4))
3139 return false;
3140 else
3141 return true;
Sarveshwar Bandifa9a6fe2009-11-20 14:23:47 -08003142}
3143
Sathya Perla306f1342011-08-02 19:57:45 +00003144static bool phy_flashing_required(struct be_adapter *adapter)
3145{
Ajit Khaparde42f11cf2012-04-21 18:53:22 +00003146 return (adapter->phy.phy_type == TN_8022 &&
3147 adapter->phy.interface_type == PHY_TYPE_BASET_10GB);
Sathya Perla306f1342011-08-02 19:57:45 +00003148}
3149
Padmanabh Ratnakarc165541e2012-04-25 01:47:15 +00003150static bool is_comp_in_ufi(struct be_adapter *adapter,
3151 struct flash_section_info *fsec, int type)
3152{
3153 int i = 0, img_type = 0;
3154 struct flash_section_info_g2 *fsec_g2 = NULL;
3155
Sathya Perlaca34fe32012-11-06 17:48:56 +00003156 if (BE2_chip(adapter))
Padmanabh Ratnakarc165541e2012-04-25 01:47:15 +00003157 fsec_g2 = (struct flash_section_info_g2 *)fsec;
3158
3159 for (i = 0; i < MAX_FLASH_COMP; i++) {
3160 if (fsec_g2)
3161 img_type = le32_to_cpu(fsec_g2->fsec_entry[i].type);
3162 else
3163 img_type = le32_to_cpu(fsec->fsec_entry[i].type);
3164
3165 if (img_type == type)
3166 return true;
3167 }
3168 return false;
3169
3170}
3171
3172struct flash_section_info *get_fsec_info(struct be_adapter *adapter,
3173 int header_size,
3174 const struct firmware *fw)
3175{
3176 struct flash_section_info *fsec = NULL;
3177 const u8 *p = fw->data;
3178
3179 p += header_size;
3180 while (p < (fw->data + fw->size)) {
3181 fsec = (struct flash_section_info *)p;
3182 if (!memcmp(flash_cookie, fsec->cookie, sizeof(flash_cookie)))
3183 return fsec;
3184 p += 32;
3185 }
3186 return NULL;
3187}
3188
Padmanabh Ratnakar773a2d72012-10-20 06:04:16 +00003189static int be_flash(struct be_adapter *adapter, const u8 *img,
3190 struct be_dma_mem *flash_cmd, int optype, int img_size)
3191{
3192 u32 total_bytes = 0, flash_op, num_bytes = 0;
3193 int status = 0;
3194 struct be_cmd_write_flashrom *req = flash_cmd->va;
3195
3196 total_bytes = img_size;
3197 while (total_bytes) {
3198 num_bytes = min_t(u32, 32*1024, total_bytes);
3199
3200 total_bytes -= num_bytes;
3201
3202 if (!total_bytes) {
3203 if (optype == OPTYPE_PHY_FW)
3204 flash_op = FLASHROM_OPER_PHY_FLASH;
3205 else
3206 flash_op = FLASHROM_OPER_FLASH;
3207 } else {
3208 if (optype == OPTYPE_PHY_FW)
3209 flash_op = FLASHROM_OPER_PHY_SAVE;
3210 else
3211 flash_op = FLASHROM_OPER_SAVE;
3212 }
3213
Padmanabh Ratnakarbe716442012-10-22 23:02:44 +00003214 memcpy(req->data_buf, img, num_bytes);
Padmanabh Ratnakar773a2d72012-10-20 06:04:16 +00003215 img += num_bytes;
3216 status = be_cmd_write_flashrom(adapter, flash_cmd, optype,
3217 flash_op, num_bytes);
3218 if (status) {
3219 if (status == ILLEGAL_IOCTL_REQ &&
3220 optype == OPTYPE_PHY_FW)
3221 break;
3222 dev_err(&adapter->pdev->dev,
3223 "cmd to write to flash rom failed.\n");
3224 return status;
3225 }
3226 }
3227 return 0;
3228}
3229
Vasundhara Volam0ad31572013-04-21 23:28:16 +00003230/* For BE2, BE3 and BE3-R */
Sathya Perlaca34fe32012-11-06 17:48:56 +00003231static int be_flash_BEx(struct be_adapter *adapter,
Padmanabh Ratnakarc165541e2012-04-25 01:47:15 +00003232 const struct firmware *fw,
3233 struct be_dma_mem *flash_cmd,
3234 int num_of_images)
Ajit Khaparde3f0d4562010-02-09 01:30:35 +00003235
Ajit Khaparde84517482009-09-04 03:12:16 +00003236{
Ajit Khaparde3f0d4562010-02-09 01:30:35 +00003237 int status = 0, i, filehdr_size = 0;
Padmanabh Ratnakarc165541e2012-04-25 01:47:15 +00003238 int img_hdrs_size = (num_of_images * sizeof(struct image_hdr));
Ajit Khaparde84517482009-09-04 03:12:16 +00003239 const u8 *p = fw->data;
Joe Perches215faf92010-12-21 02:16:10 -08003240 const struct flash_comp *pflashcomp;
Padmanabh Ratnakar773a2d72012-10-20 06:04:16 +00003241 int num_comp, redboot;
Padmanabh Ratnakarc165541e2012-04-25 01:47:15 +00003242 struct flash_section_info *fsec = NULL;
Ajit Khaparde84517482009-09-04 03:12:16 +00003243
Padmanabh Ratnakarc165541e2012-04-25 01:47:15 +00003244 struct flash_comp gen3_flash_types[] = {
3245 { FLASH_iSCSI_PRIMARY_IMAGE_START_g3, OPTYPE_ISCSI_ACTIVE,
3246 FLASH_IMAGE_MAX_SIZE_g3, IMAGE_FIRMWARE_iSCSI},
3247 { FLASH_REDBOOT_START_g3, OPTYPE_REDBOOT,
3248 FLASH_REDBOOT_IMAGE_MAX_SIZE_g3, IMAGE_BOOT_CODE},
3249 { FLASH_iSCSI_BIOS_START_g3, OPTYPE_BIOS,
3250 FLASH_BIOS_IMAGE_MAX_SIZE_g3, IMAGE_OPTION_ROM_ISCSI},
3251 { FLASH_PXE_BIOS_START_g3, OPTYPE_PXE_BIOS,
3252 FLASH_BIOS_IMAGE_MAX_SIZE_g3, IMAGE_OPTION_ROM_PXE},
3253 { FLASH_FCoE_BIOS_START_g3, OPTYPE_FCOE_BIOS,
3254 FLASH_BIOS_IMAGE_MAX_SIZE_g3, IMAGE_OPTION_ROM_FCoE},
3255 { FLASH_iSCSI_BACKUP_IMAGE_START_g3, OPTYPE_ISCSI_BACKUP,
3256 FLASH_IMAGE_MAX_SIZE_g3, IMAGE_FIRMWARE_BACKUP_iSCSI},
3257 { FLASH_FCoE_PRIMARY_IMAGE_START_g3, OPTYPE_FCOE_FW_ACTIVE,
3258 FLASH_IMAGE_MAX_SIZE_g3, IMAGE_FIRMWARE_FCoE},
3259 { FLASH_FCoE_BACKUP_IMAGE_START_g3, OPTYPE_FCOE_FW_BACKUP,
3260 FLASH_IMAGE_MAX_SIZE_g3, IMAGE_FIRMWARE_BACKUP_FCoE},
3261 { FLASH_NCSI_START_g3, OPTYPE_NCSI_FW,
3262 FLASH_NCSI_IMAGE_MAX_SIZE_g3, IMAGE_NCSI},
3263 { FLASH_PHY_FW_START_g3, OPTYPE_PHY_FW,
3264 FLASH_PHY_FW_IMAGE_MAX_SIZE_g3, IMAGE_FIRMWARE_PHY}
Ajit Khaparde3f0d4562010-02-09 01:30:35 +00003265 };
Padmanabh Ratnakarc165541e2012-04-25 01:47:15 +00003266
3267 struct flash_comp gen2_flash_types[] = {
3268 { FLASH_iSCSI_PRIMARY_IMAGE_START_g2, OPTYPE_ISCSI_ACTIVE,
3269 FLASH_IMAGE_MAX_SIZE_g2, IMAGE_FIRMWARE_iSCSI},
3270 { FLASH_REDBOOT_START_g2, OPTYPE_REDBOOT,
3271 FLASH_REDBOOT_IMAGE_MAX_SIZE_g2, IMAGE_BOOT_CODE},
3272 { FLASH_iSCSI_BIOS_START_g2, OPTYPE_BIOS,
3273 FLASH_BIOS_IMAGE_MAX_SIZE_g2, IMAGE_OPTION_ROM_ISCSI},
3274 { FLASH_PXE_BIOS_START_g2, OPTYPE_PXE_BIOS,
3275 FLASH_BIOS_IMAGE_MAX_SIZE_g2, IMAGE_OPTION_ROM_PXE},
3276 { FLASH_FCoE_BIOS_START_g2, OPTYPE_FCOE_BIOS,
3277 FLASH_BIOS_IMAGE_MAX_SIZE_g2, IMAGE_OPTION_ROM_FCoE},
3278 { FLASH_iSCSI_BACKUP_IMAGE_START_g2, OPTYPE_ISCSI_BACKUP,
3279 FLASH_IMAGE_MAX_SIZE_g2, IMAGE_FIRMWARE_BACKUP_iSCSI},
3280 { FLASH_FCoE_PRIMARY_IMAGE_START_g2, OPTYPE_FCOE_FW_ACTIVE,
3281 FLASH_IMAGE_MAX_SIZE_g2, IMAGE_FIRMWARE_FCoE},
3282 { FLASH_FCoE_BACKUP_IMAGE_START_g2, OPTYPE_FCOE_FW_BACKUP,
3283 FLASH_IMAGE_MAX_SIZE_g2, IMAGE_FIRMWARE_BACKUP_FCoE}
Ajit Khaparde3f0d4562010-02-09 01:30:35 +00003284 };
3285
Sathya Perlaca34fe32012-11-06 17:48:56 +00003286 if (BE3_chip(adapter)) {
Ajit Khaparde3f0d4562010-02-09 01:30:35 +00003287 pflashcomp = gen3_flash_types;
3288 filehdr_size = sizeof(struct flash_file_hdr_g3);
Joe Perches215faf92010-12-21 02:16:10 -08003289 num_comp = ARRAY_SIZE(gen3_flash_types);
Ajit Khaparde3f0d4562010-02-09 01:30:35 +00003290 } else {
3291 pflashcomp = gen2_flash_types;
3292 filehdr_size = sizeof(struct flash_file_hdr_g2);
Joe Perches215faf92010-12-21 02:16:10 -08003293 num_comp = ARRAY_SIZE(gen2_flash_types);
Ajit Khaparde84517482009-09-04 03:12:16 +00003294 }
Sathya Perlaca34fe32012-11-06 17:48:56 +00003295
Padmanabh Ratnakarc165541e2012-04-25 01:47:15 +00003296 /* Get flash section info*/
3297 fsec = get_fsec_info(adapter, filehdr_size + img_hdrs_size, fw);
3298 if (!fsec) {
3299 dev_err(&adapter->pdev->dev,
3300 "Invalid Cookie. UFI corrupted ?\n");
3301 return -1;
3302 }
Sarveshwar Bandi9fe96932010-03-02 22:37:28 +00003303 for (i = 0; i < num_comp; i++) {
Padmanabh Ratnakarc165541e2012-04-25 01:47:15 +00003304 if (!is_comp_in_ufi(adapter, fsec, pflashcomp[i].img_type))
Sarveshwar Bandi9fe96932010-03-02 22:37:28 +00003305 continue;
Padmanabh Ratnakarc165541e2012-04-25 01:47:15 +00003306
3307 if ((pflashcomp[i].optype == OPTYPE_NCSI_FW) &&
3308 memcmp(adapter->fw_ver, "3.102.148.0", 11) < 0)
3309 continue;
3310
Padmanabh Ratnakar773a2d72012-10-20 06:04:16 +00003311 if (pflashcomp[i].optype == OPTYPE_PHY_FW &&
3312 !phy_flashing_required(adapter))
3313 continue;
3314
3315 if (pflashcomp[i].optype == OPTYPE_REDBOOT) {
3316 redboot = be_flash_redboot(adapter, fw->data,
3317 pflashcomp[i].offset, pflashcomp[i].size,
3318 filehdr_size + img_hdrs_size);
3319 if (!redboot)
Sathya Perla306f1342011-08-02 19:57:45 +00003320 continue;
3321 }
Padmanabh Ratnakarc165541e2012-04-25 01:47:15 +00003322
Ajit Khaparde3f0d4562010-02-09 01:30:35 +00003323 p = fw->data;
Padmanabh Ratnakarc165541e2012-04-25 01:47:15 +00003324 p += filehdr_size + pflashcomp[i].offset + img_hdrs_size;
Sathya Perla306f1342011-08-02 19:57:45 +00003325 if (p + pflashcomp[i].size > fw->data + fw->size)
3326 return -1;
Padmanabh Ratnakar773a2d72012-10-20 06:04:16 +00003327
3328 status = be_flash(adapter, p, flash_cmd, pflashcomp[i].optype,
3329 pflashcomp[i].size);
3330 if (status) {
3331 dev_err(&adapter->pdev->dev,
3332 "Flashing section type %d failed.\n",
3333 pflashcomp[i].img_type);
3334 return status;
Ajit Khaparde84517482009-09-04 03:12:16 +00003335 }
Ajit Khaparde84517482009-09-04 03:12:16 +00003336 }
Ajit Khaparde84517482009-09-04 03:12:16 +00003337 return 0;
3338}
3339
Padmanabh Ratnakar773a2d72012-10-20 06:04:16 +00003340static int be_flash_skyhawk(struct be_adapter *adapter,
3341 const struct firmware *fw,
3342 struct be_dma_mem *flash_cmd, int num_of_images)
Ajit Khaparde3f0d4562010-02-09 01:30:35 +00003343{
Padmanabh Ratnakar773a2d72012-10-20 06:04:16 +00003344 int status = 0, i, filehdr_size = 0;
3345 int img_offset, img_size, img_optype, redboot;
3346 int img_hdrs_size = num_of_images * sizeof(struct image_hdr);
3347 const u8 *p = fw->data;
3348 struct flash_section_info *fsec = NULL;
3349
3350 filehdr_size = sizeof(struct flash_file_hdr_g3);
3351 fsec = get_fsec_info(adapter, filehdr_size + img_hdrs_size, fw);
3352 if (!fsec) {
3353 dev_err(&adapter->pdev->dev,
3354 "Invalid Cookie. UFI corrupted ?\n");
3355 return -1;
3356 }
3357
3358 for (i = 0; i < le32_to_cpu(fsec->fsec_hdr.num_images); i++) {
3359 img_offset = le32_to_cpu(fsec->fsec_entry[i].offset);
3360 img_size = le32_to_cpu(fsec->fsec_entry[i].pad_size);
3361
3362 switch (le32_to_cpu(fsec->fsec_entry[i].type)) {
3363 case IMAGE_FIRMWARE_iSCSI:
3364 img_optype = OPTYPE_ISCSI_ACTIVE;
3365 break;
3366 case IMAGE_BOOT_CODE:
3367 img_optype = OPTYPE_REDBOOT;
3368 break;
3369 case IMAGE_OPTION_ROM_ISCSI:
3370 img_optype = OPTYPE_BIOS;
3371 break;
3372 case IMAGE_OPTION_ROM_PXE:
3373 img_optype = OPTYPE_PXE_BIOS;
3374 break;
3375 case IMAGE_OPTION_ROM_FCoE:
3376 img_optype = OPTYPE_FCOE_BIOS;
3377 break;
3378 case IMAGE_FIRMWARE_BACKUP_iSCSI:
3379 img_optype = OPTYPE_ISCSI_BACKUP;
3380 break;
3381 case IMAGE_NCSI:
3382 img_optype = OPTYPE_NCSI_FW;
3383 break;
3384 default:
3385 continue;
3386 }
3387
3388 if (img_optype == OPTYPE_REDBOOT) {
3389 redboot = be_flash_redboot(adapter, fw->data,
3390 img_offset, img_size,
3391 filehdr_size + img_hdrs_size);
3392 if (!redboot)
3393 continue;
3394 }
3395
3396 p = fw->data;
3397 p += filehdr_size + img_offset + img_hdrs_size;
3398 if (p + img_size > fw->data + fw->size)
3399 return -1;
3400
3401 status = be_flash(adapter, p, flash_cmd, img_optype, img_size);
3402 if (status) {
3403 dev_err(&adapter->pdev->dev,
3404 "Flashing section type %d failed.\n",
3405 fsec->fsec_entry[i].type);
3406 return status;
3407 }
3408 }
3409 return 0;
Ajit Khaparde3f0d4562010-02-09 01:30:35 +00003410}
3411
Padmanabh Ratnakarf67ef7b2012-07-12 03:57:09 +00003412static int lancer_wait_idle(struct be_adapter *adapter)
3413{
3414#define SLIPORT_IDLE_TIMEOUT 30
3415 u32 reg_val;
3416 int status = 0, i;
3417
3418 for (i = 0; i < SLIPORT_IDLE_TIMEOUT; i++) {
3419 reg_val = ioread32(adapter->db + PHYSDEV_CONTROL_OFFSET);
3420 if ((reg_val & PHYSDEV_CONTROL_INP_MASK) == 0)
3421 break;
3422
3423 ssleep(1);
3424 }
3425
3426 if (i == SLIPORT_IDLE_TIMEOUT)
3427 status = -1;
3428
3429 return status;
3430}
3431
3432static int lancer_fw_reset(struct be_adapter *adapter)
3433{
3434 int status = 0;
3435
3436 status = lancer_wait_idle(adapter);
3437 if (status)
3438 return status;
3439
3440 iowrite32(PHYSDEV_CONTROL_FW_RESET_MASK, adapter->db +
3441 PHYSDEV_CONTROL_OFFSET);
3442
3443 return status;
3444}
3445
Shripad Nunjundarao485bf562011-05-16 07:36:59 +00003446static int lancer_fw_download(struct be_adapter *adapter,
3447 const struct firmware *fw)
Ajit Khaparde84517482009-09-04 03:12:16 +00003448{
Shripad Nunjundarao485bf562011-05-16 07:36:59 +00003449#define LANCER_FW_DOWNLOAD_CHUNK (32 * 1024)
3450#define LANCER_FW_DOWNLOAD_LOCATION "/prg"
3451 struct be_dma_mem flash_cmd;
Shripad Nunjundarao485bf562011-05-16 07:36:59 +00003452 const u8 *data_ptr = NULL;
3453 u8 *dest_image_ptr = NULL;
3454 size_t image_size = 0;
3455 u32 chunk_size = 0;
3456 u32 data_written = 0;
3457 u32 offset = 0;
3458 int status = 0;
3459 u8 add_status = 0;
Padmanabh Ratnakarf67ef7b2012-07-12 03:57:09 +00003460 u8 change_status;
Shripad Nunjundarao485bf562011-05-16 07:36:59 +00003461
3462 if (!IS_ALIGNED(fw->size, sizeof(u32))) {
3463 dev_err(&adapter->pdev->dev,
3464 "FW Image not properly aligned. "
3465 "Length must be 4 byte aligned.\n");
3466 status = -EINVAL;
3467 goto lancer_fw_exit;
3468 }
3469
3470 flash_cmd.size = sizeof(struct lancer_cmd_req_write_object)
3471 + LANCER_FW_DOWNLOAD_CHUNK;
3472 flash_cmd.va = dma_alloc_coherent(&adapter->pdev->dev, flash_cmd.size,
Joe Perchesd0320f72013-03-14 13:07:21 +00003473 &flash_cmd.dma, GFP_KERNEL);
Shripad Nunjundarao485bf562011-05-16 07:36:59 +00003474 if (!flash_cmd.va) {
3475 status = -ENOMEM;
Shripad Nunjundarao485bf562011-05-16 07:36:59 +00003476 goto lancer_fw_exit;
3477 }
3478
Shripad Nunjundarao485bf562011-05-16 07:36:59 +00003479 dest_image_ptr = flash_cmd.va +
3480 sizeof(struct lancer_cmd_req_write_object);
3481 image_size = fw->size;
3482 data_ptr = fw->data;
3483
3484 while (image_size) {
3485 chunk_size = min_t(u32, image_size, LANCER_FW_DOWNLOAD_CHUNK);
3486
3487 /* Copy the image chunk content. */
3488 memcpy(dest_image_ptr, data_ptr, chunk_size);
3489
3490 status = lancer_cmd_write_object(adapter, &flash_cmd,
Padmanabh Ratnakarf67ef7b2012-07-12 03:57:09 +00003491 chunk_size, offset,
3492 LANCER_FW_DOWNLOAD_LOCATION,
3493 &data_written, &change_status,
3494 &add_status);
Shripad Nunjundarao485bf562011-05-16 07:36:59 +00003495 if (status)
3496 break;
3497
3498 offset += data_written;
3499 data_ptr += data_written;
3500 image_size -= data_written;
3501 }
3502
3503 if (!status) {
3504 /* Commit the FW written */
3505 status = lancer_cmd_write_object(adapter, &flash_cmd,
Padmanabh Ratnakarf67ef7b2012-07-12 03:57:09 +00003506 0, offset,
3507 LANCER_FW_DOWNLOAD_LOCATION,
3508 &data_written, &change_status,
3509 &add_status);
Shripad Nunjundarao485bf562011-05-16 07:36:59 +00003510 }
3511
3512 dma_free_coherent(&adapter->pdev->dev, flash_cmd.size, flash_cmd.va,
3513 flash_cmd.dma);
3514 if (status) {
3515 dev_err(&adapter->pdev->dev,
3516 "Firmware load error. "
3517 "Status code: 0x%x Additional Status: 0x%x\n",
3518 status, add_status);
3519 goto lancer_fw_exit;
3520 }
3521
Padmanabh Ratnakarf67ef7b2012-07-12 03:57:09 +00003522 if (change_status == LANCER_FW_RESET_NEEDED) {
3523 status = lancer_fw_reset(adapter);
3524 if (status) {
3525 dev_err(&adapter->pdev->dev,
3526 "Adapter busy for FW reset.\n"
3527 "New FW will not be active.\n");
3528 goto lancer_fw_exit;
3529 }
3530 } else if (change_status != LANCER_NO_RESET_NEEDED) {
3531 dev_err(&adapter->pdev->dev,
3532 "System reboot required for new FW"
3533 " to be active\n");
3534 }
3535
Shripad Nunjundarao485bf562011-05-16 07:36:59 +00003536 dev_info(&adapter->pdev->dev, "Firmware flashed successfully\n");
3537lancer_fw_exit:
3538 return status;
3539}
3540
Sathya Perlaca34fe32012-11-06 17:48:56 +00003541#define UFI_TYPE2 2
3542#define UFI_TYPE3 3
Vasundhara Volam0ad31572013-04-21 23:28:16 +00003543#define UFI_TYPE3R 10
Sathya Perlaca34fe32012-11-06 17:48:56 +00003544#define UFI_TYPE4 4
3545static int be_get_ufi_type(struct be_adapter *adapter,
Vasundhara Volam0ad31572013-04-21 23:28:16 +00003546 struct flash_file_hdr_g3 *fhdr)
Padmanabh Ratnakar773a2d72012-10-20 06:04:16 +00003547{
3548 if (fhdr == NULL)
3549 goto be_get_ufi_exit;
3550
Sathya Perlaca34fe32012-11-06 17:48:56 +00003551 if (skyhawk_chip(adapter) && fhdr->build[0] == '4')
3552 return UFI_TYPE4;
Vasundhara Volam0ad31572013-04-21 23:28:16 +00003553 else if (BE3_chip(adapter) && fhdr->build[0] == '3') {
3554 if (fhdr->asic_type_rev == 0x10)
3555 return UFI_TYPE3R;
3556 else
3557 return UFI_TYPE3;
3558 } else if (BE2_chip(adapter) && fhdr->build[0] == '2')
Sathya Perlaca34fe32012-11-06 17:48:56 +00003559 return UFI_TYPE2;
Padmanabh Ratnakar773a2d72012-10-20 06:04:16 +00003560
3561be_get_ufi_exit:
3562 dev_err(&adapter->pdev->dev,
3563 "UFI and Interface are not compatible for flashing\n");
3564 return -1;
3565}
3566
Shripad Nunjundarao485bf562011-05-16 07:36:59 +00003567static int be_fw_download(struct be_adapter *adapter, const struct firmware* fw)
3568{
Ajit Khaparde3f0d4562010-02-09 01:30:35 +00003569 struct flash_file_hdr_g3 *fhdr3;
3570 struct image_hdr *img_hdr_ptr = NULL;
Ajit Khaparde84517482009-09-04 03:12:16 +00003571 struct be_dma_mem flash_cmd;
Ajit Khaparde84517482009-09-04 03:12:16 +00003572 const u8 *p;
Padmanabh Ratnakar773a2d72012-10-20 06:04:16 +00003573 int status = 0, i = 0, num_imgs = 0, ufi_type = 0;
Ajit Khaparde84517482009-09-04 03:12:16 +00003574
Padmanabh Ratnakarbe716442012-10-22 23:02:44 +00003575 flash_cmd.size = sizeof(struct be_cmd_write_flashrom);
Ivan Vecera2b7bceb2011-02-02 08:05:12 +00003576 flash_cmd.va = dma_alloc_coherent(&adapter->pdev->dev, flash_cmd.size,
3577 &flash_cmd.dma, GFP_KERNEL);
Ajit Khaparde84517482009-09-04 03:12:16 +00003578 if (!flash_cmd.va) {
3579 status = -ENOMEM;
Shripad Nunjundarao485bf562011-05-16 07:36:59 +00003580 goto be_fw_exit;
Ajit Khaparde84517482009-09-04 03:12:16 +00003581 }
3582
Padmanabh Ratnakar773a2d72012-10-20 06:04:16 +00003583 p = fw->data;
Vasundhara Volam0ad31572013-04-21 23:28:16 +00003584 fhdr3 = (struct flash_file_hdr_g3 *)p;
Padmanabh Ratnakar773a2d72012-10-20 06:04:16 +00003585
Vasundhara Volam0ad31572013-04-21 23:28:16 +00003586 ufi_type = be_get_ufi_type(adapter, fhdr3);
Padmanabh Ratnakar773a2d72012-10-20 06:04:16 +00003587
Padmanabh Ratnakar773a2d72012-10-20 06:04:16 +00003588 num_imgs = le32_to_cpu(fhdr3->num_imgs);
3589 for (i = 0; i < num_imgs; i++) {
3590 img_hdr_ptr = (struct image_hdr *)(fw->data +
3591 (sizeof(struct flash_file_hdr_g3) +
3592 i * sizeof(struct image_hdr)));
3593 if (le32_to_cpu(img_hdr_ptr->imageid) == 1) {
Vasundhara Volam0ad31572013-04-21 23:28:16 +00003594 switch (ufi_type) {
3595 case UFI_TYPE4:
Padmanabh Ratnakar773a2d72012-10-20 06:04:16 +00003596 status = be_flash_skyhawk(adapter, fw,
3597 &flash_cmd, num_imgs);
Vasundhara Volam0ad31572013-04-21 23:28:16 +00003598 break;
3599 case UFI_TYPE3R:
Sathya Perlaca34fe32012-11-06 17:48:56 +00003600 status = be_flash_BEx(adapter, fw, &flash_cmd,
3601 num_imgs);
Vasundhara Volam0ad31572013-04-21 23:28:16 +00003602 break;
3603 case UFI_TYPE3:
3604 /* Do not flash this ufi on BE3-R cards */
3605 if (adapter->asic_rev < 0x10)
3606 status = be_flash_BEx(adapter, fw,
3607 &flash_cmd,
3608 num_imgs);
3609 else {
3610 status = -1;
3611 dev_err(&adapter->pdev->dev,
3612 "Can't load BE3 UFI on BE3R\n");
3613 }
3614 }
Ajit Khaparde3f0d4562010-02-09 01:30:35 +00003615 }
Ajit Khaparde84517482009-09-04 03:12:16 +00003616 }
3617
Sathya Perlaca34fe32012-11-06 17:48:56 +00003618 if (ufi_type == UFI_TYPE2)
3619 status = be_flash_BEx(adapter, fw, &flash_cmd, 0);
Padmanabh Ratnakar773a2d72012-10-20 06:04:16 +00003620 else if (ufi_type == -1)
3621 status = -1;
3622
Ivan Vecera2b7bceb2011-02-02 08:05:12 +00003623 dma_free_coherent(&adapter->pdev->dev, flash_cmd.size, flash_cmd.va,
3624 flash_cmd.dma);
Ajit Khaparde84517482009-09-04 03:12:16 +00003625 if (status) {
3626 dev_err(&adapter->pdev->dev, "Firmware load error\n");
Shripad Nunjundarao485bf562011-05-16 07:36:59 +00003627 goto be_fw_exit;
Ajit Khaparde84517482009-09-04 03:12:16 +00003628 }
3629
André Goddard Rosaaf901ca2009-11-14 13:09:05 -02003630 dev_info(&adapter->pdev->dev, "Firmware flashed successfully\n");
Ajit Khaparde84517482009-09-04 03:12:16 +00003631
Shripad Nunjundarao485bf562011-05-16 07:36:59 +00003632be_fw_exit:
3633 return status;
3634}
3635
3636int be_load_fw(struct be_adapter *adapter, u8 *fw_file)
3637{
3638 const struct firmware *fw;
3639 int status;
3640
3641 if (!netif_running(adapter->netdev)) {
3642 dev_err(&adapter->pdev->dev,
3643 "Firmware load not allowed (interface is down)\n");
3644 return -1;
3645 }
3646
3647 status = request_firmware(&fw, fw_file, &adapter->pdev->dev);
3648 if (status)
3649 goto fw_exit;
3650
3651 dev_info(&adapter->pdev->dev, "Flashing firmware file %s\n", fw_file);
3652
3653 if (lancer_chip(adapter))
3654 status = lancer_fw_download(adapter, fw);
3655 else
3656 status = be_fw_download(adapter, fw);
3657
Ajit Khaparde84517482009-09-04 03:12:16 +00003658fw_exit:
3659 release_firmware(fw);
3660 return status;
3661}
3662
/* Netdev entry points implemented by this driver, including the SR-IOV
 * VF configuration hooks (set_vf_mac/vlan/tx_rate, get_vf_config).
 */
static const struct net_device_ops be_netdev_ops = {
	.ndo_open = be_open,
	.ndo_stop = be_close,
	.ndo_start_xmit = be_xmit,
	.ndo_set_rx_mode = be_set_rx_mode,
	.ndo_set_mac_address = be_mac_addr_set,
	.ndo_change_mtu = be_change_mtu,
	.ndo_get_stats64 = be_get_stats64,
	.ndo_validate_addr = eth_validate_addr,
	.ndo_vlan_rx_add_vid = be_vlan_add_vid,
	.ndo_vlan_rx_kill_vid = be_vlan_rem_vid,
	.ndo_set_vf_mac = be_set_vf_mac,
	.ndo_set_vf_vlan = be_set_vf_vlan,
	.ndo_set_vf_tx_rate = be_set_vf_tx_rate,
	.ndo_get_vf_config = be_get_vf_config,
#ifdef CONFIG_NET_POLL_CONTROLLER
	.ndo_poll_controller = be_netpoll,
#endif
};
3682
/* Initialize the net_device: advertise offload features (checksum, TSO,
 * VLAN tag insert/strip/filter, RSS hashing where multiple RX queues
 * exist), install the netdev/ethtool ops, and register one NAPI context
 * per event queue.
 */
static void be_netdev_init(struct net_device *netdev)
{
	struct be_adapter *adapter = netdev_priv(netdev);
	struct be_eq_obj *eqo;
	int i;

	netdev->hw_features |= NETIF_F_SG | NETIF_F_TSO | NETIF_F_TSO6 |
		NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM | NETIF_F_RXCSUM |
		NETIF_F_HW_VLAN_CTAG_TX;
	if (be_multi_rxq(adapter))
		netdev->hw_features |= NETIF_F_RXHASH;

	/* RX VLAN strip/filter are always on; not user-toggleable */
	netdev->features |= netdev->hw_features |
		NETIF_F_HW_VLAN_CTAG_RX | NETIF_F_HW_VLAN_CTAG_FILTER;

	netdev->vlan_features |= NETIF_F_SG | NETIF_F_TSO | NETIF_F_TSO6 |
		NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM;

	netdev->priv_flags |= IFF_UNICAST_FLT;

	netdev->flags |= IFF_MULTICAST;

	/* Cap GSO so the resulting frame fits the 16-bit IP length field */
	netif_set_gso_max_size(netdev, 65535 - ETH_HLEN);

	netdev->netdev_ops = &be_netdev_ops;

	SET_ETHTOOL_OPS(netdev, &be_ethtool_ops);

	for_all_evt_queues(adapter, eqo, i)
		netif_napi_add(netdev, &eqo->napi, be_poll, BE_NAPI_WEIGHT);
}
3714
3715static void be_unmap_pci_bars(struct be_adapter *adapter)
3716{
Sathya Perlac5b3ad42013-03-05 22:23:20 +00003717 if (adapter->csr)
3718 pci_iounmap(adapter->pdev, adapter->csr);
Sathya Perla8788fdc2009-07-27 22:52:03 +00003719 if (adapter->db)
Sathya Perlace66f782012-11-06 17:48:58 +00003720 pci_iounmap(adapter->pdev, adapter->db);
Parav Pandit045508a2012-03-26 14:27:13 +00003721}
3722
/* BAR number holding the doorbell region: BAR 0 on Lancer and on VFs,
 * BAR 4 otherwise.
 */
static int db_bar(struct be_adapter *adapter)
{
	return (lancer_chip(adapter) || !be_physfn(adapter)) ? 0 : 4;
}
3730
3731static int be_roce_map_pci_bars(struct be_adapter *adapter)
Parav Pandit045508a2012-03-26 14:27:13 +00003732{
Sathya Perladbf0f2a2012-11-06 17:49:00 +00003733 if (skyhawk_chip(adapter)) {
Sathya Perlace66f782012-11-06 17:48:58 +00003734 adapter->roce_db.size = 4096;
3735 adapter->roce_db.io_addr = pci_resource_start(adapter->pdev,
3736 db_bar(adapter));
3737 adapter->roce_db.total_size = pci_resource_len(adapter->pdev,
3738 db_bar(adapter));
3739 }
Parav Pandit045508a2012-03-26 14:27:13 +00003740 return 0;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07003741}
3742
3743static int be_map_pci_bars(struct be_adapter *adapter)
3744{
3745 u8 __iomem *addr;
Sathya Perlace66f782012-11-06 17:48:58 +00003746 u32 sli_intf;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07003747
Sathya Perlace66f782012-11-06 17:48:58 +00003748 pci_read_config_dword(adapter->pdev, SLI_INTF_REG_OFFSET, &sli_intf);
3749 adapter->if_type = (sli_intf & SLI_INTF_IF_TYPE_MASK) >>
3750 SLI_INTF_IF_TYPE_SHIFT;
Sathya Perlafe6d2a32010-11-21 23:25:50 +00003751
Sathya Perlac5b3ad42013-03-05 22:23:20 +00003752 if (BEx_chip(adapter) && be_physfn(adapter)) {
3753 adapter->csr = pci_iomap(adapter->pdev, 2, 0);
3754 if (adapter->csr == NULL)
3755 return -ENOMEM;
3756 }
3757
Sathya Perlace66f782012-11-06 17:48:58 +00003758 addr = pci_iomap(adapter->pdev, db_bar(adapter), 0);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07003759 if (addr == NULL)
3760 goto pci_map_err;
Sathya Perla8788fdc2009-07-27 22:52:03 +00003761 adapter->db = addr;
Sathya Perlace66f782012-11-06 17:48:58 +00003762
3763 be_roce_map_pci_bars(adapter);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07003764 return 0;
Sathya Perlace66f782012-11-06 17:48:58 +00003765
Sathya Perla6b7c5b92009-03-11 23:32:03 -07003766pci_map_err:
3767 be_unmap_pci_bars(adapter);
3768 return -ENOMEM;
3769}
3770
Sathya Perla6b7c5b92009-03-11 23:32:03 -07003771static void be_ctrl_cleanup(struct be_adapter *adapter)
3772{
Sathya Perla8788fdc2009-07-27 22:52:03 +00003773 struct be_dma_mem *mem = &adapter->mbox_mem_alloced;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07003774
3775 be_unmap_pci_bars(adapter);
3776
3777 if (mem->va)
Ivan Vecera2b7bceb2011-02-02 08:05:12 +00003778 dma_free_coherent(&adapter->pdev->dev, mem->size, mem->va,
3779 mem->dma);
Sathya Perlae7b909a2009-11-22 22:01:10 +00003780
Sathya Perla5b8821b2011-08-02 19:57:44 +00003781 mem = &adapter->rx_filter;
Sathya Perlae7b909a2009-11-22 22:01:10 +00003782 if (mem->va)
Ivan Vecera2b7bceb2011-02-02 08:05:12 +00003783 dma_free_coherent(&adapter->pdev->dev, mem->size, mem->va,
3784 mem->dma);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07003785}
3786
Sathya Perla6b7c5b92009-03-11 23:32:03 -07003787static int be_ctrl_init(struct be_adapter *adapter)
3788{
Sathya Perla8788fdc2009-07-27 22:52:03 +00003789 struct be_dma_mem *mbox_mem_alloc = &adapter->mbox_mem_alloced;
3790 struct be_dma_mem *mbox_mem_align = &adapter->mbox_mem;
Sathya Perla5b8821b2011-08-02 19:57:44 +00003791 struct be_dma_mem *rx_filter = &adapter->rx_filter;
Sathya Perlace66f782012-11-06 17:48:58 +00003792 u32 sli_intf;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07003793 int status;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07003794
Sathya Perlace66f782012-11-06 17:48:58 +00003795 pci_read_config_dword(adapter->pdev, SLI_INTF_REG_OFFSET, &sli_intf);
3796 adapter->sli_family = (sli_intf & SLI_INTF_FAMILY_MASK) >>
3797 SLI_INTF_FAMILY_SHIFT;
3798 adapter->virtfn = (sli_intf & SLI_INTF_FT_MASK) ? 1 : 0;
3799
Sathya Perla6b7c5b92009-03-11 23:32:03 -07003800 status = be_map_pci_bars(adapter);
3801 if (status)
Sathya Perlae7b909a2009-11-22 22:01:10 +00003802 goto done;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07003803
3804 mbox_mem_alloc->size = sizeof(struct be_mcc_mailbox) + 16;
Ivan Vecera2b7bceb2011-02-02 08:05:12 +00003805 mbox_mem_alloc->va = dma_alloc_coherent(&adapter->pdev->dev,
3806 mbox_mem_alloc->size,
3807 &mbox_mem_alloc->dma,
3808 GFP_KERNEL);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07003809 if (!mbox_mem_alloc->va) {
Sathya Perlae7b909a2009-11-22 22:01:10 +00003810 status = -ENOMEM;
3811 goto unmap_pci_bars;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07003812 }
3813 mbox_mem_align->size = sizeof(struct be_mcc_mailbox);
3814 mbox_mem_align->va = PTR_ALIGN(mbox_mem_alloc->va, 16);
3815 mbox_mem_align->dma = PTR_ALIGN(mbox_mem_alloc->dma, 16);
3816 memset(mbox_mem_align->va, 0, sizeof(struct be_mcc_mailbox));
Sathya Perlae7b909a2009-11-22 22:01:10 +00003817
Sathya Perla5b8821b2011-08-02 19:57:44 +00003818 rx_filter->size = sizeof(struct be_cmd_req_rx_filter);
3819 rx_filter->va = dma_alloc_coherent(&adapter->pdev->dev, rx_filter->size,
Joe Perches1f9061d22013-03-15 07:23:58 +00003820 &rx_filter->dma,
3821 GFP_KERNEL | __GFP_ZERO);
Sathya Perla5b8821b2011-08-02 19:57:44 +00003822 if (rx_filter->va == NULL) {
Sathya Perlae7b909a2009-11-22 22:01:10 +00003823 status = -ENOMEM;
3824 goto free_mbox;
3825 }
Joe Perches1f9061d22013-03-15 07:23:58 +00003826
Ivan Vecera29849612010-12-14 05:43:19 +00003827 mutex_init(&adapter->mbox_lock);
Sathya Perla8788fdc2009-07-27 22:52:03 +00003828 spin_lock_init(&adapter->mcc_lock);
3829 spin_lock_init(&adapter->mcc_cq_lock);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07003830
Sarveshwar Bandidd131e72010-05-25 16:16:32 -07003831 init_completion(&adapter->flash_compl);
Sathya Perlacf588472010-02-14 21:22:01 +00003832 pci_save_state(adapter->pdev);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07003833 return 0;
Sathya Perlae7b909a2009-11-22 22:01:10 +00003834
3835free_mbox:
Ivan Vecera2b7bceb2011-02-02 08:05:12 +00003836 dma_free_coherent(&adapter->pdev->dev, mbox_mem_alloc->size,
3837 mbox_mem_alloc->va, mbox_mem_alloc->dma);
Sathya Perlae7b909a2009-11-22 22:01:10 +00003838
3839unmap_pci_bars:
3840 be_unmap_pci_bars(adapter);
3841
3842done:
3843 return status;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07003844}
3845
3846static void be_stats_cleanup(struct be_adapter *adapter)
3847{
Sathya Perla3abcded2010-10-03 22:12:27 -07003848 struct be_dma_mem *cmd = &adapter->stats_cmd;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07003849
3850 if (cmd->va)
Ivan Vecera2b7bceb2011-02-02 08:05:12 +00003851 dma_free_coherent(&adapter->pdev->dev, cmd->size,
3852 cmd->va, cmd->dma);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07003853}
3854
3855static int be_stats_init(struct be_adapter *adapter)
3856{
Sathya Perla3abcded2010-10-03 22:12:27 -07003857 struct be_dma_mem *cmd = &adapter->stats_cmd;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07003858
Sathya Perlaca34fe32012-11-06 17:48:56 +00003859 if (lancer_chip(adapter))
3860 cmd->size = sizeof(struct lancer_cmd_req_pport_stats);
3861 else if (BE2_chip(adapter))
Ajit Khaparde89a88ab2011-05-16 07:36:18 +00003862 cmd->size = sizeof(struct be_cmd_req_get_stats_v0);
Sathya Perlaca34fe32012-11-06 17:48:56 +00003863 else
3864 /* BE3 and Skyhawk */
3865 cmd->size = sizeof(struct be_cmd_req_get_stats_v1);
3866
Ivan Vecera2b7bceb2011-02-02 08:05:12 +00003867 cmd->va = dma_alloc_coherent(&adapter->pdev->dev, cmd->size, &cmd->dma,
Joe Perches1f9061d22013-03-15 07:23:58 +00003868 GFP_KERNEL | __GFP_ZERO);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07003869 if (cmd->va == NULL)
3870 return -1;
3871 return 0;
3872}
3873
/* PCI remove callback: tear down the adapter in reverse order of probe.
 * Ordering matters: the recovery worker is cancelled before the netdev is
 * unregistered, and the FW is told we are done before freeing resources.
 */
static void be_remove(struct pci_dev *pdev)
{
	struct be_adapter *adapter = pci_get_drvdata(pdev);

	if (!adapter)
		return;

	be_roce_dev_remove(adapter);
	be_intr_set(adapter, false);

	/* No new recovery work may run once the teardown has started */
	cancel_delayed_work_sync(&adapter->func_recovery_work);

	unregister_netdev(adapter->netdev);

	be_clear(adapter);

	/* tell fw we're done with firing cmds */
	be_cmd_fw_clean(adapter);

	be_stats_cleanup(adapter);

	be_ctrl_cleanup(adapter);

	pci_disable_pcie_error_reporting(pdev);

	pci_set_drvdata(pdev, NULL);
	pci_release_regions(pdev);
	pci_disable_device(pdev);

	free_netdev(adapter->netdev);
}
3905
Ajit Khaparde4762f6c2012-03-18 06:23:11 +00003906bool be_is_wol_supported(struct be_adapter *adapter)
3907{
3908 return ((adapter->wol_cap & BE_WOL_CAP) &&
3909 !be_is_wol_excluded(adapter)) ? true : false;
3910}
3911
Somnath Kotur941a77d2012-05-17 22:59:03 +00003912u32 be_get_fw_log_level(struct be_adapter *adapter)
3913{
3914 struct be_dma_mem extfat_cmd;
3915 struct be_fat_conf_params *cfgs;
3916 int status;
3917 u32 level = 0;
3918 int j;
3919
Padmanabh Ratnakarf25b1192012-10-20 06:02:52 +00003920 if (lancer_chip(adapter))
3921 return 0;
3922
Somnath Kotur941a77d2012-05-17 22:59:03 +00003923 memset(&extfat_cmd, 0, sizeof(struct be_dma_mem));
3924 extfat_cmd.size = sizeof(struct be_cmd_resp_get_ext_fat_caps);
3925 extfat_cmd.va = pci_alloc_consistent(adapter->pdev, extfat_cmd.size,
3926 &extfat_cmd.dma);
3927
3928 if (!extfat_cmd.va) {
3929 dev_err(&adapter->pdev->dev, "%s: Memory allocation failure\n",
3930 __func__);
3931 goto err;
3932 }
3933
3934 status = be_cmd_get_ext_fat_capabilites(adapter, &extfat_cmd);
3935 if (!status) {
3936 cfgs = (struct be_fat_conf_params *)(extfat_cmd.va +
3937 sizeof(struct be_cmd_resp_hdr));
Anton Blanchardac46a462012-07-24 15:05:25 +00003938 for (j = 0; j < le32_to_cpu(cfgs->module[0].num_modes); j++) {
Somnath Kotur941a77d2012-05-17 22:59:03 +00003939 if (cfgs->module[0].trace_lvl[j].mode == MODE_UART)
3940 level = cfgs->module[0].trace_lvl[j].dbg_lvl;
3941 }
3942 }
3943 pci_free_consistent(adapter->pdev, extfat_cmd.size, extfat_cmd.va,
3944 extfat_cmd.dma);
3945err:
3946 return level;
3947}
Padmanabh Ratnakarabb93952012-10-20 06:01:41 +00003948
Sathya Perla39f1d942012-05-08 19:41:24 +00003949static int be_get_initial_config(struct be_adapter *adapter)
Sathya Perla6b7c5b92009-03-11 23:32:03 -07003950{
Sathya Perla6b7c5b92009-03-11 23:32:03 -07003951 int status;
Somnath Kotur941a77d2012-05-17 22:59:03 +00003952 u32 level;
Sathya Perla43a04fdc2009-10-14 20:21:17 +00003953
Ajit Khaparde9e1453c2011-02-20 11:42:22 +00003954 status = be_cmd_get_cntl_attributes(adapter);
3955 if (status)
3956 return status;
3957
Ajit Khaparde4762f6c2012-03-18 06:23:11 +00003958 status = be_cmd_get_acpi_wol_cap(adapter);
3959 if (status) {
3960 /* in case of a failure to get wol capabillities
3961 * check the exclusion list to determine WOL capability */
3962 if (!be_is_wol_excluded(adapter))
3963 adapter->wol_cap |= BE_WOL_CAP;
3964 }
3965
3966 if (be_is_wol_supported(adapter))
3967 adapter->wol = true;
3968
Padmanabh Ratnakar7aeb2152012-07-12 03:56:46 +00003969 /* Must be a power of 2 or else MODULO will BUG_ON */
3970 adapter->be_get_temp_freq = 64;
3971
Somnath Kotur941a77d2012-05-17 22:59:03 +00003972 level = be_get_fw_log_level(adapter);
3973 adapter->msg_enable = level <= FW_LOG_LEVEL_DEFAULT ? NETIF_MSG_HW : 0;
3974
Sathya Perla2243e2e2009-11-22 22:02:03 +00003975 return 0;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07003976}
3977
/* Recover a Lancer adapter after a SLIPORT error: wait for the FW to
 * report ready, tear down all driver state, rebuild it with be_setup(),
 * and reopen the interface if it was running.  Returns 0 on success.
 */
static int lancer_recover_func(struct be_adapter *adapter)
{
	int status;

	status = lancer_test_and_set_rdy_state(adapter);
	if (status)
		goto err;

	if (netif_running(adapter->netdev))
		be_close(adapter->netdev);

	be_clear(adapter);

	/* Error state is cleared so FW commands may be issued again */
	adapter->hw_error = false;
	adapter->fw_timeout = false;

	status = be_setup(adapter);
	if (status)
		goto err;

	if (netif_running(adapter->netdev)) {
		status = be_open(adapter->netdev);
		if (status)
			goto err;
	}

	dev_err(&adapter->pdev->dev,
		"Adapter SLIPORT recovery succeeded\n");
	return 0;
err:
	/* NOTE(review): failure is logged only when an EEH error is also
	 * pending — presumably to avoid log spam while recovery keeps
	 * being retried by the worker; confirm intent.
	 */
	if (adapter->eeh_error)
		dev_err(&adapter->pdev->dev,
			"Adapter SLIPORT recovery failed\n");

	return status;
}
4014
/* Delayed-work handler (re-armed every 1s): check for adapter errors and,
 * on Lancer, attempt SLIPORT recovery with the netdev detached.
 */
static void be_func_recovery_task(struct work_struct *work)
{
	struct be_adapter *adapter =
		container_of(work, struct be_adapter, func_recovery_work.work);
	int status;

	be_detect_error(adapter);

	if (adapter->hw_error && lancer_chip(adapter)) {

		/* EEH (PCI error) handling owns the device; stand back */
		if (adapter->eeh_error)
			goto out;

		/* rtnl guards the detach against concurrent netdev ops */
		rtnl_lock();
		netif_device_detach(adapter->netdev);
		rtnl_unlock();

		status = lancer_recover_func(adapter);

		if (!status)
			netif_device_attach(adapter->netdev);
	}

out:
	schedule_delayed_work(&adapter->func_recovery_work,
			      msecs_to_jiffies(1000));
}
4042
/* Periodic (1s) housekeeping worker: reap MCC completions, refresh FW
 * stats and die temperature, replenish starved RX rings, and adapt the
 * EQ interrupt-moderation delay.
 */
static void be_worker(struct work_struct *work)
{
	struct be_adapter *adapter =
		container_of(work, struct be_adapter, work.work);
	struct be_rx_obj *rxo;
	struct be_eq_obj *eqo;
	int i;

	/* when interrupts are not yet enabled, just reap any pending
	 * mcc completions */
	if (!netif_running(adapter->netdev)) {
		local_bh_disable();
		be_process_mcc(adapter);
		local_bh_enable();
		goto reschedule;
	}

	if (!adapter->stats_cmd_sent) {
		if (lancer_chip(adapter))
			lancer_cmd_get_pport_stats(adapter,
				&adapter->stats_cmd);
		else
			be_cmd_get_stats(adapter, &adapter->stats_cmd);
	}

	/* Temperature is polled only every be_get_temp_freq ticks */
	if (MODULO(adapter->work_counter, adapter->be_get_temp_freq) == 0)
		be_cmd_get_die_temperature(adapter);

	/* Repost buffers to RX rings that ran dry */
	for_all_rx_queues(adapter, rxo, i) {
		if (rxo->rx_post_starved) {
			rxo->rx_post_starved = false;
			be_post_rx_frags(rxo, GFP_KERNEL);
		}
	}

	for_all_evt_queues(adapter, eqo, i)
		be_eqd_update(adapter, eqo);

reschedule:
	adapter->work_counter++;
	schedule_delayed_work(&adapter->work, msecs_to_jiffies(1000));
}
4085
Sathya Perla39f1d942012-05-08 19:41:24 +00004086static bool be_reset_required(struct be_adapter *adapter)
4087{
Sathya Perlad79c0a22012-06-05 19:37:22 +00004088 return be_find_vfs(adapter, ENABLED) > 0 ? false : true;
Sathya Perla39f1d942012-05-08 19:41:24 +00004089}
4090
Sathya Perlad3791422012-09-28 04:39:44 +00004091static char *mc_name(struct be_adapter *adapter)
4092{
4093 if (adapter->function_mode & FLEX10_MODE)
4094 return "FLEX10";
4095 else if (adapter->function_mode & VNIC_MODE)
4096 return "vNIC";
4097 else if (adapter->function_mode & UMC_ENABLED)
4098 return "UMC";
4099 else
4100 return "";
4101}
4102
/* Printable label for the PCI function type: physical or virtual. */
static inline char *func_name(struct be_adapter *adapter)
{
	if (be_physfn(adapter))
		return "PF";
	return "VF";
}
4107
/* PCI probe routine: bring up one adapter function.
 *
 * Sequence: enable the PCI device and claim its regions, allocate the
 * net_device, configure DMA masks, initialize the control/mailbox path,
 * sync with firmware, reset the function if required, set up stats and
 * HW resources, and finally register the netdev.  Any failure unwinds
 * in reverse order through the goto labels at the bottom.
 */
static int be_probe(struct pci_dev *pdev, const struct pci_device_id *pdev_id)
{
	int status = 0;
	struct be_adapter *adapter;
	struct net_device *netdev;
	char port_name;	/* single-char port id filled by be_cmd_query_port_name() */

	status = pci_enable_device(pdev);
	if (status)
		goto do_none;

	status = pci_request_regions(pdev, DRV_NAME);
	if (status)
		goto disable_dev;
	pci_set_master(pdev);

	netdev = alloc_etherdev_mqs(sizeof(*adapter), MAX_TX_QS, MAX_RX_QS);
	if (netdev == NULL) {
		status = -ENOMEM;
		goto rel_reg;
	}
	adapter = netdev_priv(netdev);
	adapter->pdev = pdev;
	pci_set_drvdata(pdev, adapter);
	adapter->netdev = netdev;
	SET_NETDEV_DEV(netdev, &pdev->dev);

	/* Prefer 64-bit DMA; fall back to a 32-bit mask if unavailable */
	status = dma_set_mask(&pdev->dev, DMA_BIT_MASK(64));
	if (!status) {
		status = dma_set_coherent_mask(&pdev->dev, DMA_BIT_MASK(64));
		if (status < 0) {
			dev_err(&pdev->dev, "dma_set_coherent_mask failed\n");
			goto free_netdev;
		}
		netdev->features |= NETIF_F_HIGHDMA;
	} else {
		status = dma_set_mask(&pdev->dev, DMA_BIT_MASK(32));
		if (status) {
			dev_err(&pdev->dev, "Could not set PCI DMA Mask\n");
			goto free_netdev;
		}
	}

	/* AER is optional: log the failure but continue probing */
	status = pci_enable_pcie_error_reporting(pdev);
	if (status)
		dev_err(&pdev->dev, "Could not use PCIe error reporting\n");

	status = be_ctrl_init(adapter);
	if (status)
		goto free_netdev;

	/* sync up with fw's ready state */
	if (be_physfn(adapter)) {
		status = be_fw_wait_ready(adapter);
		if (status)
			goto ctrl_clean;
	}

	/* tell fw we're ready to fire cmds */
	status = be_cmd_fw_init(adapter);
	if (status)
		goto ctrl_clean;

	/* Skip the function reset when VFs are already enabled (see
	 * be_reset_required()) */
	if (be_reset_required(adapter)) {
		status = be_cmd_reset_function(adapter);
		if (status)
			goto ctrl_clean;
	}

	/* Wait for interrupts to quiesce after an FLR */
	msleep(100);

	/* Allow interrupts for other ULPs running on NIC function */
	be_intr_set(adapter, true);

	status = be_stats_init(adapter);
	if (status)
		goto ctrl_clean;

	status = be_get_initial_config(adapter);
	if (status)
		goto stats_clean;

	INIT_DELAYED_WORK(&adapter->work, be_worker);
	INIT_DELAYED_WORK(&adapter->func_recovery_work, be_func_recovery_task);
	adapter->rx_fc = adapter->tx_fc = true;	/* flow control on by default */

	status = be_setup(adapter);
	if (status)
		goto stats_clean;

	be_netdev_init(netdev);
	status = register_netdev(netdev);
	if (status != 0)
		goto unsetup;

	be_roce_dev_add(adapter);

	/* Kick off the periodic function-recovery task */
	schedule_delayed_work(&adapter->func_recovery_work,
			      msecs_to_jiffies(1000));

	be_cmd_query_port_name(adapter, &port_name);

	dev_info(&pdev->dev, "%s: %s %s port %c\n", nic_name(pdev),
		 func_name(adapter), mc_name(adapter), port_name);

	return 0;

	/* Error unwind: each label releases what was acquired above it */
unsetup:
	be_clear(adapter);
stats_clean:
	be_stats_cleanup(adapter);
ctrl_clean:
	be_ctrl_cleanup(adapter);
free_netdev:
	free_netdev(netdev);
	pci_set_drvdata(pdev, NULL);
rel_reg:
	pci_release_regions(pdev);
disable_dev:
	pci_disable_device(pdev);
do_none:
	dev_err(&pdev->dev, "%s initialization failed\n", nic_name(pdev));
	return status;
}
4233
/* Legacy PM suspend hook: quiesce the interface and power the device
 * down.  Arms wake-on-LAN in FW first (if configured), stops the
 * recovery worker, closes the netdev and releases HW resources before
 * cutting power.
 */
static int be_suspend(struct pci_dev *pdev, pm_message_t state)
{
	struct be_adapter *adapter = pci_get_drvdata(pdev);
	struct net_device *netdev = adapter->netdev;

	if (adapter->wol)
		be_setup_wol(adapter, true);

	cancel_delayed_work_sync(&adapter->func_recovery_work);

	netif_device_detach(netdev);
	if (netif_running(netdev)) {
		/* be_close() must run under rtnl like the ndo_stop path */
		rtnl_lock();
		be_close(netdev);
		rtnl_unlock();
	}
	be_clear(adapter);

	pci_save_state(pdev);
	pci_disable_device(pdev);
	pci_set_power_state(pdev, pci_choose_state(pdev, state));
	return 0;
}
4257
4258static int be_resume(struct pci_dev *pdev)
4259{
4260 int status = 0;
4261 struct be_adapter *adapter = pci_get_drvdata(pdev);
4262 struct net_device *netdev = adapter->netdev;
4263
4264 netif_device_detach(netdev);
4265
4266 status = pci_enable_device(pdev);
4267 if (status)
4268 return status;
4269
4270 pci_set_power_state(pdev, 0);
4271 pci_restore_state(pdev);
4272
Sathya Perla2243e2e2009-11-22 22:02:03 +00004273 /* tell fw we're ready to fire cmds */
4274 status = be_cmd_fw_init(adapter);
4275 if (status)
4276 return status;
4277
Sarveshwar Bandi9b0365f2009-08-12 21:01:29 +00004278 be_setup(adapter);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07004279 if (netif_running(netdev)) {
4280 rtnl_lock();
Sathya Perla6b7c5b92009-03-11 23:32:03 -07004281 be_open(netdev);
4282 rtnl_unlock();
4283 }
Padmanabh Ratnakarf67ef7b2012-07-12 03:57:09 +00004284
4285 schedule_delayed_work(&adapter->func_recovery_work,
4286 msecs_to_jiffies(1000));
Sathya Perla6b7c5b92009-03-11 23:32:03 -07004287 netif_device_attach(netdev);
Ajit Khaparde71d8d1b2009-12-03 06:16:59 +00004288
4289 if (adapter->wol)
4290 be_setup_wol(adapter, false);
Ajit Khapardea4ca0552011-02-11 13:38:03 +00004291
Sathya Perla6b7c5b92009-03-11 23:32:03 -07004292 return 0;
4293}
4294
Sathya Perla82456b02010-02-17 01:35:37 +00004295/*
4296 * An FLR will stop BE from DMAing any data.
4297 */
4298static void be_shutdown(struct pci_dev *pdev)
4299{
4300 struct be_adapter *adapter = pci_get_drvdata(pdev);
Sathya Perla82456b02010-02-17 01:35:37 +00004301
Ajit Khaparde2d5d4152011-04-06 05:53:13 +00004302 if (!adapter)
4303 return;
Sathya Perla82456b02010-02-17 01:35:37 +00004304
Sathya Perla0f4a6822011-03-21 20:49:28 +00004305 cancel_delayed_work_sync(&adapter->work);
Padmanabh Ratnakarf67ef7b2012-07-12 03:57:09 +00004306 cancel_delayed_work_sync(&adapter->func_recovery_work);
Ajit Khapardea4ca0552011-02-11 13:38:03 +00004307
Ajit Khaparde2d5d4152011-04-06 05:53:13 +00004308 netif_device_detach(adapter->netdev);
Sathya Perla82456b02010-02-17 01:35:37 +00004309
Ajit Khaparde57841862011-04-06 18:08:43 +00004310 be_cmd_reset_function(adapter);
4311
Sathya Perla82456b02010-02-17 01:35:37 +00004312 pci_disable_device(pdev);
Sathya Perla82456b02010-02-17 01:35:37 +00004313}
4314
/* EEH callback: a PCI channel error was reported by the platform.
 * Marks the adapter errored, quiesces the interface and tears down HW
 * resources, then tells the PCI core whether a slot reset can recover
 * the device.
 */
static pci_ers_result_t be_eeh_err_detected(struct pci_dev *pdev,
				pci_channel_state_t state)
{
	struct be_adapter *adapter = pci_get_drvdata(pdev);
	struct net_device *netdev =  adapter->netdev;

	dev_err(&adapter->pdev->dev, "EEH error detected\n");

	adapter->eeh_error = true;

	/* Stop the recovery worker so it doesn't race with this teardown */
	cancel_delayed_work_sync(&adapter->func_recovery_work);

	rtnl_lock();
	netif_device_detach(netdev);
	rtnl_unlock();

	if (netif_running(netdev)) {
		rtnl_lock();
		be_close(netdev);
		rtnl_unlock();
	}
	be_clear(adapter);

	/* Permanent failure: no point attempting a reset */
	if (state == pci_channel_io_perm_failure)
		return PCI_ERS_RESULT_DISCONNECT;

	pci_disable_device(pdev);

	/* The error could cause the FW to trigger a flash debug dump.
	 * Resetting the card while flash dump is in progress
	 * can cause it not to recover; wait for it to finish.
	 * Wait only for first function as it is needed only once per
	 * adapter.
	 */
	if (pdev->devfn == 0)
		ssleep(30);

	return PCI_ERS_RESULT_NEED_RESET;
}
4354
/* EEH callback: the slot has been reset.  Clear the driver's recorded
 * error state, re-enable the device, restore PCI config state, and wait
 * for firmware to become ready before declaring recovery possible.
 */
static pci_ers_result_t be_eeh_reset(struct pci_dev *pdev)
{
	struct be_adapter *adapter = pci_get_drvdata(pdev);
	int status;

	dev_info(&adapter->pdev->dev, "EEH reset\n");
	be_clear_all_error(adapter);

	status = pci_enable_device(pdev);
	if (status)
		return PCI_ERS_RESULT_DISCONNECT;

	pci_set_master(pdev);
	pci_set_power_state(pdev, 0);
	pci_restore_state(pdev);

	/* Check if card is ok and fw is ready */
	dev_info(&adapter->pdev->dev,
		 "Waiting for FW to be ready after EEH reset\n");
	status = be_fw_wait_ready(adapter);
	if (status)
		return PCI_ERS_RESULT_DISCONNECT;

	/* Clear any logged uncorrectable AER status before resuming */
	pci_cleanup_aer_uncorrect_error_status(pdev);
	return PCI_ERS_RESULT_RECOVERED;
}
4381
/* EEH callback: recovery succeeded, resume normal operation.  Re-inits
 * firmware, resets the function, rebuilds HW resources and re-opens the
 * interface if it was running.  On any failure only an error is logged;
 * the device is left detached.
 */
static void be_eeh_resume(struct pci_dev *pdev)
{
	int status = 0;
	struct be_adapter *adapter = pci_get_drvdata(pdev);
	struct net_device *netdev =  adapter->netdev;

	dev_info(&adapter->pdev->dev, "EEH resume\n");

	pci_save_state(pdev);

	/* tell fw we're ready to fire cmds */
	status = be_cmd_fw_init(adapter);
	if (status)
		goto err;

	status = be_cmd_reset_function(adapter);
	if (status)
		goto err;

	status = be_setup(adapter);
	if (status)
		goto err;

	if (netif_running(netdev)) {
		status = be_open(netdev);
		if (status)
			goto err;
	}

	/* Restart the periodic function-recovery task */
	schedule_delayed_work(&adapter->func_recovery_work,
			      msecs_to_jiffies(1000));
	netif_device_attach(netdev);
	return;
err:
	dev_err(&adapter->pdev->dev, "EEH resume failed\n");
}
4418
/* PCI EEH (error recovery) callbacks invoked by the PCI core to detect,
 * reset and resume after platform-reported PCI channel errors.
 */
static const struct pci_error_handlers be_eeh_handlers = {
	.error_detected = be_eeh_err_detected,
	.slot_reset = be_eeh_reset,
	.resume = be_eeh_resume,
};
4424
/* PCI driver registration: probe/remove, legacy power management,
 * shutdown and EEH error handling entry points for this driver.
 */
static struct pci_driver be_driver = {
	.name = DRV_NAME,
	.id_table = be_dev_ids,
	.probe = be_probe,
	.remove = be_remove,
	.suspend = be_suspend,
	.resume = be_resume,
	.shutdown = be_shutdown,
	.err_handler = &be_eeh_handlers
};
4435
4436static int __init be_init_module(void)
4437{
Joe Perches8e95a202009-12-03 07:58:21 +00004438 if (rx_frag_size != 8192 && rx_frag_size != 4096 &&
4439 rx_frag_size != 2048) {
Sathya Perla6b7c5b92009-03-11 23:32:03 -07004440 printk(KERN_WARNING DRV_NAME
4441 " : Module param rx_frag_size must be 2048/4096/8192."
4442 " Using 2048\n");
4443 rx_frag_size = 2048;
4444 }
Sathya Perla6b7c5b92009-03-11 23:32:03 -07004445
Sathya Perla6b7c5b92009-03-11 23:32:03 -07004446 return pci_register_driver(&be_driver);
4447}
4448module_init(be_init_module);
4449
/* Module exit point: unregister the PCI driver. */
static void __exit be_exit_module(void)
{
	pci_unregister_driver(&be_driver);
}
module_exit(be_exit_module);