blob: 1c734915933f4c77c8b76eedaa800bfde2bd6e6b [file] [log] [blame]
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001/*
Vasundhara Volamc7bb15a2013-03-06 20:05:05 +00002 * Copyright (C) 2005 - 2013 Emulex
Sathya Perla6b7c5b92009-03-11 23:32:03 -07003 * All rights reserved.
4 *
5 * This program is free software; you can redistribute it and/or
6 * modify it under the terms of the GNU General Public License version 2
7 * as published by the Free Software Foundation. The full GNU General
8 * Public License is included in this distribution in the file called COPYING.
9 *
10 * Contact Information:
Ajit Khaparded2145cd2011-03-16 08:20:46 +000011 * linux-drivers@emulex.com
Sathya Perla6b7c5b92009-03-11 23:32:03 -070012 *
Ajit Khaparded2145cd2011-03-16 08:20:46 +000013 * Emulex
14 * 3333 Susan Street
15 * Costa Mesa, CA 92626
Sathya Perla6b7c5b92009-03-11 23:32:03 -070016 */
17
Paul Gortmaker70c71602011-05-22 16:47:17 -040018#include <linux/prefetch.h>
Paul Gortmaker9d9779e2011-07-03 15:21:01 -040019#include <linux/module.h>
Sathya Perla6b7c5b92009-03-11 23:32:03 -070020#include "be.h"
Sathya Perla8788fdc2009-07-27 22:52:03 +000021#include "be_cmds.h"
Stephen Hemminger65f71b82009-03-27 00:25:24 -070022#include <asm/div64.h>
Sathya Perlad6b6d982012-09-05 01:56:48 +000023#include <linux/aer.h>
Sathya Perla6b7c5b92009-03-11 23:32:03 -070024
25MODULE_VERSION(DRV_VER);
26MODULE_DEVICE_TABLE(pci, be_dev_ids);
27MODULE_DESCRIPTION(DRV_DESC " " DRV_VER);
Sarveshwar Bandi00d3d512013-01-28 04:17:01 +000028MODULE_AUTHOR("Emulex Corporation");
Sathya Perla6b7c5b92009-03-11 23:32:03 -070029MODULE_LICENSE("GPL");
30
Sarveshwar Bandiba343c72010-03-31 02:56:12 +000031static unsigned int num_vfs;
Sarveshwar Bandiba343c72010-03-31 02:56:12 +000032module_param(num_vfs, uint, S_IRUGO);
Sarveshwar Bandiba343c72010-03-31 02:56:12 +000033MODULE_PARM_DESC(num_vfs, "Number of PCI VFs to initialize");
Sathya Perla6b7c5b92009-03-11 23:32:03 -070034
Sathya Perla11ac75e2011-12-13 00:58:50 +000035static ushort rx_frag_size = 2048;
36module_param(rx_frag_size, ushort, S_IRUGO);
37MODULE_PARM_DESC(rx_frag_size, "Size of a fragment that holds rcvd data.");
38
Sathya Perla6b7c5b92009-03-11 23:32:03 -070039static DEFINE_PCI_DEVICE_TABLE(be_dev_ids) = {
Ajit Khapardec4ca2372009-05-18 15:38:55 -070040 { PCI_DEVICE(BE_VENDOR_ID, BE_DEVICE_ID1) },
Ajit Khaparde59fd5d82009-10-29 01:11:06 -070041 { PCI_DEVICE(BE_VENDOR_ID, BE_DEVICE_ID2) },
Ajit Khapardec4ca2372009-05-18 15:38:55 -070042 { PCI_DEVICE(BE_VENDOR_ID, OC_DEVICE_ID1) },
43 { PCI_DEVICE(BE_VENDOR_ID, OC_DEVICE_ID2) },
Sathya Perlafe6d2a32010-11-21 23:25:50 +000044 { PCI_DEVICE(EMULEX_VENDOR_ID, OC_DEVICE_ID3)},
Mammatha Edhala12f4d0a2011-05-18 03:26:22 +000045 { PCI_DEVICE(EMULEX_VENDOR_ID, OC_DEVICE_ID4)},
Ajit Khapardeecedb6a2011-12-15 06:31:38 +000046 { PCI_DEVICE(EMULEX_VENDOR_ID, OC_DEVICE_ID5)},
Padmanabh Ratnakar76b73532012-10-20 06:04:40 +000047 { PCI_DEVICE(EMULEX_VENDOR_ID, OC_DEVICE_ID6)},
Sathya Perla6b7c5b92009-03-11 23:32:03 -070048 { 0 }
49};
50MODULE_DEVICE_TABLE(pci, be_dev_ids);
Ajit Khaparde7c185272010-07-29 06:16:33 +000051/* UE Status Low CSR */
Joe Perches42c8b112011-07-09 02:56:56 -070052static const char * const ue_status_low_desc[] = {
Ajit Khaparde7c185272010-07-29 06:16:33 +000053 "CEV",
54 "CTX",
55 "DBUF",
56 "ERX",
57 "Host",
58 "MPU",
59 "NDMA",
60 "PTC ",
61 "RDMA ",
62 "RXF ",
63 "RXIPS ",
64 "RXULP0 ",
65 "RXULP1 ",
66 "RXULP2 ",
67 "TIM ",
68 "TPOST ",
69 "TPRE ",
70 "TXIPS ",
71 "TXULP0 ",
72 "TXULP1 ",
73 "UC ",
74 "WDMA ",
75 "TXULP2 ",
76 "HOST1 ",
77 "P0_OB_LINK ",
78 "P1_OB_LINK ",
79 "HOST_GPIO ",
80 "MBOX ",
81 "AXGMAC0",
82 "AXGMAC1",
83 "JTAG",
84 "MPU_INTPEND"
85};
/* UE Status High CSR: name for each bit position (upper 32 error bits) */
static const char * const ue_status_hi_desc[] = {
	"LPCMEMHOST",
	"MGMT_MAC",
	"PCS0ONLINE",
	"MPU_IRAM",
	"PCS1ONLINE",
	"PCTL0",
	"PCTL1",
	"PMEM",
	"RR",
	"TXPB",
	"RXPP",
	"XAUI",
	"TXP",
	"ARM",
	"IPC",
	"HOST2",
	"HOST3",
	"HOST4",
	"HOST5",
	"HOST6",
	"HOST7",
	"HOST8",
	"HOST9",
	"NETC",
	"Unknown",
	"Unknown",
	"Unknown",
	"Unknown",
	"Unknown",
	"Unknown",
	"Unknown",
	"Unknown"
};
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700121
Sathya Perla752961a2011-10-24 02:45:03 +0000122/* Is BE in a multi-channel mode */
123static inline bool be_is_mc(struct be_adapter *adapter) {
124 return (adapter->function_mode & FLEX10_MODE ||
125 adapter->function_mode & VNIC_MODE ||
126 adapter->function_mode & UMC_ENABLED);
127}
128
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700129static void be_queue_free(struct be_adapter *adapter, struct be_queue_info *q)
130{
131 struct be_dma_mem *mem = &q->dma_mem;
Sathya Perla1cfafab2012-02-23 18:50:15 +0000132 if (mem->va) {
Ivan Vecera2b7bceb2011-02-02 08:05:12 +0000133 dma_free_coherent(&adapter->pdev->dev, mem->size, mem->va,
134 mem->dma);
Sathya Perla1cfafab2012-02-23 18:50:15 +0000135 mem->va = NULL;
136 }
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700137}
138
139static int be_queue_alloc(struct be_adapter *adapter, struct be_queue_info *q,
140 u16 len, u16 entry_size)
141{
142 struct be_dma_mem *mem = &q->dma_mem;
143
144 memset(q, 0, sizeof(*q));
145 q->len = len;
146 q->entry_size = entry_size;
147 mem->size = len * entry_size;
Ivan Vecera2b7bceb2011-02-02 08:05:12 +0000148 mem->va = dma_alloc_coherent(&adapter->pdev->dev, mem->size, &mem->dma,
Joe Perches1f9061d22013-03-15 07:23:58 +0000149 GFP_KERNEL | __GFP_ZERO);
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700150 if (!mem->va)
Sathya Perla10ef9ab2012-02-09 18:05:27 +0000151 return -ENOMEM;
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700152 return 0;
153}
154
Somnath Kotur68c45a22013-03-14 02:42:07 +0000155static void be_reg_intr_set(struct be_adapter *adapter, bool enable)
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700156{
Sathya Perladb3ea782011-08-22 19:41:52 +0000157 u32 reg, enabled;
Sathya Perla5f0b8492009-07-27 22:52:56 +0000158
Sathya Perladb3ea782011-08-22 19:41:52 +0000159 pci_read_config_dword(adapter->pdev, PCICFG_MEMBAR_CTRL_INT_CTRL_OFFSET,
160 &reg);
161 enabled = reg & MEMBAR_CTRL_INT_CTRL_HOSTINTR_MASK;
162
Sathya Perla5f0b8492009-07-27 22:52:56 +0000163 if (!enabled && enable)
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700164 reg |= MEMBAR_CTRL_INT_CTRL_HOSTINTR_MASK;
Sathya Perla5f0b8492009-07-27 22:52:56 +0000165 else if (enabled && !enable)
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700166 reg &= ~MEMBAR_CTRL_INT_CTRL_HOSTINTR_MASK;
Sathya Perla5f0b8492009-07-27 22:52:56 +0000167 else
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700168 return;
Sathya Perla5f0b8492009-07-27 22:52:56 +0000169
Sathya Perladb3ea782011-08-22 19:41:52 +0000170 pci_write_config_dword(adapter->pdev,
171 PCICFG_MEMBAR_CTRL_INT_CTRL_OFFSET, reg);
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700172}
173
Somnath Kotur68c45a22013-03-14 02:42:07 +0000174static void be_intr_set(struct be_adapter *adapter, bool enable)
175{
176 int status = 0;
177
178 /* On lancer interrupts can't be controlled via this register */
179 if (lancer_chip(adapter))
180 return;
181
182 if (adapter->eeh_error)
183 return;
184
185 status = be_cmd_intr_set(adapter, enable);
186 if (status)
187 be_reg_intr_set(adapter, enable);
188}
189
Sathya Perla8788fdc2009-07-27 22:52:03 +0000190static void be_rxq_notify(struct be_adapter *adapter, u16 qid, u16 posted)
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700191{
192 u32 val = 0;
193 val |= qid & DB_RQ_RING_ID_MASK;
194 val |= posted << DB_RQ_NUM_POSTED_SHIFT;
Sathya Perlaf3eb62d2010-06-29 00:11:17 +0000195
196 wmb();
Sathya Perla8788fdc2009-07-27 22:52:03 +0000197 iowrite32(val, adapter->db + DB_RQ_OFFSET);
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700198}
199
Vasundhara Volam94d73aa2013-04-21 23:28:14 +0000200static void be_txq_notify(struct be_adapter *adapter, struct be_tx_obj *txo,
201 u16 posted)
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700202{
203 u32 val = 0;
Vasundhara Volam94d73aa2013-04-21 23:28:14 +0000204 val |= txo->q.id & DB_TXULP_RING_ID_MASK;
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700205 val |= (posted & DB_TXULP_NUM_POSTED_MASK) << DB_TXULP_NUM_POSTED_SHIFT;
Sathya Perlaf3eb62d2010-06-29 00:11:17 +0000206
207 wmb();
Vasundhara Volam94d73aa2013-04-21 23:28:14 +0000208 iowrite32(val, adapter->db + txo->db_offset);
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700209}
210
Sathya Perla8788fdc2009-07-27 22:52:03 +0000211static void be_eq_notify(struct be_adapter *adapter, u16 qid,
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700212 bool arm, bool clear_int, u16 num_popped)
213{
214 u32 val = 0;
215 val |= qid & DB_EQ_RING_ID_MASK;
Sathya Perlafe6d2a32010-11-21 23:25:50 +0000216 val |= ((qid & DB_EQ_RING_ID_EXT_MASK) <<
217 DB_EQ_RING_ID_EXT_MASK_SHIFT);
Sathya Perlacf588472010-02-14 21:22:01 +0000218
Padmanabh Ratnakarf67ef7b2012-07-12 03:57:09 +0000219 if (adapter->eeh_error)
Sathya Perlacf588472010-02-14 21:22:01 +0000220 return;
221
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700222 if (arm)
223 val |= 1 << DB_EQ_REARM_SHIFT;
224 if (clear_int)
225 val |= 1 << DB_EQ_CLR_SHIFT;
226 val |= 1 << DB_EQ_EVNT_SHIFT;
227 val |= num_popped << DB_EQ_NUM_POPPED_SHIFT;
Sathya Perla8788fdc2009-07-27 22:52:03 +0000228 iowrite32(val, adapter->db + DB_EQ_OFFSET);
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700229}
230
Sathya Perla8788fdc2009-07-27 22:52:03 +0000231void be_cq_notify(struct be_adapter *adapter, u16 qid, bool arm, u16 num_popped)
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700232{
233 u32 val = 0;
234 val |= qid & DB_CQ_RING_ID_MASK;
Sathya Perlafe6d2a32010-11-21 23:25:50 +0000235 val |= ((qid & DB_CQ_RING_ID_EXT_MASK) <<
236 DB_CQ_RING_ID_EXT_MASK_SHIFT);
Sathya Perlacf588472010-02-14 21:22:01 +0000237
Padmanabh Ratnakarf67ef7b2012-07-12 03:57:09 +0000238 if (adapter->eeh_error)
Sathya Perlacf588472010-02-14 21:22:01 +0000239 return;
240
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700241 if (arm)
242 val |= 1 << DB_CQ_REARM_SHIFT;
243 val |= num_popped << DB_CQ_NUM_POPPED_SHIFT;
Sathya Perla8788fdc2009-07-27 22:52:03 +0000244 iowrite32(val, adapter->db + DB_CQ_OFFSET);
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700245}
246
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700247static int be_mac_addr_set(struct net_device *netdev, void *p)
248{
249 struct be_adapter *adapter = netdev_priv(netdev);
250 struct sockaddr *addr = p;
251 int status = 0;
Somnath Koture3a7ae22011-10-27 07:14:05 +0000252 u8 current_mac[ETH_ALEN];
Ajit Khapardefbc13f02012-03-18 06:23:21 +0000253 u32 pmac_id = adapter->pmac_id[0];
Padmanabh Ratnakar704e4c82012-10-20 06:02:13 +0000254 bool active_mac = true;
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700255
Ajit Khapardeca9e4982009-11-29 17:56:26 +0000256 if (!is_valid_ether_addr(addr->sa_data))
257 return -EADDRNOTAVAIL;
258
Padmanabh Ratnakar704e4c82012-10-20 06:02:13 +0000259 /* For BE VF, MAC address is already activated by PF.
260 * Hence only operation left is updating netdev->devaddr.
261 * Update it if user is passing the same MAC which was used
262 * during configuring VF MAC from PF(Hypervisor).
263 */
264 if (!lancer_chip(adapter) && !be_physfn(adapter)) {
265 status = be_cmd_mac_addr_query(adapter, current_mac,
266 false, adapter->if_handle, 0);
267 if (!status && !memcmp(current_mac, addr->sa_data, ETH_ALEN))
268 goto done;
269 else
270 goto err;
271 }
272
273 if (!memcmp(addr->sa_data, netdev->dev_addr, ETH_ALEN))
274 goto done;
275
276 /* For Lancer check if any MAC is active.
277 * If active, get its mac id.
278 */
279 if (lancer_chip(adapter) && !be_physfn(adapter))
280 be_cmd_get_mac_from_list(adapter, current_mac, &active_mac,
281 &pmac_id, 0);
282
283 status = be_cmd_pmac_add(adapter, (u8 *)addr->sa_data,
284 adapter->if_handle,
285 &adapter->pmac_id[0], 0);
286
Sathya Perlaa65027e2009-08-17 00:58:04 +0000287 if (status)
Somnath Koture3a7ae22011-10-27 07:14:05 +0000288 goto err;
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700289
Padmanabh Ratnakar704e4c82012-10-20 06:02:13 +0000290 if (active_mac)
291 be_cmd_pmac_del(adapter, adapter->if_handle,
292 pmac_id, 0);
293done:
Somnath Koture3a7ae22011-10-27 07:14:05 +0000294 memcpy(netdev->dev_addr, addr->sa_data, netdev->addr_len);
295 return 0;
296err:
297 dev_err(&adapter->pdev->dev, "MAC %pM set Failed\n", addr->sa_data);
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700298 return status;
299}
300
Sathya Perlaca34fe32012-11-06 17:48:56 +0000301/* BE2 supports only v0 cmd */
302static void *hw_stats_from_cmd(struct be_adapter *adapter)
303{
304 if (BE2_chip(adapter)) {
305 struct be_cmd_resp_get_stats_v0 *cmd = adapter->stats_cmd.va;
306
307 return &cmd->hw_stats;
308 } else {
309 struct be_cmd_resp_get_stats_v1 *cmd = adapter->stats_cmd.va;
310
311 return &cmd->hw_stats;
312 }
313}
314
315/* BE2 supports only v0 cmd */
316static void *be_erx_stats_from_cmd(struct be_adapter *adapter)
317{
318 if (BE2_chip(adapter)) {
319 struct be_hw_stats_v0 *hw_stats = hw_stats_from_cmd(adapter);
320
321 return &hw_stats->erx;
322 } else {
323 struct be_hw_stats_v1 *hw_stats = hw_stats_from_cmd(adapter);
324
325 return &hw_stats->erx;
326 }
327}
328
/* Copy the v0 (BE2) FW stats response into the driver's common
 * struct be_drv_stats, byte-swapping the response in place first.
 */
static void populate_be_v0_stats(struct be_adapter *adapter)
{
	struct be_hw_stats_v0 *hw_stats = hw_stats_from_cmd(adapter);
	struct be_pmem_stats *pmem_sts = &hw_stats->pmem;
	struct be_rxf_stats_v0 *rxf_stats = &hw_stats->rxf;
	struct be_port_rxf_stats_v0 *port_stats =
		&rxf_stats->port[adapter->port_num];
	struct be_drv_stats *drvs = &adapter->drv_stats;

	be_dws_le_to_cpu(hw_stats, sizeof(*hw_stats));
	drvs->rx_pause_frames = port_stats->rx_pause_frames;
	drvs->rx_crc_errors = port_stats->rx_crc_errors;
	drvs->rx_control_frames = port_stats->rx_control_frames;
	drvs->rx_in_range_errors = port_stats->rx_in_range_errors;
	drvs->rx_frame_too_long = port_stats->rx_frame_too_long;
	drvs->rx_dropped_runt = port_stats->rx_dropped_runt;
	drvs->rx_ip_checksum_errs = port_stats->rx_ip_checksum_errs;
	drvs->rx_tcp_checksum_errs = port_stats->rx_tcp_checksum_errs;
	drvs->rx_udp_checksum_errs = port_stats->rx_udp_checksum_errs;
	drvs->rxpp_fifo_overflow_drop = port_stats->rx_fifo_overflow;
	drvs->rx_dropped_tcp_length = port_stats->rx_dropped_tcp_length;
	drvs->rx_dropped_too_small = port_stats->rx_dropped_too_small;
	drvs->rx_dropped_too_short = port_stats->rx_dropped_too_short;
	drvs->rx_out_range_errors = port_stats->rx_out_range_errors;
	drvs->rx_input_fifo_overflow_drop = port_stats->rx_input_fifo_overflow;
	drvs->rx_dropped_header_too_small =
		port_stats->rx_dropped_header_too_small;
	/* v0 reports address and vlan filtering separately; fold them */
	drvs->rx_address_filtered =
					port_stats->rx_address_filtered +
					port_stats->rx_vlan_filtered;
	drvs->rx_alignment_symbol_errors =
		port_stats->rx_alignment_symbol_errors;

	drvs->tx_pauseframes = port_stats->tx_pauseframes;
	drvs->tx_controlframes = port_stats->tx_controlframes;

	/* jabber events are tracked per physical port in the v0 layout */
	if (adapter->port_num)
		drvs->jabber_events = rxf_stats->port1_jabber_events;
	else
		drvs->jabber_events = rxf_stats->port0_jabber_events;
	drvs->rx_drops_no_pbuf = rxf_stats->rx_drops_no_pbuf;
	drvs->rx_drops_no_erx_descr = rxf_stats->rx_drops_no_erx_descr;
	drvs->forwarded_packets = rxf_stats->forwarded_packets;
	drvs->rx_drops_mtu = rxf_stats->rx_drops_mtu;
	drvs->rx_drops_no_tpre_descr = rxf_stats->rx_drops_no_tpre_descr;
	drvs->rx_drops_too_many_frags = rxf_stats->rx_drops_too_many_frags;
	adapter->drv_stats.eth_red_drops = pmem_sts->eth_red_drops;
}
377
Sathya Perlaca34fe32012-11-06 17:48:56 +0000378static void populate_be_v1_stats(struct be_adapter *adapter)
Ajit Khaparde89a88ab2011-05-16 07:36:18 +0000379{
Sathya Perlaac124ff2011-07-25 19:10:14 +0000380 struct be_hw_stats_v1 *hw_stats = hw_stats_from_cmd(adapter);
381 struct be_pmem_stats *pmem_sts = &hw_stats->pmem;
382 struct be_rxf_stats_v1 *rxf_stats = &hw_stats->rxf;
Ajit Khaparde89a88ab2011-05-16 07:36:18 +0000383 struct be_port_rxf_stats_v1 *port_stats =
Sathya Perlaac124ff2011-07-25 19:10:14 +0000384 &rxf_stats->port[adapter->port_num];
385 struct be_drv_stats *drvs = &adapter->drv_stats;
Ajit Khaparde89a88ab2011-05-16 07:36:18 +0000386
Sathya Perlaac124ff2011-07-25 19:10:14 +0000387 be_dws_le_to_cpu(hw_stats, sizeof(*hw_stats));
Ajit Khaparde02fe7022011-12-09 13:53:09 +0000388 drvs->pmem_fifo_overflow_drop = port_stats->pmem_fifo_overflow_drop;
389 drvs->rx_priority_pause_frames = port_stats->rx_priority_pause_frames;
Ajit Khaparde89a88ab2011-05-16 07:36:18 +0000390 drvs->rx_pause_frames = port_stats->rx_pause_frames;
391 drvs->rx_crc_errors = port_stats->rx_crc_errors;
392 drvs->rx_control_frames = port_stats->rx_control_frames;
393 drvs->rx_in_range_errors = port_stats->rx_in_range_errors;
394 drvs->rx_frame_too_long = port_stats->rx_frame_too_long;
395 drvs->rx_dropped_runt = port_stats->rx_dropped_runt;
396 drvs->rx_ip_checksum_errs = port_stats->rx_ip_checksum_errs;
397 drvs->rx_tcp_checksum_errs = port_stats->rx_tcp_checksum_errs;
398 drvs->rx_udp_checksum_errs = port_stats->rx_udp_checksum_errs;
399 drvs->rx_dropped_tcp_length = port_stats->rx_dropped_tcp_length;
400 drvs->rx_dropped_too_small = port_stats->rx_dropped_too_small;
401 drvs->rx_dropped_too_short = port_stats->rx_dropped_too_short;
402 drvs->rx_out_range_errors = port_stats->rx_out_range_errors;
403 drvs->rx_dropped_header_too_small =
404 port_stats->rx_dropped_header_too_small;
405 drvs->rx_input_fifo_overflow_drop =
406 port_stats->rx_input_fifo_overflow_drop;
Suresh Reddy18fb06a2013-04-25 23:03:21 +0000407 drvs->rx_address_filtered = port_stats->rx_address_filtered;
Ajit Khaparde89a88ab2011-05-16 07:36:18 +0000408 drvs->rx_alignment_symbol_errors =
409 port_stats->rx_alignment_symbol_errors;
Sathya Perlaac124ff2011-07-25 19:10:14 +0000410 drvs->rxpp_fifo_overflow_drop = port_stats->rxpp_fifo_overflow_drop;
Ajit Khaparde89a88ab2011-05-16 07:36:18 +0000411 drvs->tx_pauseframes = port_stats->tx_pauseframes;
412 drvs->tx_controlframes = port_stats->tx_controlframes;
413 drvs->jabber_events = port_stats->jabber_events;
414 drvs->rx_drops_no_pbuf = rxf_stats->rx_drops_no_pbuf;
Ajit Khaparde89a88ab2011-05-16 07:36:18 +0000415 drvs->rx_drops_no_erx_descr = rxf_stats->rx_drops_no_erx_descr;
Ajit Khaparde89a88ab2011-05-16 07:36:18 +0000416 drvs->forwarded_packets = rxf_stats->forwarded_packets;
417 drvs->rx_drops_mtu = rxf_stats->rx_drops_mtu;
Sathya Perlaac124ff2011-07-25 19:10:14 +0000418 drvs->rx_drops_no_tpre_descr = rxf_stats->rx_drops_no_tpre_descr;
419 drvs->rx_drops_too_many_frags = rxf_stats->rx_drops_too_many_frags;
Ajit Khaparde89a88ab2011-05-16 07:36:18 +0000420 adapter->drv_stats.eth_red_drops = pmem_sts->eth_red_drops;
421}
422
Selvin Xavier005d5692011-05-16 07:36:35 +0000423static void populate_lancer_stats(struct be_adapter *adapter)
424{
Ajit Khaparde89a88ab2011-05-16 07:36:18 +0000425
Selvin Xavier005d5692011-05-16 07:36:35 +0000426 struct be_drv_stats *drvs = &adapter->drv_stats;
Sathya Perlaac124ff2011-07-25 19:10:14 +0000427 struct lancer_pport_stats *pport_stats =
428 pport_stats_from_cmd(adapter);
429
430 be_dws_le_to_cpu(pport_stats, sizeof(*pport_stats));
431 drvs->rx_pause_frames = pport_stats->rx_pause_frames_lo;
432 drvs->rx_crc_errors = pport_stats->rx_crc_errors_lo;
433 drvs->rx_control_frames = pport_stats->rx_control_frames_lo;
Selvin Xavier005d5692011-05-16 07:36:35 +0000434 drvs->rx_in_range_errors = pport_stats->rx_in_range_errors;
Sathya Perlaac124ff2011-07-25 19:10:14 +0000435 drvs->rx_frame_too_long = pport_stats->rx_frames_too_long_lo;
Selvin Xavier005d5692011-05-16 07:36:35 +0000436 drvs->rx_dropped_runt = pport_stats->rx_dropped_runt;
437 drvs->rx_ip_checksum_errs = pport_stats->rx_ip_checksum_errors;
438 drvs->rx_tcp_checksum_errs = pport_stats->rx_tcp_checksum_errors;
439 drvs->rx_udp_checksum_errs = pport_stats->rx_udp_checksum_errors;
440 drvs->rx_dropped_tcp_length =
441 pport_stats->rx_dropped_invalid_tcp_length;
442 drvs->rx_dropped_too_small = pport_stats->rx_dropped_too_small;
443 drvs->rx_dropped_too_short = pport_stats->rx_dropped_too_short;
444 drvs->rx_out_range_errors = pport_stats->rx_out_of_range_errors;
445 drvs->rx_dropped_header_too_small =
446 pport_stats->rx_dropped_header_too_small;
447 drvs->rx_input_fifo_overflow_drop = pport_stats->rx_fifo_overflow;
Suresh Reddy18fb06a2013-04-25 23:03:21 +0000448 drvs->rx_address_filtered =
449 pport_stats->rx_address_filtered +
450 pport_stats->rx_vlan_filtered;
Sathya Perlaac124ff2011-07-25 19:10:14 +0000451 drvs->rx_alignment_symbol_errors = pport_stats->rx_symbol_errors_lo;
Selvin Xavier005d5692011-05-16 07:36:35 +0000452 drvs->rxpp_fifo_overflow_drop = pport_stats->rx_fifo_overflow;
Sathya Perlaac124ff2011-07-25 19:10:14 +0000453 drvs->tx_pauseframes = pport_stats->tx_pause_frames_lo;
454 drvs->tx_controlframes = pport_stats->tx_control_frames_lo;
Selvin Xavier005d5692011-05-16 07:36:35 +0000455 drvs->jabber_events = pport_stats->rx_jabbers;
Sathya Perlaac124ff2011-07-25 19:10:14 +0000456 drvs->forwarded_packets = pport_stats->num_forwards_lo;
457 drvs->rx_drops_mtu = pport_stats->rx_drops_mtu_lo;
Selvin Xavier005d5692011-05-16 07:36:35 +0000458 drvs->rx_drops_too_many_frags =
Sathya Perlaac124ff2011-07-25 19:10:14 +0000459 pport_stats->rx_drops_too_many_frags_lo;
Selvin Xavier005d5692011-05-16 07:36:35 +0000460}
Ajit Khaparde89a88ab2011-05-16 07:36:18 +0000461
Sathya Perla09c1c682011-08-22 19:41:53 +0000462static void accumulate_16bit_val(u32 *acc, u16 val)
463{
464#define lo(x) (x & 0xFFFF)
465#define hi(x) (x & 0xFFFF0000)
466 bool wrapped = val < lo(*acc);
467 u32 newacc = hi(*acc) + val;
468
469 if (wrapped)
470 newacc += 65536;
471 ACCESS_ONCE(*acc) = newacc;
472}
473
Ajit Khaparde89a88ab2011-05-16 07:36:18 +0000474void be_parse_stats(struct be_adapter *adapter)
475{
Sathya Perlaac124ff2011-07-25 19:10:14 +0000476 struct be_erx_stats_v1 *erx = be_erx_stats_from_cmd(adapter);
477 struct be_rx_obj *rxo;
478 int i;
479
Sathya Perlaca34fe32012-11-06 17:48:56 +0000480 if (lancer_chip(adapter)) {
481 populate_lancer_stats(adapter);
Selvin Xavier005d5692011-05-16 07:36:35 +0000482 } else {
Sathya Perlaca34fe32012-11-06 17:48:56 +0000483 if (BE2_chip(adapter))
484 populate_be_v0_stats(adapter);
485 else
486 /* for BE3 and Skyhawk */
487 populate_be_v1_stats(adapter);
Sathya Perlaac124ff2011-07-25 19:10:14 +0000488
Sathya Perlaca34fe32012-11-06 17:48:56 +0000489 /* as erx_v1 is longer than v0, ok to use v1 for v0 access */
490 for_all_rx_queues(adapter, rxo, i) {
491 /* below erx HW counter can actually wrap around after
492 * 65535. Driver accumulates a 32-bit value
493 */
494 accumulate_16bit_val(&rx_stats(rxo)->rx_drops_no_frags,
495 (u16)erx->rx_drops_no_fragments \
496 [rxo->q.id]);
497 }
Sathya Perla09c1c682011-08-22 19:41:53 +0000498 }
Ajit Khaparde89a88ab2011-05-16 07:36:18 +0000499}
500
Sathya Perlaab1594e2011-07-25 19:10:15 +0000501static struct rtnl_link_stats64 *be_get_stats64(struct net_device *netdev,
502 struct rtnl_link_stats64 *stats)
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700503{
Sathya Perlaab1594e2011-07-25 19:10:15 +0000504 struct be_adapter *adapter = netdev_priv(netdev);
Ajit Khaparde89a88ab2011-05-16 07:36:18 +0000505 struct be_drv_stats *drvs = &adapter->drv_stats;
Sathya Perla3abcded2010-10-03 22:12:27 -0700506 struct be_rx_obj *rxo;
Sathya Perla3c8def92011-06-12 20:01:58 +0000507 struct be_tx_obj *txo;
Sathya Perlaab1594e2011-07-25 19:10:15 +0000508 u64 pkts, bytes;
509 unsigned int start;
Sathya Perla3abcded2010-10-03 22:12:27 -0700510 int i;
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700511
Sathya Perla3abcded2010-10-03 22:12:27 -0700512 for_all_rx_queues(adapter, rxo, i) {
Sathya Perlaab1594e2011-07-25 19:10:15 +0000513 const struct be_rx_stats *rx_stats = rx_stats(rxo);
514 do {
515 start = u64_stats_fetch_begin_bh(&rx_stats->sync);
516 pkts = rx_stats(rxo)->rx_pkts;
517 bytes = rx_stats(rxo)->rx_bytes;
518 } while (u64_stats_fetch_retry_bh(&rx_stats->sync, start));
519 stats->rx_packets += pkts;
520 stats->rx_bytes += bytes;
521 stats->multicast += rx_stats(rxo)->rx_mcast_pkts;
522 stats->rx_dropped += rx_stats(rxo)->rx_drops_no_skbs +
523 rx_stats(rxo)->rx_drops_no_frags;
Sathya Perla3abcded2010-10-03 22:12:27 -0700524 }
525
Sathya Perla3c8def92011-06-12 20:01:58 +0000526 for_all_tx_queues(adapter, txo, i) {
Sathya Perlaab1594e2011-07-25 19:10:15 +0000527 const struct be_tx_stats *tx_stats = tx_stats(txo);
528 do {
529 start = u64_stats_fetch_begin_bh(&tx_stats->sync);
530 pkts = tx_stats(txo)->tx_pkts;
531 bytes = tx_stats(txo)->tx_bytes;
532 } while (u64_stats_fetch_retry_bh(&tx_stats->sync, start));
533 stats->tx_packets += pkts;
534 stats->tx_bytes += bytes;
Sathya Perla3c8def92011-06-12 20:01:58 +0000535 }
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700536
537 /* bad pkts received */
Sathya Perlaab1594e2011-07-25 19:10:15 +0000538 stats->rx_errors = drvs->rx_crc_errors +
Ajit Khaparde89a88ab2011-05-16 07:36:18 +0000539 drvs->rx_alignment_symbol_errors +
540 drvs->rx_in_range_errors +
541 drvs->rx_out_range_errors +
542 drvs->rx_frame_too_long +
543 drvs->rx_dropped_too_small +
544 drvs->rx_dropped_too_short +
545 drvs->rx_dropped_header_too_small +
546 drvs->rx_dropped_tcp_length +
Sathya Perlaab1594e2011-07-25 19:10:15 +0000547 drvs->rx_dropped_runt;
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700548
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700549 /* detailed rx errors */
Sathya Perlaab1594e2011-07-25 19:10:15 +0000550 stats->rx_length_errors = drvs->rx_in_range_errors +
Ajit Khaparde89a88ab2011-05-16 07:36:18 +0000551 drvs->rx_out_range_errors +
552 drvs->rx_frame_too_long;
Sathya Perla68110862009-06-10 02:21:16 +0000553
Sathya Perlaab1594e2011-07-25 19:10:15 +0000554 stats->rx_crc_errors = drvs->rx_crc_errors;
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700555
556 /* frame alignment errors */
Sathya Perlaab1594e2011-07-25 19:10:15 +0000557 stats->rx_frame_errors = drvs->rx_alignment_symbol_errors;
Sathya Perla68110862009-06-10 02:21:16 +0000558
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700559 /* receiver fifo overrun */
560 /* drops_no_pbuf is no per i/f, it's per BE card */
Sathya Perlaab1594e2011-07-25 19:10:15 +0000561 stats->rx_fifo_errors = drvs->rxpp_fifo_overflow_drop +
Ajit Khaparde89a88ab2011-05-16 07:36:18 +0000562 drvs->rx_input_fifo_overflow_drop +
563 drvs->rx_drops_no_pbuf;
Sathya Perlaab1594e2011-07-25 19:10:15 +0000564 return stats;
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700565}
566
Ajit Khapardeb236916a2011-12-30 12:15:40 +0000567void be_link_status_update(struct be_adapter *adapter, u8 link_status)
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700568{
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700569 struct net_device *netdev = adapter->netdev;
570
Ajit Khapardeb236916a2011-12-30 12:15:40 +0000571 if (!(adapter->flags & BE_FLAGS_LINK_STATUS_INIT)) {
Sathya Perlaea172a02011-08-02 19:57:42 +0000572 netif_carrier_off(netdev);
Ajit Khapardeb236916a2011-12-30 12:15:40 +0000573 adapter->flags |= BE_FLAGS_LINK_STATUS_INIT;
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700574 }
Ajit Khapardeb236916a2011-12-30 12:15:40 +0000575
576 if ((link_status & LINK_STATUS_MASK) == LINK_UP)
577 netif_carrier_on(netdev);
578 else
579 netif_carrier_off(netdev);
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700580}
581
Sathya Perla3c8def92011-06-12 20:01:58 +0000582static void be_tx_stats_update(struct be_tx_obj *txo,
Ajit Khaparde91992e42010-02-19 13:57:12 +0000583 u32 wrb_cnt, u32 copied, u32 gso_segs, bool stopped)
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700584{
Sathya Perla3c8def92011-06-12 20:01:58 +0000585 struct be_tx_stats *stats = tx_stats(txo);
586
Sathya Perlaab1594e2011-07-25 19:10:15 +0000587 u64_stats_update_begin(&stats->sync);
Sathya Perlaac124ff2011-07-25 19:10:14 +0000588 stats->tx_reqs++;
589 stats->tx_wrbs += wrb_cnt;
590 stats->tx_bytes += copied;
591 stats->tx_pkts += (gso_segs ? gso_segs : 1);
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700592 if (stopped)
Sathya Perlaac124ff2011-07-25 19:10:14 +0000593 stats->tx_stops++;
Sathya Perlaab1594e2011-07-25 19:10:15 +0000594 u64_stats_update_end(&stats->sync);
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700595}
596
597/* Determine number of WRB entries needed to xmit data in an skb */
Sathya Perlafe6d2a32010-11-21 23:25:50 +0000598static u32 wrb_cnt_for_skb(struct be_adapter *adapter, struct sk_buff *skb,
599 bool *dummy)
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700600{
David S. Millerebc8d2a2009-06-09 01:01:31 -0700601 int cnt = (skb->len > skb->data_len);
602
603 cnt += skb_shinfo(skb)->nr_frags;
604
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700605 /* to account for hdr wrb */
606 cnt++;
Sathya Perlafe6d2a32010-11-21 23:25:50 +0000607 if (lancer_chip(adapter) || !(cnt & 1)) {
608 *dummy = false;
609 } else {
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700610 /* add a dummy to make it an even num */
611 cnt++;
612 *dummy = true;
Sathya Perlafe6d2a32010-11-21 23:25:50 +0000613 }
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700614 BUG_ON(cnt > BE_MAX_TX_FRAG_COUNT);
615 return cnt;
616}
617
618static inline void wrb_fill(struct be_eth_wrb *wrb, u64 addr, int len)
619{
620 wrb->frag_pa_hi = upper_32_bits(addr);
621 wrb->frag_pa_lo = addr & 0xFFFFFFFF;
622 wrb->frag_len = len & ETH_WRB_FRAG_LEN_MASK;
Somnath Kotur89b1f492012-06-24 19:40:55 +0000623 wrb->rsvd0 = 0;
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700624}
625
Ajit Khaparde1ded1322011-12-09 13:53:17 +0000626static inline u16 be_get_tx_vlan_tag(struct be_adapter *adapter,
627 struct sk_buff *skb)
628{
629 u8 vlan_prio;
630 u16 vlan_tag;
631
632 vlan_tag = vlan_tx_tag_get(skb);
633 vlan_prio = (vlan_tag & VLAN_PRIO_MASK) >> VLAN_PRIO_SHIFT;
634 /* If vlan priority provided by OS is NOT in available bmap */
635 if (!(adapter->vlan_prio_bmap & (1 << vlan_prio)))
636 vlan_tag = (vlan_tag & ~VLAN_PRIO_MASK) |
637 adapter->recommended_prio;
638
639 return vlan_tag;
640}
641
Somnath Kotur93040ae2012-06-26 22:32:10 +0000642static int be_vlan_tag_chk(struct be_adapter *adapter, struct sk_buff *skb)
643{
644 return vlan_tx_tag_present(skb) || adapter->pvid;
645}
646
Somnath Koturcc4ce022010-10-21 07:11:14 -0700647static void wrb_fill_hdr(struct be_adapter *adapter, struct be_eth_hdr_wrb *hdr,
648 struct sk_buff *skb, u32 wrb_cnt, u32 len)
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700649{
Ajit Khaparde1ded1322011-12-09 13:53:17 +0000650 u16 vlan_tag;
Somnath Koturcc4ce022010-10-21 07:11:14 -0700651
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700652 memset(hdr, 0, sizeof(*hdr));
653
654 AMAP_SET_BITS(struct amap_eth_hdr_wrb, crc, hdr, 1);
655
Ajit Khaparde49e4b8472010-06-14 04:56:07 +0000656 if (skb_is_gso(skb)) {
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700657 AMAP_SET_BITS(struct amap_eth_hdr_wrb, lso, hdr, 1);
658 AMAP_SET_BITS(struct amap_eth_hdr_wrb, lso_mss,
659 hdr, skb_shinfo(skb)->gso_size);
Sathya Perlafe6d2a32010-11-21 23:25:50 +0000660 if (skb_is_gso_v6(skb) && !lancer_chip(adapter))
Ajit Khaparde49e4b8472010-06-14 04:56:07 +0000661 AMAP_SET_BITS(struct amap_eth_hdr_wrb, lso6, hdr, 1);
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700662 } else if (skb->ip_summed == CHECKSUM_PARTIAL) {
663 if (is_tcp_pkt(skb))
664 AMAP_SET_BITS(struct amap_eth_hdr_wrb, tcpcs, hdr, 1);
665 else if (is_udp_pkt(skb))
666 AMAP_SET_BITS(struct amap_eth_hdr_wrb, udpcs, hdr, 1);
667 }
668
Ajit Khaparde4c5102f2011-07-12 22:10:01 -0700669 if (vlan_tx_tag_present(skb)) {
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700670 AMAP_SET_BITS(struct amap_eth_hdr_wrb, vlan, hdr, 1);
Ajit Khaparde1ded1322011-12-09 13:53:17 +0000671 vlan_tag = be_get_tx_vlan_tag(adapter, skb);
Somnath Koturcc4ce022010-10-21 07:11:14 -0700672 AMAP_SET_BITS(struct amap_eth_hdr_wrb, vlan_tag, hdr, vlan_tag);
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700673 }
674
675 AMAP_SET_BITS(struct amap_eth_hdr_wrb, event, hdr, 1);
676 AMAP_SET_BITS(struct amap_eth_hdr_wrb, complete, hdr, 1);
677 AMAP_SET_BITS(struct amap_eth_hdr_wrb, num_wrb, hdr, wrb_cnt);
678 AMAP_SET_BITS(struct amap_eth_hdr_wrb, len, hdr, len);
679}
680
Ivan Vecera2b7bceb2011-02-02 08:05:12 +0000681static void unmap_tx_frag(struct device *dev, struct be_eth_wrb *wrb,
Sathya Perla7101e112010-03-22 20:41:12 +0000682 bool unmap_single)
683{
684 dma_addr_t dma;
685
686 be_dws_le_to_cpu(wrb, sizeof(*wrb));
687
688 dma = (u64)wrb->frag_pa_hi << 32 | (u64)wrb->frag_pa_lo;
FUJITA Tomonorib681ee72010-04-04 21:40:18 +0000689 if (wrb->frag_len) {
Sathya Perla7101e112010-03-22 20:41:12 +0000690 if (unmap_single)
Ivan Vecera2b7bceb2011-02-02 08:05:12 +0000691 dma_unmap_single(dev, dma, wrb->frag_len,
692 DMA_TO_DEVICE);
Sathya Perla7101e112010-03-22 20:41:12 +0000693 else
Ivan Vecera2b7bceb2011-02-02 08:05:12 +0000694 dma_unmap_page(dev, dma, wrb->frag_len, DMA_TO_DEVICE);
Sathya Perla7101e112010-03-22 20:41:12 +0000695 }
696}
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700697
/* Map @skb into TX WRBs starting at the current queue head.
 * The first slot is reserved for the header WRB (filled last, once the
 * total mapped length is known); one WRB follows per DMA-mapped piece
 * (linear head, then each paged frag), plus an optional dummy WRB.
 * Returns the number of bytes mapped, or 0 on DMA-mapping failure, in
 * which case all mappings are undone and the queue head is rewound.
 */
static int make_tx_wrbs(struct be_adapter *adapter, struct be_queue_info *txq,
		struct sk_buff *skb, u32 wrb_cnt, bool dummy_wrb)
{
	dma_addr_t busaddr;
	int i, copied = 0;
	struct device *dev = &adapter->pdev->dev;
	struct sk_buff *first_skb = skb;
	struct be_eth_wrb *wrb;
	struct be_eth_hdr_wrb *hdr;
	bool map_single = false;
	u16 map_head;

	/* reserve the header slot before any fragment WRBs */
	hdr = queue_head_node(txq);
	queue_head_inc(txq);
	map_head = txq->head;	/* first fragment slot, for error rewind */

	/* linear part of the skb, if any */
	if (skb->len > skb->data_len) {
		int len = skb_headlen(skb);
		busaddr = dma_map_single(dev, skb->data, len, DMA_TO_DEVICE);
		if (dma_mapping_error(dev, busaddr))
			goto dma_err;
		map_single = true;
		wrb = queue_head_node(txq);
		wrb_fill(wrb, busaddr, len);
		be_dws_cpu_to_le(wrb, sizeof(*wrb));
		queue_head_inc(txq);
		copied += len;
	}

	/* one WRB per paged fragment */
	for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
		const struct skb_frag_struct *frag =
			&skb_shinfo(skb)->frags[i];
		busaddr = skb_frag_dma_map(dev, frag, 0,
					skb_frag_size(frag), DMA_TO_DEVICE);
		if (dma_mapping_error(dev, busaddr))
			goto dma_err;
		wrb = queue_head_node(txq);
		wrb_fill(wrb, busaddr, skb_frag_size(frag));
		be_dws_cpu_to_le(wrb, sizeof(*wrb));
		queue_head_inc(txq);
		copied += skb_frag_size(frag);
	}

	if (dummy_wrb) {
		wrb = queue_head_node(txq);
		wrb_fill(wrb, 0, 0);
		be_dws_cpu_to_le(wrb, sizeof(*wrb));
		queue_head_inc(txq);
	}

	wrb_fill_hdr(adapter, hdr, first_skb, wrb_cnt, copied);
	be_dws_cpu_to_le(hdr, sizeof(*hdr));

	return copied;
dma_err:
	/* unwind: unmap every WRB filled so far and restore the head */
	txq->head = map_head;
	while (copied) {
		wrb = queue_head_node(txq);
		unmap_tx_frag(dev, wrb, map_single);
		map_single = false;	/* only the first WRB was map_single */
		copied -= wrb->frag_len;
		queue_head_inc(txq);
	}
	return 0;
}
763
Somnath Kotur93040ae2012-06-26 22:32:10 +0000764static struct sk_buff *be_insert_vlan_in_pkt(struct be_adapter *adapter,
765 struct sk_buff *skb)
766{
767 u16 vlan_tag = 0;
768
769 skb = skb_share_check(skb, GFP_ATOMIC);
770 if (unlikely(!skb))
771 return skb;
772
773 if (vlan_tx_tag_present(skb)) {
774 vlan_tag = be_get_tx_vlan_tag(adapter, skb);
David S. Miller6e0895c2013-04-22 20:32:51 -0400775 skb = __vlan_put_tag(skb, htons(ETH_P_8021Q), vlan_tag);
Ivan Veceraf11a8692013-04-12 16:49:24 +0200776 if (skb)
777 skb->vlan_tci = 0;
Somnath Kotur93040ae2012-06-26 22:32:10 +0000778 }
779
780 return skb;
781}
782
/* ndo_start_xmit: apply two HW workarounds, map the skb into TX WRBs and
 * ring the doorbell. Always returns NETDEV_TX_OK (drops on failure).
 */
static netdev_tx_t be_xmit(struct sk_buff *skb,
			struct net_device *netdev)
{
	struct be_adapter *adapter = netdev_priv(netdev);
	struct be_tx_obj *txo = &adapter->tx_obj[skb_get_queue_mapping(skb)];
	struct be_queue_info *txq = &txo->q;
	struct iphdr *ip = NULL;
	u32 wrb_cnt = 0, copied = 0;
	u32 start = txq->head, eth_hdr_len;
	bool dummy_wrb, stopped = false;

	eth_hdr_len = ntohs(skb->protocol) == ETH_P_8021Q ?
		VLAN_ETH_HLEN : ETH_HLEN;

	/* HW has a bug which considers padding bytes as legal
	 * and modifies the IPv4 hdr's 'tot_len' field
	 */
	if (skb->len <= 60 && be_vlan_tag_chk(adapter, skb) &&
			is_ipv4_pkt(skb)) {
		/* trim the frame back to the IP-declared length */
		ip = (struct iphdr *)ip_hdr(skb);
		pskb_trim(skb, eth_hdr_len + ntohs(ip->tot_len));
	}

	/* HW has a bug wherein it will calculate CSUM for VLAN
	 * pkts even though it is disabled.
	 * Manually insert VLAN in pkt.
	 */
	if (skb->ip_summed != CHECKSUM_PARTIAL &&
			be_vlan_tag_chk(adapter, skb)) {
		skb = be_insert_vlan_in_pkt(adapter, skb);
		if (unlikely(!skb))
			goto tx_drop;
	}

	wrb_cnt = wrb_cnt_for_skb(adapter, skb, &dummy_wrb);

	copied = make_tx_wrbs(adapter, txq, skb, wrb_cnt, dummy_wrb);
	if (copied) {
		int gso_segs = skb_shinfo(skb)->gso_segs;

		/* record the sent skb in the sent_skb table */
		BUG_ON(txo->sent_skb_list[start]);
		txo->sent_skb_list[start] = skb;

		/* Ensure txq has space for the next skb; Else stop the queue
		 * *BEFORE* ringing the tx doorbell, so that we serialze the
		 * tx compls of the current transmit which'll wake up the queue
		 */
		atomic_add(wrb_cnt, &txq->used);
		if ((BE_MAX_TX_FRAG_COUNT + atomic_read(&txq->used)) >=
								txq->len) {
			netif_stop_subqueue(netdev, skb_get_queue_mapping(skb));
			stopped = true;
		}

		be_txq_notify(adapter, txo, wrb_cnt);

		be_tx_stats_update(txo, wrb_cnt, copied, gso_segs, stopped);
	} else {
		/* DMA mapping failed: make_tx_wrbs() rewound the queue;
		 * restore our snapshot of the head and drop the skb.
		 */
		txq->head = start;
		dev_kfree_skb_any(skb);
	}
tx_drop:
	return NETDEV_TX_OK;
}
848
849static int be_change_mtu(struct net_device *netdev, int new_mtu)
850{
851 struct be_adapter *adapter = netdev_priv(netdev);
852 if (new_mtu < BE_MIN_MTU ||
Ajit Khaparde34a89b82010-02-09 01:32:43 +0000853 new_mtu > (BE_MAX_JUMBO_FRAME_SIZE -
854 (ETH_HLEN + ETH_FCS_LEN))) {
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700855 dev_info(&adapter->pdev->dev,
856 "MTU must be between %d and %d bytes\n",
Ajit Khaparde34a89b82010-02-09 01:32:43 +0000857 BE_MIN_MTU,
858 (BE_MAX_JUMBO_FRAME_SIZE - (ETH_HLEN + ETH_FCS_LEN)));
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700859 return -EINVAL;
860 }
861 dev_info(&adapter->pdev->dev, "MTU changed from %d to %d bytes\n",
862 netdev->mtu, new_mtu);
863 netdev->mtu = new_mtu;
864 return 0;
865}
866
867/*
Ajit Khaparde82903e42010-02-09 01:34:57 +0000868 * A max of 64 (BE_NUM_VLANS_SUPPORTED) vlans can be configured in BE.
869 * If the user configures more, place BE in vlan promiscuous mode.
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700870 */
Sathya Perla10329df2012-06-05 19:37:18 +0000871static int be_vid_config(struct be_adapter *adapter)
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700872{
Sathya Perla10329df2012-06-05 19:37:18 +0000873 u16 vids[BE_NUM_VLANS_SUPPORTED];
874 u16 num = 0, i;
Ajit Khaparde82903e42010-02-09 01:34:57 +0000875 int status = 0;
Ajit Khaparde1da87b72010-07-23 01:51:22 +0000876
Sathya Perlac0e64ef2011-08-02 19:57:43 +0000877 /* No need to further configure vids if in promiscuous mode */
878 if (adapter->promiscuous)
879 return 0;
880
Padmanabh Ratnakar0fc16eb2012-04-25 01:46:06 +0000881 if (adapter->vlans_added > adapter->max_vlans)
882 goto set_vlan_promisc;
883
884 /* Construct VLAN Table to give to HW */
885 for (i = 0; i < VLAN_N_VID; i++)
886 if (adapter->vlan_tag[i])
Sathya Perla10329df2012-06-05 19:37:18 +0000887 vids[num++] = cpu_to_le16(i);
Padmanabh Ratnakar0fc16eb2012-04-25 01:46:06 +0000888
889 status = be_cmd_vlan_config(adapter, adapter->if_handle,
Sathya Perla10329df2012-06-05 19:37:18 +0000890 vids, num, 1, 0);
Padmanabh Ratnakar0fc16eb2012-04-25 01:46:06 +0000891
892 /* Set to VLAN promisc mode as setting VLAN filter failed */
893 if (status) {
894 dev_info(&adapter->pdev->dev, "Exhausted VLAN HW filters.\n");
895 dev_info(&adapter->pdev->dev, "Disabling HW VLAN filtering.\n");
896 goto set_vlan_promisc;
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700897 }
Ajit Khaparde1da87b72010-07-23 01:51:22 +0000898
Sathya Perlab31c50a2009-09-17 10:30:13 -0700899 return status;
Padmanabh Ratnakar0fc16eb2012-04-25 01:46:06 +0000900
901set_vlan_promisc:
902 status = be_cmd_vlan_config(adapter, adapter->if_handle,
903 NULL, 0, 1, 1);
904 return status;
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700905}
906
Patrick McHardy80d5c362013-04-19 02:04:28 +0000907static int be_vlan_add_vid(struct net_device *netdev, __be16 proto, u16 vid)
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700908{
909 struct be_adapter *adapter = netdev_priv(netdev);
Ajit Khaparde80817cb2011-12-30 12:15:12 +0000910 int status = 0;
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700911
Padmanabh Ratnakara85e9982012-10-20 06:02:40 +0000912 if (!lancer_chip(adapter) && !be_physfn(adapter)) {
Ajit Khaparde80817cb2011-12-30 12:15:12 +0000913 status = -EINVAL;
914 goto ret;
915 }
Sarveshwar Bandiba343c72010-03-31 02:56:12 +0000916
Padmanabh Ratnakara85e9982012-10-20 06:02:40 +0000917 /* Packets with VID 0 are always received by Lancer by default */
918 if (lancer_chip(adapter) && vid == 0)
919 goto ret;
920
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700921 adapter->vlan_tag[vid] = 1;
Ajit Khaparde82903e42010-02-09 01:34:57 +0000922 if (adapter->vlans_added <= (adapter->max_vlans + 1))
Sathya Perla10329df2012-06-05 19:37:18 +0000923 status = be_vid_config(adapter);
Jiri Pirko8e586132011-12-08 19:52:37 -0500924
Ajit Khaparde80817cb2011-12-30 12:15:12 +0000925 if (!status)
926 adapter->vlans_added++;
927 else
928 adapter->vlan_tag[vid] = 0;
929ret:
930 return status;
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700931}
932
Patrick McHardy80d5c362013-04-19 02:04:28 +0000933static int be_vlan_rem_vid(struct net_device *netdev, __be16 proto, u16 vid)
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700934{
935 struct be_adapter *adapter = netdev_priv(netdev);
Ajit Khaparde80817cb2011-12-30 12:15:12 +0000936 int status = 0;
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700937
Padmanabh Ratnakara85e9982012-10-20 06:02:40 +0000938 if (!lancer_chip(adapter) && !be_physfn(adapter)) {
Ajit Khaparde80817cb2011-12-30 12:15:12 +0000939 status = -EINVAL;
940 goto ret;
941 }
Sarveshwar Bandiba343c72010-03-31 02:56:12 +0000942
Padmanabh Ratnakara85e9982012-10-20 06:02:40 +0000943 /* Packets with VID 0 are always received by Lancer by default */
944 if (lancer_chip(adapter) && vid == 0)
945 goto ret;
946
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700947 adapter->vlan_tag[vid] = 0;
Ajit Khaparde82903e42010-02-09 01:34:57 +0000948 if (adapter->vlans_added <= adapter->max_vlans)
Sathya Perla10329df2012-06-05 19:37:18 +0000949 status = be_vid_config(adapter);
Jiri Pirko8e586132011-12-08 19:52:37 -0500950
Ajit Khaparde80817cb2011-12-30 12:15:12 +0000951 if (!status)
952 adapter->vlans_added--;
953 else
954 adapter->vlan_tag[vid] = 1;
955ret:
956 return status;
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700957}
958
/* ndo_set_rx_mode: program the HW RX filters (promiscuous, multicast,
 * secondary unicast MACs), falling back to promiscuous modes when the
 * requested configuration exceeds the HW filter capacity.
 */
static void be_set_rx_mode(struct net_device *netdev)
{
	struct be_adapter *adapter = netdev_priv(netdev);
	int status;

	if (netdev->flags & IFF_PROMISC) {
		be_cmd_rx_filter(adapter, IFF_PROMISC, ON);
		adapter->promiscuous = true;
		goto done;
	}

	/* BE was previously in promiscuous mode; disable it */
	if (adapter->promiscuous) {
		adapter->promiscuous = false;
		be_cmd_rx_filter(adapter, IFF_PROMISC, OFF);

		/* re-apply the VLAN configuration now that promisc is off */
		if (adapter->vlans_added)
			be_vid_config(adapter);
	}

	/* Enable multicast promisc if num configured exceeds what we support */
	if (netdev->flags & IFF_ALLMULTI ||
			netdev_mc_count(netdev) > adapter->max_mcast_mac) {
		be_cmd_rx_filter(adapter, IFF_ALLMULTI, ON);
		goto done;
	}

	if (netdev_uc_count(netdev) != adapter->uc_macs) {
		struct netdev_hw_addr *ha;
		int i = 1; /* First slot is claimed by the Primary MAC */

		/* delete all previously-programmed secondary unicast MACs */
		for (; adapter->uc_macs > 0; adapter->uc_macs--, i++) {
			be_cmd_pmac_del(adapter, adapter->if_handle,
					adapter->pmac_id[i], 0);
		}

		/* more unicast MACs than pmac slots: go fully promiscuous */
		if (netdev_uc_count(netdev) > adapter->max_pmac_cnt) {
			be_cmd_rx_filter(adapter, IFF_PROMISC, ON);
			adapter->promiscuous = true;
			goto done;
		}

		netdev_for_each_uc_addr(ha, adapter->netdev) {
			adapter->uc_macs++; /* First slot is for Primary MAC */
			be_cmd_pmac_add(adapter, (u8 *)ha->addr,
					adapter->if_handle,
					&adapter->pmac_id[adapter->uc_macs], 0);
		}
	}

	status = be_cmd_rx_filter(adapter, IFF_MULTICAST, ON);

	/* Set to MCAST promisc mode if setting MULTICAST address fails */
	if (status) {
		dev_info(&adapter->pdev->dev, "Exhausted multicast HW filters.\n");
		dev_info(&adapter->pdev->dev, "Disabling HW multicast filtering.\n");
		be_cmd_rx_filter(adapter, IFF_ALLMULTI, ON);
	}
done:
	return;
}
1020
/* ndo_set_vf_mac: replace the MAC address of VF @vf.
 * Lancer: the currently-active list MAC (if any) is deleted and the new
 * MAC pushed via the MAC-list command. BE chips: a pmac del/add pair.
 */
static int be_set_vf_mac(struct net_device *netdev, int vf, u8 *mac)
{
	struct be_adapter *adapter = netdev_priv(netdev);
	struct be_vf_cfg *vf_cfg = &adapter->vf_cfg[vf];
	int status;
	bool active_mac = false;
	u32 pmac_id;
	u8 old_mac[ETH_ALEN];

	if (!sriov_enabled(adapter))
		return -EPERM;

	if (!is_valid_ether_addr(mac) || vf >= adapter->num_vfs)
		return -EINVAL;

	if (lancer_chip(adapter)) {
		status = be_cmd_get_mac_from_list(adapter, old_mac, &active_mac,
						&pmac_id, vf + 1);
		if (!status && active_mac)
			be_cmd_pmac_del(adapter, vf_cfg->if_handle,
					pmac_id, vf + 1);

		status = be_cmd_set_mac_list(adapter, mac, 1, vf + 1);
	} else {
		/* NOTE(review): the status of this pmac_del is overwritten
		 * by the pmac_add below, so a delete failure is silently
		 * ignored — confirm this is intended.
		 */
		status = be_cmd_pmac_del(adapter, vf_cfg->if_handle,
					vf_cfg->pmac_id, vf + 1);

		status = be_cmd_pmac_add(adapter, mac, vf_cfg->if_handle,
					&vf_cfg->pmac_id, vf + 1);
	}

	if (status)
		dev_err(&adapter->pdev->dev, "MAC %pM set on VF %d Failed\n",
				mac, vf);
	else
		/* cache the new MAC only after HW accepted it */
		memcpy(vf_cfg->mac_addr, mac, ETH_ALEN);

	return status;
}
1060
Ajit Khaparde64600ea2010-07-23 01:50:34 +00001061static int be_get_vf_config(struct net_device *netdev, int vf,
1062 struct ifla_vf_info *vi)
1063{
1064 struct be_adapter *adapter = netdev_priv(netdev);
Sathya Perla11ac75e2011-12-13 00:58:50 +00001065 struct be_vf_cfg *vf_cfg = &adapter->vf_cfg[vf];
Ajit Khaparde64600ea2010-07-23 01:50:34 +00001066
Sathya Perla11ac75e2011-12-13 00:58:50 +00001067 if (!sriov_enabled(adapter))
Ajit Khaparde64600ea2010-07-23 01:50:34 +00001068 return -EPERM;
1069
Sathya Perla11ac75e2011-12-13 00:58:50 +00001070 if (vf >= adapter->num_vfs)
Ajit Khaparde64600ea2010-07-23 01:50:34 +00001071 return -EINVAL;
1072
1073 vi->vf = vf;
Sathya Perla11ac75e2011-12-13 00:58:50 +00001074 vi->tx_rate = vf_cfg->tx_rate;
1075 vi->vlan = vf_cfg->vlan_tag;
Ajit Khaparde64600ea2010-07-23 01:50:34 +00001076 vi->qos = 0;
Sathya Perla11ac75e2011-12-13 00:58:50 +00001077 memcpy(&vi->mac, vf_cfg->mac_addr, ETH_ALEN);
Ajit Khaparde64600ea2010-07-23 01:50:34 +00001078
1079 return 0;
1080}
1081
Ajit Khaparde1da87b72010-07-23 01:51:22 +00001082static int be_set_vf_vlan(struct net_device *netdev,
1083 int vf, u16 vlan, u8 qos)
1084{
1085 struct be_adapter *adapter = netdev_priv(netdev);
1086 int status = 0;
1087
Sathya Perla11ac75e2011-12-13 00:58:50 +00001088 if (!sriov_enabled(adapter))
Ajit Khaparde1da87b72010-07-23 01:51:22 +00001089 return -EPERM;
1090
Sathya Perla11ac75e2011-12-13 00:58:50 +00001091 if (vf >= adapter->num_vfs || vlan > 4095)
Ajit Khaparde1da87b72010-07-23 01:51:22 +00001092 return -EINVAL;
1093
1094 if (vlan) {
Ajit Khapardef1f3ee12012-03-18 06:23:41 +00001095 if (adapter->vf_cfg[vf].vlan_tag != vlan) {
1096 /* If this is new value, program it. Else skip. */
1097 adapter->vf_cfg[vf].vlan_tag = vlan;
1098
1099 status = be_cmd_set_hsw_config(adapter, vlan,
1100 vf + 1, adapter->vf_cfg[vf].if_handle);
1101 }
Ajit Khaparde1da87b72010-07-23 01:51:22 +00001102 } else {
Ajit Khapardef1f3ee12012-03-18 06:23:41 +00001103 /* Reset Transparent Vlan Tagging. */
Sathya Perla11ac75e2011-12-13 00:58:50 +00001104 adapter->vf_cfg[vf].vlan_tag = 0;
Ajit Khapardef1f3ee12012-03-18 06:23:41 +00001105 vlan = adapter->vf_cfg[vf].def_vid;
1106 status = be_cmd_set_hsw_config(adapter, vlan, vf + 1,
1107 adapter->vf_cfg[vf].if_handle);
Ajit Khaparde1da87b72010-07-23 01:51:22 +00001108 }
1109
Ajit Khaparde1da87b72010-07-23 01:51:22 +00001110
1111 if (status)
1112 dev_info(&adapter->pdev->dev,
1113 "VLAN %d config on VF %d failed\n", vlan, vf);
1114 return status;
1115}
1116
Ajit Khapardee1d18732010-07-23 01:52:13 +00001117static int be_set_vf_tx_rate(struct net_device *netdev,
1118 int vf, int rate)
1119{
1120 struct be_adapter *adapter = netdev_priv(netdev);
1121 int status = 0;
1122
Sathya Perla11ac75e2011-12-13 00:58:50 +00001123 if (!sriov_enabled(adapter))
Ajit Khapardee1d18732010-07-23 01:52:13 +00001124 return -EPERM;
1125
Ajit Khaparde94f434c2011-12-30 12:15:30 +00001126 if (vf >= adapter->num_vfs)
Ajit Khapardee1d18732010-07-23 01:52:13 +00001127 return -EINVAL;
1128
Ajit Khaparde94f434c2011-12-30 12:15:30 +00001129 if (rate < 100 || rate > 10000) {
1130 dev_err(&adapter->pdev->dev,
1131 "tx rate must be between 100 and 10000 Mbps\n");
1132 return -EINVAL;
1133 }
Ajit Khapardee1d18732010-07-23 01:52:13 +00001134
Padmanabh Ratnakard5c18472012-10-20 06:01:53 +00001135 if (lancer_chip(adapter))
1136 status = be_cmd_set_profile_config(adapter, rate / 10, vf + 1);
1137 else
1138 status = be_cmd_set_qos(adapter, rate / 10, vf + 1);
Ajit Khapardee1d18732010-07-23 01:52:13 +00001139
1140 if (status)
Ajit Khaparde94f434c2011-12-30 12:15:30 +00001141 dev_err(&adapter->pdev->dev,
Ajit Khapardee1d18732010-07-23 01:52:13 +00001142 "tx rate %d on VF %d failed\n", rate, vf);
Ajit Khaparde94f434c2011-12-30 12:15:30 +00001143 else
1144 adapter->vf_cfg[vf].tx_rate = rate;
Ajit Khapardee1d18732010-07-23 01:52:13 +00001145 return status;
1146}
1147
/* Walk the PCI bus and count this adapter's virtual functions.
 * Returns the number of guest-assigned VFs when @vf_state == ASSIGNED,
 * the total VF count otherwise; 0 if the device has no SR-IOV capability.
 */
static int be_find_vfs(struct be_adapter *adapter, int vf_state)
{
	struct pci_dev *dev, *pdev = adapter->pdev;
	int vfs = 0, assigned_vfs = 0, pos;
	u16 offset, stride;

	pos = pci_find_ext_capability(pdev, PCI_EXT_CAP_ID_SRIOV);
	if (!pos)
		return 0;
	/* NOTE(review): offset/stride are read but never used below —
	 * candidates for removal; confirm no side effect is relied upon.
	 */
	pci_read_config_word(pdev, pos + PCI_SRIOV_VF_OFFSET, &offset);
	pci_read_config_word(pdev, pos + PCI_SRIOV_VF_STRIDE, &stride);

	/* iterate over all same-vendor devices; pci_get_device() releases
	 * the reference on the previously returned device each step
	 */
	dev = pci_get_device(pdev->vendor, PCI_ANY_ID, NULL);
	while (dev) {
		if (dev->is_virtfn && pci_physfn(dev) == pdev) {
			vfs++;
			if (dev->dev_flags & PCI_DEV_FLAGS_ASSIGNED)
				assigned_vfs++;
		}
		dev = pci_get_device(pdev->vendor, PCI_ANY_ID, dev);
	}
	return (vf_state == ASSIGNED) ? assigned_vfs : vfs;
}
1171
/* Adaptive interrupt coalescing: derive a new EQ delay from the observed
 * RX packet rate (sampled at most once per second) and program it into HW
 * only when it differs from the current value.
 */
static void be_eqd_update(struct be_adapter *adapter, struct be_eq_obj *eqo)
{
	struct be_rx_stats *stats = rx_stats(&adapter->rx_obj[eqo->idx]);
	ulong now = jiffies;
	ulong delta = now - stats->rx_jiffies;
	u64 pkts;
	unsigned int start, eqd;

	/* AIC disabled: just (re)apply the statically configured eqd */
	if (!eqo->enable_aic) {
		eqd = eqo->eqd;
		goto modify_eqd;
	}

	/* EQs beyond the RX queues have no RX stats to sample */
	if (eqo->idx >= adapter->num_rx_qs)
		return;

	stats = rx_stats(&adapter->rx_obj[eqo->idx]);

	/* Wrapped around */
	if (time_before(now, stats->rx_jiffies)) {
		stats->rx_jiffies = now;
		return;
	}

	/* Update once a second */
	if (delta < HZ)
		return;

	/* consistent snapshot of the 64-bit packet counter */
	do {
		start = u64_stats_fetch_begin_bh(&stats->sync);
		pkts = stats->rx_pkts;
	} while (u64_stats_fetch_retry_bh(&stats->sync, start));

	stats->rx_pps = (unsigned long)(pkts - stats->rx_pkts_prev) / (delta / HZ);
	stats->rx_pkts_prev = pkts;
	stats->rx_jiffies = now;
	/* map pps onto an eqd value, clamped to [min_eqd, max_eqd];
	 * very low rates disable coalescing entirely (eqd = 0)
	 */
	eqd = (stats->rx_pps / 110000) << 3;
	eqd = min(eqd, eqo->max_eqd);
	eqd = max(eqd, eqo->min_eqd);
	if (eqd < 10)
		eqd = 0;

modify_eqd:
	/* skip the FW command when the delay is unchanged */
	if (eqd != eqo->cur_eqd) {
		be_cmd_modify_eqd(adapter, eqo->q.id, eqd);
		eqo->cur_eqd = eqd;
	}
}
1220
Sathya Perla3abcded2010-10-03 22:12:27 -07001221static void be_rx_stats_update(struct be_rx_obj *rxo,
Sathya Perla2e588f82011-03-11 02:49:26 +00001222 struct be_rx_compl_info *rxcp)
Sathya Perla4097f662009-03-24 16:40:13 -07001223{
Sathya Perlaac124ff2011-07-25 19:10:14 +00001224 struct be_rx_stats *stats = rx_stats(rxo);
Sathya Perla4097f662009-03-24 16:40:13 -07001225
Sathya Perlaab1594e2011-07-25 19:10:15 +00001226 u64_stats_update_begin(&stats->sync);
Sathya Perla3abcded2010-10-03 22:12:27 -07001227 stats->rx_compl++;
Sathya Perla2e588f82011-03-11 02:49:26 +00001228 stats->rx_bytes += rxcp->pkt_size;
Sathya Perla3abcded2010-10-03 22:12:27 -07001229 stats->rx_pkts++;
Sathya Perla2e588f82011-03-11 02:49:26 +00001230 if (rxcp->pkt_type == BE_MULTICAST_PACKET)
Sathya Perla3abcded2010-10-03 22:12:27 -07001231 stats->rx_mcast_pkts++;
Sathya Perla2e588f82011-03-11 02:49:26 +00001232 if (rxcp->err)
Sathya Perlaac124ff2011-07-25 19:10:14 +00001233 stats->rx_compl_err++;
Sathya Perlaab1594e2011-07-25 19:10:15 +00001234 u64_stats_update_end(&stats->sync);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001235}
1236
Sathya Perla2e588f82011-03-11 02:49:26 +00001237static inline bool csum_passed(struct be_rx_compl_info *rxcp)
Ajit Khaparde728a9972009-04-13 15:41:22 -07001238{
Padmanabh Ratnakar19fad862011-03-07 03:08:16 +00001239 /* L4 checksum is not reliable for non TCP/UDP packets.
1240 * Also ignore ipcksm for ipv6 pkts */
Sathya Perla2e588f82011-03-11 02:49:26 +00001241 return (rxcp->tcpf || rxcp->udpf) && rxcp->l4_csum &&
1242 (rxcp->ip_csum || rxcp->ipv6);
Ajit Khaparde728a9972009-04-13 15:41:22 -07001243}
1244
Sathya Perla10ef9ab2012-02-09 18:05:27 +00001245static struct be_rx_page_info *get_rx_page_info(struct be_rx_obj *rxo,
1246 u16 frag_idx)
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001247{
Sathya Perla10ef9ab2012-02-09 18:05:27 +00001248 struct be_adapter *adapter = rxo->adapter;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001249 struct be_rx_page_info *rx_page_info;
Sathya Perla3abcded2010-10-03 22:12:27 -07001250 struct be_queue_info *rxq = &rxo->q;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001251
Sathya Perla3abcded2010-10-03 22:12:27 -07001252 rx_page_info = &rxo->page_info_tbl[frag_idx];
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001253 BUG_ON(!rx_page_info->page);
1254
Ajit Khaparde205859a2010-02-09 01:34:21 +00001255 if (rx_page_info->last_page_user) {
Ivan Vecera2b7bceb2011-02-02 08:05:12 +00001256 dma_unmap_page(&adapter->pdev->dev,
1257 dma_unmap_addr(rx_page_info, bus),
1258 adapter->big_page_size, DMA_FROM_DEVICE);
Ajit Khaparde205859a2010-02-09 01:34:21 +00001259 rx_page_info->last_page_user = false;
1260 }
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001261
1262 atomic_dec(&rxq->used);
1263 return rx_page_info;
1264}
1265
1266/* Throwaway the data in the Rx completion */
Sathya Perla10ef9ab2012-02-09 18:05:27 +00001267static void be_rx_compl_discard(struct be_rx_obj *rxo,
1268 struct be_rx_compl_info *rxcp)
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001269{
Sathya Perla3abcded2010-10-03 22:12:27 -07001270 struct be_queue_info *rxq = &rxo->q;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001271 struct be_rx_page_info *page_info;
Sathya Perla2e588f82011-03-11 02:49:26 +00001272 u16 i, num_rcvd = rxcp->num_rcvd;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001273
Padmanabh Ratnakare80d9da2011-03-07 03:07:58 +00001274 for (i = 0; i < num_rcvd; i++) {
Sathya Perla10ef9ab2012-02-09 18:05:27 +00001275 page_info = get_rx_page_info(rxo, rxcp->rxq_idx);
Padmanabh Ratnakare80d9da2011-03-07 03:07:58 +00001276 put_page(page_info->page);
1277 memset(page_info, 0, sizeof(*page_info));
Sathya Perla2e588f82011-03-11 02:49:26 +00001278 index_inc(&rxcp->rxq_idx, rxq->len);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001279 }
1280}
1281
/*
 * skb_fill_rx_data forms a complete skb for an ether frame
 * indicated by rxcp.
 *
 * The frame may span several rx_frag_size fragments posted to the RX ring;
 * rxcp->num_rcvd says how many. The first fragment's start (or just its
 * header, for larger frames) is copied into the skb linear area; all
 * remaining bytes are attached as page frags. Consumed page_info slots are
 * released back to the ring (page pointer NULLed) as they are used.
 */
static void skb_fill_rx_data(struct be_rx_obj *rxo, struct sk_buff *skb,
			     struct be_rx_compl_info *rxcp)
{
	struct be_queue_info *rxq = &rxo->q;
	struct be_rx_page_info *page_info;
	u16 i, j;
	u16 hdr_len, curr_frag_len, remaining;
	u8 *start;

	/* First posted fragment of this completion */
	page_info = get_rx_page_info(rxo, rxcp->rxq_idx);
	start = page_address(page_info->page) + page_info->page_offset;
	prefetch(start);

	/* Copy data in the first descriptor of this completion */
	curr_frag_len = min(rxcp->pkt_size, rx_frag_size);

	skb->len = curr_frag_len;
	if (curr_frag_len <= BE_HDR_LEN) { /* tiny packet */
		memcpy(skb->data, start, curr_frag_len);
		/* Complete packet has now been moved to data */
		put_page(page_info->page);
		skb->data_len = 0;
		skb->tail += curr_frag_len;
	} else {
		/* Pull only the Ethernet header into the linear area;
		 * the rest of the first fragment stays in the page and
		 * becomes frag[0].
		 */
		hdr_len = ETH_HLEN;
		memcpy(skb->data, start, hdr_len);
		skb_shinfo(skb)->nr_frags = 1;
		skb_frag_set_page(skb, 0, page_info->page);
		skb_shinfo(skb)->frags[0].page_offset =
					page_info->page_offset + hdr_len;
		skb_frag_size_set(&skb_shinfo(skb)->frags[0], curr_frag_len - hdr_len);
		skb->data_len = curr_frag_len - hdr_len;
		skb->truesize += rx_frag_size;
		skb->tail += hdr_len;
	}
	/* Slot consumed; ring refill logic keys off page == NULL */
	page_info->page = NULL;

	if (rxcp->pkt_size <= rx_frag_size) {
		/* Single-fragment frame: HW must have reported exactly one */
		BUG_ON(rxcp->num_rcvd != 1);
		return;
	}

	/* More frags present for this completion */
	index_inc(&rxcp->rxq_idx, rxq->len);
	remaining = rxcp->pkt_size - curr_frag_len;
	for (i = 1, j = 0; i < rxcp->num_rcvd; i++) {
		page_info = get_rx_page_info(rxo, rxcp->rxq_idx);
		curr_frag_len = min(remaining, rx_frag_size);

		/* Coalesce all frags from the same physical page in one slot */
		if (page_info->page_offset == 0) {
			/* Fresh page */
			j++;
			skb_frag_set_page(skb, j, page_info->page);
			skb_shinfo(skb)->frags[j].page_offset =
							page_info->page_offset;
			skb_frag_size_set(&skb_shinfo(skb)->frags[j], 0);
			skb_shinfo(skb)->nr_frags++;
		} else {
			/* Same page as the previous frag slot: the slot
			 * already holds a reference, so drop this one.
			 */
			put_page(page_info->page);
		}

		skb_frag_size_add(&skb_shinfo(skb)->frags[j], curr_frag_len);
		skb->len += curr_frag_len;
		skb->data_len += curr_frag_len;
		skb->truesize += rx_frag_size;
		remaining -= curr_frag_len;
		index_inc(&rxcp->rxq_idx, rxq->len);
		page_info->page = NULL;
	}
	BUG_ON(j > MAX_SKB_FRAGS);
}
1358
Ajit Khaparde5be93b92009-07-21 12:36:19 -07001359/* Process the RX completion indicated by rxcp when GRO is disabled */
Sathya Perla10ef9ab2012-02-09 18:05:27 +00001360static void be_rx_compl_process(struct be_rx_obj *rxo,
1361 struct be_rx_compl_info *rxcp)
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001362{
Sathya Perla10ef9ab2012-02-09 18:05:27 +00001363 struct be_adapter *adapter = rxo->adapter;
Michał Mirosław6332c8d2011-04-07 02:43:48 +00001364 struct net_device *netdev = adapter->netdev;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001365 struct sk_buff *skb;
Sathya Perla89420422010-02-17 01:35:26 +00001366
Eric Dumazetbb349bb2012-01-25 03:56:30 +00001367 skb = netdev_alloc_skb_ip_align(netdev, BE_RX_SKB_ALLOC_SIZE);
Sathya Perlaa058a632010-02-17 01:34:22 +00001368 if (unlikely(!skb)) {
Sathya Perlaac124ff2011-07-25 19:10:14 +00001369 rx_stats(rxo)->rx_drops_no_skbs++;
Sathya Perla10ef9ab2012-02-09 18:05:27 +00001370 be_rx_compl_discard(rxo, rxcp);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001371 return;
1372 }
1373
Sathya Perla10ef9ab2012-02-09 18:05:27 +00001374 skb_fill_rx_data(rxo, skb, rxcp);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001375
Michał Mirosław6332c8d2011-04-07 02:43:48 +00001376 if (likely((netdev->features & NETIF_F_RXCSUM) && csum_passed(rxcp)))
Ajit Khaparde728a9972009-04-13 15:41:22 -07001377 skb->ip_summed = CHECKSUM_UNNECESSARY;
Somnath Koturc6ce2f42010-10-25 01:11:58 +00001378 else
1379 skb_checksum_none_assert(skb);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001380
Michał Mirosław6332c8d2011-04-07 02:43:48 +00001381 skb->protocol = eth_type_trans(skb, netdev);
Somnath Koturaaa6dae2012-05-02 03:40:49 +00001382 skb_record_rx_queue(skb, rxo - &adapter->rx_obj[0]);
Sathya Perla10ef9ab2012-02-09 18:05:27 +00001383 if (netdev->features & NETIF_F_RXHASH)
Ajit Khaparde4b972912011-04-06 18:07:43 +00001384 skb->rxhash = rxcp->rss_hash;
1385
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001386
Jiri Pirko343e43c2011-08-25 02:50:51 +00001387 if (rxcp->vlanf)
Patrick McHardy86a9bad2013-04-19 02:04:30 +00001388 __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), rxcp->vlan_tag);
Ajit Khaparde4c5102f2011-07-12 22:10:01 -07001389
1390 netif_receive_skb(skb);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001391}
1392
/* Process the RX completion indicated by rxcp when GRO is enabled.
 * All data is attached to the napi frag list (no linear copy) and the
 * skb is pushed through napi_gro_frags().
 */
void be_rx_compl_process_gro(struct be_rx_obj *rxo, struct napi_struct *napi,
			     struct be_rx_compl_info *rxcp)
{
	struct be_adapter *adapter = rxo->adapter;
	struct be_rx_page_info *page_info;
	struct sk_buff *skb = NULL;
	struct be_queue_info *rxq = &rxo->q;
	u16 remaining, curr_frag_len;
	u16 i, j;

	skb = napi_get_frags(napi);
	if (!skb) {
		/* Cannot build an skb: recycle the posted buffers */
		be_rx_compl_discard(rxo, rxcp);
		return;
	}

	remaining = rxcp->pkt_size;
	/* NOTE: j is u16, so "j = -1" wraps to 0xffff; the first iteration
	 * always takes the i == 0 branch and increments j to 0 before use.
	 */
	for (i = 0, j = -1; i < rxcp->num_rcvd; i++) {
		page_info = get_rx_page_info(rxo, rxcp->rxq_idx);

		curr_frag_len = min(remaining, rx_frag_size);

		/* Coalesce all frags from the same physical page in one slot */
		if (i == 0 || page_info->page_offset == 0) {
			/* First frag or Fresh page */
			j++;
			skb_frag_set_page(skb, j, page_info->page);
			skb_shinfo(skb)->frags[j].page_offset =
							page_info->page_offset;
			skb_frag_size_set(&skb_shinfo(skb)->frags[j], 0);
		} else {
			/* Slot j already references this page; drop the
			 * extra reference held by this ring entry.
			 */
			put_page(page_info->page);
		}
		skb_frag_size_add(&skb_shinfo(skb)->frags[j], curr_frag_len);
		skb->truesize += rx_frag_size;
		remaining -= curr_frag_len;
		index_inc(&rxcp->rxq_idx, rxq->len);
		/* Release the ring slot for refill */
		memset(page_info, 0, sizeof(*page_info));
	}
	BUG_ON(j > MAX_SKB_FRAGS);

	skb_shinfo(skb)->nr_frags = j + 1;
	skb->len = rxcp->pkt_size;
	skb->data_len = rxcp->pkt_size;
	/* GRO path is only taken for checksum-verified frames */
	skb->ip_summed = CHECKSUM_UNNECESSARY;
	skb_record_rx_queue(skb, rxo - &adapter->rx_obj[0]);
	if (adapter->netdev->features & NETIF_F_RXHASH)
		skb->rxhash = rxcp->rss_hash;

	if (rxcp->vlanf)
		__vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), rxcp->vlan_tag);

	napi_gro_frags(napi);
}
1448
Sathya Perla10ef9ab2012-02-09 18:05:27 +00001449static void be_parse_rx_compl_v1(struct be_eth_rx_compl *compl,
1450 struct be_rx_compl_info *rxcp)
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001451{
Sathya Perla2e588f82011-03-11 02:49:26 +00001452 rxcp->pkt_size =
1453 AMAP_GET_BITS(struct amap_eth_rx_compl_v1, pktsize, compl);
1454 rxcp->vlanf = AMAP_GET_BITS(struct amap_eth_rx_compl_v1, vtp, compl);
1455 rxcp->err = AMAP_GET_BITS(struct amap_eth_rx_compl_v1, err, compl);
1456 rxcp->tcpf = AMAP_GET_BITS(struct amap_eth_rx_compl_v1, tcpf, compl);
Padmanabh Ratnakar9ecb42f2011-03-15 14:57:09 -07001457 rxcp->udpf = AMAP_GET_BITS(struct amap_eth_rx_compl_v1, udpf, compl);
Sathya Perla2e588f82011-03-11 02:49:26 +00001458 rxcp->ip_csum =
1459 AMAP_GET_BITS(struct amap_eth_rx_compl_v1, ipcksm, compl);
1460 rxcp->l4_csum =
1461 AMAP_GET_BITS(struct amap_eth_rx_compl_v1, l4_cksm, compl);
1462 rxcp->ipv6 =
1463 AMAP_GET_BITS(struct amap_eth_rx_compl_v1, ip_version, compl);
1464 rxcp->rxq_idx =
1465 AMAP_GET_BITS(struct amap_eth_rx_compl_v1, fragndx, compl);
1466 rxcp->num_rcvd =
1467 AMAP_GET_BITS(struct amap_eth_rx_compl_v1, numfrags, compl);
1468 rxcp->pkt_type =
1469 AMAP_GET_BITS(struct amap_eth_rx_compl_v1, cast_enc, compl);
Ajit Khaparde4b972912011-04-06 18:07:43 +00001470 rxcp->rss_hash =
Sarveshwar Bandic2979772012-07-25 21:29:50 +00001471 AMAP_GET_BITS(struct amap_eth_rx_compl_v1, rsshash, compl);
Sathya Perla15d72182011-03-21 20:49:26 +00001472 if (rxcp->vlanf) {
1473 rxcp->vtm = AMAP_GET_BITS(struct amap_eth_rx_compl_v1, vtm,
David S. Miller3c709f82011-05-11 14:26:15 -04001474 compl);
1475 rxcp->vlan_tag = AMAP_GET_BITS(struct amap_eth_rx_compl_v1, vlan_tag,
1476 compl);
Sathya Perla15d72182011-03-21 20:49:26 +00001477 }
Sathya Perla12004ae2011-08-02 19:57:46 +00001478 rxcp->port = AMAP_GET_BITS(struct amap_eth_rx_compl_v1, port, compl);
Sathya Perla2e588f82011-03-11 02:49:26 +00001479}
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001480
Sathya Perla10ef9ab2012-02-09 18:05:27 +00001481static void be_parse_rx_compl_v0(struct be_eth_rx_compl *compl,
1482 struct be_rx_compl_info *rxcp)
Sathya Perla2e588f82011-03-11 02:49:26 +00001483{
1484 rxcp->pkt_size =
1485 AMAP_GET_BITS(struct amap_eth_rx_compl_v0, pktsize, compl);
1486 rxcp->vlanf = AMAP_GET_BITS(struct amap_eth_rx_compl_v0, vtp, compl);
1487 rxcp->err = AMAP_GET_BITS(struct amap_eth_rx_compl_v0, err, compl);
1488 rxcp->tcpf = AMAP_GET_BITS(struct amap_eth_rx_compl_v0, tcpf, compl);
Padmanabh Ratnakar9ecb42f2011-03-15 14:57:09 -07001489 rxcp->udpf = AMAP_GET_BITS(struct amap_eth_rx_compl_v0, udpf, compl);
Sathya Perla2e588f82011-03-11 02:49:26 +00001490 rxcp->ip_csum =
1491 AMAP_GET_BITS(struct amap_eth_rx_compl_v0, ipcksm, compl);
1492 rxcp->l4_csum =
1493 AMAP_GET_BITS(struct amap_eth_rx_compl_v0, l4_cksm, compl);
1494 rxcp->ipv6 =
1495 AMAP_GET_BITS(struct amap_eth_rx_compl_v0, ip_version, compl);
1496 rxcp->rxq_idx =
1497 AMAP_GET_BITS(struct amap_eth_rx_compl_v0, fragndx, compl);
1498 rxcp->num_rcvd =
1499 AMAP_GET_BITS(struct amap_eth_rx_compl_v0, numfrags, compl);
1500 rxcp->pkt_type =
1501 AMAP_GET_BITS(struct amap_eth_rx_compl_v0, cast_enc, compl);
Ajit Khaparde4b972912011-04-06 18:07:43 +00001502 rxcp->rss_hash =
Sarveshwar Bandic2979772012-07-25 21:29:50 +00001503 AMAP_GET_BITS(struct amap_eth_rx_compl_v0, rsshash, compl);
Sathya Perla15d72182011-03-21 20:49:26 +00001504 if (rxcp->vlanf) {
1505 rxcp->vtm = AMAP_GET_BITS(struct amap_eth_rx_compl_v0, vtm,
David S. Miller3c709f82011-05-11 14:26:15 -04001506 compl);
1507 rxcp->vlan_tag = AMAP_GET_BITS(struct amap_eth_rx_compl_v0, vlan_tag,
1508 compl);
Sathya Perla15d72182011-03-21 20:49:26 +00001509 }
Sathya Perla12004ae2011-08-02 19:57:46 +00001510 rxcp->port = AMAP_GET_BITS(struct amap_eth_rx_compl_v0, port, compl);
Sathya Perla2e588f82011-03-11 02:49:26 +00001511}
1512
/* Fetch the next valid RX completion from rxo's CQ, parse it into
 * rxo->rxcp and return it; returns NULL when the CQ is empty.
 * The CQ entry is invalidated and the tail advanced before returning.
 */
static struct be_rx_compl_info *be_rx_compl_get(struct be_rx_obj *rxo)
{
	struct be_eth_rx_compl *compl = queue_tail_node(&rxo->cq);
	struct be_rx_compl_info *rxcp = &rxo->rxcp;
	struct be_adapter *adapter = rxo->adapter;

	/* For checking the valid bit it is Ok to use either definition as the
	 * valid bit is at the same position in both v0 and v1 Rx compl */
	if (compl->dw[offsetof(struct amap_eth_rx_compl_v1, valid) / 32] == 0)
		return NULL;

	/* Read the rest of the entry only after seeing the valid bit */
	rmb();
	be_dws_le_to_cpu(compl, sizeof(*compl));

	if (adapter->be3_native)
		be_parse_rx_compl_v1(compl, rxcp);
	else
		be_parse_rx_compl_v0(compl, rxcp);

	if (rxcp->vlanf) {
		/* vlanf could be wrongly set in some cards.
		 * ignore if vtm is not set */
		if ((adapter->function_mode & FLEX10_MODE) && !rxcp->vtm)
			rxcp->vlanf = 0;

		/* BE reports the tag byte-swapped; Lancer does not */
		if (!lancer_chip(adapter))
			rxcp->vlan_tag = swab16(rxcp->vlan_tag);

		/* Hide the port-default (pvid) tag unless the vlan was
		 * explicitly configured on this interface.
		 */
		if (adapter->pvid == (rxcp->vlan_tag & VLAN_VID_MASK) &&
		    !adapter->vlan_tag[rxcp->vlan_tag])
			rxcp->vlanf = 0;
	}

	/* As the compl has been parsed, reset it; we wont touch it again */
	compl->dw[offsetof(struct amap_eth_rx_compl_v1, valid) / 32] = 0;

	queue_tail_inc(&rxo->cq);
	return rxcp;
}
1552
Eric Dumazet1829b082011-03-01 05:48:12 +00001553static inline struct page *be_alloc_pages(u32 size, gfp_t gfp)
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001554{
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001555 u32 order = get_order(size);
Eric Dumazet1829b082011-03-01 05:48:12 +00001556
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001557 if (order > 0)
Eric Dumazet1829b082011-03-01 05:48:12 +00001558 gfp |= __GFP_COMP;
1559 return alloc_pages(gfp, order);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001560}
1561
1562/*
1563 * Allocate a page, split it to fragments of size rx_frag_size and post as
1564 * receive buffers to BE
1565 */
Eric Dumazet1829b082011-03-01 05:48:12 +00001566static void be_post_rx_frags(struct be_rx_obj *rxo, gfp_t gfp)
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001567{
Sathya Perla3abcded2010-10-03 22:12:27 -07001568 struct be_adapter *adapter = rxo->adapter;
Sathya Perla26d92f92010-01-21 22:52:08 -08001569 struct be_rx_page_info *page_info = NULL, *prev_page_info = NULL;
Sathya Perla3abcded2010-10-03 22:12:27 -07001570 struct be_queue_info *rxq = &rxo->q;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001571 struct page *pagep = NULL;
1572 struct be_eth_rx_d *rxd;
1573 u64 page_dmaaddr = 0, frag_dmaaddr;
1574 u32 posted, page_offset = 0;
1575
Sathya Perla3abcded2010-10-03 22:12:27 -07001576 page_info = &rxo->page_info_tbl[rxq->head];
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001577 for (posted = 0; posted < MAX_RX_POST && !page_info->page; posted++) {
1578 if (!pagep) {
Eric Dumazet1829b082011-03-01 05:48:12 +00001579 pagep = be_alloc_pages(adapter->big_page_size, gfp);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001580 if (unlikely(!pagep)) {
Sathya Perlaac124ff2011-07-25 19:10:14 +00001581 rx_stats(rxo)->rx_post_fail++;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001582 break;
1583 }
Ivan Vecera2b7bceb2011-02-02 08:05:12 +00001584 page_dmaaddr = dma_map_page(&adapter->pdev->dev, pagep,
1585 0, adapter->big_page_size,
1586 DMA_FROM_DEVICE);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001587 page_info->page_offset = 0;
1588 } else {
1589 get_page(pagep);
1590 page_info->page_offset = page_offset + rx_frag_size;
1591 }
1592 page_offset = page_info->page_offset;
1593 page_info->page = pagep;
FUJITA Tomonorifac6da52010-04-01 16:53:22 +00001594 dma_unmap_addr_set(page_info, bus, page_dmaaddr);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001595 frag_dmaaddr = page_dmaaddr + page_info->page_offset;
1596
1597 rxd = queue_head_node(rxq);
1598 rxd->fragpa_lo = cpu_to_le32(frag_dmaaddr & 0xFFFFFFFF);
1599 rxd->fragpa_hi = cpu_to_le32(upper_32_bits(frag_dmaaddr));
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001600
1601 /* Any space left in the current big page for another frag? */
1602 if ((page_offset + rx_frag_size + rx_frag_size) >
1603 adapter->big_page_size) {
1604 pagep = NULL;
1605 page_info->last_page_user = true;
1606 }
Sathya Perla26d92f92010-01-21 22:52:08 -08001607
1608 prev_page_info = page_info;
1609 queue_head_inc(rxq);
Sathya Perla10ef9ab2012-02-09 18:05:27 +00001610 page_info = &rxo->page_info_tbl[rxq->head];
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001611 }
1612 if (pagep)
Sathya Perla26d92f92010-01-21 22:52:08 -08001613 prev_page_info->last_page_user = true;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001614
1615 if (posted) {
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001616 atomic_add(posted, &rxq->used);
Sathya Perla8788fdc2009-07-27 22:52:03 +00001617 be_rxq_notify(adapter, rxq->id, posted);
Sathya Perlaea1dae12009-03-19 23:56:20 -07001618 } else if (atomic_read(&rxq->used) == 0) {
1619 /* Let be_worker replenish when memory is available */
Sathya Perla3abcded2010-10-03 22:12:27 -07001620 rxo->rx_post_starved = true;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001621 }
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001622}
1623
/* Fetch the next valid TX completion from tx_cq, or NULL if the CQ is
 * empty. The entry is byte-swapped in place, invalidated, and the CQ
 * tail advanced before returning.
 */
static struct be_eth_tx_compl *be_tx_compl_get(struct be_queue_info *tx_cq)
{
	struct be_eth_tx_compl *txcp = queue_tail_node(tx_cq);

	if (txcp->dw[offsetof(struct amap_eth_tx_compl, valid) / 32] == 0)
		return NULL;

	/* Read the entry contents only after seeing the valid bit */
	rmb();
	be_dws_le_to_cpu(txcp, sizeof(*txcp));

	/* Invalidate so this entry is not processed twice */
	txcp->dw[offsetof(struct amap_eth_tx_compl, valid) / 32] = 0;

	queue_tail_inc(tx_cq);
	return txcp;
}
1639
/* Reap one completed TX request: walk the WRBs of the skb at the TX
 * queue tail up to and including last_index, unmapping each fragment,
 * then free the skb. Returns the number of WRBs consumed (including
 * the header WRB) so the caller can credit the queue.
 */
static u16 be_tx_compl_process(struct be_adapter *adapter,
		struct be_tx_obj *txo, u16 last_index)
{
	struct be_queue_info *txq = &txo->q;
	struct be_eth_wrb *wrb;
	struct sk_buff **sent_skbs = txo->sent_skb_list;
	struct sk_buff *sent_skb;
	u16 cur_index, num_wrbs = 1; /* account for hdr wrb */
	bool unmap_skb_hdr = true;

	sent_skb = sent_skbs[txq->tail];
	BUG_ON(!sent_skb);
	sent_skbs[txq->tail] = NULL;

	/* skip header wrb */
	queue_tail_inc(txq);

	do {
		cur_index = txq->tail;
		wrb = queue_tail_node(txq);
		/* Only the first data WRB may carry the skb header mapping */
		unmap_tx_frag(&adapter->pdev->dev, wrb,
			      (unmap_skb_hdr && skb_headlen(sent_skb)));
		unmap_skb_hdr = false;

		num_wrbs++;
		queue_tail_inc(txq);
	} while (cur_index != last_index);

	kfree_skb(sent_skb);
	return num_wrbs;
}
1671
/* Return the number of events in the event queue */
static inline int events_get(struct be_eq_obj *eqo)
{
	struct be_eq_entry *eqe;
	int num = 0;

	/* Consume valid entries at the tail until an unset one is found;
	 * each consumed entry is zeroed so it is not counted again.
	 */
	do {
		eqe = queue_tail_node(&eqo->q);
		if (eqe->evt == 0)
			break;

		/* Read entry contents only after observing evt != 0 */
		rmb();
		eqe->evt = 0;
		num++;
		queue_tail_inc(&eqo->q);
	} while (true);

	return num;
}
1691
Sathya Perla10ef9ab2012-02-09 18:05:27 +00001692/* Leaves the EQ is disarmed state */
1693static void be_eq_clean(struct be_eq_obj *eqo)
1694{
1695 int num = events_get(eqo);
1696
1697 be_eq_notify(eqo->adapter, eqo->q.id, false, true, num);
1698}
1699
/* Drain an RX object's completion queue and reclaim every posted RX
 * buffer, leaving both CQ and RX queue empty with the CQ disarmed.
 */
static void be_rx_cq_clean(struct be_rx_obj *rxo)
{
	struct be_rx_page_info *page_info;
	struct be_queue_info *rxq = &rxo->q;
	struct be_queue_info *rx_cq = &rxo->cq;
	struct be_rx_compl_info *rxcp;
	struct be_adapter *adapter = rxo->adapter;
	int flush_wait = 0;
	u16 tail;

	/* Consume pending rx completions.
	 * Wait for the flush completion (identified by zero num_rcvd)
	 * to arrive. Notify CQ even when there are no more CQ entries
	 * for HW to flush partially coalesced CQ entries.
	 * In Lancer, there is no need to wait for flush compl.
	 */
	for (;;) {
		rxcp = be_rx_compl_get(rxo);
		if (rxcp == NULL) {
			if (lancer_chip(adapter))
				break;

			/* Give up after ~10ms or when the HW is in error */
			if (flush_wait++ > 10 || be_hw_error(adapter)) {
				dev_warn(&adapter->pdev->dev,
					 "did not receive flush compl\n");
				break;
			}
			be_cq_notify(adapter, rx_cq->id, true, 0);
			mdelay(1);
		} else {
			be_rx_compl_discard(rxo, rxcp);
			be_cq_notify(adapter, rx_cq->id, true, 1);
			/* num_rcvd == 0 marks the flush completion */
			if (rxcp->num_rcvd == 0)
				break;
		}
	}

	/* After cleanup, leave the CQ in unarmed state */
	be_cq_notify(adapter, rx_cq->id, false, 0);

	/* Then free posted rx buffers that were not used */
	tail = (rxq->head + rxq->len - atomic_read(&rxq->used)) % rxq->len;
	for (; atomic_read(&rxq->used) > 0; index_inc(&tail, rxq->len)) {
		page_info = get_rx_page_info(rxo, tail);
		put_page(page_info->page);
		memset(page_info, 0, sizeof(*page_info));
	}
	BUG_ON(atomic_read(&rxq->used));
	rxq->tail = rxq->head = 0;
}
1750
/* Drain all TX queues: first reap completions the HW still delivers
 * (bounded wait), then forcibly free any posted skbs whose completions
 * will never arrive.
 */
static void be_tx_compl_clean(struct be_adapter *adapter)
{
	struct be_tx_obj *txo;
	struct be_queue_info *txq;
	struct be_eth_tx_compl *txcp;
	u16 end_idx, cmpl = 0, timeo = 0, num_wrbs = 0;
	struct sk_buff *sent_skb;
	bool dummy_wrb;
	int i, pending_txqs;

	/* Wait for a max of 200ms for all the tx-completions to arrive. */
	do {
		pending_txqs = adapter->num_tx_qs;

		for_all_tx_queues(adapter, txo, i) {
			txq = &txo->q;
			/* Reap everything currently in this queue's CQ */
			while ((txcp = be_tx_compl_get(&txo->cq))) {
				end_idx =
					AMAP_GET_BITS(struct amap_eth_tx_compl,
						      wrb_index, txcp);
				num_wrbs += be_tx_compl_process(adapter, txo,
								end_idx);
				cmpl++;
			}
			if (cmpl) {
				/* Ack CQ entries without re-arming and credit
				 * the reclaimed WRBs back to the TX queue
				 */
				be_cq_notify(adapter, txo->cq.id, false, cmpl);
				atomic_sub(num_wrbs, &txq->used);
				cmpl = 0;
				num_wrbs = 0;
			}
			if (atomic_read(&txq->used) == 0)
				pending_txqs--;
		}

		if (pending_txqs == 0 || ++timeo > 200)
			break;

		mdelay(1);
	} while (true);

	for_all_tx_queues(adapter, txo, i) {
		txq = &txo->q;
		if (atomic_read(&txq->used))
			dev_err(&adapter->pdev->dev, "%d pending tx-compls\n",
				atomic_read(&txq->used));

		/* free posted tx for which compls will never arrive */
		while (atomic_read(&txq->used)) {
			sent_skb = txo->sent_skb_list[txq->tail];
			end_idx = txq->tail;
			/* Recompute this skb's WRB span to find its last
			 * index, then reclaim it as if a compl had arrived
			 */
			num_wrbs = wrb_cnt_for_skb(adapter, sent_skb,
						   &dummy_wrb);
			index_adv(&end_idx, num_wrbs - 1, txq->len);
			num_wrbs = be_tx_compl_process(adapter, txo, end_idx);
			atomic_sub(num_wrbs, &txq->used);
		}
	}
}
1809
Sathya Perla10ef9ab2012-02-09 18:05:27 +00001810static void be_evt_queues_destroy(struct be_adapter *adapter)
1811{
1812 struct be_eq_obj *eqo;
1813 int i;
1814
1815 for_all_evt_queues(adapter, eqo, i) {
Padmanabh Ratnakar19d59aa2012-07-12 03:57:21 +00001816 if (eqo->q.created) {
1817 be_eq_clean(eqo);
Sathya Perla10ef9ab2012-02-09 18:05:27 +00001818 be_cmd_q_destroy(adapter, &eqo->q, QTYPE_EQ);
Padmanabh Ratnakar19d59aa2012-07-12 03:57:21 +00001819 }
Sathya Perla10ef9ab2012-02-09 18:05:27 +00001820 be_queue_free(adapter, &eqo->q);
1821 }
1822}
1823
1824static int be_evt_queues_create(struct be_adapter *adapter)
1825{
1826 struct be_queue_info *eq;
1827 struct be_eq_obj *eqo;
1828 int i, rc;
1829
1830 adapter->num_evt_qs = num_irqs(adapter);
1831
1832 for_all_evt_queues(adapter, eqo, i) {
1833 eqo->adapter = adapter;
1834 eqo->tx_budget = BE_TX_BUDGET;
1835 eqo->idx = i;
1836 eqo->max_eqd = BE_MAX_EQD;
1837 eqo->enable_aic = true;
1838
1839 eq = &eqo->q;
1840 rc = be_queue_alloc(adapter, eq, EVNT_Q_LEN,
1841 sizeof(struct be_eq_entry));
1842 if (rc)
1843 return rc;
1844
1845 rc = be_cmd_eq_create(adapter, eq, eqo->cur_eqd);
1846 if (rc)
1847 return rc;
1848 }
Sathya Perla1cfafab2012-02-23 18:50:15 +00001849 return 0;
Sathya Perla10ef9ab2012-02-09 18:05:27 +00001850}
1851
Sathya Perla5fb379e2009-06-18 00:02:59 +00001852static void be_mcc_queues_destroy(struct be_adapter *adapter)
1853{
1854 struct be_queue_info *q;
Sathya Perla5fb379e2009-06-18 00:02:59 +00001855
Sathya Perla8788fdc2009-07-27 22:52:03 +00001856 q = &adapter->mcc_obj.q;
Sathya Perla5fb379e2009-06-18 00:02:59 +00001857 if (q->created)
Sathya Perla8788fdc2009-07-27 22:52:03 +00001858 be_cmd_q_destroy(adapter, q, QTYPE_MCCQ);
Sathya Perla5fb379e2009-06-18 00:02:59 +00001859 be_queue_free(adapter, q);
1860
Sathya Perla8788fdc2009-07-27 22:52:03 +00001861 q = &adapter->mcc_obj.cq;
Sathya Perla5fb379e2009-06-18 00:02:59 +00001862 if (q->created)
Sathya Perla8788fdc2009-07-27 22:52:03 +00001863 be_cmd_q_destroy(adapter, q, QTYPE_CQ);
Sathya Perla5fb379e2009-06-18 00:02:59 +00001864 be_queue_free(adapter, q);
1865}
1866
1867/* Must be called only after TX qs are created as MCC shares TX EQ */
1868static int be_mcc_queues_create(struct be_adapter *adapter)
1869{
1870 struct be_queue_info *q, *cq;
Sathya Perla5fb379e2009-06-18 00:02:59 +00001871
Sathya Perla8788fdc2009-07-27 22:52:03 +00001872 cq = &adapter->mcc_obj.cq;
Sathya Perla5fb379e2009-06-18 00:02:59 +00001873 if (be_queue_alloc(adapter, cq, MCC_CQ_LEN,
Sathya Perlaefd2e402009-07-27 22:53:10 +00001874 sizeof(struct be_mcc_compl)))
Sathya Perla5fb379e2009-06-18 00:02:59 +00001875 goto err;
1876
Sathya Perla10ef9ab2012-02-09 18:05:27 +00001877 /* Use the default EQ for MCC completions */
1878 if (be_cmd_cq_create(adapter, cq, &mcc_eqo(adapter)->q, true, 0))
Sathya Perla5fb379e2009-06-18 00:02:59 +00001879 goto mcc_cq_free;
1880
Sathya Perla8788fdc2009-07-27 22:52:03 +00001881 q = &adapter->mcc_obj.q;
Sathya Perla5fb379e2009-06-18 00:02:59 +00001882 if (be_queue_alloc(adapter, q, MCC_Q_LEN, sizeof(struct be_mcc_wrb)))
1883 goto mcc_cq_destroy;
1884
Sathya Perla8788fdc2009-07-27 22:52:03 +00001885 if (be_cmd_mccq_create(adapter, q, cq))
Sathya Perla5fb379e2009-06-18 00:02:59 +00001886 goto mcc_q_free;
1887
1888 return 0;
1889
1890mcc_q_free:
1891 be_queue_free(adapter, q);
1892mcc_cq_destroy:
Sathya Perla8788fdc2009-07-27 22:52:03 +00001893 be_cmd_q_destroy(adapter, cq, QTYPE_CQ);
Sathya Perla5fb379e2009-06-18 00:02:59 +00001894mcc_cq_free:
1895 be_queue_free(adapter, cq);
1896err:
1897 return -1;
1898}
1899
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001900static void be_tx_queues_destroy(struct be_adapter *adapter)
1901{
1902 struct be_queue_info *q;
Sathya Perla3c8def92011-06-12 20:01:58 +00001903 struct be_tx_obj *txo;
1904 u8 i;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001905
Sathya Perla3c8def92011-06-12 20:01:58 +00001906 for_all_tx_queues(adapter, txo, i) {
1907 q = &txo->q;
1908 if (q->created)
1909 be_cmd_q_destroy(adapter, q, QTYPE_TXQ);
1910 be_queue_free(adapter, q);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001911
Sathya Perla3c8def92011-06-12 20:01:58 +00001912 q = &txo->cq;
1913 if (q->created)
1914 be_cmd_q_destroy(adapter, q, QTYPE_CQ);
1915 be_queue_free(adapter, q);
1916 }
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001917}
1918
Sathya Perladafc0fe2011-10-24 02:45:02 +00001919static int be_num_txqs_want(struct be_adapter *adapter)
1920{
Padmanabh Ratnakarabb93952012-10-20 06:01:41 +00001921 if ((!lancer_chip(adapter) && sriov_want(adapter)) ||
1922 be_is_mc(adapter) ||
1923 (!lancer_chip(adapter) && !be_physfn(adapter)) ||
Sathya Perlaca34fe32012-11-06 17:48:56 +00001924 BE2_chip(adapter))
Sathya Perladafc0fe2011-10-24 02:45:02 +00001925 return 1;
1926 else
Padmanabh Ratnakarabb93952012-10-20 06:01:41 +00001927 return adapter->max_tx_queues;
Sathya Perladafc0fe2011-10-24 02:45:02 +00001928}
1929
/* Allocate and create one completion queue per TX queue.
 * First settles on the number of TX queues and publishes it to the stack
 * (under rtnl) when it differs from the compile-time maximum.
 * Returns 0 on success or the first failing status from queue
 * allocation/creation (partially created queues are cleaned up by the
 * caller's error path).
 */
static int be_tx_cqs_create(struct be_adapter *adapter)
{
	struct be_queue_info *cq, *eq;
	int status;
	struct be_tx_obj *txo;
	u8 i;

	adapter->num_tx_qs = be_num_txqs_want(adapter);
	if (adapter->num_tx_qs != MAX_TX_QS) {
		rtnl_lock();
		netif_set_real_num_tx_queues(adapter->netdev,
			adapter->num_tx_qs);
		rtnl_unlock();
	}

	for_all_tx_queues(adapter, txo, i) {
		cq = &txo->cq;
		status = be_queue_alloc(adapter, cq, TX_CQ_LEN,
					sizeof(struct be_eth_tx_compl));
		if (status)
			return status;

		/* If num_evt_qs is less than num_tx_qs, then more than
		 * one txq share an eq
		 */
		eq = &adapter->eq_obj[i % adapter->num_evt_qs].q;
		status = be_cmd_cq_create(adapter, cq, eq, false, 3);
		if (status)
			return status;
	}
	return 0;
}
1962
Sathya Perla10ef9ab2012-02-09 18:05:27 +00001963static int be_tx_qs_create(struct be_adapter *adapter)
1964{
1965 struct be_tx_obj *txo;
1966 int i, status;
1967
1968 for_all_tx_queues(adapter, txo, i) {
1969 status = be_queue_alloc(adapter, &txo->q, TX_Q_LEN,
1970 sizeof(struct be_eth_wrb));
1971 if (status)
1972 return status;
1973
Vasundhara Volam94d73aa2013-04-21 23:28:14 +00001974 status = be_cmd_txq_create(adapter, txo);
Sathya Perla10ef9ab2012-02-09 18:05:27 +00001975 if (status)
1976 return status;
1977 }
1978
Sathya Perlad3791422012-09-28 04:39:44 +00001979 dev_info(&adapter->pdev->dev, "created %d TX queue(s)\n",
1980 adapter->num_tx_qs);
Sathya Perla10ef9ab2012-02-09 18:05:27 +00001981 return 0;
1982}
1983
1984static void be_rx_cqs_destroy(struct be_adapter *adapter)
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001985{
1986 struct be_queue_info *q;
Sathya Perla3abcded2010-10-03 22:12:27 -07001987 struct be_rx_obj *rxo;
1988 int i;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001989
Sathya Perla3abcded2010-10-03 22:12:27 -07001990 for_all_rx_queues(adapter, rxo, i) {
Sathya Perla3abcded2010-10-03 22:12:27 -07001991 q = &rxo->cq;
1992 if (q->created)
1993 be_cmd_q_destroy(adapter, q, QTYPE_CQ);
1994 be_queue_free(adapter, q);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001995 }
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001996}
1997
/* Allocate and create the RX completion queues.
 * The RXQ count is one default queue plus one RSS ring per irq when
 * multiple irqs are available; with a single irq only the default queue
 * is used.  Publishes the count to the stack (under rtnl) when it differs
 * from the compile-time maximum.  Returns 0 or the first failing status.
 */
static int be_rx_cqs_create(struct be_adapter *adapter)
{
	struct be_queue_info *eq, *cq;
	struct be_rx_obj *rxo;
	int rc, i;

	/* We'll create as many RSS rings as there are irqs.
	 * But when there's only one irq there's no use creating RSS rings
	 */
	adapter->num_rx_qs = (num_irqs(adapter) > 1) ?
				num_irqs(adapter) + 1 : 1;
	if (adapter->num_rx_qs != MAX_RX_QS) {
		rtnl_lock();
		netif_set_real_num_rx_queues(adapter->netdev,
					     adapter->num_rx_qs);
		rtnl_unlock();
	}

	/* big_page_size is derived from the rx_frag_size module param */
	adapter->big_page_size = (1 << get_order(rx_frag_size)) * PAGE_SIZE;
	for_all_rx_queues(adapter, rxo, i) {
		rxo->adapter = adapter;
		cq = &rxo->cq;
		rc = be_queue_alloc(adapter, cq, RX_CQ_LEN,
				sizeof(struct be_eth_rx_compl));
		if (rc)
			return rc;

		/* Multiple CQs may share an EQ when num_evt_qs < num_rx_qs */
		eq = &adapter->eq_obj[i % adapter->num_evt_qs].q;
		rc = be_cmd_cq_create(adapter, cq, eq, false, 3);
		if (rc)
			return rc;
	}

	dev_info(&adapter->pdev->dev,
		 "created %d RSS queue(s) and 1 default RX queue\n",
		 adapter->num_rx_qs - 1);
	return 0;
}
2036
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002037static irqreturn_t be_intx(int irq, void *dev)
2038{
Sathya Perlae49cc342012-11-27 19:50:02 +00002039 struct be_eq_obj *eqo = dev;
2040 struct be_adapter *adapter = eqo->adapter;
2041 int num_evts = 0;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002042
Sathya Perlad0b9cec2013-01-11 22:47:02 +00002043 /* IRQ is not expected when NAPI is scheduled as the EQ
2044 * will not be armed.
2045 * But, this can happen on Lancer INTx where it takes
2046 * a while to de-assert INTx or in BE2 where occasionaly
2047 * an interrupt may be raised even when EQ is unarmed.
2048 * If NAPI is already scheduled, then counting & notifying
2049 * events will orphan them.
Sathya Perlae49cc342012-11-27 19:50:02 +00002050 */
Sathya Perlad0b9cec2013-01-11 22:47:02 +00002051 if (napi_schedule_prep(&eqo->napi)) {
Sathya Perlae49cc342012-11-27 19:50:02 +00002052 num_evts = events_get(eqo);
Sathya Perlad0b9cec2013-01-11 22:47:02 +00002053 __napi_schedule(&eqo->napi);
2054 if (num_evts)
2055 eqo->spurious_intr = 0;
2056 }
Sathya Perlae49cc342012-11-27 19:50:02 +00002057 be_eq_notify(adapter, eqo->q.id, false, true, num_evts);
Sathya Perlad0b9cec2013-01-11 22:47:02 +00002058
2059 /* Return IRQ_HANDLED only for the the first spurious intr
2060 * after a valid intr to stop the kernel from branding
2061 * this irq as a bad one!
2062 */
2063 if (num_evts || eqo->spurious_intr++ == 0)
2064 return IRQ_HANDLED;
2065 else
2066 return IRQ_NONE;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002067}
2068
/* MSI-X interrupt handler: one vector per event queue (dev is the EQ obj).
 * NOTE(review): the false/true arguments presumably leave the EQ unarmed
 * until be_poll() re-arms it -- confirm against be_eq_notify()'s signature.
 */
static irqreturn_t be_msix(int irq, void *dev)
{
	struct be_eq_obj *eqo = dev;

	be_eq_notify(eqo->adapter, eqo->q.id, false, true, 0);
	napi_schedule(&eqo->napi);
	return IRQ_HANDLED;
}
2077
Sathya Perla2e588f82011-03-11 02:49:26 +00002078static inline bool do_gro(struct be_rx_compl_info *rxcp)
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002079{
Sathya Perla2e588f82011-03-11 02:49:26 +00002080 return (rxcp->tcpf && !rxcp->err) ? true : false;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002081}
2082
/* Drain up to @budget RX completions from @rxo's CQ.
 * Flush completions and malformed/mis-filtered frames are discarded;
 * good frames go to GRO or the regular receive path.  Consumed CQ entries
 * are acked and the RX ring is replenished when it runs low.
 * Returns the number of completions processed (<= budget).
 */
static int be_process_rx(struct be_rx_obj *rxo, struct napi_struct *napi,
			int budget)
{
	struct be_adapter *adapter = rxo->adapter;
	struct be_queue_info *rx_cq = &rxo->cq;
	struct be_rx_compl_info *rxcp;
	u32 work_done;

	for (work_done = 0; work_done < budget; work_done++) {
		rxcp = be_rx_compl_get(rxo);
		if (!rxcp)
			break;

		/* Is it a flush compl that has no data */
		if (unlikely(rxcp->num_rcvd == 0))
			goto loop_continue;

		/* Discard compl with partial DMA Lancer B0 */
		if (unlikely(!rxcp->pkt_size)) {
			be_rx_compl_discard(rxo, rxcp);
			goto loop_continue;
		}

		/* On BE drop pkts that arrive due to imperfect filtering in
		 * promiscuous mode on some skews
		 */
		if (unlikely(rxcp->port != adapter->port_num &&
				!lancer_chip(adapter))) {
			be_rx_compl_discard(rxo, rxcp);
			goto loop_continue;
		}

		if (do_gro(rxcp))
			be_rx_compl_process_gro(rxo, napi, rxcp);
		else
			be_rx_compl_process(rxo, rxcp);
loop_continue:
		/* stats are updated even for discarded completions */
		be_rx_stats_update(rxo, rxcp);
	}

	if (work_done) {
		be_cq_notify(adapter, rx_cq->id, true, work_done);

		/* refill the RX ring once it drops below the watermark */
		if (atomic_read(&rxo->q.used) < RX_FRAGS_REFILL_WM)
			be_post_rx_frags(rxo, GFP_ATOMIC);
	}

	return work_done;
}
2132
/* Reap up to @budget TX completions for @txo (netdev subqueue @idx),
 * freeing the corresponding WRBs and waking the subqueue if it had been
 * stopped for lack of WRBs.  Returns true when the CQ was fully drained
 * within the budget ("done"), false when more work remains.
 */
static bool be_process_tx(struct be_adapter *adapter, struct be_tx_obj *txo,
			int budget, int idx)
{
	struct be_eth_tx_compl *txcp;
	int num_wrbs = 0, work_done;

	for (work_done = 0; work_done < budget; work_done++) {
		txcp = be_tx_compl_get(&txo->cq);
		if (!txcp)
			break;
		num_wrbs += be_tx_compl_process(adapter, txo,
				AMAP_GET_BITS(struct amap_eth_tx_compl,
					wrb_index, txcp));
	}

	if (work_done) {
		be_cq_notify(adapter, txo->cq.id, true, work_done);
		atomic_sub(num_wrbs, &txo->q.used);

		/* As Tx wrbs have been freed up, wake up netdev queue
		 * if it was stopped due to lack of tx wrbs. */
		if (__netif_subqueue_stopped(adapter->netdev, idx) &&
			atomic_read(&txo->q.used) < txo->q.len / 2) {
			netif_wake_subqueue(adapter->netdev, idx);
		}

		u64_stats_update_begin(&tx_stats(txo)->sync_compl);
		tx_stats(txo)->tx_compl += work_done;
		u64_stats_update_end(&tx_stats(txo)->sync_compl);
	}
	return (work_done < budget); /* Done */
}
Sathya Perla3c8def92011-06-12 20:01:58 +00002165
/* NAPI poll handler for one event queue.
 * Services every TX and RX queue mapped to this EQ (queues are striped
 * across EQs by index), plus MCC completions on the MCC EQ.  The EQ is
 * re-armed only when all work fit in the budget; otherwise events are
 * counted/cleared and polling continues.  Returns the work done
 * (== budget keeps NAPI scheduled).
 */
int be_poll(struct napi_struct *napi, int budget)
{
	struct be_eq_obj *eqo = container_of(napi, struct be_eq_obj, napi);
	struct be_adapter *adapter = eqo->adapter;
	int max_work = 0, work, i, num_evts;
	bool tx_done;

	num_evts = events_get(eqo);

	/* Process all TXQs serviced by this EQ */
	for (i = eqo->idx; i < adapter->num_tx_qs; i += adapter->num_evt_qs) {
		tx_done = be_process_tx(adapter, &adapter->tx_obj[i],
					eqo->tx_budget, i);
		if (!tx_done)
			max_work = budget;
	}

	/* This loop will iterate twice for EQ0 in which
	 * completions of the last RXQ (default one) are also processed
	 * For other EQs the loop iterates only once
	 */
	for (i = eqo->idx; i < adapter->num_rx_qs; i += adapter->num_evt_qs) {
		work = be_process_rx(&adapter->rx_obj[i], napi, budget);
		max_work = max(work, max_work);
	}

	if (is_mcc_eqo(eqo))
		be_process_mcc(adapter);

	if (max_work < budget) {
		napi_complete(napi);
		be_eq_notify(adapter, eqo->q.id, true, false, num_evts);
	} else {
		/* As we'll continue in polling mode, count and clear events */
		be_eq_notify(adapter, eqo->q.id, false, false, num_evts);
	}
	return max_work;
}
2204
Padmanabh Ratnakarf67ef7b2012-07-12 03:57:09 +00002205void be_detect_error(struct be_adapter *adapter)
Ajit Khaparde7c185272010-07-29 06:16:33 +00002206{
Padmanabh Ratnakare1cfb672011-11-03 01:50:08 +00002207 u32 ue_lo = 0, ue_hi = 0, ue_lo_mask = 0, ue_hi_mask = 0;
2208 u32 sliport_status = 0, sliport_err1 = 0, sliport_err2 = 0;
Ajit Khaparde7c185272010-07-29 06:16:33 +00002209 u32 i;
2210
Sathya Perlad23e9462012-12-17 19:38:51 +00002211 if (be_hw_error(adapter))
Sathya Perla72f02482011-11-10 19:17:58 +00002212 return;
2213
Padmanabh Ratnakare1cfb672011-11-03 01:50:08 +00002214 if (lancer_chip(adapter)) {
2215 sliport_status = ioread32(adapter->db + SLIPORT_STATUS_OFFSET);
2216 if (sliport_status & SLIPORT_STATUS_ERR_MASK) {
2217 sliport_err1 = ioread32(adapter->db +
2218 SLIPORT_ERROR1_OFFSET);
2219 sliport_err2 = ioread32(adapter->db +
2220 SLIPORT_ERROR2_OFFSET);
2221 }
2222 } else {
2223 pci_read_config_dword(adapter->pdev,
2224 PCICFG_UE_STATUS_LOW, &ue_lo);
2225 pci_read_config_dword(adapter->pdev,
2226 PCICFG_UE_STATUS_HIGH, &ue_hi);
2227 pci_read_config_dword(adapter->pdev,
2228 PCICFG_UE_STATUS_LOW_MASK, &ue_lo_mask);
2229 pci_read_config_dword(adapter->pdev,
2230 PCICFG_UE_STATUS_HI_MASK, &ue_hi_mask);
Ajit Khaparde7c185272010-07-29 06:16:33 +00002231
Padmanabh Ratnakarf67ef7b2012-07-12 03:57:09 +00002232 ue_lo = (ue_lo & ~ue_lo_mask);
2233 ue_hi = (ue_hi & ~ue_hi_mask);
Padmanabh Ratnakare1cfb672011-11-03 01:50:08 +00002234 }
Ajit Khaparde7c185272010-07-29 06:16:33 +00002235
Ajit Khaparde1451ae62012-10-08 18:18:21 +00002236 /* On certain platforms BE hardware can indicate spurious UEs.
2237 * Allow the h/w to stop working completely in case of a real UE.
2238 * Hence not setting the hw_error for UE detection.
2239 */
2240 if (sliport_status & SLIPORT_STATUS_ERR_MASK) {
Padmanabh Ratnakarf67ef7b2012-07-12 03:57:09 +00002241 adapter->hw_error = true;
Sathya Perla434b3642011-11-10 19:17:59 +00002242 dev_err(&adapter->pdev->dev,
Padmanabh Ratnakarf67ef7b2012-07-12 03:57:09 +00002243 "Error detected in the card\n");
2244 }
2245
2246 if (sliport_status & SLIPORT_STATUS_ERR_MASK) {
2247 dev_err(&adapter->pdev->dev,
2248 "ERR: sliport status 0x%x\n", sliport_status);
2249 dev_err(&adapter->pdev->dev,
2250 "ERR: sliport error1 0x%x\n", sliport_err1);
2251 dev_err(&adapter->pdev->dev,
2252 "ERR: sliport error2 0x%x\n", sliport_err2);
Ajit Khaparded053de92010-09-03 06:23:30 +00002253 }
2254
Padmanabh Ratnakare1cfb672011-11-03 01:50:08 +00002255 if (ue_lo) {
2256 for (i = 0; ue_lo; ue_lo >>= 1, i++) {
2257 if (ue_lo & 1)
Ajit Khaparde7c185272010-07-29 06:16:33 +00002258 dev_err(&adapter->pdev->dev,
2259 "UE: %s bit set\n", ue_status_low_desc[i]);
2260 }
2261 }
Padmanabh Ratnakarf67ef7b2012-07-12 03:57:09 +00002262
Padmanabh Ratnakare1cfb672011-11-03 01:50:08 +00002263 if (ue_hi) {
2264 for (i = 0; ue_hi; ue_hi >>= 1, i++) {
2265 if (ue_hi & 1)
Ajit Khaparde7c185272010-07-29 06:16:33 +00002266 dev_err(&adapter->pdev->dev,
2267 "UE: %s bit set\n", ue_status_hi_desc[i]);
2268 }
2269 }
2270
2271}
2272
Sathya Perla8d56ff12009-11-22 22:02:26 +00002273static void be_msix_disable(struct be_adapter *adapter)
2274{
Sathya Perlaac6a0c42011-03-21 20:49:25 +00002275 if (msix_enabled(adapter)) {
Sathya Perla8d56ff12009-11-22 22:02:26 +00002276 pci_disable_msix(adapter->pdev);
Sathya Perlaac6a0c42011-03-21 20:49:25 +00002277 adapter->num_msix_vec = 0;
Sathya Perla3abcded2010-10-03 22:12:27 -07002278 }
2279}
2280
Sathya Perla10ef9ab2012-02-09 18:05:27 +00002281static uint be_num_rss_want(struct be_adapter *adapter)
2282{
Yuval Mintz30e80b52012-07-01 03:19:00 +00002283 u32 num = 0;
Padmanabh Ratnakarabb93952012-10-20 06:01:41 +00002284
Sathya Perla10ef9ab2012-02-09 18:05:27 +00002285 if ((adapter->function_caps & BE_FUNCTION_CAPS_RSS) &&
Padmanabh Ratnakarabb93952012-10-20 06:01:41 +00002286 (lancer_chip(adapter) ||
2287 (!sriov_want(adapter) && be_physfn(adapter)))) {
2288 num = adapter->max_rss_queues;
Yuval Mintz30e80b52012-07-01 03:19:00 +00002289 num = min_t(u32, num, (u32)netif_get_num_default_rss_queues());
2290 }
2291 return num;
Sathya Perla10ef9ab2012-02-09 18:05:27 +00002292}
2293
/* Enable MSI-X with as many vectors as NIC (RSS + default RX) and RoCE use
 * warrant.  If the full request fails but the PCI core offers a smaller
 * count, retry with that count.  On success the granted vectors are split
 * between the NIC and RoCE; on failure only a warning is logged (the
 * caller falls back to INTx).
 */
static void be_msix_enable(struct be_adapter *adapter)
{
#define BE_MIN_MSIX_VECTORS 1
	int i, status, num_vec, num_roce_vec = 0;
	struct device *dev = &adapter->pdev->dev;

	/* If RSS queues are not used, need a vec for default RX Q */
	num_vec = min(be_num_rss_want(adapter), num_online_cpus());
	if (be_roce_supported(adapter)) {
		num_roce_vec = min_t(u32, MAX_ROCE_MSIX_VECTORS,
					(num_online_cpus() + 1));
		num_roce_vec = min(num_roce_vec, MAX_ROCE_EQS);
		num_vec += num_roce_vec;
		num_vec = min(num_vec, MAX_MSIX_VECTORS);
	}
	num_vec = max(num_vec, BE_MIN_MSIX_VECTORS);

	for (i = 0; i < num_vec; i++)
		adapter->msix_entries[i].entry = i;

	status = pci_enable_msix(adapter->pdev, adapter->msix_entries, num_vec);
	if (status == 0) {
		goto done;
	} else if (status >= BE_MIN_MSIX_VECTORS) {
		/* a positive return is the number of vectors available;
		 * retry with that many
		 */
		num_vec = status;
		if (pci_enable_msix(adapter->pdev, adapter->msix_entries,
				num_vec) == 0)
			goto done;
	}

	dev_warn(dev, "MSIx enable failed\n");
	return;
done:
	/* NIC gets what RoCE doesn't claim out of the granted vectors */
	if (be_roce_supported(adapter)) {
		if (num_vec > num_roce_vec) {
			adapter->num_msix_vec = num_vec - num_roce_vec;
			adapter->num_msix_roce_vec =
				num_vec - adapter->num_msix_vec;
		} else {
			adapter->num_msix_vec = num_vec;
			adapter->num_msix_roce_vec = 0;
		}
	} else
		adapter->num_msix_vec = num_vec;
	dev_info(dev, "enabled %d MSI-x vector(s)\n", adapter->num_msix_vec);
	return;
}
2341
/* Return the MSI-X vector assigned to @eqo (indexed by the EQ's idx). */
static inline int be_msix_vec_get(struct be_adapter *adapter,
				struct be_eq_obj *eqo)
{
	return adapter->msix_entries[eqo->idx].vector;
}
2347
/* Request one MSI-X irq per event queue, naming each "<netdev>-q<i>".
 * On failure, frees the irqs registered so far (in reverse order),
 * disables MSI-X and returns the failing status.
 */
static int be_msix_register(struct be_adapter *adapter)
{
	struct net_device *netdev = adapter->netdev;
	struct be_eq_obj *eqo;
	int status, i, vec;

	for_all_evt_queues(adapter, eqo, i) {
		sprintf(eqo->desc, "%s-q%d", netdev->name, i);
		vec = be_msix_vec_get(adapter, eqo);
		status = request_irq(vec, be_msix, 0, eqo->desc, eqo);
		if (status)
			goto err_msix;
	}

	return 0;
err_msix:
	/* unwind only the irqs successfully requested before the failure */
	for (i--, eqo = &adapter->eq_obj[i]; i >= 0; i--, eqo--)
		free_irq(be_msix_vec_get(adapter, eqo), eqo);
	dev_warn(&adapter->pdev->dev, "MSIX Request IRQ failed - err %d\n",
		status);
	be_msix_disable(adapter);
	return status;
}
2371
/* Register interrupts: MSI-X when enabled, else fall back to shared INTx
 * (PFs only; VFs have no INTx).  Sets adapter->isr_registered on success.
 * Returns 0 or the failing request status.
 */
static int be_irq_register(struct be_adapter *adapter)
{
	struct net_device *netdev = adapter->netdev;
	int status;

	if (msix_enabled(adapter)) {
		status = be_msix_register(adapter);
		if (status == 0)
			goto done;
		/* INTx is not supported for VF */
		if (!be_physfn(adapter))
			return status;
	}

	/* INTx: only the first EQ is used */
	netdev->irq = adapter->pdev->irq;
	status = request_irq(netdev->irq, be_intx, IRQF_SHARED, netdev->name,
			     &adapter->eq_obj[0]);
	if (status) {
		dev_err(&adapter->pdev->dev,
			"INTx request IRQ failed - err %d\n", status);
		return status;
	}
done:
	adapter->isr_registered = true;
	return 0;
}
2399
/* Undo be_irq_register(): free the INTx irq (bound to EQ0) or every
 * per-EQ MSI-X irq, then clear the isr_registered flag.  No-op if no
 * irq was registered.
 */
static void be_irq_unregister(struct be_adapter *adapter)
{
	struct net_device *netdev = adapter->netdev;
	struct be_eq_obj *eqo;
	int i;

	if (!adapter->isr_registered)
		return;

	/* INTx */
	if (!msix_enabled(adapter)) {
		free_irq(netdev->irq, &adapter->eq_obj[0]);
		goto done;
	}

	/* MSIx */
	for_all_evt_queues(adapter, eqo, i)
		free_irq(be_msix_vec_get(adapter, eqo), eqo);

done:
	adapter->isr_registered = false;
}
2422
/* Destroy every RX WRB queue.  For queues created in HW: destroy the HW
 * object, wait 1ms for in-flight DMA and the flush completion, then drain
 * the CQ before freeing the queue memory.
 */
static void be_rx_qs_destroy(struct be_adapter *adapter)
{
	struct be_queue_info *q;
	struct be_rx_obj *rxo;
	int i;

	for_all_rx_queues(adapter, rxo, i) {
		q = &rxo->q;
		if (q->created) {
			be_cmd_rxq_destroy(adapter, q);
			/* After the rxq is invalidated, wait for a grace time
			 * of 1ms for all dma to end and the flush compl to
			 * arrive
			 */
			mdelay(1);
			be_rx_cq_clean(rxo);
		}
		be_queue_free(adapter, q);
	}
}
2443
/* ndo_stop: quiesce the device in a safe order -- close RoCE, stop NAPI,
 * stop async MCC, drain TX completions, destroy RX queues, quiesce each
 * irq and clean its EQ, and finally unregister interrupts.
 * Always returns 0.
 */
static int be_close(struct net_device *netdev)
{
	struct be_adapter *adapter = netdev_priv(netdev);
	struct be_eq_obj *eqo;
	int i;

	be_roce_dev_close(adapter);

	for_all_evt_queues(adapter, eqo, i)
		napi_disable(&eqo->napi);

	be_async_mcc_disable(adapter);

	/* Wait for all pending tx completions to arrive so that
	 * all tx skbs are freed.
	 */
	be_tx_compl_clean(adapter);

	be_rx_qs_destroy(adapter);

	for_all_evt_queues(adapter, eqo, i) {
		/* ensure no ISR is still running for this EQ's vector */
		if (msix_enabled(adapter))
			synchronize_irq(be_msix_vec_get(adapter, eqo));
		else
			synchronize_irq(netdev->irq);
		be_eq_clean(eqo);
	}

	be_irq_unregister(adapter);

	return 0;
}
2476
/* Allocate and create the RX WRB queues, program the RSS indirection
 * table/flags when multiple RX queues exist, and post the initial RX
 * buffers.  The default (non-RSS) RXQ must be created first, per FW
 * requirement.  Returns 0 or the first failing status.
 */
static int be_rx_qs_create(struct be_adapter *adapter)
{
	struct be_rx_obj *rxo;
	int rc, i, j;
	u8 rsstable[128];	/* RSS indirection table is 128 entries */

	for_all_rx_queues(adapter, rxo, i) {
		rc = be_queue_alloc(adapter, &rxo->q, RX_Q_LEN,
				    sizeof(struct be_eth_rx_d));
		if (rc)
			return rc;
	}

	/* The FW would like the default RXQ to be created first */
	rxo = default_rxo(adapter);
	rc = be_cmd_rxq_create(adapter, &rxo->q, rxo->cq.id, rx_frag_size,
			       adapter->if_handle, false, &rxo->rss_id);
	if (rc)
		return rc;

	for_all_rss_queues(adapter, rxo, i) {
		rc = be_cmd_rxq_create(adapter, &rxo->q, rxo->cq.id,
				       rx_frag_size, adapter->if_handle,
				       true, &rxo->rss_id);
		if (rc)
			return rc;
	}

	if (be_multi_rxq(adapter)) {
		/* fill the indirection table with the RSS ids round-robin */
		for (j = 0; j < 128; j += adapter->num_rx_qs - 1) {
			for_all_rss_queues(adapter, rxo, i) {
				if ((j + i) >= 128)
					break;
				rsstable[j + i] = rxo->rss_id;
			}
		}
		adapter->rss_flags = RSS_ENABLE_TCP_IPV4 | RSS_ENABLE_IPV4 |
					RSS_ENABLE_TCP_IPV6 | RSS_ENABLE_IPV6;

		/* UDP RSS is only enabled on post-BE2/BE3 chips */
		if (!BEx_chip(adapter))
			adapter->rss_flags |= RSS_ENABLE_UDP_IPV4 |
						RSS_ENABLE_UDP_IPV6;

		rc = be_cmd_rss_config(adapter, rsstable, adapter->rss_flags,
				       128);
		if (rc) {
			adapter->rss_flags = 0;
			return rc;
		}
	}

	/* First time posting */
	for_all_rx_queues(adapter, rxo, i)
		be_post_rx_frags(rxo, GFP_KERNEL);
	return 0;
}
2533
/* ndo_open: create RX queues, register irqs, arm all CQs, enable async
 * MCC, enable NAPI and arm the EQs, refresh link status, and open RoCE.
 * On RX queue creation failure, tears everything down via be_close() and
 * returns -EIO.
 */
static int be_open(struct net_device *netdev)
{
	struct be_adapter *adapter = netdev_priv(netdev);
	struct be_eq_obj *eqo;
	struct be_rx_obj *rxo;
	struct be_tx_obj *txo;
	u8 link_status;
	int status, i;

	status = be_rx_qs_create(adapter);
	if (status)
		goto err;

	be_irq_register(adapter);

	for_all_rx_queues(adapter, rxo, i)
		be_cq_notify(adapter, rxo->cq.id, true, 0);

	for_all_tx_queues(adapter, txo, i)
		be_cq_notify(adapter, txo->cq.id, true, 0);

	be_async_mcc_enable(adapter);

	for_all_evt_queues(adapter, eqo, i) {
		napi_enable(&eqo->napi);
		be_eq_notify(adapter, eqo->q.id, true, false, 0);
	}

	/* link status query failure is non-fatal; only update on success */
	status = be_cmd_link_status_query(adapter, NULL, &link_status, 0);
	if (!status)
		be_link_status_update(adapter, link_status);

	be_roce_dev_open(adapter);
	return 0;
err:
	be_close(adapter->netdev);
	return -EIO;
}
2572
Ajit Khaparde71d8d1b2009-12-03 06:16:59 +00002573static int be_setup_wol(struct be_adapter *adapter, bool enable)
2574{
2575 struct be_dma_mem cmd;
2576 int status = 0;
2577 u8 mac[ETH_ALEN];
2578
2579 memset(mac, 0, ETH_ALEN);
2580
2581 cmd.size = sizeof(struct be_cmd_req_acpi_wol_magic_config);
Ivan Vecera2b7bceb2011-02-02 08:05:12 +00002582 cmd.va = dma_alloc_coherent(&adapter->pdev->dev, cmd.size, &cmd.dma,
Joe Perches1f9061d22013-03-15 07:23:58 +00002583 GFP_KERNEL | __GFP_ZERO);
Ajit Khaparde71d8d1b2009-12-03 06:16:59 +00002584 if (cmd.va == NULL)
2585 return -1;
Ajit Khaparde71d8d1b2009-12-03 06:16:59 +00002586
2587 if (enable) {
2588 status = pci_write_config_dword(adapter->pdev,
2589 PCICFG_PM_CONTROL_OFFSET, PCICFG_PM_CONTROL_MASK);
2590 if (status) {
2591 dev_err(&adapter->pdev->dev,
Frans Pop2381a552010-03-24 07:57:36 +00002592 "Could not enable Wake-on-lan\n");
Ivan Vecera2b7bceb2011-02-02 08:05:12 +00002593 dma_free_coherent(&adapter->pdev->dev, cmd.size, cmd.va,
2594 cmd.dma);
Ajit Khaparde71d8d1b2009-12-03 06:16:59 +00002595 return status;
2596 }
2597 status = be_cmd_enable_magic_wol(adapter,
2598 adapter->netdev->dev_addr, &cmd);
2599 pci_enable_wake(adapter->pdev, PCI_D3hot, 1);
2600 pci_enable_wake(adapter->pdev, PCI_D3cold, 1);
2601 } else {
2602 status = be_cmd_enable_magic_wol(adapter, mac, &cmd);
2603 pci_enable_wake(adapter->pdev, PCI_D3hot, 0);
2604 pci_enable_wake(adapter->pdev, PCI_D3cold, 0);
2605 }
2606
Ivan Vecera2b7bceb2011-02-02 08:05:12 +00002607 dma_free_coherent(&adapter->pdev->dev, cmd.size, cmd.va, cmd.dma);
Ajit Khaparde71d8d1b2009-12-03 06:16:59 +00002608 return status;
2609}
2610
/*
 * Generate a seed MAC address from the PF MAC Address using jhash.
 * MAC Address for VFs are assigned incrementally starting from the seed.
 * These addresses are programmed in the ASIC by the PF and the VF driver
 * queries for the MAC address during its probe.
 */
static int be_vf_eth_addr_config(struct be_adapter *adapter)
{
	u32 vf;
	int status = 0;
	u8 mac[ETH_ALEN];
	struct be_vf_cfg *vf_cfg;

	be_vf_eth_addr_generate(adapter, mac);

	for_all_vfs(adapter, vf_cfg, vf) {
		/* Lancer uses the set_mac_list command; other chips add a
		 * pmac entry on the VF's interface
		 */
		if (lancer_chip(adapter)) {
			status = be_cmd_set_mac_list(adapter, mac, 1, vf + 1);
		} else {
			status = be_cmd_pmac_add(adapter, mac,
						 vf_cfg->if_handle,
						 &vf_cfg->pmac_id, vf + 1);
		}

		/* a per-VF failure is logged but does not stop the loop */
		if (status)
			dev_err(&adapter->pdev->dev,
			"Mac address assignment failed for VF %d\n", vf);
		else
			memcpy(vf_cfg->mac_addr, mac, ETH_ALEN);

		mac[5] += 1;	/* next VF gets the next address */
	}
	return status;
}
2645
Sathya Perla4c876612013-02-03 20:30:11 +00002646static int be_vfs_mac_query(struct be_adapter *adapter)
2647{
2648 int status, vf;
2649 u8 mac[ETH_ALEN];
2650 struct be_vf_cfg *vf_cfg;
2651 bool active;
2652
2653 for_all_vfs(adapter, vf_cfg, vf) {
2654 be_cmd_get_mac_from_list(adapter, mac, &active,
2655 &vf_cfg->pmac_id, 0);
2656
2657 status = be_cmd_mac_addr_query(adapter, mac, false,
2658 vf_cfg->if_handle, 0);
2659 if (status)
2660 return status;
2661 memcpy(vf_cfg->mac_addr, mac, ETH_ALEN);
2662 }
2663 return 0;
2664}
2665
Sathya Perlaf9449ab2011-10-24 02:45:01 +00002666static void be_vf_clear(struct be_adapter *adapter)
Ajit Khaparde6d87f5c2010-08-25 00:32:33 +00002667{
Sathya Perla11ac75e2011-12-13 00:58:50 +00002668 struct be_vf_cfg *vf_cfg;
Ajit Khaparde6d87f5c2010-08-25 00:32:33 +00002669 u32 vf;
2670
Sathya Perla39f1d942012-05-08 19:41:24 +00002671 if (be_find_vfs(adapter, ASSIGNED)) {
Sathya Perla4c876612013-02-03 20:30:11 +00002672 dev_warn(&adapter->pdev->dev,
2673 "VFs are assigned to VMs: not disabling VFs\n");
Sathya Perla39f1d942012-05-08 19:41:24 +00002674 goto done;
2675 }
2676
Sathya Perla11ac75e2011-12-13 00:58:50 +00002677 for_all_vfs(adapter, vf_cfg, vf) {
Padmanabh Ratnakar590c3912011-11-25 05:47:26 +00002678 if (lancer_chip(adapter))
2679 be_cmd_set_mac_list(adapter, NULL, 0, vf + 1);
2680 else
Sathya Perla11ac75e2011-12-13 00:58:50 +00002681 be_cmd_pmac_del(adapter, vf_cfg->if_handle,
2682 vf_cfg->pmac_id, vf + 1);
Sathya Perlaf9449ab2011-10-24 02:45:01 +00002683
Sathya Perla11ac75e2011-12-13 00:58:50 +00002684 be_cmd_if_destroy(adapter, vf_cfg->if_handle, vf + 1);
2685 }
Sathya Perla39f1d942012-05-08 19:41:24 +00002686 pci_disable_sriov(adapter->pdev);
2687done:
2688 kfree(adapter->vf_cfg);
2689 adapter->num_vfs = 0;
Ajit Khaparde6d87f5c2010-08-25 00:32:33 +00002690}
2691
Sathya Perlaa54769f2011-10-24 02:45:00 +00002692static int be_clear(struct be_adapter *adapter)
2693{
Ajit Khapardefbc13f02012-03-18 06:23:21 +00002694 int i = 1;
2695
Sathya Perla191eb752012-02-23 18:50:13 +00002696 if (adapter->flags & BE_FLAGS_WORKER_SCHEDULED) {
2697 cancel_delayed_work_sync(&adapter->work);
2698 adapter->flags &= ~BE_FLAGS_WORKER_SCHEDULED;
2699 }
2700
Sathya Perla11ac75e2011-12-13 00:58:50 +00002701 if (sriov_enabled(adapter))
Sathya Perlaf9449ab2011-10-24 02:45:01 +00002702 be_vf_clear(adapter);
2703
Ajit Khapardefbc13f02012-03-18 06:23:21 +00002704 for (; adapter->uc_macs > 0; adapter->uc_macs--, i++)
2705 be_cmd_pmac_del(adapter, adapter->if_handle,
2706 adapter->pmac_id[i], 0);
2707
Sathya Perlaf9449ab2011-10-24 02:45:01 +00002708 be_cmd_if_destroy(adapter, adapter->if_handle, 0);
Sathya Perlaa54769f2011-10-24 02:45:00 +00002709
2710 be_mcc_queues_destroy(adapter);
Sathya Perla10ef9ab2012-02-09 18:05:27 +00002711 be_rx_cqs_destroy(adapter);
Sathya Perlaa54769f2011-10-24 02:45:00 +00002712 be_tx_queues_destroy(adapter);
Sathya Perla10ef9ab2012-02-09 18:05:27 +00002713 be_evt_queues_destroy(adapter);
Sathya Perlaa54769f2011-10-24 02:45:00 +00002714
Padmanabh Ratnakarabb93952012-10-20 06:01:41 +00002715 kfree(adapter->pmac_id);
2716 adapter->pmac_id = NULL;
2717
Sathya Perla10ef9ab2012-02-09 18:05:27 +00002718 be_msix_disable(adapter);
Sathya Perlaa54769f2011-10-24 02:45:00 +00002719 return 0;
2720}
2721
Sathya Perla4c876612013-02-03 20:30:11 +00002722static int be_vfs_if_create(struct be_adapter *adapter)
Padmanabh Ratnakarabb93952012-10-20 06:01:41 +00002723{
Sathya Perla4c876612013-02-03 20:30:11 +00002724 struct be_vf_cfg *vf_cfg;
2725 u32 cap_flags, en_flags, vf;
Padmanabh Ratnakarabb93952012-10-20 06:01:41 +00002726 int status;
2727
Sathya Perla4c876612013-02-03 20:30:11 +00002728 cap_flags = BE_IF_FLAGS_UNTAGGED | BE_IF_FLAGS_BROADCAST |
2729 BE_IF_FLAGS_MULTICAST;
Padmanabh Ratnakarabb93952012-10-20 06:01:41 +00002730
Sathya Perla4c876612013-02-03 20:30:11 +00002731 for_all_vfs(adapter, vf_cfg, vf) {
2732 if (!BE3_chip(adapter))
Vasundhara Volama05f99d2013-04-21 23:28:17 +00002733 be_cmd_get_profile_config(adapter, &cap_flags,
2734 NULL, vf + 1);
Sathya Perla4c876612013-02-03 20:30:11 +00002735
2736 /* If a FW profile exists, then cap_flags are updated */
2737 en_flags = cap_flags & (BE_IF_FLAGS_UNTAGGED |
2738 BE_IF_FLAGS_BROADCAST | BE_IF_FLAGS_MULTICAST);
2739 status = be_cmd_if_create(adapter, cap_flags, en_flags,
2740 &vf_cfg->if_handle, vf + 1);
2741 if (status)
2742 goto err;
2743 }
2744err:
2745 return status;
Padmanabh Ratnakarabb93952012-10-20 06:01:41 +00002746}
2747
Sathya Perla39f1d942012-05-08 19:41:24 +00002748static int be_vf_setup_init(struct be_adapter *adapter)
Sathya Perla30128032011-11-10 19:17:57 +00002749{
Sathya Perla11ac75e2011-12-13 00:58:50 +00002750 struct be_vf_cfg *vf_cfg;
Sathya Perla30128032011-11-10 19:17:57 +00002751 int vf;
2752
Sathya Perla39f1d942012-05-08 19:41:24 +00002753 adapter->vf_cfg = kcalloc(adapter->num_vfs, sizeof(*vf_cfg),
2754 GFP_KERNEL);
2755 if (!adapter->vf_cfg)
2756 return -ENOMEM;
2757
Sathya Perla11ac75e2011-12-13 00:58:50 +00002758 for_all_vfs(adapter, vf_cfg, vf) {
2759 vf_cfg->if_handle = -1;
2760 vf_cfg->pmac_id = -1;
Sathya Perla30128032011-11-10 19:17:57 +00002761 }
Sathya Perla39f1d942012-05-08 19:41:24 +00002762 return 0;
Sathya Perla30128032011-11-10 19:17:57 +00002763}
2764
Sathya Perlaf9449ab2011-10-24 02:45:01 +00002765static int be_vf_setup(struct be_adapter *adapter)
2766{
Sathya Perla11ac75e2011-12-13 00:58:50 +00002767 struct be_vf_cfg *vf_cfg;
Ajit Khapardef1f3ee12012-03-18 06:23:41 +00002768 u16 def_vlan, lnk_speed;
Sathya Perla4c876612013-02-03 20:30:11 +00002769 int status, old_vfs, vf;
2770 struct device *dev = &adapter->pdev->dev;
Sathya Perlaf9449ab2011-10-24 02:45:01 +00002771
Sathya Perla4c876612013-02-03 20:30:11 +00002772 old_vfs = be_find_vfs(adapter, ENABLED);
2773 if (old_vfs) {
2774 dev_info(dev, "%d VFs are already enabled\n", old_vfs);
2775 if (old_vfs != num_vfs)
2776 dev_warn(dev, "Ignoring num_vfs=%d setting\n", num_vfs);
2777 adapter->num_vfs = old_vfs;
Sathya Perla39f1d942012-05-08 19:41:24 +00002778 } else {
Sathya Perla4c876612013-02-03 20:30:11 +00002779 if (num_vfs > adapter->dev_num_vfs)
2780 dev_info(dev, "Device supports %d VFs and not %d\n",
2781 adapter->dev_num_vfs, num_vfs);
2782 adapter->num_vfs = min_t(u16, num_vfs, adapter->dev_num_vfs);
2783
2784 status = pci_enable_sriov(adapter->pdev, num_vfs);
2785 if (status) {
2786 dev_err(dev, "SRIOV enable failed\n");
2787 adapter->num_vfs = 0;
2788 return 0;
2789 }
Sathya Perla39f1d942012-05-08 19:41:24 +00002790 }
2791
2792 status = be_vf_setup_init(adapter);
2793 if (status)
2794 goto err;
Sathya Perla30128032011-11-10 19:17:57 +00002795
Sathya Perla4c876612013-02-03 20:30:11 +00002796 if (old_vfs) {
2797 for_all_vfs(adapter, vf_cfg, vf) {
2798 status = be_cmd_get_if_id(adapter, vf_cfg, vf);
2799 if (status)
2800 goto err;
2801 }
2802 } else {
2803 status = be_vfs_if_create(adapter);
Sathya Perlaf9449ab2011-10-24 02:45:01 +00002804 if (status)
2805 goto err;
Sathya Perlaf9449ab2011-10-24 02:45:01 +00002806 }
2807
Sathya Perla4c876612013-02-03 20:30:11 +00002808 if (old_vfs) {
2809 status = be_vfs_mac_query(adapter);
2810 if (status)
2811 goto err;
2812 } else {
Sathya Perla39f1d942012-05-08 19:41:24 +00002813 status = be_vf_eth_addr_config(adapter);
2814 if (status)
2815 goto err;
2816 }
Sathya Perlaf9449ab2011-10-24 02:45:01 +00002817
Sathya Perla11ac75e2011-12-13 00:58:50 +00002818 for_all_vfs(adapter, vf_cfg, vf) {
Sathya Perla4c876612013-02-03 20:30:11 +00002819 /* BE3 FW, by default, caps VF TX-rate to 100mbps.
2820 * Allow full available bandwidth
2821 */
2822 if (BE3_chip(adapter) && !old_vfs)
2823 be_cmd_set_qos(adapter, 1000, vf+1);
2824
2825 status = be_cmd_link_status_query(adapter, &lnk_speed,
2826 NULL, vf + 1);
2827 if (!status)
2828 vf_cfg->tx_rate = lnk_speed;
Ajit Khapardef1f3ee12012-03-18 06:23:41 +00002829
2830 status = be_cmd_get_hsw_config(adapter, &def_vlan,
Sathya Perla4c876612013-02-03 20:30:11 +00002831 vf + 1, vf_cfg->if_handle);
Ajit Khapardef1f3ee12012-03-18 06:23:41 +00002832 if (status)
2833 goto err;
2834 vf_cfg->def_vid = def_vlan;
Padmanabh Ratnakardcf7ebb2012-10-20 06:03:49 +00002835
2836 be_cmd_enable_vf(adapter, vf + 1);
Sathya Perlaf9449ab2011-10-24 02:45:01 +00002837 }
2838 return 0;
2839err:
Sathya Perla4c876612013-02-03 20:30:11 +00002840 dev_err(dev, "VF setup failed\n");
2841 be_vf_clear(adapter);
Sathya Perlaf9449ab2011-10-24 02:45:01 +00002842 return status;
2843}
2844
Sathya Perla30128032011-11-10 19:17:57 +00002845static void be_setup_init(struct be_adapter *adapter)
2846{
2847 adapter->vlan_prio_bmap = 0xff;
Ajit Khaparde42f11cf2012-04-21 18:53:22 +00002848 adapter->phy.link_speed = -1;
Sathya Perla30128032011-11-10 19:17:57 +00002849 adapter->if_handle = -1;
2850 adapter->be3_native = false;
2851 adapter->promiscuous = false;
Padmanabh Ratnakarf25b1192012-10-20 06:02:52 +00002852 if (be_physfn(adapter))
2853 adapter->cmd_privileges = MAX_PRIVILEGES;
2854 else
2855 adapter->cmd_privileges = MIN_PRIVILEGES;
Sathya Perla30128032011-11-10 19:17:57 +00002856}
2857
Padmanabh Ratnakar1578e772012-06-07 04:37:08 +00002858static int be_get_mac_addr(struct be_adapter *adapter, u8 *mac, u32 if_handle,
2859 bool *active_mac, u32 *pmac_id)
Padmanabh Ratnakar590c3912011-11-25 05:47:26 +00002860{
Padmanabh Ratnakar1578e772012-06-07 04:37:08 +00002861 int status = 0;
Padmanabh Ratnakare5e1ee82012-02-03 09:50:17 +00002862
Padmanabh Ratnakar1578e772012-06-07 04:37:08 +00002863 if (!is_zero_ether_addr(adapter->netdev->perm_addr)) {
2864 memcpy(mac, adapter->netdev->dev_addr, ETH_ALEN);
2865 if (!lancer_chip(adapter) && !be_physfn(adapter))
2866 *active_mac = true;
2867 else
2868 *active_mac = false;
Padmanabh Ratnakare5e1ee82012-02-03 09:50:17 +00002869
Padmanabh Ratnakar1578e772012-06-07 04:37:08 +00002870 return status;
Padmanabh Ratnakare5e1ee82012-02-03 09:50:17 +00002871 }
Padmanabh Ratnakar1578e772012-06-07 04:37:08 +00002872
2873 if (lancer_chip(adapter)) {
2874 status = be_cmd_get_mac_from_list(adapter, mac,
2875 active_mac, pmac_id, 0);
2876 if (*active_mac) {
Sathya Perla5ee49792012-09-28 04:39:41 +00002877 status = be_cmd_mac_addr_query(adapter, mac, false,
2878 if_handle, *pmac_id);
Padmanabh Ratnakar1578e772012-06-07 04:37:08 +00002879 }
2880 } else if (be_physfn(adapter)) {
2881 /* For BE3, for PF get permanent MAC */
Sathya Perla5ee49792012-09-28 04:39:41 +00002882 status = be_cmd_mac_addr_query(adapter, mac, true, 0, 0);
Padmanabh Ratnakar1578e772012-06-07 04:37:08 +00002883 *active_mac = false;
2884 } else {
2885 /* For BE3, for VF get soft MAC assigned by PF*/
Sathya Perla5ee49792012-09-28 04:39:41 +00002886 status = be_cmd_mac_addr_query(adapter, mac, false,
Padmanabh Ratnakar1578e772012-06-07 04:37:08 +00002887 if_handle, 0);
2888 *active_mac = true;
2889 }
Padmanabh Ratnakar590c3912011-11-25 05:47:26 +00002890 return status;
2891}
2892
/* Populate adapter->max_* resource limits. Non-BEx chips may provide a FW
 * function profile (be_cmd_get_func_config); when present its values are
 * used (sanity-clamped), otherwise compile-time defaults apply. Also reads
 * the SR-IOV capability to learn how many VFs the device supports.
 */
static void be_get_resources(struct be_adapter *adapter)
{
	u16 dev_num_vfs;
	int pos, status;
	bool profile_present = false;
	u16 txq_count = 0;

	if (!BEx_chip(adapter)) {
		/* Lancer/Skyhawk: limits come from the FW function profile */
		status = be_cmd_get_func_config(adapter);
		if (!status)
			profile_present = true;
	} else if (BE3_chip(adapter) && be_physfn(adapter)) {
		/* BE3 PF: only the TXQ count may be profile-provided */
		be_cmd_get_profile_config(adapter, NULL, &txq_count, 0);
	}

	if (profile_present) {
		/* Sanity fixes for Lancer */
		adapter->max_pmac_cnt = min_t(u16, adapter->max_pmac_cnt,
					      BE_UC_PMAC_COUNT);
		adapter->max_vlans = min_t(u16, adapter->max_vlans,
					   BE_NUM_VLANS_SUPPORTED);
		adapter->max_mcast_mac = min_t(u16, adapter->max_mcast_mac,
					       BE_MAX_MC);
		adapter->max_tx_queues = min_t(u16, adapter->max_tx_queues,
					       MAX_TX_QS);
		adapter->max_rss_queues = min_t(u16, adapter->max_rss_queues,
						BE3_MAX_RSS_QS);
		adapter->max_event_queues = min_t(u16,
						  adapter->max_event_queues,
						  BE3_MAX_RSS_QS);

		/* keep one RX queue free for the default (non-RSS) queue */
		if (adapter->max_rss_queues &&
		    adapter->max_rss_queues == adapter->max_rx_queues)
			adapter->max_rss_queues -= 1;

		/* each RSS queue needs its own event queue */
		if (adapter->max_event_queues < adapter->max_rss_queues)
			adapter->max_rss_queues = adapter->max_event_queues;

	} else {
		/* no FW profile: use compile-time defaults */
		if (be_physfn(adapter))
			adapter->max_pmac_cnt = BE_UC_PMAC_COUNT;
		else
			adapter->max_pmac_cnt = BE_VF_UC_PMAC_COUNT;

		/* in FLEX10 mode the VLAN table is shared 8 ways */
		if (adapter->function_mode & FLEX10_MODE)
			adapter->max_vlans = BE_NUM_VLANS_SUPPORTED/8;
		else
			adapter->max_vlans = BE_NUM_VLANS_SUPPORTED;

		adapter->max_mcast_mac = BE_MAX_MC;
		adapter->max_tx_queues = txq_count ? txq_count : MAX_TX_QS;
		adapter->max_tx_queues = min_t(u16, adapter->max_tx_queues,
					       MAX_TX_QS);
		adapter->max_rss_queues = (adapter->be3_native) ?
					   BE3_MAX_RSS_QS : BE2_MAX_RSS_QS;
		adapter->max_event_queues = BE3_MAX_RSS_QS;

		adapter->if_cap_flags = BE_IF_FLAGS_UNTAGGED |
					BE_IF_FLAGS_BROADCAST |
					BE_IF_FLAGS_MULTICAST |
					BE_IF_FLAGS_PASS_L3L4_ERRORS |
					BE_IF_FLAGS_MCAST_PROMISCUOUS |
					BE_IF_FLAGS_VLAN_PROMISCUOUS |
					BE_IF_FLAGS_PROMISCUOUS;

		if (adapter->function_caps & BE_FUNCTION_CAPS_RSS)
			adapter->if_cap_flags |= BE_IF_FLAGS_RSS;
	}

	/* learn the device's TotalVFs from the SR-IOV capability, if any */
	pos = pci_find_ext_capability(adapter->pdev, PCI_EXT_CAP_ID_SRIOV);
	if (pos) {
		pci_read_config_word(adapter->pdev, pos + PCI_SRIOV_TOTAL_VF,
				     &dev_num_vfs);
		/* BE3 supports at most MAX_VFS regardless of TotalVFs */
		if (BE3_chip(adapter))
			dev_num_vfs = min_t(u16, dev_num_vfs, MAX_VFS);
		adapter->dev_num_vfs = dev_num_vfs;
	}
}
2971
Sathya Perla39f1d942012-05-08 19:41:24 +00002972/* Routine to query per function resource limits */
2973static int be_get_config(struct be_adapter *adapter)
2974{
Sathya Perla4c876612013-02-03 20:30:11 +00002975 int status;
Sathya Perla39f1d942012-05-08 19:41:24 +00002976
Padmanabh Ratnakarabb93952012-10-20 06:01:41 +00002977 status = be_cmd_query_fw_cfg(adapter, &adapter->port_num,
2978 &adapter->function_mode,
Vasundhara Volam0ad31572013-04-21 23:28:16 +00002979 &adapter->function_caps,
2980 &adapter->asic_rev);
Padmanabh Ratnakarabb93952012-10-20 06:01:41 +00002981 if (status)
2982 goto err;
2983
2984 be_get_resources(adapter);
2985
2986 /* primary mac needs 1 pmac entry */
2987 adapter->pmac_id = kcalloc(adapter->max_pmac_cnt + 1,
2988 sizeof(u32), GFP_KERNEL);
2989 if (!adapter->pmac_id) {
2990 status = -ENOMEM;
2991 goto err;
2992 }
2993
Padmanabh Ratnakarabb93952012-10-20 06:01:41 +00002994err:
2995 return status;
Sathya Perla39f1d942012-05-08 19:41:24 +00002996}
2997
/* Full adapter bring-up: query FW limits, create all queues (event, TX/RX
 * CQs, MCC), create the FW interface, establish a MAC address, configure
 * VLANs/RX-mode/flow-control, optionally enable SR-IOV VFs, and start the
 * periodic worker. On any error everything is torn down via be_clear().
 * The ordering below is significant — each step depends on the previous.
 */
static int be_setup(struct be_adapter *adapter)
{
	struct device *dev = &adapter->pdev->dev;
	u32 en_flags;
	u32 tx_fc, rx_fc;
	int status;
	u8 mac[ETH_ALEN];
	bool active_mac;

	be_setup_init(adapter);

	if (!lancer_chip(adapter))
		be_cmd_req_native_mode(adapter);

	/* query limits first — queue creation below depends on them */
	status = be_get_config(adapter);
	if (status)
		goto err;

	be_msix_enable(adapter);

	status = be_evt_queues_create(adapter);
	if (status)
		goto err;

	status = be_tx_cqs_create(adapter);
	if (status)
		goto err;

	status = be_rx_cqs_create(adapter);
	if (status)
		goto err;

	status = be_mcc_queues_create(adapter);
	if (status)
		goto err;

	be_cmd_get_fn_privileges(adapter, &adapter->cmd_privileges, 0);
	/* In UMC mode FW does not return right privileges.
	 * Override with correct privilege equivalent to PF.
	 */
	if (be_is_mc(adapter))
		adapter->cmd_privileges = MAX_PRIVILEGES;

	en_flags = BE_IF_FLAGS_UNTAGGED | BE_IF_FLAGS_BROADCAST |
		   BE_IF_FLAGS_MULTICAST | BE_IF_FLAGS_PASS_L3L4_ERRORS;

	if (adapter->function_caps & BE_FUNCTION_CAPS_RSS)
		en_flags |= BE_IF_FLAGS_RSS;

	/* never enable flags the function doesn't actually support */
	en_flags = en_flags & adapter->if_cap_flags;

	status = be_cmd_if_create(adapter, adapter->if_cap_flags, en_flags,
				  &adapter->if_handle, 0);
	if (status != 0)
		goto err;

	memset(mac, 0, ETH_ALEN);
	active_mac = false;
	status = be_get_mac_addr(adapter, mac, adapter->if_handle,
				 &active_mac, &adapter->pmac_id[0]);
	if (status != 0)
		goto err;

	/* program the MAC only if FW doesn't already have it active */
	if (!active_mac) {
		status = be_cmd_pmac_add(adapter, mac, adapter->if_handle,
					 &adapter->pmac_id[0], 0);
		if (status != 0)
			goto err;
	}

	if (is_zero_ether_addr(adapter->netdev->dev_addr)) {
		memcpy(adapter->netdev->dev_addr, mac, ETH_ALEN);
		memcpy(adapter->netdev->perm_addr, mac, ETH_ALEN);
	}

	status = be_tx_qs_create(adapter);
	if (status)
		goto err;

	be_cmd_get_fw_ver(adapter, adapter->fw_ver, NULL);

	/* re-program VLAN filters that survived an adapter reset */
	if (adapter->vlans_added)
		be_vid_config(adapter);

	be_set_rx_mode(adapter->netdev);

	be_cmd_get_flow_control(adapter, &tx_fc, &rx_fc);

	/* only touch flow-control if FW state differs from desired state */
	if (rx_fc != adapter->rx_fc || tx_fc != adapter->tx_fc)
		be_cmd_set_flow_control(adapter, adapter->tx_fc,
					adapter->rx_fc);

	if (be_physfn(adapter) && num_vfs) {
		if (adapter->dev_num_vfs)
			be_vf_setup(adapter);
		else
			dev_warn(dev, "device doesn't support SRIOV\n");
	}

	status = be_cmd_get_phy_info(adapter);
	if (!status && be_pause_supported(adapter))
		adapter->phy.fc_autoneg = 1;

	schedule_delayed_work(&adapter->work, msecs_to_jiffies(1000));
	adapter->flags |= BE_FLAGS_WORKER_SCHEDULED;
	return 0;
err:
	be_clear(adapter);
	return status;
}
3108
#ifdef CONFIG_NET_POLL_CONTROLLER
/* netpoll entry point: interrupts may be unavailable, so manually notify
 * every event queue and kick its NAPI context to drain pending completions.
 */
static void be_netpoll(struct net_device *netdev)
{
	struct be_adapter *adapter = netdev_priv(netdev);
	struct be_eq_obj *eqo;
	int i;

	for_all_evt_queues(adapter, eqo, i) {
		be_eq_notify(eqo->adapter, eqo->q.id, false, true, 0);
		napi_schedule(&eqo->napi);
	}
	/* fix: dropped the redundant bare 'return;' at the end of this
	 * void function
	 */
}
#endif
3124
Ajit Khaparde84517482009-09-04 03:12:16 +00003125#define FW_FILE_HDR_SIGN "ServerEngines Corp. "
Padmanabh Ratnakarc165541e2012-04-25 01:47:15 +00003126char flash_cookie[2][16] = {"*** SE FLAS", "H DIRECTORY *** "};
3127
Sarveshwar Bandifa9a6fe2009-11-20 14:23:47 -08003128static bool be_flash_redboot(struct be_adapter *adapter,
Ajit Khaparde3f0d4562010-02-09 01:30:35 +00003129 const u8 *p, u32 img_start, int image_size,
3130 int hdr_size)
Sarveshwar Bandifa9a6fe2009-11-20 14:23:47 -08003131{
3132 u32 crc_offset;
3133 u8 flashed_crc[4];
3134 int status;
Ajit Khaparde3f0d4562010-02-09 01:30:35 +00003135
3136 crc_offset = hdr_size + img_start + image_size - 4;
3137
Sarveshwar Bandifa9a6fe2009-11-20 14:23:47 -08003138 p += crc_offset;
Ajit Khaparde3f0d4562010-02-09 01:30:35 +00003139
3140 status = be_cmd_get_flash_crc(adapter, flashed_crc,
Ajit Khapardef510fc62010-03-31 01:47:45 +00003141 (image_size - 4));
Sarveshwar Bandifa9a6fe2009-11-20 14:23:47 -08003142 if (status) {
3143 dev_err(&adapter->pdev->dev,
3144 "could not get crc from flash, not flashing redboot\n");
3145 return false;
3146 }
3147
3148 /*update redboot only if crc does not match*/
3149 if (!memcmp(flashed_crc, p, 4))
3150 return false;
3151 else
3152 return true;
Sarveshwar Bandifa9a6fe2009-11-20 14:23:47 -08003153}
3154
Sathya Perla306f1342011-08-02 19:57:45 +00003155static bool phy_flashing_required(struct be_adapter *adapter)
3156{
Ajit Khaparde42f11cf2012-04-21 18:53:22 +00003157 return (adapter->phy.phy_type == TN_8022 &&
3158 adapter->phy.interface_type == PHY_TYPE_BASET_10GB);
Sathya Perla306f1342011-08-02 19:57:45 +00003159}
3160
Padmanabh Ratnakarc165541e2012-04-25 01:47:15 +00003161static bool is_comp_in_ufi(struct be_adapter *adapter,
3162 struct flash_section_info *fsec, int type)
3163{
3164 int i = 0, img_type = 0;
3165 struct flash_section_info_g2 *fsec_g2 = NULL;
3166
Sathya Perlaca34fe32012-11-06 17:48:56 +00003167 if (BE2_chip(adapter))
Padmanabh Ratnakarc165541e2012-04-25 01:47:15 +00003168 fsec_g2 = (struct flash_section_info_g2 *)fsec;
3169
3170 for (i = 0; i < MAX_FLASH_COMP; i++) {
3171 if (fsec_g2)
3172 img_type = le32_to_cpu(fsec_g2->fsec_entry[i].type);
3173 else
3174 img_type = le32_to_cpu(fsec->fsec_entry[i].type);
3175
3176 if (img_type == type)
3177 return true;
3178 }
3179 return false;
3180
3181}
3182
3183struct flash_section_info *get_fsec_info(struct be_adapter *adapter,
3184 int header_size,
3185 const struct firmware *fw)
3186{
3187 struct flash_section_info *fsec = NULL;
3188 const u8 *p = fw->data;
3189
3190 p += header_size;
3191 while (p < (fw->data + fw->size)) {
3192 fsec = (struct flash_section_info *)p;
3193 if (!memcmp(flash_cookie, fsec->cookie, sizeof(flash_cookie)))
3194 return fsec;
3195 p += 32;
3196 }
3197 return NULL;
3198}
3199
Padmanabh Ratnakar773a2d72012-10-20 06:04:16 +00003200static int be_flash(struct be_adapter *adapter, const u8 *img,
3201 struct be_dma_mem *flash_cmd, int optype, int img_size)
3202{
3203 u32 total_bytes = 0, flash_op, num_bytes = 0;
3204 int status = 0;
3205 struct be_cmd_write_flashrom *req = flash_cmd->va;
3206
3207 total_bytes = img_size;
3208 while (total_bytes) {
3209 num_bytes = min_t(u32, 32*1024, total_bytes);
3210
3211 total_bytes -= num_bytes;
3212
3213 if (!total_bytes) {
3214 if (optype == OPTYPE_PHY_FW)
3215 flash_op = FLASHROM_OPER_PHY_FLASH;
3216 else
3217 flash_op = FLASHROM_OPER_FLASH;
3218 } else {
3219 if (optype == OPTYPE_PHY_FW)
3220 flash_op = FLASHROM_OPER_PHY_SAVE;
3221 else
3222 flash_op = FLASHROM_OPER_SAVE;
3223 }
3224
Padmanabh Ratnakarbe716442012-10-22 23:02:44 +00003225 memcpy(req->data_buf, img, num_bytes);
Padmanabh Ratnakar773a2d72012-10-20 06:04:16 +00003226 img += num_bytes;
3227 status = be_cmd_write_flashrom(adapter, flash_cmd, optype,
3228 flash_op, num_bytes);
3229 if (status) {
3230 if (status == ILLEGAL_IOCTL_REQ &&
3231 optype == OPTYPE_PHY_FW)
3232 break;
3233 dev_err(&adapter->pdev->dev,
3234 "cmd to write to flash rom failed.\n");
3235 return status;
3236 }
3237 }
3238 return 0;
3239}
3240
/* For BE2, BE3 and BE3-R */
/* Walk the fixed per-generation component tables and flash each component
 * that is present in the UFI, skipping: NCSI FW on old firmware, PHY FW on
 * hardware that doesn't need it, and boot code whose CRC already matches
 * flash. Returns 0 on success, -1 on a corrupt UFI, or a FW error status.
 */
static int be_flash_BEx(struct be_adapter *adapter,
			 const struct firmware *fw,
			 struct be_dma_mem *flash_cmd,
			 int num_of_images)

{
	int status = 0, i, filehdr_size = 0;
	int img_hdrs_size = (num_of_images * sizeof(struct image_hdr));
	const u8 *p = fw->data;
	const struct flash_comp *pflashcomp;
	int num_comp, redboot;
	struct flash_section_info *fsec = NULL;

	/* component layout for BE3 (gen-3) UFIs:
	 * { flash offset, op type, max size, UFI image type }
	 */
	struct flash_comp gen3_flash_types[] = {
		{ FLASH_iSCSI_PRIMARY_IMAGE_START_g3, OPTYPE_ISCSI_ACTIVE,
			FLASH_IMAGE_MAX_SIZE_g3, IMAGE_FIRMWARE_iSCSI},
		{ FLASH_REDBOOT_START_g3, OPTYPE_REDBOOT,
			FLASH_REDBOOT_IMAGE_MAX_SIZE_g3, IMAGE_BOOT_CODE},
		{ FLASH_iSCSI_BIOS_START_g3, OPTYPE_BIOS,
			FLASH_BIOS_IMAGE_MAX_SIZE_g3, IMAGE_OPTION_ROM_ISCSI},
		{ FLASH_PXE_BIOS_START_g3, OPTYPE_PXE_BIOS,
			FLASH_BIOS_IMAGE_MAX_SIZE_g3, IMAGE_OPTION_ROM_PXE},
		{ FLASH_FCoE_BIOS_START_g3, OPTYPE_FCOE_BIOS,
			FLASH_BIOS_IMAGE_MAX_SIZE_g3, IMAGE_OPTION_ROM_FCoE},
		{ FLASH_iSCSI_BACKUP_IMAGE_START_g3, OPTYPE_ISCSI_BACKUP,
			FLASH_IMAGE_MAX_SIZE_g3, IMAGE_FIRMWARE_BACKUP_iSCSI},
		{ FLASH_FCoE_PRIMARY_IMAGE_START_g3, OPTYPE_FCOE_FW_ACTIVE,
			FLASH_IMAGE_MAX_SIZE_g3, IMAGE_FIRMWARE_FCoE},
		{ FLASH_FCoE_BACKUP_IMAGE_START_g3, OPTYPE_FCOE_FW_BACKUP,
			FLASH_IMAGE_MAX_SIZE_g3, IMAGE_FIRMWARE_BACKUP_FCoE},
		{ FLASH_NCSI_START_g3, OPTYPE_NCSI_FW,
			FLASH_NCSI_IMAGE_MAX_SIZE_g3, IMAGE_NCSI},
		{ FLASH_PHY_FW_START_g3, OPTYPE_PHY_FW,
			FLASH_PHY_FW_IMAGE_MAX_SIZE_g3, IMAGE_FIRMWARE_PHY}
	};

	/* component layout for BE2 (gen-2) UFIs — no NCSI/PHY entries */
	struct flash_comp gen2_flash_types[] = {
		{ FLASH_iSCSI_PRIMARY_IMAGE_START_g2, OPTYPE_ISCSI_ACTIVE,
			FLASH_IMAGE_MAX_SIZE_g2, IMAGE_FIRMWARE_iSCSI},
		{ FLASH_REDBOOT_START_g2, OPTYPE_REDBOOT,
			FLASH_REDBOOT_IMAGE_MAX_SIZE_g2, IMAGE_BOOT_CODE},
		{ FLASH_iSCSI_BIOS_START_g2, OPTYPE_BIOS,
			FLASH_BIOS_IMAGE_MAX_SIZE_g2, IMAGE_OPTION_ROM_ISCSI},
		{ FLASH_PXE_BIOS_START_g2, OPTYPE_PXE_BIOS,
			FLASH_BIOS_IMAGE_MAX_SIZE_g2, IMAGE_OPTION_ROM_PXE},
		{ FLASH_FCoE_BIOS_START_g2, OPTYPE_FCOE_BIOS,
			FLASH_BIOS_IMAGE_MAX_SIZE_g2, IMAGE_OPTION_ROM_FCoE},
		{ FLASH_iSCSI_BACKUP_IMAGE_START_g2, OPTYPE_ISCSI_BACKUP,
			FLASH_IMAGE_MAX_SIZE_g2, IMAGE_FIRMWARE_BACKUP_iSCSI},
		{ FLASH_FCoE_PRIMARY_IMAGE_START_g2, OPTYPE_FCOE_FW_ACTIVE,
			FLASH_IMAGE_MAX_SIZE_g2, IMAGE_FIRMWARE_FCoE},
		{ FLASH_FCoE_BACKUP_IMAGE_START_g2, OPTYPE_FCOE_FW_BACKUP,
			FLASH_IMAGE_MAX_SIZE_g2, IMAGE_FIRMWARE_BACKUP_FCoE}
	};

	if (BE3_chip(adapter)) {
		pflashcomp = gen3_flash_types;
		filehdr_size = sizeof(struct flash_file_hdr_g3);
		num_comp = ARRAY_SIZE(gen3_flash_types);
	} else {
		pflashcomp = gen2_flash_types;
		filehdr_size = sizeof(struct flash_file_hdr_g2);
		num_comp = ARRAY_SIZE(gen2_flash_types);
	}

	/* Get flash section info*/
	fsec = get_fsec_info(adapter, filehdr_size + img_hdrs_size, fw);
	if (!fsec) {
		dev_err(&adapter->pdev->dev,
			"Invalid Cookie. UFI corrupted ?\n");
		return -1;
	}
	for (i = 0; i < num_comp; i++) {
		/* skip components the UFI simply doesn't carry */
		if (!is_comp_in_ufi(adapter, fsec, pflashcomp[i].img_type))
			continue;

		/* NCSI FW requires firmware >= 3.102.148.0 */
		if ((pflashcomp[i].optype == OPTYPE_NCSI_FW) &&
		    memcmp(adapter->fw_ver, "3.102.148.0", 11) < 0)
			continue;

		if (pflashcomp[i].optype == OPTYPE_PHY_FW &&
		    !phy_flashing_required(adapter))
			continue;

		/* boot code: only reflash when the CRC differs */
		if (pflashcomp[i].optype == OPTYPE_REDBOOT) {
			redboot = be_flash_redboot(adapter, fw->data,
				pflashcomp[i].offset, pflashcomp[i].size,
				filehdr_size + img_hdrs_size);
			if (!redboot)
				continue;
		}

		p = fw->data;
		p += filehdr_size + pflashcomp[i].offset + img_hdrs_size;
		/* bounds check: component must lie within the UFI file */
		if (p + pflashcomp[i].size > fw->data + fw->size)
			return -1;

		status = be_flash(adapter, p, flash_cmd, pflashcomp[i].optype,
					pflashcomp[i].size);
		if (status) {
			dev_err(&adapter->pdev->dev,
				"Flashing section type %d failed.\n",
				pflashcomp[i].img_type);
			return status;
		}
	}
	return 0;
}
3350
Padmanabh Ratnakar773a2d72012-10-20 06:04:16 +00003351static int be_flash_skyhawk(struct be_adapter *adapter,
3352 const struct firmware *fw,
3353 struct be_dma_mem *flash_cmd, int num_of_images)
Ajit Khaparde3f0d4562010-02-09 01:30:35 +00003354{
Padmanabh Ratnakar773a2d72012-10-20 06:04:16 +00003355 int status = 0, i, filehdr_size = 0;
3356 int img_offset, img_size, img_optype, redboot;
3357 int img_hdrs_size = num_of_images * sizeof(struct image_hdr);
3358 const u8 *p = fw->data;
3359 struct flash_section_info *fsec = NULL;
3360
3361 filehdr_size = sizeof(struct flash_file_hdr_g3);
3362 fsec = get_fsec_info(adapter, filehdr_size + img_hdrs_size, fw);
3363 if (!fsec) {
3364 dev_err(&adapter->pdev->dev,
3365 "Invalid Cookie. UFI corrupted ?\n");
3366 return -1;
3367 }
3368
3369 for (i = 0; i < le32_to_cpu(fsec->fsec_hdr.num_images); i++) {
3370 img_offset = le32_to_cpu(fsec->fsec_entry[i].offset);
3371 img_size = le32_to_cpu(fsec->fsec_entry[i].pad_size);
3372
3373 switch (le32_to_cpu(fsec->fsec_entry[i].type)) {
3374 case IMAGE_FIRMWARE_iSCSI:
3375 img_optype = OPTYPE_ISCSI_ACTIVE;
3376 break;
3377 case IMAGE_BOOT_CODE:
3378 img_optype = OPTYPE_REDBOOT;
3379 break;
3380 case IMAGE_OPTION_ROM_ISCSI:
3381 img_optype = OPTYPE_BIOS;
3382 break;
3383 case IMAGE_OPTION_ROM_PXE:
3384 img_optype = OPTYPE_PXE_BIOS;
3385 break;
3386 case IMAGE_OPTION_ROM_FCoE:
3387 img_optype = OPTYPE_FCOE_BIOS;
3388 break;
3389 case IMAGE_FIRMWARE_BACKUP_iSCSI:
3390 img_optype = OPTYPE_ISCSI_BACKUP;
3391 break;
3392 case IMAGE_NCSI:
3393 img_optype = OPTYPE_NCSI_FW;
3394 break;
3395 default:
3396 continue;
3397 }
3398
3399 if (img_optype == OPTYPE_REDBOOT) {
3400 redboot = be_flash_redboot(adapter, fw->data,
3401 img_offset, img_size,
3402 filehdr_size + img_hdrs_size);
3403 if (!redboot)
3404 continue;
3405 }
3406
3407 p = fw->data;
3408 p += filehdr_size + img_offset + img_hdrs_size;
3409 if (p + img_size > fw->data + fw->size)
3410 return -1;
3411
3412 status = be_flash(adapter, p, flash_cmd, img_optype, img_size);
3413 if (status) {
3414 dev_err(&adapter->pdev->dev,
3415 "Flashing section type %d failed.\n",
3416 fsec->fsec_entry[i].type);
3417 return status;
3418 }
3419 }
3420 return 0;
Ajit Khaparde3f0d4562010-02-09 01:30:35 +00003421}
3422
Padmanabh Ratnakarf67ef7b2012-07-12 03:57:09 +00003423static int lancer_wait_idle(struct be_adapter *adapter)
3424{
3425#define SLIPORT_IDLE_TIMEOUT 30
3426 u32 reg_val;
3427 int status = 0, i;
3428
3429 for (i = 0; i < SLIPORT_IDLE_TIMEOUT; i++) {
3430 reg_val = ioread32(adapter->db + PHYSDEV_CONTROL_OFFSET);
3431 if ((reg_val & PHYSDEV_CONTROL_INP_MASK) == 0)
3432 break;
3433
3434 ssleep(1);
3435 }
3436
3437 if (i == SLIPORT_IDLE_TIMEOUT)
3438 status = -1;
3439
3440 return status;
3441}
3442
3443static int lancer_fw_reset(struct be_adapter *adapter)
3444{
3445 int status = 0;
3446
3447 status = lancer_wait_idle(adapter);
3448 if (status)
3449 return status;
3450
3451 iowrite32(PHYSDEV_CONTROL_FW_RESET_MASK, adapter->db +
3452 PHYSDEV_CONTROL_OFFSET);
3453
3454 return status;
3455}
3456
/* Download a firmware image to a Lancer (SLI-4) adapter.
 *
 * The image is streamed in LANCER_FW_DOWNLOAD_CHUNK-sized pieces through
 * a single DMA buffer using the WRITE_OBJECT command, then committed with
 * a zero-length write. Depending on the change_status returned by the
 * commit, a FW reset is issued (or a reboot is requested) so the new
 * image becomes active.
 *
 * @adapter: adapter being flashed
 * @fw:      firmware image; its length must be 4-byte aligned
 *
 * Returns 0 on success or a negative/command error code.
 */
static int lancer_fw_download(struct be_adapter *adapter,
				const struct firmware *fw)
{
#define LANCER_FW_DOWNLOAD_CHUNK (32 * 1024)
#define LANCER_FW_DOWNLOAD_LOCATION "/prg"
	struct be_dma_mem flash_cmd;
	const u8 *data_ptr = NULL;
	u8 *dest_image_ptr = NULL;
	size_t image_size = 0;
	u32 chunk_size = 0;
	u32 data_written = 0;
	u32 offset = 0;
	int status = 0;
	u8 add_status = 0;
	u8 change_status;

	/* The WRITE_OBJECT command transfers whole 32-bit words */
	if (!IS_ALIGNED(fw->size, sizeof(u32))) {
		dev_err(&adapter->pdev->dev,
			"FW Image not properly aligned. "
			"Length must be 4 byte aligned.\n");
		status = -EINVAL;
		goto lancer_fw_exit;
	}

	/* One DMA buffer holds the command header plus one image chunk */
	flash_cmd.size = sizeof(struct lancer_cmd_req_write_object)
				+ LANCER_FW_DOWNLOAD_CHUNK;
	flash_cmd.va = dma_alloc_coherent(&adapter->pdev->dev, flash_cmd.size,
					  &flash_cmd.dma, GFP_KERNEL);
	if (!flash_cmd.va) {
		status = -ENOMEM;
		goto lancer_fw_exit;
	}

	dest_image_ptr = flash_cmd.va +
			sizeof(struct lancer_cmd_req_write_object);
	image_size = fw->size;
	data_ptr = fw->data;

	/* Stream the image; the device reports how much it consumed and
	 * the loop advances by data_written, not chunk_size */
	while (image_size) {
		chunk_size = min_t(u32, image_size, LANCER_FW_DOWNLOAD_CHUNK);

		/* Copy the image chunk content. */
		memcpy(dest_image_ptr, data_ptr, chunk_size);

		status = lancer_cmd_write_object(adapter, &flash_cmd,
						 chunk_size, offset,
						 LANCER_FW_DOWNLOAD_LOCATION,
						 &data_written, &change_status,
						 &add_status);
		if (status)
			break;

		offset += data_written;
		data_ptr += data_written;
		image_size -= data_written;
	}

	if (!status) {
		/* Commit the FW written */
		status = lancer_cmd_write_object(adapter, &flash_cmd,
						 0, offset,
						 LANCER_FW_DOWNLOAD_LOCATION,
						 &data_written, &change_status,
						 &add_status);
	}

	/* DMA buffer is no longer needed regardless of the outcome */
	dma_free_coherent(&adapter->pdev->dev, flash_cmd.size, flash_cmd.va,
			  flash_cmd.dma);
	if (status) {
		dev_err(&adapter->pdev->dev,
			"Firmware load error. "
			"Status code: 0x%x Additional Status: 0x%x\n",
			status, add_status);
		goto lancer_fw_exit;
	}

	if (change_status == LANCER_FW_RESET_NEEDED) {
		status = lancer_fw_reset(adapter);
		if (status) {
			dev_err(&adapter->pdev->dev,
				"Adapter busy for FW reset.\n"
				"New FW will not be active.\n");
			goto lancer_fw_exit;
		}
	} else if (change_status != LANCER_NO_RESET_NEEDED) {
		/* The device cannot activate the image by itself */
		dev_err(&adapter->pdev->dev,
			"System reboot required for new FW"
			" to be active\n");
	}

	dev_info(&adapter->pdev->dev, "Firmware flashed successfully\n");
lancer_fw_exit:
	return status;
}
3551
Sathya Perlaca34fe32012-11-06 17:48:56 +00003552#define UFI_TYPE2 2
3553#define UFI_TYPE3 3
Vasundhara Volam0ad31572013-04-21 23:28:16 +00003554#define UFI_TYPE3R 10
Sathya Perlaca34fe32012-11-06 17:48:56 +00003555#define UFI_TYPE4 4
3556static int be_get_ufi_type(struct be_adapter *adapter,
Vasundhara Volam0ad31572013-04-21 23:28:16 +00003557 struct flash_file_hdr_g3 *fhdr)
Padmanabh Ratnakar773a2d72012-10-20 06:04:16 +00003558{
3559 if (fhdr == NULL)
3560 goto be_get_ufi_exit;
3561
Sathya Perlaca34fe32012-11-06 17:48:56 +00003562 if (skyhawk_chip(adapter) && fhdr->build[0] == '4')
3563 return UFI_TYPE4;
Vasundhara Volam0ad31572013-04-21 23:28:16 +00003564 else if (BE3_chip(adapter) && fhdr->build[0] == '3') {
3565 if (fhdr->asic_type_rev == 0x10)
3566 return UFI_TYPE3R;
3567 else
3568 return UFI_TYPE3;
3569 } else if (BE2_chip(adapter) && fhdr->build[0] == '2')
Sathya Perlaca34fe32012-11-06 17:48:56 +00003570 return UFI_TYPE2;
Padmanabh Ratnakar773a2d72012-10-20 06:04:16 +00003571
3572be_get_ufi_exit:
3573 dev_err(&adapter->pdev->dev,
3574 "UFI and Interface are not compatible for flashing\n");
3575 return -1;
3576}
3577
/* Flash a UFI image on a non-Lancer adapter (BE2/BE3/BE3-R/Skyhawk).
 *
 * Determines the UFI generation from the file header, then for each image
 * header whose imageid is 1 dispatches to the generation-specific flash
 * routine. BE2 (gen2) UFIs carry no image headers and are flashed
 * directly after the loop.
 *
 * Returns 0 on success; -1 on an incompatible UFI; -ENOMEM or a flash
 * command status otherwise.
 */
static int be_fw_download(struct be_adapter *adapter, const struct firmware* fw)
{
	struct flash_file_hdr_g3 *fhdr3;
	struct image_hdr *img_hdr_ptr = NULL;
	struct be_dma_mem flash_cmd;
	const u8 *p;
	int status = 0, i = 0, num_imgs = 0, ufi_type = 0;

	/* One DMA buffer reused for every WRITE_FLASHROM command */
	flash_cmd.size = sizeof(struct be_cmd_write_flashrom);
	flash_cmd.va = dma_alloc_coherent(&adapter->pdev->dev, flash_cmd.size,
					  &flash_cmd.dma, GFP_KERNEL);
	if (!flash_cmd.va) {
		status = -ENOMEM;
		goto be_fw_exit;
	}

	p = fw->data;
	fhdr3 = (struct flash_file_hdr_g3 *)p;

	/* Returns a UFI_TYPE* constant or -1 on a mismatch (error already
	 * logged by the helper) */
	ufi_type = be_get_ufi_type(adapter, fhdr3);

	num_imgs = le32_to_cpu(fhdr3->num_imgs);
	for (i = 0; i < num_imgs; i++) {
		img_hdr_ptr = (struct image_hdr *)(fw->data +
				(sizeof(struct flash_file_hdr_g3) +
				 i * sizeof(struct image_hdr)));
		if (le32_to_cpu(img_hdr_ptr->imageid) == 1) {
			switch (ufi_type) {
			case UFI_TYPE4:
				status = be_flash_skyhawk(adapter, fw,
							&flash_cmd, num_imgs);
				break;
			case UFI_TYPE3R:
				status = be_flash_BEx(adapter, fw, &flash_cmd,
						      num_imgs);
				break;
			case UFI_TYPE3:
				/* Do not flash this ufi on BE3-R cards */
				if (adapter->asic_rev < 0x10)
					status = be_flash_BEx(adapter, fw,
							      &flash_cmd,
							      num_imgs);
				else {
					status = -1;
					dev_err(&adapter->pdev->dev,
						"Can't load BE3 UFI on BE3R\n");
				}
			}
		}
	}

	/* Gen2 UFIs have no image headers, so the loop above never ran;
	 * an unrecognized UFI (-1) is reported as a failure here */
	if (ufi_type == UFI_TYPE2)
		status = be_flash_BEx(adapter, fw, &flash_cmd, 0);
	else if (ufi_type == -1)
		status = -1;

	dma_free_coherent(&adapter->pdev->dev, flash_cmd.size, flash_cmd.va,
			  flash_cmd.dma);
	if (status) {
		dev_err(&adapter->pdev->dev, "Firmware load error\n");
		goto be_fw_exit;
	}

	dev_info(&adapter->pdev->dev, "Firmware flashed successfully\n");

be_fw_exit:
	return status;
}
3646
3647int be_load_fw(struct be_adapter *adapter, u8 *fw_file)
3648{
3649 const struct firmware *fw;
3650 int status;
3651
3652 if (!netif_running(adapter->netdev)) {
3653 dev_err(&adapter->pdev->dev,
3654 "Firmware load not allowed (interface is down)\n");
3655 return -1;
3656 }
3657
3658 status = request_firmware(&fw, fw_file, &adapter->pdev->dev);
3659 if (status)
3660 goto fw_exit;
3661
3662 dev_info(&adapter->pdev->dev, "Flashing firmware file %s\n", fw_file);
3663
3664 if (lancer_chip(adapter))
3665 status = lancer_fw_download(adapter, fw);
3666 else
3667 status = be_fw_download(adapter, fw);
3668
Ajit Khaparde84517482009-09-04 03:12:16 +00003669fw_exit:
3670 release_firmware(fw);
3671 return status;
3672}
3673
/* net_device entry points for this driver; the SR-IOV (ndo_set_vf_*)
 * callbacks are only effective on a physical function */
static const struct net_device_ops be_netdev_ops = {
	.ndo_open		= be_open,
	.ndo_stop		= be_close,
	.ndo_start_xmit		= be_xmit,
	.ndo_set_rx_mode	= be_set_rx_mode,
	.ndo_set_mac_address	= be_mac_addr_set,
	.ndo_change_mtu		= be_change_mtu,
	.ndo_get_stats64	= be_get_stats64,
	.ndo_validate_addr	= eth_validate_addr,
	.ndo_vlan_rx_add_vid	= be_vlan_add_vid,
	.ndo_vlan_rx_kill_vid	= be_vlan_rem_vid,
	.ndo_set_vf_mac		= be_set_vf_mac,
	.ndo_set_vf_vlan	= be_set_vf_vlan,
	.ndo_set_vf_tx_rate	= be_set_vf_tx_rate,
	.ndo_get_vf_config	= be_get_vf_config,
#ifdef CONFIG_NET_POLL_CONTROLLER
	.ndo_poll_controller	= be_netpoll,
#endif
};
3693
/* Initialize netdev feature flags, ops vectors, and a NAPI context per
 * event queue. Called once during probe, before register_netdev(). */
static void be_netdev_init(struct net_device *netdev)
{
	struct be_adapter *adapter = netdev_priv(netdev);
	struct be_eq_obj *eqo;
	int i;

	/* User-toggleable offloads; RX hashing only when multiple RX
	 * queues exist */
	netdev->hw_features |= NETIF_F_SG | NETIF_F_TSO | NETIF_F_TSO6 |
		NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM | NETIF_F_RXCSUM |
		NETIF_F_HW_VLAN_CTAG_TX;
	if (be_multi_rxq(adapter))
		netdev->hw_features |= NETIF_F_RXHASH;

	/* Active features: everything toggleable plus VLAN RX offloads
	 * that are always on */
	netdev->features |= netdev->hw_features |
		NETIF_F_HW_VLAN_CTAG_RX | NETIF_F_HW_VLAN_CTAG_FILTER;

	netdev->vlan_features |= NETIF_F_SG | NETIF_F_TSO | NETIF_F_TSO6 |
		NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM;

	netdev->priv_flags |= IFF_UNICAST_FLT;

	netdev->flags |= IFF_MULTICAST;

	/* Cap GSO size so frame + Ethernet header fits in 64KB */
	netif_set_gso_max_size(netdev, 65535 - ETH_HLEN);

	netdev->netdev_ops = &be_netdev_ops;

	SET_ETHTOOL_OPS(netdev, &be_ethtool_ops);

	/* One NAPI instance per event queue */
	for_all_evt_queues(adapter, eqo, i)
		netif_napi_add(netdev, &eqo->napi, be_poll, BE_NAPI_WEIGHT);
}
3725
3726static void be_unmap_pci_bars(struct be_adapter *adapter)
3727{
Sathya Perlac5b3ad42013-03-05 22:23:20 +00003728 if (adapter->csr)
3729 pci_iounmap(adapter->pdev, adapter->csr);
Sathya Perla8788fdc2009-07-27 22:52:03 +00003730 if (adapter->db)
Sathya Perlace66f782012-11-06 17:48:58 +00003731 pci_iounmap(adapter->pdev, adapter->db);
Parav Pandit045508a2012-03-26 14:27:13 +00003732}
3733
/* Return the PCI BAR index that holds the doorbell registers:
 * BAR 0 on Lancer and on virtual functions, BAR 4 otherwise. */
static int db_bar(struct be_adapter *adapter)
{
	return (lancer_chip(adapter) || !be_physfn(adapter)) ? 0 : 4;
}
3741
3742static int be_roce_map_pci_bars(struct be_adapter *adapter)
Parav Pandit045508a2012-03-26 14:27:13 +00003743{
Sathya Perladbf0f2a2012-11-06 17:49:00 +00003744 if (skyhawk_chip(adapter)) {
Sathya Perlace66f782012-11-06 17:48:58 +00003745 adapter->roce_db.size = 4096;
3746 adapter->roce_db.io_addr = pci_resource_start(adapter->pdev,
3747 db_bar(adapter));
3748 adapter->roce_db.total_size = pci_resource_len(adapter->pdev,
3749 db_bar(adapter));
3750 }
Parav Pandit045508a2012-03-26 14:27:13 +00003751 return 0;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07003752}
3753
/* Map the PCI BARs used by the driver: the CSR BAR (BE2/BE3 physical
 * functions only) and the doorbell BAR, then record the RoCE doorbell
 * region. Returns 0 on success or -ENOMEM on an iomap failure. */
static int be_map_pci_bars(struct be_adapter *adapter)
{
	u8 __iomem *addr;
	u32 sli_intf;

	/* Derive the SLI interface type from config space */
	pci_read_config_dword(adapter->pdev, SLI_INTF_REG_OFFSET, &sli_intf);
	adapter->if_type = (sli_intf & SLI_INTF_IF_TYPE_MASK) >>
				SLI_INTF_IF_TYPE_SHIFT;

	/* CSR space (BAR 2) exists only on BE2/BE3 PFs */
	if (BEx_chip(adapter) && be_physfn(adapter)) {
		adapter->csr = pci_iomap(adapter->pdev, 2, 0);
		if (adapter->csr == NULL)
			return -ENOMEM;
	}

	addr = pci_iomap(adapter->pdev, db_bar(adapter), 0);
	if (addr == NULL)
		goto pci_map_err;
	adapter->db = addr;

	be_roce_map_pci_bars(adapter);
	return 0;

pci_map_err:
	/* Unmaps whatever was mapped above (csr may be set) */
	be_unmap_pci_bars(adapter);
	return -ENOMEM;
}
3781
Sathya Perla6b7c5b92009-03-11 23:32:03 -07003782static void be_ctrl_cleanup(struct be_adapter *adapter)
3783{
Sathya Perla8788fdc2009-07-27 22:52:03 +00003784 struct be_dma_mem *mem = &adapter->mbox_mem_alloced;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07003785
3786 be_unmap_pci_bars(adapter);
3787
3788 if (mem->va)
Ivan Vecera2b7bceb2011-02-02 08:05:12 +00003789 dma_free_coherent(&adapter->pdev->dev, mem->size, mem->va,
3790 mem->dma);
Sathya Perlae7b909a2009-11-22 22:01:10 +00003791
Sathya Perla5b8821b2011-08-02 19:57:44 +00003792 mem = &adapter->rx_filter;
Sathya Perlae7b909a2009-11-22 22:01:10 +00003793 if (mem->va)
Ivan Vecera2b7bceb2011-02-02 08:05:12 +00003794 dma_free_coherent(&adapter->pdev->dev, mem->size, mem->va,
3795 mem->dma);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07003796}
3797
/* One-time control-path setup: map BARs, allocate the 16-byte-aligned
 * mailbox and the RX-filter DMA buffers, and initialize the locks and
 * completion used by the command layer.
 * On failure, everything acquired so far is released via the goto chain.
 * Returns 0 on success or a negative error.
 */
static int be_ctrl_init(struct be_adapter *adapter)
{
	struct be_dma_mem *mbox_mem_alloc = &adapter->mbox_mem_alloced;
	struct be_dma_mem *mbox_mem_align = &adapter->mbox_mem;
	struct be_dma_mem *rx_filter = &adapter->rx_filter;
	u32 sli_intf;
	int status;

	/* Cache SLI family and PF/VF indication from config space */
	pci_read_config_dword(adapter->pdev, SLI_INTF_REG_OFFSET, &sli_intf);
	adapter->sli_family = (sli_intf & SLI_INTF_FAMILY_MASK) >>
				SLI_INTF_FAMILY_SHIFT;
	adapter->virtfn = (sli_intf & SLI_INTF_FT_MASK) ? 1 : 0;

	status = be_map_pci_bars(adapter);
	if (status)
		goto done;

	/* Over-allocate by 16 bytes so the mailbox can be aligned to a
	 * 16-byte boundary as the hardware requires */
	mbox_mem_alloc->size = sizeof(struct be_mcc_mailbox) + 16;
	mbox_mem_alloc->va = dma_alloc_coherent(&adapter->pdev->dev,
						mbox_mem_alloc->size,
						&mbox_mem_alloc->dma,
						GFP_KERNEL);
	if (!mbox_mem_alloc->va) {
		status = -ENOMEM;
		goto unmap_pci_bars;
	}
	mbox_mem_align->size = sizeof(struct be_mcc_mailbox);
	mbox_mem_align->va = PTR_ALIGN(mbox_mem_alloc->va, 16);
	mbox_mem_align->dma = PTR_ALIGN(mbox_mem_alloc->dma, 16);
	memset(mbox_mem_align->va, 0, sizeof(struct be_mcc_mailbox));

	rx_filter->size = sizeof(struct be_cmd_req_rx_filter);
	rx_filter->va = dma_alloc_coherent(&adapter->pdev->dev, rx_filter->size,
					   &rx_filter->dma,
					   GFP_KERNEL | __GFP_ZERO);
	if (rx_filter->va == NULL) {
		status = -ENOMEM;
		goto free_mbox;
	}

	mutex_init(&adapter->mbox_lock);
	spin_lock_init(&adapter->mcc_lock);
	spin_lock_init(&adapter->mcc_cq_lock);

	init_completion(&adapter->flash_compl);
	/* Saved state is restored on EEH / error recovery */
	pci_save_state(adapter->pdev);
	return 0;

free_mbox:
	dma_free_coherent(&adapter->pdev->dev, mbox_mem_alloc->size,
			  mbox_mem_alloc->va, mbox_mem_alloc->dma);

unmap_pci_bars:
	be_unmap_pci_bars(adapter);

done:
	return status;
}
3856
3857static void be_stats_cleanup(struct be_adapter *adapter)
3858{
Sathya Perla3abcded2010-10-03 22:12:27 -07003859 struct be_dma_mem *cmd = &adapter->stats_cmd;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07003860
3861 if (cmd->va)
Ivan Vecera2b7bceb2011-02-02 08:05:12 +00003862 dma_free_coherent(&adapter->pdev->dev, cmd->size,
3863 cmd->va, cmd->dma);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07003864}
3865
3866static int be_stats_init(struct be_adapter *adapter)
3867{
Sathya Perla3abcded2010-10-03 22:12:27 -07003868 struct be_dma_mem *cmd = &adapter->stats_cmd;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07003869
Sathya Perlaca34fe32012-11-06 17:48:56 +00003870 if (lancer_chip(adapter))
3871 cmd->size = sizeof(struct lancer_cmd_req_pport_stats);
3872 else if (BE2_chip(adapter))
Ajit Khaparde89a88ab2011-05-16 07:36:18 +00003873 cmd->size = sizeof(struct be_cmd_req_get_stats_v0);
Sathya Perlaca34fe32012-11-06 17:48:56 +00003874 else
3875 /* BE3 and Skyhawk */
3876 cmd->size = sizeof(struct be_cmd_req_get_stats_v1);
3877
Ivan Vecera2b7bceb2011-02-02 08:05:12 +00003878 cmd->va = dma_alloc_coherent(&adapter->pdev->dev, cmd->size, &cmd->dma,
Joe Perches1f9061d22013-03-15 07:23:58 +00003879 GFP_KERNEL | __GFP_ZERO);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07003880 if (cmd->va == NULL)
3881 return -1;
3882 return 0;
3883}
3884
/* PCI remove callback: tear down the adapter in the reverse order of
 * probe. The ordering matters — the recovery worker must be cancelled
 * and the netdev unregistered before resources are freed. */
static void be_remove(struct pci_dev *pdev)
{
	struct be_adapter *adapter = pci_get_drvdata(pdev);

	if (!adapter)
		return;

	be_roce_dev_remove(adapter);
	be_intr_set(adapter, false);

	/* No recovery work may run while we tear down */
	cancel_delayed_work_sync(&adapter->func_recovery_work);

	unregister_netdev(adapter->netdev);

	be_clear(adapter);

	/* tell fw we're done with firing cmds */
	be_cmd_fw_clean(adapter);

	be_stats_cleanup(adapter);

	be_ctrl_cleanup(adapter);

	pci_disable_pcie_error_reporting(pdev);

	pci_set_drvdata(pdev, NULL);
	pci_release_regions(pdev);
	pci_disable_device(pdev);

	free_netdev(adapter->netdev);
}
3916
Ajit Khaparde4762f6c2012-03-18 06:23:11 +00003917bool be_is_wol_supported(struct be_adapter *adapter)
3918{
3919 return ((adapter->wol_cap & BE_WOL_CAP) &&
3920 !be_is_wol_excluded(adapter)) ? true : false;
3921}
3922
/* Query the firmware's UART trace level via the extended-FAT
 * capabilities command. Lancer does not support this query and always
 * yields 0. On any failure the function falls back to returning 0
 * (level unknown). */
u32 be_get_fw_log_level(struct be_adapter *adapter)
{
	struct be_dma_mem extfat_cmd;
	struct be_fat_conf_params *cfgs;
	int status;
	u32 level = 0;
	int j;

	if (lancer_chip(adapter))
		return 0;

	memset(&extfat_cmd, 0, sizeof(struct be_dma_mem));
	extfat_cmd.size = sizeof(struct be_cmd_resp_get_ext_fat_caps);
	extfat_cmd.va = pci_alloc_consistent(adapter->pdev, extfat_cmd.size,
					     &extfat_cmd.dma);

	if (!extfat_cmd.va) {
		dev_err(&adapter->pdev->dev, "%s: Memory allocation failure\n",
			__func__);
		goto err;
	}

	status = be_cmd_get_ext_fat_capabilites(adapter, &extfat_cmd);
	if (!status) {
		/* Config parameters follow the generic response header */
		cfgs = (struct be_fat_conf_params *)(extfat_cmd.va +
						sizeof(struct be_cmd_resp_hdr));
		/* Pick the debug level of the UART trace mode, if present;
		 * the last matching entry wins */
		for (j = 0; j < le32_to_cpu(cfgs->module[0].num_modes); j++) {
			if (cfgs->module[0].trace_lvl[j].mode == MODE_UART)
				level = cfgs->module[0].trace_lvl[j].dbg_lvl;
		}
	}
	pci_free_consistent(adapter->pdev, extfat_cmd.size, extfat_cmd.va,
			    extfat_cmd.dma);
err:
	return level;
}
Padmanabh Ratnakarabb93952012-10-20 06:01:41 +00003959
/* Fetch one-time configuration from the firmware during probe:
 * controller attributes, WOL capability (with an exclusion-list
 * fallback), and the FW log level used to seed msg_enable.
 * Returns 0 on success or the attribute-query error. */
static int be_get_initial_config(struct be_adapter *adapter)
{
	int status;
	u32 level;

	status = be_cmd_get_cntl_attributes(adapter);
	if (status)
		return status;

	status = be_cmd_get_acpi_wol_cap(adapter);
	if (status) {
		/* in case of a failure to get wol capabillities
		 * check the exclusion list to determine WOL capability */
		if (!be_is_wol_excluded(adapter))
			adapter->wol_cap |= BE_WOL_CAP;
	}

	if (be_is_wol_supported(adapter))
		adapter->wol = true;

	/* Must be a power of 2 or else MODULO will BUG_ON */
	adapter->be_get_temp_freq = 64;

	level = be_get_fw_log_level(adapter);
	adapter->msg_enable = level <= FW_LOG_LEVEL_DEFAULT ? NETIF_MSG_HW : 0;

	return 0;
}
3988
/* Recover a Lancer function after a SLIPORT error: wait for the device
 * to report ready, tear down and rebuild the function, and reopen the
 * interface if it was running.
 * Returns 0 on success or the first failing step's status. */
static int lancer_recover_func(struct be_adapter *adapter)
{
	int status;

	/* The device must signal readiness before we touch it */
	status = lancer_test_and_set_rdy_state(adapter);
	if (status)
		goto err;

	if (netif_running(adapter->netdev))
		be_close(adapter->netdev);

	be_clear(adapter);

	/* Clear sticky error state before re-initializing */
	adapter->hw_error = false;
	adapter->fw_timeout = false;

	status = be_setup(adapter);
	if (status)
		goto err;

	if (netif_running(adapter->netdev)) {
		status = be_open(adapter->netdev);
		if (status)
			goto err;
	}

	dev_err(&adapter->pdev->dev,
		"Adapter SLIPORT recovery succeeded\n");
	return 0;
err:
	/* Under EEH the recovery is expected to fail; log it only then */
	if (adapter->eeh_error)
		dev_err(&adapter->pdev->dev,
			"Adapter SLIPORT recovery failed\n");

	return status;
}
4025
/* Periodic (1s) worker that detects adapter errors and, on Lancer,
 * attempts an automatic function recovery. The netdev is detached while
 * recovery runs and re-attached only on success. Always reschedules
 * itself. */
static void be_func_recovery_task(struct work_struct *work)
{
	struct be_adapter *adapter =
		container_of(work, struct be_adapter, func_recovery_work.work);
	int status;

	be_detect_error(adapter);

	if (adapter->hw_error && lancer_chip(adapter)) {

		/* EEH handles recovery itself; do not interfere */
		if (adapter->eeh_error)
			goto out;

		/* Detach under rtnl so the stack stops using the device */
		rtnl_lock();
		netif_device_detach(adapter->netdev);
		rtnl_unlock();

		status = lancer_recover_func(adapter);

		if (!status)
			netif_device_attach(adapter->netdev);
	}

out:
	schedule_delayed_work(&adapter->func_recovery_work,
			      msecs_to_jiffies(1000));
}
4053
/* Periodic (1s) housekeeping worker: reaps MCC completions while the
 * interface is down, issues stats and die-temperature queries,
 * replenishes starved RX rings, and updates adaptive EQ delays.
 * Always reschedules itself. */
static void be_worker(struct work_struct *work)
{
	struct be_adapter *adapter =
		container_of(work, struct be_adapter, work.work);
	struct be_rx_obj *rxo;
	struct be_eq_obj *eqo;
	int i;

	/* when interrupts are not yet enabled, just reap any pending
	 * mcc completions */
	if (!netif_running(adapter->netdev)) {
		/* be_process_mcc() expects BH-disabled context */
		local_bh_disable();
		be_process_mcc(adapter);
		local_bh_enable();
		goto reschedule;
	}

	/* Only one stats query may be outstanding at a time */
	if (!adapter->stats_cmd_sent) {
		if (lancer_chip(adapter))
			lancer_cmd_get_pport_stats(adapter,
						   &adapter->stats_cmd);
		else
			be_cmd_get_stats(adapter, &adapter->stats_cmd);
	}

	/* Sample die temperature every be_get_temp_freq ticks */
	if (MODULO(adapter->work_counter, adapter->be_get_temp_freq) == 0)
		be_cmd_get_die_temperature(adapter);

	/* Refill RX rings that ran out of buffers */
	for_all_rx_queues(adapter, rxo, i) {
		if (rxo->rx_post_starved) {
			rxo->rx_post_starved = false;
			be_post_rx_frags(rxo, GFP_KERNEL);
		}
	}

	for_all_evt_queues(adapter, eqo, i)
		be_eqd_update(adapter, eqo);

reschedule:
	adapter->work_counter++;
	schedule_delayed_work(&adapter->work, msecs_to_jiffies(1000));
}
4096
Sathya Perla39f1d942012-05-08 19:41:24 +00004097static bool be_reset_required(struct be_adapter *adapter)
4098{
Sathya Perlad79c0a22012-06-05 19:37:22 +00004099 return be_find_vfs(adapter, ENABLED) > 0 ? false : true;
Sathya Perla39f1d942012-05-08 19:41:24 +00004100}
4101
Sathya Perlad3791422012-09-28 04:39:44 +00004102static char *mc_name(struct be_adapter *adapter)
4103{
4104 if (adapter->function_mode & FLEX10_MODE)
4105 return "FLEX10";
4106 else if (adapter->function_mode & VNIC_MODE)
4107 return "vNIC";
4108 else if (adapter->function_mode & UMC_ENABLED)
4109 return "UMC";
4110 else
4111 return "";
4112}
4113
/* Printable label for log messages: physical vs virtual function */
static inline char *func_name(struct be_adapter *adapter)
{
	if (be_physfn(adapter))
		return "PF";
	return "VF";
}
4118
Greg Kroah-Hartman1dd06ae2012-12-06 14:30:56 +00004119static int be_probe(struct pci_dev *pdev, const struct pci_device_id *pdev_id)
Sathya Perla6b7c5b92009-03-11 23:32:03 -07004120{
4121 int status = 0;
4122 struct be_adapter *adapter;
4123 struct net_device *netdev;
Padmanabh Ratnakarb4e32a72012-07-12 03:57:35 +00004124 char port_name;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07004125
4126 status = pci_enable_device(pdev);
4127 if (status)
4128 goto do_none;
4129
4130 status = pci_request_regions(pdev, DRV_NAME);
4131 if (status)
4132 goto disable_dev;
4133 pci_set_master(pdev);
4134
Sathya Perla7f640062012-06-05 19:37:20 +00004135 netdev = alloc_etherdev_mqs(sizeof(*adapter), MAX_TX_QS, MAX_RX_QS);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07004136 if (netdev == NULL) {
4137 status = -ENOMEM;
4138 goto rel_reg;
4139 }
4140 adapter = netdev_priv(netdev);
4141 adapter->pdev = pdev;
4142 pci_set_drvdata(pdev, adapter);
4143 adapter->netdev = netdev;
Sathya Perla2243e2e2009-11-22 22:02:03 +00004144 SET_NETDEV_DEV(netdev, &pdev->dev);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07004145
Ivan Vecera2b7bceb2011-02-02 08:05:12 +00004146 status = dma_set_mask(&pdev->dev, DMA_BIT_MASK(64));
Sathya Perla6b7c5b92009-03-11 23:32:03 -07004147 if (!status) {
Craig Hada2bd92cd2013-04-21 23:28:18 +00004148 status = dma_set_coherent_mask(&pdev->dev, DMA_BIT_MASK(64));
4149 if (status < 0) {
4150 dev_err(&pdev->dev, "dma_set_coherent_mask failed\n");
4151 goto free_netdev;
4152 }
Sathya Perla6b7c5b92009-03-11 23:32:03 -07004153 netdev->features |= NETIF_F_HIGHDMA;
4154 } else {
Ivan Vecera2b7bceb2011-02-02 08:05:12 +00004155 status = dma_set_mask(&pdev->dev, DMA_BIT_MASK(32));
Sathya Perla6b7c5b92009-03-11 23:32:03 -07004156 if (status) {
4157 dev_err(&pdev->dev, "Could not set PCI DMA Mask\n");
4158 goto free_netdev;
4159 }
4160 }
4161
Sathya Perlad6b6d982012-09-05 01:56:48 +00004162 status = pci_enable_pcie_error_reporting(pdev);
4163 if (status)
4164 dev_err(&pdev->dev, "Could not use PCIe error reporting\n");
4165
Sathya Perla6b7c5b92009-03-11 23:32:03 -07004166 status = be_ctrl_init(adapter);
4167 if (status)
Sathya Perla39f1d942012-05-08 19:41:24 +00004168 goto free_netdev;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07004169
Sathya Perla2243e2e2009-11-22 22:02:03 +00004170 /* sync up with fw's ready state */
Sarveshwar Bandiba343c72010-03-31 02:56:12 +00004171 if (be_physfn(adapter)) {
Padmanabh Ratnakarbf99e502012-07-12 03:56:58 +00004172 status = be_fw_wait_ready(adapter);
Sarveshwar Bandiba343c72010-03-31 02:56:12 +00004173 if (status)
4174 goto ctrl_clean;
Sarveshwar Bandiba343c72010-03-31 02:56:12 +00004175 }
Sathya Perla2243e2e2009-11-22 22:02:03 +00004176
Sathya Perla39f1d942012-05-08 19:41:24 +00004177 if (be_reset_required(adapter)) {
4178 status = be_cmd_reset_function(adapter);
4179 if (status)
4180 goto ctrl_clean;
Sarveshwar Bandi556ae192010-05-24 18:38:25 -07004181
Kalesh AP2d177be2013-04-28 22:22:29 +00004182 /* Wait for interrupts to quiesce after an FLR */
4183 msleep(100);
4184 }
Somnath Kotur8cef7a72013-03-14 02:41:51 +00004185
4186 /* Allow interrupts for other ULPs running on NIC function */
4187 be_intr_set(adapter, true);
Sathya Perla10ef9ab2012-02-09 18:05:27 +00004188
Kalesh AP2d177be2013-04-28 22:22:29 +00004189 /* tell fw we're ready to fire cmds */
4190 status = be_cmd_fw_init(adapter);
4191 if (status)
4192 goto ctrl_clean;
4193
Sathya Perla6b7c5b92009-03-11 23:32:03 -07004194 status = be_stats_init(adapter);
4195 if (status)
4196 goto ctrl_clean;
4197
Sathya Perla39f1d942012-05-08 19:41:24 +00004198 status = be_get_initial_config(adapter);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07004199 if (status)
4200 goto stats_clean;
4201
Sathya Perla6b7c5b92009-03-11 23:32:03 -07004202 INIT_DELAYED_WORK(&adapter->work, be_worker);
Padmanabh Ratnakarf67ef7b2012-07-12 03:57:09 +00004203 INIT_DELAYED_WORK(&adapter->func_recovery_work, be_func_recovery_task);
Sathya Perlaa54769f2011-10-24 02:45:00 +00004204 adapter->rx_fc = adapter->tx_fc = true;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07004205
Sathya Perla5fb379e2009-06-18 00:02:59 +00004206 status = be_setup(adapter);
4207 if (status)
Sathya Perla55f5c3c2012-09-28 04:39:42 +00004208 goto stats_clean;
Sathya Perla2243e2e2009-11-22 22:02:03 +00004209
Sathya Perla3abcded2010-10-03 22:12:27 -07004210 be_netdev_init(netdev);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07004211 status = register_netdev(netdev);
4212 if (status != 0)
Sathya Perla5fb379e2009-06-18 00:02:59 +00004213 goto unsetup;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07004214
Parav Pandit045508a2012-03-26 14:27:13 +00004215 be_roce_dev_add(adapter);
4216
Padmanabh Ratnakarf67ef7b2012-07-12 03:57:09 +00004217 schedule_delayed_work(&adapter->func_recovery_work,
4218 msecs_to_jiffies(1000));
Padmanabh Ratnakarb4e32a72012-07-12 03:57:35 +00004219
4220 be_cmd_query_port_name(adapter, &port_name);
4221
Sathya Perlad3791422012-09-28 04:39:44 +00004222 dev_info(&pdev->dev, "%s: %s %s port %c\n", nic_name(pdev),
4223 func_name(adapter), mc_name(adapter), port_name);
Somnath Kotur34b1ef02011-06-01 00:33:22 +00004224
Sathya Perla6b7c5b92009-03-11 23:32:03 -07004225 return 0;
4226
Sathya Perla5fb379e2009-06-18 00:02:59 +00004227unsetup:
4228 be_clear(adapter);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07004229stats_clean:
4230 be_stats_cleanup(adapter);
4231ctrl_clean:
4232 be_ctrl_cleanup(adapter);
Sathya Perlaf9449ab2011-10-24 02:45:01 +00004233free_netdev:
Sathya Perlafe6d2a32010-11-21 23:25:50 +00004234 free_netdev(netdev);
Sathya Perla8d56ff12009-11-22 22:02:26 +00004235 pci_set_drvdata(pdev, NULL);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07004236rel_reg:
4237 pci_release_regions(pdev);
4238disable_dev:
4239 pci_disable_device(pdev);
4240do_none:
Ajit Khapardec4ca2372009-05-18 15:38:55 -07004241 dev_err(&pdev->dev, "%s initialization failed\n", nic_name(pdev));
Sathya Perla6b7c5b92009-03-11 23:32:03 -07004242 return status;
4243}
4244
/* Legacy PM suspend: arm WoL if requested, stop the recovery worker,
 * detach and close the netdev, tear down HW resources (be_clear) and
 * put the PCI device into the requested low-power state.
 */
static int be_suspend(struct pci_dev *pdev, pm_message_t state)
{
	struct be_adapter *adapter = pci_get_drvdata(pdev);
	struct net_device *netdev = adapter->netdev;

	if (adapter->wol)
		be_setup_wol(adapter, true);

	/* no FW commands may be issued by the worker past this point */
	cancel_delayed_work_sync(&adapter->func_recovery_work);

	netif_device_detach(netdev);
	if (netif_running(netdev)) {
		rtnl_lock();
		be_close(netdev);
		rtnl_unlock();
	}
	be_clear(adapter);

	pci_save_state(pdev);
	pci_disable_device(pdev);
	pci_set_power_state(pdev, pci_choose_state(pdev, state));
	return 0;
}
4268
/* Legacy PM resume: re-enable the PCI device, re-init FW communication,
 * rebuild HW resources (be_setup), reopen the interface if it was up,
 * re-arm the recovery worker and disarm WoL.
 *
 * Returns 0 on success or a negative errno from the early steps.
 */
static int be_resume(struct pci_dev *pdev)
{
	int status = 0;
	struct be_adapter *adapter = pci_get_drvdata(pdev);
	struct net_device *netdev = adapter->netdev;

	netif_device_detach(netdev);

	status = pci_enable_device(pdev);
	if (status)
		return status;

	pci_set_power_state(pdev, 0);
	pci_restore_state(pdev);

	/* tell fw we're ready to fire cmds */
	status = be_cmd_fw_init(adapter);
	if (status)
		return status;

	/* NOTE(review): be_setup()'s return value is ignored here, unlike
	 * in be_probe()/be_eeh_resume() -- confirm this is intentional.
	 */
	be_setup(adapter);
	if (netif_running(netdev)) {
		rtnl_lock();
		be_open(netdev);
		rtnl_unlock();
	}

	schedule_delayed_work(&adapter->func_recovery_work,
			      msecs_to_jiffies(1000));
	netif_device_attach(netdev);

	if (adapter->wol)
		be_setup_wol(adapter, false);

	return 0;
}
4305
Sathya Perla82456b02010-02-17 01:35:37 +00004306/*
4307 * An FLR will stop BE from DMAing any data.
4308 */
4309static void be_shutdown(struct pci_dev *pdev)
4310{
4311 struct be_adapter *adapter = pci_get_drvdata(pdev);
Sathya Perla82456b02010-02-17 01:35:37 +00004312
Ajit Khaparde2d5d4152011-04-06 05:53:13 +00004313 if (!adapter)
4314 return;
Sathya Perla82456b02010-02-17 01:35:37 +00004315
Sathya Perla0f4a6822011-03-21 20:49:28 +00004316 cancel_delayed_work_sync(&adapter->work);
Padmanabh Ratnakarf67ef7b2012-07-12 03:57:09 +00004317 cancel_delayed_work_sync(&adapter->func_recovery_work);
Ajit Khapardea4ca0552011-02-11 13:38:03 +00004318
Ajit Khaparde2d5d4152011-04-06 05:53:13 +00004319 netif_device_detach(adapter->netdev);
Sathya Perla82456b02010-02-17 01:35:37 +00004320
Ajit Khaparde57841862011-04-06 18:08:43 +00004321 be_cmd_reset_function(adapter);
4322
Sathya Perla82456b02010-02-17 01:35:37 +00004323 pci_disable_device(pdev);
Sathya Perla82456b02010-02-17 01:35:37 +00004324}
4325
Sathya Perlacf588472010-02-14 21:22:01 +00004326static pci_ers_result_t be_eeh_err_detected(struct pci_dev *pdev,
4327 pci_channel_state_t state)
4328{
4329 struct be_adapter *adapter = pci_get_drvdata(pdev);
4330 struct net_device *netdev = adapter->netdev;
4331
4332 dev_err(&adapter->pdev->dev, "EEH error detected\n");
4333
Padmanabh Ratnakarf67ef7b2012-07-12 03:57:09 +00004334 adapter->eeh_error = true;
Sathya Perlacf588472010-02-14 21:22:01 +00004335
Padmanabh Ratnakarf67ef7b2012-07-12 03:57:09 +00004336 cancel_delayed_work_sync(&adapter->func_recovery_work);
4337
4338 rtnl_lock();
Sathya Perlacf588472010-02-14 21:22:01 +00004339 netif_device_detach(netdev);
Padmanabh Ratnakarf67ef7b2012-07-12 03:57:09 +00004340 rtnl_unlock();
Sathya Perlacf588472010-02-14 21:22:01 +00004341
4342 if (netif_running(netdev)) {
4343 rtnl_lock();
4344 be_close(netdev);
4345 rtnl_unlock();
4346 }
4347 be_clear(adapter);
4348
4349 if (state == pci_channel_io_perm_failure)
4350 return PCI_ERS_RESULT_DISCONNECT;
4351
4352 pci_disable_device(pdev);
4353
Somnath Kotureeb7fc72012-05-02 03:41:01 +00004354 /* The error could cause the FW to trigger a flash debug dump.
4355 * Resetting the card while flash dump is in progress
Padmanabh Ratnakarc8a54162012-10-20 06:03:37 +00004356 * can cause it not to recover; wait for it to finish.
4357 * Wait only for first function as it is needed only once per
4358 * adapter.
Somnath Kotureeb7fc72012-05-02 03:41:01 +00004359 */
Padmanabh Ratnakarc8a54162012-10-20 06:03:37 +00004360 if (pdev->devfn == 0)
4361 ssleep(30);
4362
Sathya Perlacf588472010-02-14 21:22:01 +00004363 return PCI_ERS_RESULT_NEED_RESET;
4364}
4365
/* EEH/AER slot_reset callback: clear recorded error state, re-enable
 * the PCI device, restore config space and wait for FW readiness.
 *
 * Returns PCI_ERS_RESULT_RECOVERED on success (be_eeh_resume follows)
 * or PCI_ERS_RESULT_DISCONNECT if the device cannot be revived.
 */
static pci_ers_result_t be_eeh_reset(struct pci_dev *pdev)
{
	struct be_adapter *adapter = pci_get_drvdata(pdev);
	int status;

	dev_info(&adapter->pdev->dev, "EEH reset\n");
	be_clear_all_error(adapter);

	status = pci_enable_device(pdev);
	if (status)
		return PCI_ERS_RESULT_DISCONNECT;

	pci_set_master(pdev);
	pci_set_power_state(pdev, 0);
	pci_restore_state(pdev);

	/* Check if card is ok and fw is ready */
	dev_info(&adapter->pdev->dev,
		 "Waiting for FW to be ready after EEH reset\n");
	status = be_fw_wait_ready(adapter);
	if (status)
		return PCI_ERS_RESULT_DISCONNECT;

	/* clear any stale AER status left from the error */
	pci_cleanup_aer_uncorrect_error_status(pdev);
	return PCI_ERS_RESULT_RECOVERED;
}
4392
/* EEH/AER resume callback: rebuild the function after a successful slot
 * reset -- reset + re-init FW communication, recreate HW resources,
 * reopen the interface if it was up, and re-arm the recovery worker.
 * On any failure only an error is logged (no further unwinding here).
 */
static void be_eeh_resume(struct pci_dev *pdev)
{
	int status = 0;
	struct be_adapter *adapter = pci_get_drvdata(pdev);
	struct net_device *netdev = adapter->netdev;

	dev_info(&adapter->pdev->dev, "EEH resume\n");

	pci_save_state(pdev);

	status = be_cmd_reset_function(adapter);
	if (status)
		goto err;

	/* tell fw we're ready to fire cmds */
	status = be_cmd_fw_init(adapter);
	if (status)
		goto err;

	status = be_setup(adapter);
	if (status)
		goto err;

	if (netif_running(netdev)) {
		status = be_open(netdev);
		if (status)
			goto err;
	}

	schedule_delayed_work(&adapter->func_recovery_work,
			      msecs_to_jiffies(1000));
	netif_device_attach(netdev);
	return;
err:
	dev_err(&adapter->pdev->dev, "EEH resume failed\n");
}
4429
Stephen Hemminger3646f0e2012-09-07 09:33:15 -07004430static const struct pci_error_handlers be_eeh_handlers = {
Sathya Perlacf588472010-02-14 21:22:01 +00004431 .error_detected = be_eeh_err_detected,
4432 .slot_reset = be_eeh_reset,
4433 .resume = be_eeh_resume,
4434};
4435
Sathya Perla6b7c5b92009-03-11 23:32:03 -07004436static struct pci_driver be_driver = {
4437 .name = DRV_NAME,
4438 .id_table = be_dev_ids,
4439 .probe = be_probe,
4440 .remove = be_remove,
4441 .suspend = be_suspend,
Sathya Perlacf588472010-02-14 21:22:01 +00004442 .resume = be_resume,
Sathya Perla82456b02010-02-17 01:35:37 +00004443 .shutdown = be_shutdown,
Sathya Perlacf588472010-02-14 21:22:01 +00004444 .err_handler = &be_eeh_handlers
Sathya Perla6b7c5b92009-03-11 23:32:03 -07004445};
4446
4447static int __init be_init_module(void)
4448{
Joe Perches8e95a202009-12-03 07:58:21 +00004449 if (rx_frag_size != 8192 && rx_frag_size != 4096 &&
4450 rx_frag_size != 2048) {
Sathya Perla6b7c5b92009-03-11 23:32:03 -07004451 printk(KERN_WARNING DRV_NAME
4452 " : Module param rx_frag_size must be 2048/4096/8192."
4453 " Using 2048\n");
4454 rx_frag_size = 2048;
4455 }
Sathya Perla6b7c5b92009-03-11 23:32:03 -07004456
Sathya Perla6b7c5b92009-03-11 23:32:03 -07004457 return pci_register_driver(&be_driver);
4458}
4459module_init(be_init_module);
4460
/* Module unload: unregister the PCI driver; the PCI core then invokes
 * be_remove() for every bound device.
 */
static void __exit be_exit_module(void)
{
	pci_unregister_driver(&be_driver);
}
module_exit(be_exit_module);