blob: 4babc8a4a54396b9dfbb64dd0292f5ade612021c [file] [log] [blame]
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001/*
Vasundhara Volamc7bb15a2013-03-06 20:05:05 +00002 * Copyright (C) 2005 - 2013 Emulex
Sathya Perla6b7c5b92009-03-11 23:32:03 -07003 * All rights reserved.
4 *
5 * This program is free software; you can redistribute it and/or
6 * modify it under the terms of the GNU General Public License version 2
7 * as published by the Free Software Foundation. The full GNU General
8 * Public License is included in this distribution in the file called COPYING.
9 *
10 * Contact Information:
Ajit Khaparded2145cd2011-03-16 08:20:46 +000011 * linux-drivers@emulex.com
Sathya Perla6b7c5b92009-03-11 23:32:03 -070012 *
Ajit Khaparded2145cd2011-03-16 08:20:46 +000013 * Emulex
14 * 3333 Susan Street
15 * Costa Mesa, CA 92626
Sathya Perla6b7c5b92009-03-11 23:32:03 -070016 */
17
Paul Gortmaker70c71602011-05-22 16:47:17 -040018#include <linux/prefetch.h>
Paul Gortmaker9d9779e2011-07-03 15:21:01 -040019#include <linux/module.h>
Sathya Perla6b7c5b92009-03-11 23:32:03 -070020#include "be.h"
Sathya Perla8788fdc2009-07-27 22:52:03 +000021#include "be_cmds.h"
Stephen Hemminger65f71b82009-03-27 00:25:24 -070022#include <asm/div64.h>
Sathya Perlad6b6d982012-09-05 01:56:48 +000023#include <linux/aer.h>
Sathya Perla6b7c5b92009-03-11 23:32:03 -070024
25MODULE_VERSION(DRV_VER);
26MODULE_DEVICE_TABLE(pci, be_dev_ids);
27MODULE_DESCRIPTION(DRV_DESC " " DRV_VER);
Sarveshwar Bandi00d3d512013-01-28 04:17:01 +000028MODULE_AUTHOR("Emulex Corporation");
Sathya Perla6b7c5b92009-03-11 23:32:03 -070029MODULE_LICENSE("GPL");
30
/* Module parameter: number of PCI VFs to initialize (read-only via sysfs) */
static unsigned int num_vfs;
module_param(num_vfs, uint, S_IRUGO);
MODULE_PARM_DESC(num_vfs, "Number of PCI VFs to initialize");
Sathya Perla6b7c5b92009-03-11 23:32:03 -070034
/* Module parameter: size of each RX fragment that holds received data;
 * defaults to 2048 bytes (read-only via sysfs).
 */
static ushort rx_frag_size = 2048;
module_param(rx_frag_size, ushort, S_IRUGO);
MODULE_PARM_DESC(rx_frag_size, "Size of a fragment that holds rcvd data.");
38
/* PCI device IDs this driver claims; the zero entry terminates the table */
static DEFINE_PCI_DEVICE_TABLE(be_dev_ids) = {
	{ PCI_DEVICE(BE_VENDOR_ID, BE_DEVICE_ID1) },
	{ PCI_DEVICE(BE_VENDOR_ID, BE_DEVICE_ID2) },
	{ PCI_DEVICE(BE_VENDOR_ID, OC_DEVICE_ID1) },
	{ PCI_DEVICE(BE_VENDOR_ID, OC_DEVICE_ID2) },
	{ PCI_DEVICE(EMULEX_VENDOR_ID, OC_DEVICE_ID3)},
	{ PCI_DEVICE(EMULEX_VENDOR_ID, OC_DEVICE_ID4)},
	{ PCI_DEVICE(EMULEX_VENDOR_ID, OC_DEVICE_ID5)},
	{ PCI_DEVICE(EMULEX_VENDOR_ID, OC_DEVICE_ID6)},
	{ 0 }
};
MODULE_DEVICE_TABLE(pci, be_dev_ids);
/* UE Status Low CSR: human-readable names used when decoding the
 * unrecoverable-error status low register (string order follows the
 * register's bit layout; trailing spaces in entries are intentional).
 */
static const char * const ue_status_low_desc[] = {
	"CEV",
	"CTX",
	"DBUF",
	"ERX",
	"Host",
	"MPU",
	"NDMA",
	"PTC ",
	"RDMA ",
	"RXF ",
	"RXIPS ",
	"RXULP0 ",
	"RXULP1 ",
	"RXULP2 ",
	"TIM ",
	"TPOST ",
	"TPRE ",
	"TXIPS ",
	"TXULP0 ",
	"TXULP1 ",
	"UC ",
	"WDMA ",
	"TXULP2 ",
	"HOST1 ",
	"P0_OB_LINK ",
	"P1_OB_LINK ",
	"HOST_GPIO ",
	"MBOX ",
	"AXGMAC0",
	"AXGMAC1",
	"JTAG",
	"MPU_INTPEND"
};
/* UE Status High CSR: human-readable names used when decoding the
 * unrecoverable-error status high register (string order follows the
 * register's bit layout; unassigned bits report as "Unknown").
 */
static const char * const ue_status_hi_desc[] = {
	"LPCMEMHOST",
	"MGMT_MAC",
	"PCS0ONLINE",
	"MPU_IRAM",
	"PCS1ONLINE",
	"PCTL0",
	"PCTL1",
	"PMEM",
	"RR",
	"TXPB",
	"RXPP",
	"XAUI",
	"TXP",
	"ARM",
	"IPC",
	"HOST2",
	"HOST3",
	"HOST4",
	"HOST5",
	"HOST6",
	"HOST7",
	"HOST8",
	"HOST9",
	"NETC",
	"Unknown",
	"Unknown",
	"Unknown",
	"Unknown",
	"Unknown",
	"Unknown",
	"Unknown",
	"Unknown"
};
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700121
Sathya Perla752961a2011-10-24 02:45:03 +0000122/* Is BE in a multi-channel mode */
123static inline bool be_is_mc(struct be_adapter *adapter) {
124 return (adapter->function_mode & FLEX10_MODE ||
125 adapter->function_mode & VNIC_MODE ||
126 adapter->function_mode & UMC_ENABLED);
127}
128
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700129static void be_queue_free(struct be_adapter *adapter, struct be_queue_info *q)
130{
131 struct be_dma_mem *mem = &q->dma_mem;
Sathya Perla1cfafab2012-02-23 18:50:15 +0000132 if (mem->va) {
Ivan Vecera2b7bceb2011-02-02 08:05:12 +0000133 dma_free_coherent(&adapter->pdev->dev, mem->size, mem->va,
134 mem->dma);
Sathya Perla1cfafab2012-02-23 18:50:15 +0000135 mem->va = NULL;
136 }
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700137}
138
139static int be_queue_alloc(struct be_adapter *adapter, struct be_queue_info *q,
140 u16 len, u16 entry_size)
141{
142 struct be_dma_mem *mem = &q->dma_mem;
143
144 memset(q, 0, sizeof(*q));
145 q->len = len;
146 q->entry_size = entry_size;
147 mem->size = len * entry_size;
Ivan Vecera2b7bceb2011-02-02 08:05:12 +0000148 mem->va = dma_alloc_coherent(&adapter->pdev->dev, mem->size, &mem->dma,
Joe Perches1f9061d22013-03-15 07:23:58 +0000149 GFP_KERNEL | __GFP_ZERO);
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700150 if (!mem->va)
Sathya Perla10ef9ab2012-02-09 18:05:27 +0000151 return -ENOMEM;
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700152 return 0;
153}
154
Somnath Kotur68c45a22013-03-14 02:42:07 +0000155static void be_reg_intr_set(struct be_adapter *adapter, bool enable)
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700156{
Sathya Perladb3ea782011-08-22 19:41:52 +0000157 u32 reg, enabled;
Sathya Perla5f0b8492009-07-27 22:52:56 +0000158
Sathya Perladb3ea782011-08-22 19:41:52 +0000159 pci_read_config_dword(adapter->pdev, PCICFG_MEMBAR_CTRL_INT_CTRL_OFFSET,
160 &reg);
161 enabled = reg & MEMBAR_CTRL_INT_CTRL_HOSTINTR_MASK;
162
Sathya Perla5f0b8492009-07-27 22:52:56 +0000163 if (!enabled && enable)
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700164 reg |= MEMBAR_CTRL_INT_CTRL_HOSTINTR_MASK;
Sathya Perla5f0b8492009-07-27 22:52:56 +0000165 else if (enabled && !enable)
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700166 reg &= ~MEMBAR_CTRL_INT_CTRL_HOSTINTR_MASK;
Sathya Perla5f0b8492009-07-27 22:52:56 +0000167 else
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700168 return;
Sathya Perla5f0b8492009-07-27 22:52:56 +0000169
Sathya Perladb3ea782011-08-22 19:41:52 +0000170 pci_write_config_dword(adapter->pdev,
171 PCICFG_MEMBAR_CTRL_INT_CTRL_OFFSET, reg);
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700172}
173
Somnath Kotur68c45a22013-03-14 02:42:07 +0000174static void be_intr_set(struct be_adapter *adapter, bool enable)
175{
176 int status = 0;
177
178 /* On lancer interrupts can't be controlled via this register */
179 if (lancer_chip(adapter))
180 return;
181
182 if (adapter->eeh_error)
183 return;
184
185 status = be_cmd_intr_set(adapter, enable);
186 if (status)
187 be_reg_intr_set(adapter, enable);
188}
189
Sathya Perla8788fdc2009-07-27 22:52:03 +0000190static void be_rxq_notify(struct be_adapter *adapter, u16 qid, u16 posted)
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700191{
192 u32 val = 0;
193 val |= qid & DB_RQ_RING_ID_MASK;
194 val |= posted << DB_RQ_NUM_POSTED_SHIFT;
Sathya Perlaf3eb62d2010-06-29 00:11:17 +0000195
196 wmb();
Sathya Perla8788fdc2009-07-27 22:52:03 +0000197 iowrite32(val, adapter->db + DB_RQ_OFFSET);
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700198}
199
Vasundhara Volam94d73aa2013-04-21 23:28:14 +0000200static void be_txq_notify(struct be_adapter *adapter, struct be_tx_obj *txo,
201 u16 posted)
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700202{
203 u32 val = 0;
Vasundhara Volam94d73aa2013-04-21 23:28:14 +0000204 val |= txo->q.id & DB_TXULP_RING_ID_MASK;
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700205 val |= (posted & DB_TXULP_NUM_POSTED_MASK) << DB_TXULP_NUM_POSTED_SHIFT;
Sathya Perlaf3eb62d2010-06-29 00:11:17 +0000206
207 wmb();
Vasundhara Volam94d73aa2013-04-21 23:28:14 +0000208 iowrite32(val, adapter->db + txo->db_offset);
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700209}
210
Sathya Perla8788fdc2009-07-27 22:52:03 +0000211static void be_eq_notify(struct be_adapter *adapter, u16 qid,
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700212 bool arm, bool clear_int, u16 num_popped)
213{
214 u32 val = 0;
215 val |= qid & DB_EQ_RING_ID_MASK;
Sathya Perlafe6d2a32010-11-21 23:25:50 +0000216 val |= ((qid & DB_EQ_RING_ID_EXT_MASK) <<
217 DB_EQ_RING_ID_EXT_MASK_SHIFT);
Sathya Perlacf588472010-02-14 21:22:01 +0000218
Padmanabh Ratnakarf67ef7b2012-07-12 03:57:09 +0000219 if (adapter->eeh_error)
Sathya Perlacf588472010-02-14 21:22:01 +0000220 return;
221
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700222 if (arm)
223 val |= 1 << DB_EQ_REARM_SHIFT;
224 if (clear_int)
225 val |= 1 << DB_EQ_CLR_SHIFT;
226 val |= 1 << DB_EQ_EVNT_SHIFT;
227 val |= num_popped << DB_EQ_NUM_POPPED_SHIFT;
Sathya Perla8788fdc2009-07-27 22:52:03 +0000228 iowrite32(val, adapter->db + DB_EQ_OFFSET);
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700229}
230
Sathya Perla8788fdc2009-07-27 22:52:03 +0000231void be_cq_notify(struct be_adapter *adapter, u16 qid, bool arm, u16 num_popped)
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700232{
233 u32 val = 0;
234 val |= qid & DB_CQ_RING_ID_MASK;
Sathya Perlafe6d2a32010-11-21 23:25:50 +0000235 val |= ((qid & DB_CQ_RING_ID_EXT_MASK) <<
236 DB_CQ_RING_ID_EXT_MASK_SHIFT);
Sathya Perlacf588472010-02-14 21:22:01 +0000237
Padmanabh Ratnakarf67ef7b2012-07-12 03:57:09 +0000238 if (adapter->eeh_error)
Sathya Perlacf588472010-02-14 21:22:01 +0000239 return;
240
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700241 if (arm)
242 val |= 1 << DB_CQ_REARM_SHIFT;
243 val |= num_popped << DB_CQ_NUM_POPPED_SHIFT;
Sathya Perla8788fdc2009-07-27 22:52:03 +0000244 iowrite32(val, adapter->db + DB_CQ_OFFSET);
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700245}
246
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700247static int be_mac_addr_set(struct net_device *netdev, void *p)
248{
249 struct be_adapter *adapter = netdev_priv(netdev);
250 struct sockaddr *addr = p;
251 int status = 0;
Somnath Koture3a7ae22011-10-27 07:14:05 +0000252 u8 current_mac[ETH_ALEN];
Ajit Khapardefbc13f02012-03-18 06:23:21 +0000253 u32 pmac_id = adapter->pmac_id[0];
Padmanabh Ratnakar704e4c82012-10-20 06:02:13 +0000254 bool active_mac = true;
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700255
Ajit Khapardeca9e4982009-11-29 17:56:26 +0000256 if (!is_valid_ether_addr(addr->sa_data))
257 return -EADDRNOTAVAIL;
258
Padmanabh Ratnakar704e4c82012-10-20 06:02:13 +0000259 /* For BE VF, MAC address is already activated by PF.
260 * Hence only operation left is updating netdev->devaddr.
261 * Update it if user is passing the same MAC which was used
262 * during configuring VF MAC from PF(Hypervisor).
263 */
264 if (!lancer_chip(adapter) && !be_physfn(adapter)) {
265 status = be_cmd_mac_addr_query(adapter, current_mac,
266 false, adapter->if_handle, 0);
267 if (!status && !memcmp(current_mac, addr->sa_data, ETH_ALEN))
268 goto done;
269 else
270 goto err;
271 }
272
273 if (!memcmp(addr->sa_data, netdev->dev_addr, ETH_ALEN))
274 goto done;
275
276 /* For Lancer check if any MAC is active.
277 * If active, get its mac id.
278 */
279 if (lancer_chip(adapter) && !be_physfn(adapter))
280 be_cmd_get_mac_from_list(adapter, current_mac, &active_mac,
281 &pmac_id, 0);
282
283 status = be_cmd_pmac_add(adapter, (u8 *)addr->sa_data,
284 adapter->if_handle,
285 &adapter->pmac_id[0], 0);
286
Sathya Perlaa65027e2009-08-17 00:58:04 +0000287 if (status)
Somnath Koture3a7ae22011-10-27 07:14:05 +0000288 goto err;
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700289
Padmanabh Ratnakar704e4c82012-10-20 06:02:13 +0000290 if (active_mac)
291 be_cmd_pmac_del(adapter, adapter->if_handle,
292 pmac_id, 0);
293done:
Somnath Koture3a7ae22011-10-27 07:14:05 +0000294 memcpy(netdev->dev_addr, addr->sa_data, netdev->addr_len);
295 return 0;
296err:
297 dev_err(&adapter->pdev->dev, "MAC %pM set Failed\n", addr->sa_data);
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700298 return status;
299}
300
Sathya Perlaca34fe32012-11-06 17:48:56 +0000301/* BE2 supports only v0 cmd */
302static void *hw_stats_from_cmd(struct be_adapter *adapter)
303{
304 if (BE2_chip(adapter)) {
305 struct be_cmd_resp_get_stats_v0 *cmd = adapter->stats_cmd.va;
306
307 return &cmd->hw_stats;
308 } else {
309 struct be_cmd_resp_get_stats_v1 *cmd = adapter->stats_cmd.va;
310
311 return &cmd->hw_stats;
312 }
313}
314
315/* BE2 supports only v0 cmd */
316static void *be_erx_stats_from_cmd(struct be_adapter *adapter)
317{
318 if (BE2_chip(adapter)) {
319 struct be_hw_stats_v0 *hw_stats = hw_stats_from_cmd(adapter);
320
321 return &hw_stats->erx;
322 } else {
323 struct be_hw_stats_v1 *hw_stats = hw_stats_from_cmd(adapter);
324
325 return &hw_stats->erx;
326 }
327}
328
/* Copy the v0 (BE2) firmware stats response into the driver's generic
 * drv_stats structure, converting from LE to CPU byte order first.
 */
static void populate_be_v0_stats(struct be_adapter *adapter)
{
	struct be_hw_stats_v0 *hw_stats = hw_stats_from_cmd(adapter);
	struct be_pmem_stats *pmem_sts = &hw_stats->pmem;
	struct be_rxf_stats_v0 *rxf_stats = &hw_stats->rxf;
	struct be_port_rxf_stats_v0 *port_stats =
		&rxf_stats->port[adapter->port_num];
	struct be_drv_stats *drvs = &adapter->drv_stats;

	be_dws_le_to_cpu(hw_stats, sizeof(*hw_stats));
	drvs->rx_pause_frames = port_stats->rx_pause_frames;
	drvs->rx_crc_errors = port_stats->rx_crc_errors;
	drvs->rx_control_frames = port_stats->rx_control_frames;
	drvs->rx_in_range_errors = port_stats->rx_in_range_errors;
	drvs->rx_frame_too_long = port_stats->rx_frame_too_long;
	drvs->rx_dropped_runt = port_stats->rx_dropped_runt;
	drvs->rx_ip_checksum_errs = port_stats->rx_ip_checksum_errs;
	drvs->rx_tcp_checksum_errs = port_stats->rx_tcp_checksum_errs;
	drvs->rx_udp_checksum_errs = port_stats->rx_udp_checksum_errs;
	drvs->rxpp_fifo_overflow_drop = port_stats->rx_fifo_overflow;
	drvs->rx_dropped_tcp_length = port_stats->rx_dropped_tcp_length;
	drvs->rx_dropped_too_small = port_stats->rx_dropped_too_small;
	drvs->rx_dropped_too_short = port_stats->rx_dropped_too_short;
	drvs->rx_out_range_errors = port_stats->rx_out_range_errors;
	drvs->rx_input_fifo_overflow_drop = port_stats->rx_input_fifo_overflow;
	drvs->rx_dropped_header_too_small =
		port_stats->rx_dropped_header_too_small;
	/* v0 reports address and vlan filter drops separately; fold both
	 * into the single generic counter
	 */
	drvs->rx_address_filtered =
		port_stats->rx_address_filtered +
		port_stats->rx_vlan_filtered;
	drvs->rx_alignment_symbol_errors =
		port_stats->rx_alignment_symbol_errors;

	drvs->tx_pauseframes = port_stats->tx_pauseframes;
	drvs->tx_controlframes = port_stats->tx_controlframes;

	/* v0 keeps jabber counters per-port in the rxf block */
	if (adapter->port_num)
		drvs->jabber_events = rxf_stats->port1_jabber_events;
	else
		drvs->jabber_events = rxf_stats->port0_jabber_events;
	drvs->rx_drops_no_pbuf = rxf_stats->rx_drops_no_pbuf;
	drvs->rx_drops_no_erx_descr = rxf_stats->rx_drops_no_erx_descr;
	drvs->forwarded_packets = rxf_stats->forwarded_packets;
	drvs->rx_drops_mtu = rxf_stats->rx_drops_mtu;
	drvs->rx_drops_no_tpre_descr = rxf_stats->rx_drops_no_tpre_descr;
	drvs->rx_drops_too_many_frags = rxf_stats->rx_drops_too_many_frags;
	adapter->drv_stats.eth_red_drops = pmem_sts->eth_red_drops;
}
377
/* Copy the v1 (BE3/Skyhawk) firmware stats response into the driver's
 * generic drv_stats structure, converting from LE to CPU byte order first.
 */
static void populate_be_v1_stats(struct be_adapter *adapter)
{
	struct be_hw_stats_v1 *hw_stats = hw_stats_from_cmd(adapter);
	struct be_pmem_stats *pmem_sts = &hw_stats->pmem;
	struct be_rxf_stats_v1 *rxf_stats = &hw_stats->rxf;
	struct be_port_rxf_stats_v1 *port_stats =
		&rxf_stats->port[adapter->port_num];
	struct be_drv_stats *drvs = &adapter->drv_stats;

	be_dws_le_to_cpu(hw_stats, sizeof(*hw_stats));
	drvs->pmem_fifo_overflow_drop = port_stats->pmem_fifo_overflow_drop;
	drvs->rx_priority_pause_frames = port_stats->rx_priority_pause_frames;
	drvs->rx_pause_frames = port_stats->rx_pause_frames;
	drvs->rx_crc_errors = port_stats->rx_crc_errors;
	drvs->rx_control_frames = port_stats->rx_control_frames;
	drvs->rx_in_range_errors = port_stats->rx_in_range_errors;
	drvs->rx_frame_too_long = port_stats->rx_frame_too_long;
	drvs->rx_dropped_runt = port_stats->rx_dropped_runt;
	drvs->rx_ip_checksum_errs = port_stats->rx_ip_checksum_errs;
	drvs->rx_tcp_checksum_errs = port_stats->rx_tcp_checksum_errs;
	drvs->rx_udp_checksum_errs = port_stats->rx_udp_checksum_errs;
	drvs->rx_dropped_tcp_length = port_stats->rx_dropped_tcp_length;
	drvs->rx_dropped_too_small = port_stats->rx_dropped_too_small;
	drvs->rx_dropped_too_short = port_stats->rx_dropped_too_short;
	drvs->rx_out_range_errors = port_stats->rx_out_range_errors;
	drvs->rx_dropped_header_too_small =
		port_stats->rx_dropped_header_too_small;
	drvs->rx_input_fifo_overflow_drop =
		port_stats->rx_input_fifo_overflow_drop;
	/* unlike v0, v1 reports a single combined filter-drop counter */
	drvs->rx_address_filtered = port_stats->rx_address_filtered;
	drvs->rx_alignment_symbol_errors =
		port_stats->rx_alignment_symbol_errors;
	drvs->rxpp_fifo_overflow_drop = port_stats->rxpp_fifo_overflow_drop;
	drvs->tx_pauseframes = port_stats->tx_pauseframes;
	drvs->tx_controlframes = port_stats->tx_controlframes;
	/* v1 keeps jabber counters per-port in the port block */
	drvs->jabber_events = port_stats->jabber_events;
	drvs->rx_drops_no_pbuf = rxf_stats->rx_drops_no_pbuf;
	drvs->rx_drops_no_erx_descr = rxf_stats->rx_drops_no_erx_descr;
	drvs->forwarded_packets = rxf_stats->forwarded_packets;
	drvs->rx_drops_mtu = rxf_stats->rx_drops_mtu;
	drvs->rx_drops_no_tpre_descr = rxf_stats->rx_drops_no_tpre_descr;
	drvs->rx_drops_too_many_frags = rxf_stats->rx_drops_too_many_frags;
	adapter->drv_stats.eth_red_drops = pmem_sts->eth_red_drops;
}
422
/* Copy the Lancer physical-port stats response into the driver's generic
 * drv_stats structure, converting from LE to CPU byte order first.
 * Lancer exposes 64-bit counters; only the low 32 bits (_lo fields) are
 * folded into the 32-bit generic counters.
 */
static void populate_lancer_stats(struct be_adapter *adapter)
{
	struct be_drv_stats *drvs = &adapter->drv_stats;
	struct lancer_pport_stats *pport_stats =
					pport_stats_from_cmd(adapter);

	be_dws_le_to_cpu(pport_stats, sizeof(*pport_stats));
	drvs->rx_pause_frames = pport_stats->rx_pause_frames_lo;
	drvs->rx_crc_errors = pport_stats->rx_crc_errors_lo;
	drvs->rx_control_frames = pport_stats->rx_control_frames_lo;
	drvs->rx_in_range_errors = pport_stats->rx_in_range_errors;
	drvs->rx_frame_too_long = pport_stats->rx_frames_too_long_lo;
	drvs->rx_dropped_runt = pport_stats->rx_dropped_runt;
	drvs->rx_ip_checksum_errs = pport_stats->rx_ip_checksum_errors;
	drvs->rx_tcp_checksum_errs = pport_stats->rx_tcp_checksum_errors;
	drvs->rx_udp_checksum_errs = pport_stats->rx_udp_checksum_errors;
	drvs->rx_dropped_tcp_length =
				pport_stats->rx_dropped_invalid_tcp_length;
	drvs->rx_dropped_too_small = pport_stats->rx_dropped_too_small;
	drvs->rx_dropped_too_short = pport_stats->rx_dropped_too_short;
	drvs->rx_out_range_errors = pport_stats->rx_out_of_range_errors;
	drvs->rx_dropped_header_too_small =
				pport_stats->rx_dropped_header_too_small;
	drvs->rx_input_fifo_overflow_drop = pport_stats->rx_fifo_overflow;
	/* like BE2-v0, fold address and vlan filter drops together */
	drvs->rx_address_filtered =
					pport_stats->rx_address_filtered +
					pport_stats->rx_vlan_filtered;
	drvs->rx_alignment_symbol_errors = pport_stats->rx_symbol_errors_lo;
	drvs->rxpp_fifo_overflow_drop = pport_stats->rx_fifo_overflow;
	drvs->tx_pauseframes = pport_stats->tx_pause_frames_lo;
	drvs->tx_controlframes = pport_stats->tx_control_frames_lo;
	drvs->jabber_events = pport_stats->rx_jabbers;
	drvs->forwarded_packets = pport_stats->num_forwards_lo;
	drvs->rx_drops_mtu = pport_stats->rx_drops_mtu_lo;
	drvs->rx_drops_too_many_frags =
				pport_stats->rx_drops_too_many_frags_lo;
}
Ajit Khaparde89a88ab2011-05-16 07:36:18 +0000461
Sathya Perla09c1c682011-08-22 19:41:53 +0000462static void accumulate_16bit_val(u32 *acc, u16 val)
463{
464#define lo(x) (x & 0xFFFF)
465#define hi(x) (x & 0xFFFF0000)
466 bool wrapped = val < lo(*acc);
467 u32 newacc = hi(*acc) + val;
468
469 if (wrapped)
470 newacc += 65536;
471 ACCESS_ONCE(*acc) = newacc;
472}
473
/* Convert the chip-specific firmware stats response into the driver's
 * generic counters, and accumulate the wrapping per-RX-queue erx
 * drop counters into their 32-bit software accumulators.
 */
void be_parse_stats(struct be_adapter *adapter)
{
	struct be_erx_stats_v1 *erx = be_erx_stats_from_cmd(adapter);
	struct be_rx_obj *rxo;
	int i;

	if (lancer_chip(adapter)) {
		populate_lancer_stats(adapter);
	} else {
		if (BE2_chip(adapter))
			populate_be_v0_stats(adapter);
		else
			/* for BE3 and Skyhawk */
			populate_be_v1_stats(adapter);

		/* as erx_v1 is longer than v0, ok to use v1 for v0 access */
		for_all_rx_queues(adapter, rxo, i) {
			/* below erx HW counter can actually wrap around after
			 * 65535. Driver accumulates a 32-bit value
			 */
			accumulate_16bit_val(&rx_stats(rxo)->rx_drops_no_frags,
					     (u16)erx->rx_drops_no_fragments \
					     [rxo->q.id]);
		}
	}
}
500
/* ndo_get_stats64() handler: aggregate per-queue packet/byte counters
 * (read consistently via the u64_stats seqcount retry loops) and derive
 * the standard error counters from the firmware-populated drv_stats.
 * Returns the caller-supplied 'stats' structure.
 */
static struct rtnl_link_stats64 *be_get_stats64(struct net_device *netdev,
					struct rtnl_link_stats64 *stats)
{
	struct be_adapter *adapter = netdev_priv(netdev);
	struct be_drv_stats *drvs = &adapter->drv_stats;
	struct be_rx_obj *rxo;
	struct be_tx_obj *txo;
	u64 pkts, bytes;
	unsigned int start;
	int i;

	for_all_rx_queues(adapter, rxo, i) {
		const struct be_rx_stats *rx_stats = rx_stats(rxo);
		/* retry until a consistent snapshot of pkts/bytes is read */
		do {
			start = u64_stats_fetch_begin_bh(&rx_stats->sync);
			pkts = rx_stats(rxo)->rx_pkts;
			bytes = rx_stats(rxo)->rx_bytes;
		} while (u64_stats_fetch_retry_bh(&rx_stats->sync, start));
		stats->rx_packets += pkts;
		stats->rx_bytes += bytes;
		stats->multicast += rx_stats(rxo)->rx_mcast_pkts;
		stats->rx_dropped += rx_stats(rxo)->rx_drops_no_skbs +
					rx_stats(rxo)->rx_drops_no_frags;
	}

	for_all_tx_queues(adapter, txo, i) {
		const struct be_tx_stats *tx_stats = tx_stats(txo);
		/* same consistent-snapshot dance for the TX side */
		do {
			start = u64_stats_fetch_begin_bh(&tx_stats->sync);
			pkts = tx_stats(txo)->tx_pkts;
			bytes = tx_stats(txo)->tx_bytes;
		} while (u64_stats_fetch_retry_bh(&tx_stats->sync, start));
		stats->tx_packets += pkts;
		stats->tx_bytes += bytes;
	}

	/* bad pkts received */
	stats->rx_errors = drvs->rx_crc_errors +
		drvs->rx_alignment_symbol_errors +
		drvs->rx_in_range_errors +
		drvs->rx_out_range_errors +
		drvs->rx_frame_too_long +
		drvs->rx_dropped_too_small +
		drvs->rx_dropped_too_short +
		drvs->rx_dropped_header_too_small +
		drvs->rx_dropped_tcp_length +
		drvs->rx_dropped_runt;

	/* detailed rx errors */
	stats->rx_length_errors = drvs->rx_in_range_errors +
		drvs->rx_out_range_errors +
		drvs->rx_frame_too_long;

	stats->rx_crc_errors = drvs->rx_crc_errors;

	/* frame alignment errors */
	stats->rx_frame_errors = drvs->rx_alignment_symbol_errors;

	/* receiver fifo overrun */
	/* drops_no_pbuf is no per i/f, it's per BE card */
	stats->rx_fifo_errors = drvs->rxpp_fifo_overflow_drop +
				drvs->rx_input_fifo_overflow_drop +
				drvs->rx_drops_no_pbuf;
	return stats;
}
566
Ajit Khapardeb236916a2011-12-30 12:15:40 +0000567void be_link_status_update(struct be_adapter *adapter, u8 link_status)
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700568{
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700569 struct net_device *netdev = adapter->netdev;
570
Ajit Khapardeb236916a2011-12-30 12:15:40 +0000571 if (!(adapter->flags & BE_FLAGS_LINK_STATUS_INIT)) {
Sathya Perlaea172a02011-08-02 19:57:42 +0000572 netif_carrier_off(netdev);
Ajit Khapardeb236916a2011-12-30 12:15:40 +0000573 adapter->flags |= BE_FLAGS_LINK_STATUS_INIT;
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700574 }
Ajit Khapardeb236916a2011-12-30 12:15:40 +0000575
576 if ((link_status & LINK_STATUS_MASK) == LINK_UP)
577 netif_carrier_on(netdev);
578 else
579 netif_carrier_off(netdev);
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700580}
581
Sathya Perla3c8def92011-06-12 20:01:58 +0000582static void be_tx_stats_update(struct be_tx_obj *txo,
Ajit Khaparde91992e42010-02-19 13:57:12 +0000583 u32 wrb_cnt, u32 copied, u32 gso_segs, bool stopped)
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700584{
Sathya Perla3c8def92011-06-12 20:01:58 +0000585 struct be_tx_stats *stats = tx_stats(txo);
586
Sathya Perlaab1594e2011-07-25 19:10:15 +0000587 u64_stats_update_begin(&stats->sync);
Sathya Perlaac124ff2011-07-25 19:10:14 +0000588 stats->tx_reqs++;
589 stats->tx_wrbs += wrb_cnt;
590 stats->tx_bytes += copied;
591 stats->tx_pkts += (gso_segs ? gso_segs : 1);
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700592 if (stopped)
Sathya Perlaac124ff2011-07-25 19:10:14 +0000593 stats->tx_stops++;
Sathya Perlaab1594e2011-07-25 19:10:15 +0000594 u64_stats_update_end(&stats->sync);
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700595}
596
597/* Determine number of WRB entries needed to xmit data in an skb */
Sathya Perlafe6d2a32010-11-21 23:25:50 +0000598static u32 wrb_cnt_for_skb(struct be_adapter *adapter, struct sk_buff *skb,
599 bool *dummy)
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700600{
David S. Millerebc8d2a2009-06-09 01:01:31 -0700601 int cnt = (skb->len > skb->data_len);
602
603 cnt += skb_shinfo(skb)->nr_frags;
604
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700605 /* to account for hdr wrb */
606 cnt++;
Sathya Perlafe6d2a32010-11-21 23:25:50 +0000607 if (lancer_chip(adapter) || !(cnt & 1)) {
608 *dummy = false;
609 } else {
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700610 /* add a dummy to make it an even num */
611 cnt++;
612 *dummy = true;
Sathya Perlafe6d2a32010-11-21 23:25:50 +0000613 }
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700614 BUG_ON(cnt > BE_MAX_TX_FRAG_COUNT);
615 return cnt;
616}
617
618static inline void wrb_fill(struct be_eth_wrb *wrb, u64 addr, int len)
619{
620 wrb->frag_pa_hi = upper_32_bits(addr);
621 wrb->frag_pa_lo = addr & 0xFFFFFFFF;
622 wrb->frag_len = len & ETH_WRB_FRAG_LEN_MASK;
Somnath Kotur89b1f492012-06-24 19:40:55 +0000623 wrb->rsvd0 = 0;
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700624}
625
Ajit Khaparde1ded1322011-12-09 13:53:17 +0000626static inline u16 be_get_tx_vlan_tag(struct be_adapter *adapter,
627 struct sk_buff *skb)
628{
629 u8 vlan_prio;
630 u16 vlan_tag;
631
632 vlan_tag = vlan_tx_tag_get(skb);
633 vlan_prio = (vlan_tag & VLAN_PRIO_MASK) >> VLAN_PRIO_SHIFT;
634 /* If vlan priority provided by OS is NOT in available bmap */
635 if (!(adapter->vlan_prio_bmap & (1 << vlan_prio)))
636 vlan_tag = (vlan_tag & ~VLAN_PRIO_MASK) |
637 adapter->recommended_prio;
638
639 return vlan_tag;
640}
641
/* Fill the TX header wrb for an skb: sets CRC, LSO/checksum-offload and
 * VLAN-insertion bits as appropriate, plus the wrb count and total frame
 * length. 'skip_hw_vlan' suppresses HW VLAN tagging by requesting an
 * event without a completion.
 */
static void wrb_fill_hdr(struct be_adapter *adapter, struct be_eth_hdr_wrb *hdr,
		struct sk_buff *skb, u32 wrb_cnt, u32 len, bool skip_hw_vlan)
{
	u16 vlan_tag;

	memset(hdr, 0, sizeof(*hdr));

	AMAP_SET_BITS(struct amap_eth_hdr_wrb, crc, hdr, 1);

	if (skb_is_gso(skb)) {
		/* large-send offload; lso6 only on non-Lancer for IPv6 */
		AMAP_SET_BITS(struct amap_eth_hdr_wrb, lso, hdr, 1);
		AMAP_SET_BITS(struct amap_eth_hdr_wrb, lso_mss,
			hdr, skb_shinfo(skb)->gso_size);
		if (skb_is_gso_v6(skb) && !lancer_chip(adapter))
			AMAP_SET_BITS(struct amap_eth_hdr_wrb, lso6, hdr, 1);
	} else if (skb->ip_summed == CHECKSUM_PARTIAL) {
		/* L4 checksum offload for TCP/UDP */
		if (is_tcp_pkt(skb))
			AMAP_SET_BITS(struct amap_eth_hdr_wrb, tcpcs, hdr, 1);
		else if (is_udp_pkt(skb))
			AMAP_SET_BITS(struct amap_eth_hdr_wrb, udpcs, hdr, 1);
	}

	if (vlan_tx_tag_present(skb)) {
		AMAP_SET_BITS(struct amap_eth_hdr_wrb, vlan, hdr, 1);
		vlan_tag = be_get_tx_vlan_tag(adapter, skb);
		AMAP_SET_BITS(struct amap_eth_hdr_wrb, vlan_tag, hdr, vlan_tag);
	}

	/* To skip HW VLAN tagging: evt = 1, compl = 0 */
	AMAP_SET_BITS(struct amap_eth_hdr_wrb, complete, hdr, !skip_hw_vlan);
	AMAP_SET_BITS(struct amap_eth_hdr_wrb, event, hdr, 1);
	AMAP_SET_BITS(struct amap_eth_hdr_wrb, num_wrb, hdr, wrb_cnt);
	AMAP_SET_BITS(struct amap_eth_hdr_wrb, len, hdr, len);
}
676
Ivan Vecera2b7bceb2011-02-02 08:05:12 +0000677static void unmap_tx_frag(struct device *dev, struct be_eth_wrb *wrb,
Sathya Perla7101e112010-03-22 20:41:12 +0000678 bool unmap_single)
679{
680 dma_addr_t dma;
681
682 be_dws_le_to_cpu(wrb, sizeof(*wrb));
683
684 dma = (u64)wrb->frag_pa_hi << 32 | (u64)wrb->frag_pa_lo;
FUJITA Tomonorib681ee72010-04-04 21:40:18 +0000685 if (wrb->frag_len) {
Sathya Perla7101e112010-03-22 20:41:12 +0000686 if (unmap_single)
Ivan Vecera2b7bceb2011-02-02 08:05:12 +0000687 dma_unmap_single(dev, dma, wrb->frag_len,
688 DMA_TO_DEVICE);
Sathya Perla7101e112010-03-22 20:41:12 +0000689 else
Ivan Vecera2b7bceb2011-02-02 08:05:12 +0000690 dma_unmap_page(dev, dma, wrb->frag_len, DMA_TO_DEVICE);
Sathya Perla7101e112010-03-22 20:41:12 +0000691 }
692}
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700693
/* Build the WRBs (header + one per fragment) for @skb in the TX queue.
 * Returns the number of data bytes covered, or 0 on a DMA mapping
 * failure — in which case every mapping made so far is undone and the
 * queue head is rewound to where it started.
 */
static int make_tx_wrbs(struct be_adapter *adapter, struct be_queue_info *txq,
		struct sk_buff *skb, u32 wrb_cnt, bool dummy_wrb,
		bool skip_hw_vlan)
{
	dma_addr_t busaddr;
	int i, copied = 0;
	struct device *dev = &adapter->pdev->dev;
	struct sk_buff *first_skb = skb;
	struct be_eth_wrb *wrb;
	struct be_eth_hdr_wrb *hdr;
	bool map_single = false;
	u16 map_head;

	/* reserve the first slot for the header WRB; fill it last, once
	 * the total copied length is known
	 */
	hdr = queue_head_node(txq);
	queue_head_inc(txq);
	map_head = txq->head;	/* first data-WRB slot, for error rewind */

	/* map the linear part of the skb, if any */
	if (skb->len > skb->data_len) {
		int len = skb_headlen(skb);
		busaddr = dma_map_single(dev, skb->data, len, DMA_TO_DEVICE);
		if (dma_mapping_error(dev, busaddr))
			goto dma_err;
		map_single = true;
		wrb = queue_head_node(txq);
		wrb_fill(wrb, busaddr, len);
		be_dws_cpu_to_le(wrb, sizeof(*wrb));
		queue_head_inc(txq);
		copied += len;
	}

	/* one WRB per page fragment */
	for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
		const struct skb_frag_struct *frag =
			&skb_shinfo(skb)->frags[i];
		busaddr = skb_frag_dma_map(dev, frag, 0,
					 skb_frag_size(frag), DMA_TO_DEVICE);
		if (dma_mapping_error(dev, busaddr))
			goto dma_err;
		wrb = queue_head_node(txq);
		wrb_fill(wrb, busaddr, skb_frag_size(frag));
		be_dws_cpu_to_le(wrb, sizeof(*wrb));
		queue_head_inc(txq);
		copied += skb_frag_size(frag);
	}

	/* pad with a zero-length WRB when the caller asked for one */
	if (dummy_wrb) {
		wrb = queue_head_node(txq);
		wrb_fill(wrb, 0, 0);
		be_dws_cpu_to_le(wrb, sizeof(*wrb));
		queue_head_inc(txq);
	}

	wrb_fill_hdr(adapter, hdr, first_skb, wrb_cnt, copied, skip_hw_vlan);
	be_dws_cpu_to_le(hdr, sizeof(*hdr));

	return copied;
dma_err:
	/* unwind: rewind the head and unmap everything mapped so far */
	txq->head = map_head;
	while (copied) {
		wrb = queue_head_node(txq);
		unmap_tx_frag(dev, wrb, map_single);
		map_single = false;	/* only the first WRB was map_single */
		copied -= wrb->frag_len;
		queue_head_inc(txq);
	}
	return 0;
}
760
Somnath Kotur93040ae2012-06-26 22:32:10 +0000761static struct sk_buff *be_insert_vlan_in_pkt(struct be_adapter *adapter,
Ajit Khapardebc0c3402013-04-24 11:52:50 +0000762 struct sk_buff *skb,
763 bool *skip_hw_vlan)
Somnath Kotur93040ae2012-06-26 22:32:10 +0000764{
765 u16 vlan_tag = 0;
766
767 skb = skb_share_check(skb, GFP_ATOMIC);
768 if (unlikely(!skb))
769 return skb;
770
771 if (vlan_tx_tag_present(skb)) {
772 vlan_tag = be_get_tx_vlan_tag(adapter, skb);
David S. Miller6e0895c2013-04-22 20:32:51 -0400773 skb = __vlan_put_tag(skb, htons(ETH_P_8021Q), vlan_tag);
Ivan Veceraf11a8692013-04-12 16:49:24 +0200774 if (skb)
775 skb->vlan_tci = 0;
Somnath Kotur93040ae2012-06-26 22:32:10 +0000776 }
777
Ajit Khapardebc0c3402013-04-24 11:52:50 +0000778 if (qnq_async_evt_rcvd(adapter) && adapter->pvid) {
779 if (!vlan_tag)
780 vlan_tag = adapter->pvid;
781 if (skip_hw_vlan)
782 *skip_hw_vlan = true;
783 }
784
785 if (vlan_tag) {
David S. Miller58717682013-04-30 03:50:54 -0400786 skb = __vlan_put_tag(skb, htons(ETH_P_8021Q), vlan_tag);
Ajit Khapardebc0c3402013-04-24 11:52:50 +0000787 if (unlikely(!skb))
788 return skb;
789
790 skb->vlan_tci = 0;
791 }
792
793 /* Insert the outer VLAN, if any */
794 if (adapter->qnq_vid) {
795 vlan_tag = adapter->qnq_vid;
David S. Miller58717682013-04-30 03:50:54 -0400796 skb = __vlan_put_tag(skb, htons(ETH_P_8021Q), vlan_tag);
Ajit Khapardebc0c3402013-04-24 11:52:50 +0000797 if (unlikely(!skb))
798 return skb;
799 if (skip_hw_vlan)
800 *skip_hw_vlan = true;
801 }
802
Somnath Kotur93040ae2012-06-26 22:32:10 +0000803 return skb;
804}
805
Ajit Khapardebc0c3402013-04-24 11:52:50 +0000806static bool be_ipv6_exthdr_check(struct sk_buff *skb)
807{
808 struct ethhdr *eh = (struct ethhdr *)skb->data;
809 u16 offset = ETH_HLEN;
810
811 if (eh->h_proto == htons(ETH_P_IPV6)) {
812 struct ipv6hdr *ip6h = (struct ipv6hdr *)(skb->data + offset);
813
814 offset += sizeof(struct ipv6hdr);
815 if (ip6h->nexthdr != NEXTHDR_TCP &&
816 ip6h->nexthdr != NEXTHDR_UDP) {
817 struct ipv6_opt_hdr *ehdr =
818 (struct ipv6_opt_hdr *) (skb->data + offset);
819
820 /* offending pkt: 2nd byte following IPv6 hdr is 0xff */
821 if (ehdr->hdrlen == 0xff)
822 return true;
823 }
824 }
825 return false;
826}
827
828static int be_vlan_tag_tx_chk(struct be_adapter *adapter, struct sk_buff *skb)
829{
830 return vlan_tx_tag_present(skb) || adapter->pvid || adapter->qnq_vid;
831}
832
833static int be_ipv6_tx_stall_chk(struct be_adapter *adapter, struct sk_buff *skb)
834{
835 return BE3_chip(adapter) &&
836 be_ipv6_exthdr_check(skb);
837}
838
/* ndo_start_xmit handler: apply the HW-bug workarounds (short-packet
 * trim, manual VLAN insertion, IPv6 TX-stall avoidance), build the
 * WRBs and ring the TX doorbell. Always returns NETDEV_TX_OK; dropped
 * skbs are freed here.
 */
static netdev_tx_t be_xmit(struct sk_buff *skb,
			struct net_device *netdev)
{
	struct be_adapter *adapter = netdev_priv(netdev);
	struct be_tx_obj *txo = &adapter->tx_obj[skb_get_queue_mapping(skb)];
	struct be_queue_info *txq = &txo->q;
	struct iphdr *ip = NULL;
	u32 wrb_cnt = 0, copied = 0;
	u32 start = txq->head, eth_hdr_len;
	bool dummy_wrb, stopped = false;
	bool skip_hw_vlan = false;
	struct vlan_ethhdr *veh = (struct vlan_ethhdr *)skb->data;

	eth_hdr_len = ntohs(skb->protocol) == ETH_P_8021Q ?
			VLAN_ETH_HLEN : ETH_HLEN;

	/* For padded packets, BE HW modifies tot_len field in IP header
	 * incorrectly when VLAN tag is inserted by HW.
	 * Trim the padding away so only tot_len bytes remain.
	 */
	if (skb->len <= 60 && vlan_tx_tag_present(skb) && is_ipv4_pkt(skb)) {
		ip = (struct iphdr *)ip_hdr(skb);
		pskb_trim(skb, eth_hdr_len + ntohs(ip->tot_len));
	}

	/* If vlan tag is already inlined in the packet, skip HW VLAN
	 * tagging in UMC mode
	 */
	if ((adapter->function_mode & UMC_ENABLED) &&
	    veh->h_vlan_proto == htons(ETH_P_8021Q))
			skip_hw_vlan = true;

	/* HW has a bug wherein it will calculate CSUM for VLAN
	 * pkts even though it is disabled.
	 * Manually insert VLAN in pkt.
	 */
	if (skb->ip_summed != CHECKSUM_PARTIAL &&
			vlan_tx_tag_present(skb)) {
		skb = be_insert_vlan_in_pkt(adapter, skb, &skip_hw_vlan);
		if (unlikely(!skb))
			goto tx_drop;
	}

	/* HW may lockup when VLAN HW tagging is requested on
	 * certain ipv6 packets. Drop such pkts if the HW workaround to
	 * skip HW tagging is not enabled by FW.
	 */
	if (unlikely(be_ipv6_tx_stall_chk(adapter, skb) &&
	    (adapter->pvid || adapter->qnq_vid) &&
	    !qnq_async_evt_rcvd(adapter)))
		goto tx_drop;

	/* Manual VLAN tag insertion to prevent:
	 * ASIC lockup when the ASIC inserts VLAN tag into
	 * certain ipv6 packets. Insert VLAN tags in driver,
	 * and set event, completion, vlan bits accordingly
	 * in the Tx WRB.
	 */
	if (be_ipv6_tx_stall_chk(adapter, skb) &&
	    be_vlan_tag_tx_chk(adapter, skb)) {
		skb = be_insert_vlan_in_pkt(adapter, skb, &skip_hw_vlan);
		if (unlikely(!skb))
			goto tx_drop;
	}

	wrb_cnt = wrb_cnt_for_skb(adapter, skb, &dummy_wrb);

	copied = make_tx_wrbs(adapter, txq, skb, wrb_cnt, dummy_wrb,
			      skip_hw_vlan);
	if (copied) {
		int gso_segs = skb_shinfo(skb)->gso_segs;

		/* record the sent skb in the sent_skb table */
		BUG_ON(txo->sent_skb_list[start]);
		txo->sent_skb_list[start] = skb;

		/* Ensure txq has space for the next skb; Else stop the queue
		 * *BEFORE* ringing the tx doorbell, so that we serialize the
		 * tx compls of the current transmit which'll wake up the queue
		 */
		atomic_add(wrb_cnt, &txq->used);
		if ((BE_MAX_TX_FRAG_COUNT + atomic_read(&txq->used)) >=
								txq->len) {
			netif_stop_subqueue(netdev, skb_get_queue_mapping(skb));
			stopped = true;
		}

		be_txq_notify(adapter, txo, wrb_cnt);

		be_tx_stats_update(txo, wrb_cnt, copied, gso_segs, stopped);
	} else {
		/* WRB build failed (DMA error): rewind the head and drop */
		txq->head = start;
		dev_kfree_skb_any(skb);
	}
tx_drop:
	return NETDEV_TX_OK;
}
935
936static int be_change_mtu(struct net_device *netdev, int new_mtu)
937{
938 struct be_adapter *adapter = netdev_priv(netdev);
939 if (new_mtu < BE_MIN_MTU ||
Ajit Khaparde34a89b82010-02-09 01:32:43 +0000940 new_mtu > (BE_MAX_JUMBO_FRAME_SIZE -
941 (ETH_HLEN + ETH_FCS_LEN))) {
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700942 dev_info(&adapter->pdev->dev,
943 "MTU must be between %d and %d bytes\n",
Ajit Khaparde34a89b82010-02-09 01:32:43 +0000944 BE_MIN_MTU,
945 (BE_MAX_JUMBO_FRAME_SIZE - (ETH_HLEN + ETH_FCS_LEN)));
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700946 return -EINVAL;
947 }
948 dev_info(&adapter->pdev->dev, "MTU changed from %d to %d bytes\n",
949 netdev->mtu, new_mtu);
950 netdev->mtu = new_mtu;
951 return 0;
952}
953
954/*
Ajit Khaparde82903e42010-02-09 01:34:57 +0000955 * A max of 64 (BE_NUM_VLANS_SUPPORTED) vlans can be configured in BE.
956 * If the user configures more, place BE in vlan promiscuous mode.
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700957 */
Sathya Perla10329df2012-06-05 19:37:18 +0000958static int be_vid_config(struct be_adapter *adapter)
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700959{
Sathya Perla10329df2012-06-05 19:37:18 +0000960 u16 vids[BE_NUM_VLANS_SUPPORTED];
961 u16 num = 0, i;
Ajit Khaparde82903e42010-02-09 01:34:57 +0000962 int status = 0;
Ajit Khaparde1da87b72010-07-23 01:51:22 +0000963
Sathya Perlac0e64ef2011-08-02 19:57:43 +0000964 /* No need to further configure vids if in promiscuous mode */
965 if (adapter->promiscuous)
966 return 0;
967
Padmanabh Ratnakar0fc16eb2012-04-25 01:46:06 +0000968 if (adapter->vlans_added > adapter->max_vlans)
969 goto set_vlan_promisc;
970
971 /* Construct VLAN Table to give to HW */
972 for (i = 0; i < VLAN_N_VID; i++)
973 if (adapter->vlan_tag[i])
Sathya Perla10329df2012-06-05 19:37:18 +0000974 vids[num++] = cpu_to_le16(i);
Padmanabh Ratnakar0fc16eb2012-04-25 01:46:06 +0000975
976 status = be_cmd_vlan_config(adapter, adapter->if_handle,
Sathya Perla10329df2012-06-05 19:37:18 +0000977 vids, num, 1, 0);
Padmanabh Ratnakar0fc16eb2012-04-25 01:46:06 +0000978
979 /* Set to VLAN promisc mode as setting VLAN filter failed */
980 if (status) {
981 dev_info(&adapter->pdev->dev, "Exhausted VLAN HW filters.\n");
982 dev_info(&adapter->pdev->dev, "Disabling HW VLAN filtering.\n");
983 goto set_vlan_promisc;
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700984 }
Ajit Khaparde1da87b72010-07-23 01:51:22 +0000985
Sathya Perlab31c50a2009-09-17 10:30:13 -0700986 return status;
Padmanabh Ratnakar0fc16eb2012-04-25 01:46:06 +0000987
988set_vlan_promisc:
989 status = be_cmd_vlan_config(adapter, adapter->if_handle,
990 NULL, 0, 1, 1);
991 return status;
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700992}
993
Patrick McHardy80d5c362013-04-19 02:04:28 +0000994static int be_vlan_add_vid(struct net_device *netdev, __be16 proto, u16 vid)
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700995{
996 struct be_adapter *adapter = netdev_priv(netdev);
Ajit Khaparde80817cb2011-12-30 12:15:12 +0000997 int status = 0;
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700998
Padmanabh Ratnakara85e9982012-10-20 06:02:40 +0000999 if (!lancer_chip(adapter) && !be_physfn(adapter)) {
Ajit Khaparde80817cb2011-12-30 12:15:12 +00001000 status = -EINVAL;
1001 goto ret;
1002 }
Sarveshwar Bandiba343c72010-03-31 02:56:12 +00001003
Padmanabh Ratnakara85e9982012-10-20 06:02:40 +00001004 /* Packets with VID 0 are always received by Lancer by default */
1005 if (lancer_chip(adapter) && vid == 0)
1006 goto ret;
1007
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001008 adapter->vlan_tag[vid] = 1;
Ajit Khaparde82903e42010-02-09 01:34:57 +00001009 if (adapter->vlans_added <= (adapter->max_vlans + 1))
Sathya Perla10329df2012-06-05 19:37:18 +00001010 status = be_vid_config(adapter);
Jiri Pirko8e586132011-12-08 19:52:37 -05001011
Ajit Khaparde80817cb2011-12-30 12:15:12 +00001012 if (!status)
1013 adapter->vlans_added++;
1014 else
1015 adapter->vlan_tag[vid] = 0;
1016ret:
1017 return status;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001018}
1019
Patrick McHardy80d5c362013-04-19 02:04:28 +00001020static int be_vlan_rem_vid(struct net_device *netdev, __be16 proto, u16 vid)
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001021{
1022 struct be_adapter *adapter = netdev_priv(netdev);
Ajit Khaparde80817cb2011-12-30 12:15:12 +00001023 int status = 0;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001024
Padmanabh Ratnakara85e9982012-10-20 06:02:40 +00001025 if (!lancer_chip(adapter) && !be_physfn(adapter)) {
Ajit Khaparde80817cb2011-12-30 12:15:12 +00001026 status = -EINVAL;
1027 goto ret;
1028 }
Sarveshwar Bandiba343c72010-03-31 02:56:12 +00001029
Padmanabh Ratnakara85e9982012-10-20 06:02:40 +00001030 /* Packets with VID 0 are always received by Lancer by default */
1031 if (lancer_chip(adapter) && vid == 0)
1032 goto ret;
1033
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001034 adapter->vlan_tag[vid] = 0;
Ajit Khaparde82903e42010-02-09 01:34:57 +00001035 if (adapter->vlans_added <= adapter->max_vlans)
Sathya Perla10329df2012-06-05 19:37:18 +00001036 status = be_vid_config(adapter);
Jiri Pirko8e586132011-12-08 19:52:37 -05001037
Ajit Khaparde80817cb2011-12-30 12:15:12 +00001038 if (!status)
1039 adapter->vlans_added--;
1040 else
1041 adapter->vlan_tag[vid] = 1;
1042ret:
1043 return status;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001044}
1045
/* ndo_set_rx_mode handler: program the RX filters (promiscuous,
 * multicast, secondary unicast MACs) to match the netdev flags and
 * address lists, falling back to promisc modes when the HW filter
 * capacity is exceeded.
 */
static void be_set_rx_mode(struct net_device *netdev)
{
	struct be_adapter *adapter = netdev_priv(netdev);
	int status;

	if (netdev->flags & IFF_PROMISC) {
		be_cmd_rx_filter(adapter, IFF_PROMISC, ON);
		adapter->promiscuous = true;
		goto done;
	}

	/* BE was previously in promiscuous mode; disable it */
	if (adapter->promiscuous) {
		adapter->promiscuous = false;
		be_cmd_rx_filter(adapter, IFF_PROMISC, OFF);

		/* re-program the VLAN filters that promisc mode bypassed */
		if (adapter->vlans_added)
			be_vid_config(adapter);
	}

	/* Enable multicast promisc if num configured exceeds what we support */
	if (netdev->flags & IFF_ALLMULTI ||
	    netdev_mc_count(netdev) > adapter->max_mcast_mac) {
		be_cmd_rx_filter(adapter, IFF_ALLMULTI, ON);
		goto done;
	}

	if (netdev_uc_count(netdev) != adapter->uc_macs) {
		struct netdev_hw_addr *ha;
		int i = 1; /* First slot is claimed by the Primary MAC */

		/* drop all previously-programmed secondary unicast MACs */
		for (; adapter->uc_macs > 0; adapter->uc_macs--, i++) {
			be_cmd_pmac_del(adapter, adapter->if_handle,
					adapter->pmac_id[i], 0);
		}

		/* too many unicast addresses for the HW filters: go promisc */
		if (netdev_uc_count(netdev) > adapter->max_pmac_cnt) {
			be_cmd_rx_filter(adapter, IFF_PROMISC, ON);
			adapter->promiscuous = true;
			goto done;
		}

		netdev_for_each_uc_addr(ha, adapter->netdev) {
			adapter->uc_macs++; /* First slot is for Primary MAC */
			be_cmd_pmac_add(adapter, (u8 *)ha->addr,
					adapter->if_handle,
					&adapter->pmac_id[adapter->uc_macs], 0);
		}
	}

	status = be_cmd_rx_filter(adapter, IFF_MULTICAST, ON);

	/* Set to MCAST promisc mode if setting MULTICAST address fails */
	if (status) {
		dev_info(&adapter->pdev->dev, "Exhausted multicast HW filters.\n");
		dev_info(&adapter->pdev->dev, "Disabling HW multicast filtering.\n");
		be_cmd_rx_filter(adapter, IFF_ALLMULTI, ON);
	}
done:
	return;
}
1107
/* ndo_set_vf_mac handler: replace a VF's MAC filter with @mac.
 * Lancer uses the MAC-list FW API; BE chips delete the old pmac entry
 * and add a new one. On success the new MAC is cached in vf_cfg.
 */
static int be_set_vf_mac(struct net_device *netdev, int vf, u8 *mac)
{
	struct be_adapter *adapter = netdev_priv(netdev);
	struct be_vf_cfg *vf_cfg = &adapter->vf_cfg[vf];
	int status;
	bool active_mac = false;
	u32 pmac_id;
	u8 old_mac[ETH_ALEN];

	if (!sriov_enabled(adapter))
		return -EPERM;

	if (!is_valid_ether_addr(mac) || vf >= adapter->num_vfs)
		return -EINVAL;

	if (lancer_chip(adapter)) {
		/* delete the currently-active MAC, if any, before setting */
		status = be_cmd_get_mac_from_list(adapter, old_mac, &active_mac,
						  &pmac_id, vf + 1);
		if (!status && active_mac)
			be_cmd_pmac_del(adapter, vf_cfg->if_handle,
					pmac_id, vf + 1);

		status = be_cmd_set_mac_list(adapter, mac, 1, vf + 1);
	} else {
		/* NOTE(review): the pmac_del status is overwritten by the
		 * pmac_add status below — a failed delete is ignored here.
		 */
		status = be_cmd_pmac_del(adapter, vf_cfg->if_handle,
					 vf_cfg->pmac_id, vf + 1);

		status = be_cmd_pmac_add(adapter, mac, vf_cfg->if_handle,
					 &vf_cfg->pmac_id, vf + 1);
	}

	if (status)
		dev_err(&adapter->pdev->dev, "MAC %pM set on VF %d Failed\n",
			mac, vf);
	else
		memcpy(vf_cfg->mac_addr, mac, ETH_ALEN);

	return status;
}
1147
Ajit Khaparde64600ea2010-07-23 01:50:34 +00001148static int be_get_vf_config(struct net_device *netdev, int vf,
1149 struct ifla_vf_info *vi)
1150{
1151 struct be_adapter *adapter = netdev_priv(netdev);
Sathya Perla11ac75e2011-12-13 00:58:50 +00001152 struct be_vf_cfg *vf_cfg = &adapter->vf_cfg[vf];
Ajit Khaparde64600ea2010-07-23 01:50:34 +00001153
Sathya Perla11ac75e2011-12-13 00:58:50 +00001154 if (!sriov_enabled(adapter))
Ajit Khaparde64600ea2010-07-23 01:50:34 +00001155 return -EPERM;
1156
Sathya Perla11ac75e2011-12-13 00:58:50 +00001157 if (vf >= adapter->num_vfs)
Ajit Khaparde64600ea2010-07-23 01:50:34 +00001158 return -EINVAL;
1159
1160 vi->vf = vf;
Sathya Perla11ac75e2011-12-13 00:58:50 +00001161 vi->tx_rate = vf_cfg->tx_rate;
1162 vi->vlan = vf_cfg->vlan_tag;
Ajit Khaparde64600ea2010-07-23 01:50:34 +00001163 vi->qos = 0;
Sathya Perla11ac75e2011-12-13 00:58:50 +00001164 memcpy(&vi->mac, vf_cfg->mac_addr, ETH_ALEN);
Ajit Khaparde64600ea2010-07-23 01:50:34 +00001165
1166 return 0;
1167}
1168
Ajit Khaparde1da87b72010-07-23 01:51:22 +00001169static int be_set_vf_vlan(struct net_device *netdev,
1170 int vf, u16 vlan, u8 qos)
1171{
1172 struct be_adapter *adapter = netdev_priv(netdev);
1173 int status = 0;
1174
Sathya Perla11ac75e2011-12-13 00:58:50 +00001175 if (!sriov_enabled(adapter))
Ajit Khaparde1da87b72010-07-23 01:51:22 +00001176 return -EPERM;
1177
Sathya Perla11ac75e2011-12-13 00:58:50 +00001178 if (vf >= adapter->num_vfs || vlan > 4095)
Ajit Khaparde1da87b72010-07-23 01:51:22 +00001179 return -EINVAL;
1180
1181 if (vlan) {
Ajit Khapardef1f3ee12012-03-18 06:23:41 +00001182 if (adapter->vf_cfg[vf].vlan_tag != vlan) {
1183 /* If this is new value, program it. Else skip. */
1184 adapter->vf_cfg[vf].vlan_tag = vlan;
1185
1186 status = be_cmd_set_hsw_config(adapter, vlan,
1187 vf + 1, adapter->vf_cfg[vf].if_handle);
1188 }
Ajit Khaparde1da87b72010-07-23 01:51:22 +00001189 } else {
Ajit Khapardef1f3ee12012-03-18 06:23:41 +00001190 /* Reset Transparent Vlan Tagging. */
Sathya Perla11ac75e2011-12-13 00:58:50 +00001191 adapter->vf_cfg[vf].vlan_tag = 0;
Ajit Khapardef1f3ee12012-03-18 06:23:41 +00001192 vlan = adapter->vf_cfg[vf].def_vid;
1193 status = be_cmd_set_hsw_config(adapter, vlan, vf + 1,
1194 adapter->vf_cfg[vf].if_handle);
Ajit Khaparde1da87b72010-07-23 01:51:22 +00001195 }
1196
Ajit Khaparde1da87b72010-07-23 01:51:22 +00001197
1198 if (status)
1199 dev_info(&adapter->pdev->dev,
1200 "VLAN %d config on VF %d failed\n", vlan, vf);
1201 return status;
1202}
1203
Ajit Khapardee1d18732010-07-23 01:52:13 +00001204static int be_set_vf_tx_rate(struct net_device *netdev,
1205 int vf, int rate)
1206{
1207 struct be_adapter *adapter = netdev_priv(netdev);
1208 int status = 0;
1209
Sathya Perla11ac75e2011-12-13 00:58:50 +00001210 if (!sriov_enabled(adapter))
Ajit Khapardee1d18732010-07-23 01:52:13 +00001211 return -EPERM;
1212
Ajit Khaparde94f434c2011-12-30 12:15:30 +00001213 if (vf >= adapter->num_vfs)
Ajit Khapardee1d18732010-07-23 01:52:13 +00001214 return -EINVAL;
1215
Ajit Khaparde94f434c2011-12-30 12:15:30 +00001216 if (rate < 100 || rate > 10000) {
1217 dev_err(&adapter->pdev->dev,
1218 "tx rate must be between 100 and 10000 Mbps\n");
1219 return -EINVAL;
1220 }
Ajit Khapardee1d18732010-07-23 01:52:13 +00001221
Padmanabh Ratnakard5c18472012-10-20 06:01:53 +00001222 if (lancer_chip(adapter))
1223 status = be_cmd_set_profile_config(adapter, rate / 10, vf + 1);
1224 else
1225 status = be_cmd_set_qos(adapter, rate / 10, vf + 1);
Ajit Khapardee1d18732010-07-23 01:52:13 +00001226
1227 if (status)
Ajit Khaparde94f434c2011-12-30 12:15:30 +00001228 dev_err(&adapter->pdev->dev,
Ajit Khapardee1d18732010-07-23 01:52:13 +00001229 "tx rate %d on VF %d failed\n", rate, vf);
Ajit Khaparde94f434c2011-12-30 12:15:30 +00001230 else
1231 adapter->vf_cfg[vf].tx_rate = rate;
Ajit Khapardee1d18732010-07-23 01:52:13 +00001232 return status;
1233}
1234
Sathya Perla39f1d942012-05-08 19:41:24 +00001235static int be_find_vfs(struct be_adapter *adapter, int vf_state)
1236{
1237 struct pci_dev *dev, *pdev = adapter->pdev;
Ivan Vecera2f6a0262012-10-01 01:56:55 +00001238 int vfs = 0, assigned_vfs = 0, pos;
Sathya Perla39f1d942012-05-08 19:41:24 +00001239 u16 offset, stride;
1240
1241 pos = pci_find_ext_capability(pdev, PCI_EXT_CAP_ID_SRIOV);
Sathya Perlad79c0a22012-06-05 19:37:22 +00001242 if (!pos)
1243 return 0;
Sathya Perla39f1d942012-05-08 19:41:24 +00001244 pci_read_config_word(pdev, pos + PCI_SRIOV_VF_OFFSET, &offset);
1245 pci_read_config_word(pdev, pos + PCI_SRIOV_VF_STRIDE, &stride);
1246
1247 dev = pci_get_device(pdev->vendor, PCI_ANY_ID, NULL);
1248 while (dev) {
Ivan Vecera2f6a0262012-10-01 01:56:55 +00001249 if (dev->is_virtfn && pci_physfn(dev) == pdev) {
Sathya Perla39f1d942012-05-08 19:41:24 +00001250 vfs++;
1251 if (dev->dev_flags & PCI_DEV_FLAGS_ASSIGNED)
1252 assigned_vfs++;
1253 }
1254 dev = pci_get_device(pdev->vendor, PCI_ANY_ID, dev);
1255 }
1256 return (vf_state == ASSIGNED) ? assigned_vfs : vfs;
1257}
1258
/* Adaptive interrupt coalescing: recompute the EQ delay for @eqo from
 * the RX packet rate over the last interval (>= 1s) and program it via
 * FW only when the value changes.
 */
static void be_eqd_update(struct be_adapter *adapter, struct be_eq_obj *eqo)
{
	struct be_rx_stats *stats = rx_stats(&adapter->rx_obj[eqo->idx]);
	ulong now = jiffies;
	ulong delta = now - stats->rx_jiffies;
	u64 pkts;
	unsigned int start, eqd;

	/* AIC disabled: just program the statically configured delay */
	if (!eqo->enable_aic) {
		eqd = eqo->eqd;
		goto modify_eqd;
	}

	/* EQs beyond the RX queues have no RX stats to adapt on */
	if (eqo->idx >= adapter->num_rx_qs)
		return;

	stats = rx_stats(&adapter->rx_obj[eqo->idx]);

	/* Wrapped around */
	if (time_before(now, stats->rx_jiffies)) {
		stats->rx_jiffies = now;
		return;
	}

	/* Update once a second */
	if (delta < HZ)
		return;

	/* snapshot the 64-bit packet counter consistently vs. the writer */
	do {
		start = u64_stats_fetch_begin_bh(&stats->sync);
		pkts = stats->rx_pkts;
	} while (u64_stats_fetch_retry_bh(&stats->sync, start));

	stats->rx_pps = (unsigned long)(pkts - stats->rx_pkts_prev) / (delta / HZ);
	stats->rx_pkts_prev = pkts;
	stats->rx_jiffies = now;
	/* map pkts/sec to an EQ delay, clamped to [min_eqd, max_eqd];
	 * very low rates get no delay at all
	 */
	eqd = (stats->rx_pps / 110000) << 3;
	eqd = min(eqd, eqo->max_eqd);
	eqd = max(eqd, eqo->min_eqd);
	if (eqd < 10)
		eqd = 0;

modify_eqd:
	if (eqd != eqo->cur_eqd) {
		be_cmd_modify_eqd(adapter, eqo->q.id, eqd);
		eqo->cur_eqd = eqd;
	}
}
1307
/* Accumulate per-RX-queue counters from one RX completion, inside the
 * u64_stats bracket so 64-bit readers see a consistent snapshot.
 */
static void be_rx_stats_update(struct be_rx_obj *rxo,
		struct be_rx_compl_info *rxcp)
{
	struct be_rx_stats *stats = rx_stats(rxo);

	u64_stats_update_begin(&stats->sync);
	stats->rx_compl++;
	stats->rx_bytes += rxcp->pkt_size;
	stats->rx_pkts++;
	if (rxcp->pkt_type == BE_MULTICAST_PACKET)
		stats->rx_mcast_pkts++;
	if (rxcp->err)
		stats->rx_compl_err++;
	u64_stats_update_end(&stats->sync);
}
1323
Sathya Perla2e588f82011-03-11 02:49:26 +00001324static inline bool csum_passed(struct be_rx_compl_info *rxcp)
Ajit Khaparde728a9972009-04-13 15:41:22 -07001325{
Padmanabh Ratnakar19fad862011-03-07 03:08:16 +00001326 /* L4 checksum is not reliable for non TCP/UDP packets.
1327 * Also ignore ipcksm for ipv6 pkts */
Sathya Perla2e588f82011-03-11 02:49:26 +00001328 return (rxcp->tcpf || rxcp->udpf) && rxcp->l4_csum &&
1329 (rxcp->ip_csum || rxcp->ipv6);
Ajit Khaparde728a9972009-04-13 15:41:22 -07001330}
1331
Sathya Perla10ef9ab2012-02-09 18:05:27 +00001332static struct be_rx_page_info *get_rx_page_info(struct be_rx_obj *rxo,
1333 u16 frag_idx)
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001334{
Sathya Perla10ef9ab2012-02-09 18:05:27 +00001335 struct be_adapter *adapter = rxo->adapter;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001336 struct be_rx_page_info *rx_page_info;
Sathya Perla3abcded2010-10-03 22:12:27 -07001337 struct be_queue_info *rxq = &rxo->q;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001338
Sathya Perla3abcded2010-10-03 22:12:27 -07001339 rx_page_info = &rxo->page_info_tbl[frag_idx];
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001340 BUG_ON(!rx_page_info->page);
1341
Ajit Khaparde205859a2010-02-09 01:34:21 +00001342 if (rx_page_info->last_page_user) {
Ivan Vecera2b7bceb2011-02-02 08:05:12 +00001343 dma_unmap_page(&adapter->pdev->dev,
1344 dma_unmap_addr(rx_page_info, bus),
1345 adapter->big_page_size, DMA_FROM_DEVICE);
Ajit Khaparde205859a2010-02-09 01:34:21 +00001346 rx_page_info->last_page_user = false;
1347 }
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001348
1349 atomic_dec(&rxq->used);
1350 return rx_page_info;
1351}
1352
1353/* Throwaway the data in the Rx completion */
Sathya Perla10ef9ab2012-02-09 18:05:27 +00001354static void be_rx_compl_discard(struct be_rx_obj *rxo,
1355 struct be_rx_compl_info *rxcp)
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001356{
Sathya Perla3abcded2010-10-03 22:12:27 -07001357 struct be_queue_info *rxq = &rxo->q;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001358 struct be_rx_page_info *page_info;
Sathya Perla2e588f82011-03-11 02:49:26 +00001359 u16 i, num_rcvd = rxcp->num_rcvd;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001360
Padmanabh Ratnakare80d9da2011-03-07 03:07:58 +00001361 for (i = 0; i < num_rcvd; i++) {
Sathya Perla10ef9ab2012-02-09 18:05:27 +00001362 page_info = get_rx_page_info(rxo, rxcp->rxq_idx);
Padmanabh Ratnakare80d9da2011-03-07 03:07:58 +00001363 put_page(page_info->page);
1364 memset(page_info, 0, sizeof(*page_info));
Sathya Perla2e588f82011-03-11 02:49:26 +00001365 index_inc(&rxcp->rxq_idx, rxq->len);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001366 }
1367}
1368
/*
 * skb_fill_rx_data forms a complete skb for an ether frame
 * indicated by rxcp.
 * Tiny frames (<= BE_HDR_LEN) are copied entirely into the skb linear
 * area; larger frames get only the Ethernet header copied, with the
 * payload attached as page frags.  Frags sharing one physical page are
 * coalesced into a single skb frag slot.
 */
static void skb_fill_rx_data(struct be_rx_obj *rxo, struct sk_buff *skb,
			     struct be_rx_compl_info *rxcp)
{
	struct be_queue_info *rxq = &rxo->q;
	struct be_rx_page_info *page_info;
	u16 i, j;
	u16 hdr_len, curr_frag_len, remaining;
	u8 *start;

	page_info = get_rx_page_info(rxo, rxcp->rxq_idx);
	start = page_address(page_info->page) + page_info->page_offset;
	prefetch(start);

	/* Copy data in the first descriptor of this completion */
	curr_frag_len = min(rxcp->pkt_size, rx_frag_size);

	skb->len = curr_frag_len;
	if (curr_frag_len <= BE_HDR_LEN) { /* tiny packet */
		memcpy(skb->data, start, curr_frag_len);
		/* Complete packet has now been moved to data */
		put_page(page_info->page);
		skb->data_len = 0;
		skb->tail += curr_frag_len;
	} else {
		/* pull only the Ethernet header; payload stays in the page */
		hdr_len = ETH_HLEN;
		memcpy(skb->data, start, hdr_len);
		skb_shinfo(skb)->nr_frags = 1;
		skb_frag_set_page(skb, 0, page_info->page);
		skb_shinfo(skb)->frags[0].page_offset =
					page_info->page_offset + hdr_len;
		skb_frag_size_set(&skb_shinfo(skb)->frags[0], curr_frag_len - hdr_len);
		skb->data_len = curr_frag_len - hdr_len;
		skb->truesize += rx_frag_size;
		skb->tail += hdr_len;
	}
	page_info->page = NULL;

	/* single-frag frame: fully consumed above */
	if (rxcp->pkt_size <= rx_frag_size) {
		BUG_ON(rxcp->num_rcvd != 1);
		return;
	}

	/* More frags present for this completion */
	index_inc(&rxcp->rxq_idx, rxq->len);
	remaining = rxcp->pkt_size - curr_frag_len;
	for (i = 1, j = 0; i < rxcp->num_rcvd; i++) {
		page_info = get_rx_page_info(rxo, rxcp->rxq_idx);
		curr_frag_len = min(remaining, rx_frag_size);

		/* Coalesce all frags from the same physical page in one slot */
		if (page_info->page_offset == 0) {
			/* Fresh page */
			j++;
			skb_frag_set_page(skb, j, page_info->page);
			skb_shinfo(skb)->frags[j].page_offset =
							page_info->page_offset;
			skb_frag_size_set(&skb_shinfo(skb)->frags[j], 0);
			skb_shinfo(skb)->nr_frags++;
		} else {
			/* same page as previous frag: the extra page ref
			 * taken at post time is dropped here */
			put_page(page_info->page);
		}

		skb_frag_size_add(&skb_shinfo(skb)->frags[j], curr_frag_len);
		skb->len += curr_frag_len;
		skb->data_len += curr_frag_len;
		skb->truesize += rx_frag_size;
		remaining -= curr_frag_len;
		index_inc(&rxcp->rxq_idx, rxq->len);
		page_info->page = NULL;
	}
	BUG_ON(j > MAX_SKB_FRAGS);
}
1445
Ajit Khaparde5be93b92009-07-21 12:36:19 -07001446/* Process the RX completion indicated by rxcp when GRO is disabled */
Sathya Perla10ef9ab2012-02-09 18:05:27 +00001447static void be_rx_compl_process(struct be_rx_obj *rxo,
1448 struct be_rx_compl_info *rxcp)
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001449{
Sathya Perla10ef9ab2012-02-09 18:05:27 +00001450 struct be_adapter *adapter = rxo->adapter;
Michał Mirosław6332c8d2011-04-07 02:43:48 +00001451 struct net_device *netdev = adapter->netdev;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001452 struct sk_buff *skb;
Sathya Perla89420422010-02-17 01:35:26 +00001453
Eric Dumazetbb349bb2012-01-25 03:56:30 +00001454 skb = netdev_alloc_skb_ip_align(netdev, BE_RX_SKB_ALLOC_SIZE);
Sathya Perlaa058a632010-02-17 01:34:22 +00001455 if (unlikely(!skb)) {
Sathya Perlaac124ff2011-07-25 19:10:14 +00001456 rx_stats(rxo)->rx_drops_no_skbs++;
Sathya Perla10ef9ab2012-02-09 18:05:27 +00001457 be_rx_compl_discard(rxo, rxcp);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001458 return;
1459 }
1460
Sathya Perla10ef9ab2012-02-09 18:05:27 +00001461 skb_fill_rx_data(rxo, skb, rxcp);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001462
Michał Mirosław6332c8d2011-04-07 02:43:48 +00001463 if (likely((netdev->features & NETIF_F_RXCSUM) && csum_passed(rxcp)))
Ajit Khaparde728a9972009-04-13 15:41:22 -07001464 skb->ip_summed = CHECKSUM_UNNECESSARY;
Somnath Koturc6ce2f42010-10-25 01:11:58 +00001465 else
1466 skb_checksum_none_assert(skb);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001467
Michał Mirosław6332c8d2011-04-07 02:43:48 +00001468 skb->protocol = eth_type_trans(skb, netdev);
Somnath Koturaaa6dae2012-05-02 03:40:49 +00001469 skb_record_rx_queue(skb, rxo - &adapter->rx_obj[0]);
Sathya Perla10ef9ab2012-02-09 18:05:27 +00001470 if (netdev->features & NETIF_F_RXHASH)
Ajit Khaparde4b972912011-04-06 18:07:43 +00001471 skb->rxhash = rxcp->rss_hash;
1472
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001473
Jiri Pirko343e43c2011-08-25 02:50:51 +00001474 if (rxcp->vlanf)
Patrick McHardy86a9bad2013-04-19 02:04:30 +00001475 __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), rxcp->vlan_tag);
Ajit Khaparde4c5102f2011-07-12 22:10:01 -07001476
1477 netif_receive_skb(skb);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001478}
1479
/* Process the RX completion indicated by rxcp when GRO is enabled:
 * attach the completion's page frags to a napi-provided skb and pass
 * it to napi_gro_frags().  Frags sharing one physical page are
 * coalesced into a single skb frag slot.
 */
void be_rx_compl_process_gro(struct be_rx_obj *rxo, struct napi_struct *napi,
			     struct be_rx_compl_info *rxcp)
{
	struct be_adapter *adapter = rxo->adapter;
	struct be_rx_page_info *page_info;
	struct sk_buff *skb = NULL;
	struct be_queue_info *rxq = &rxo->q;
	u16 remaining, curr_frag_len;
	u16 i, j;

	skb = napi_get_frags(napi);
	if (!skb) {
		/* no skb available: drop this completion's buffers */
		be_rx_compl_discard(rxo, rxcp);
		return;
	}

	remaining = rxcp->pkt_size;
	/* j is u16, so the -1 init wraps; the first iteration (i == 0)
	 * always takes the j++ branch below, making j start at 0 */
	for (i = 0, j = -1; i < rxcp->num_rcvd; i++) {
		page_info = get_rx_page_info(rxo, rxcp->rxq_idx);

		curr_frag_len = min(remaining, rx_frag_size);

		/* Coalesce all frags from the same physical page in one slot */
		if (i == 0 || page_info->page_offset == 0) {
			/* First frag or Fresh page */
			j++;
			skb_frag_set_page(skb, j, page_info->page);
			skb_shinfo(skb)->frags[j].page_offset =
							page_info->page_offset;
			skb_frag_size_set(&skb_shinfo(skb)->frags[j], 0);
		} else {
			/* same page as previous frag: drop the extra ref */
			put_page(page_info->page);
		}
		skb_frag_size_add(&skb_shinfo(skb)->frags[j], curr_frag_len);
		skb->truesize += rx_frag_size;
		remaining -= curr_frag_len;
		index_inc(&rxcp->rxq_idx, rxq->len);
		memset(page_info, 0, sizeof(*page_info));
	}
	BUG_ON(j > MAX_SKB_FRAGS);

	skb_shinfo(skb)->nr_frags = j + 1;
	skb->len = rxcp->pkt_size;
	skb->data_len = rxcp->pkt_size;
	skb->ip_summed = CHECKSUM_UNNECESSARY;
	skb_record_rx_queue(skb, rxo - &adapter->rx_obj[0]);
	if (adapter->netdev->features & NETIF_F_RXHASH)
		skb->rxhash = rxcp->rss_hash;

	if (rxcp->vlanf)
		__vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), rxcp->vlan_tag);

	napi_gro_frags(napi);
}
1535
Sathya Perla10ef9ab2012-02-09 18:05:27 +00001536static void be_parse_rx_compl_v1(struct be_eth_rx_compl *compl,
1537 struct be_rx_compl_info *rxcp)
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001538{
Sathya Perla2e588f82011-03-11 02:49:26 +00001539 rxcp->pkt_size =
1540 AMAP_GET_BITS(struct amap_eth_rx_compl_v1, pktsize, compl);
1541 rxcp->vlanf = AMAP_GET_BITS(struct amap_eth_rx_compl_v1, vtp, compl);
1542 rxcp->err = AMAP_GET_BITS(struct amap_eth_rx_compl_v1, err, compl);
1543 rxcp->tcpf = AMAP_GET_BITS(struct amap_eth_rx_compl_v1, tcpf, compl);
Padmanabh Ratnakar9ecb42f2011-03-15 14:57:09 -07001544 rxcp->udpf = AMAP_GET_BITS(struct amap_eth_rx_compl_v1, udpf, compl);
Sathya Perla2e588f82011-03-11 02:49:26 +00001545 rxcp->ip_csum =
1546 AMAP_GET_BITS(struct amap_eth_rx_compl_v1, ipcksm, compl);
1547 rxcp->l4_csum =
1548 AMAP_GET_BITS(struct amap_eth_rx_compl_v1, l4_cksm, compl);
1549 rxcp->ipv6 =
1550 AMAP_GET_BITS(struct amap_eth_rx_compl_v1, ip_version, compl);
1551 rxcp->rxq_idx =
1552 AMAP_GET_BITS(struct amap_eth_rx_compl_v1, fragndx, compl);
1553 rxcp->num_rcvd =
1554 AMAP_GET_BITS(struct amap_eth_rx_compl_v1, numfrags, compl);
1555 rxcp->pkt_type =
1556 AMAP_GET_BITS(struct amap_eth_rx_compl_v1, cast_enc, compl);
Ajit Khaparde4b972912011-04-06 18:07:43 +00001557 rxcp->rss_hash =
Sarveshwar Bandic2979772012-07-25 21:29:50 +00001558 AMAP_GET_BITS(struct amap_eth_rx_compl_v1, rsshash, compl);
Sathya Perla15d72182011-03-21 20:49:26 +00001559 if (rxcp->vlanf) {
1560 rxcp->vtm = AMAP_GET_BITS(struct amap_eth_rx_compl_v1, vtm,
David S. Miller3c709f82011-05-11 14:26:15 -04001561 compl);
1562 rxcp->vlan_tag = AMAP_GET_BITS(struct amap_eth_rx_compl_v1, vlan_tag,
1563 compl);
Sathya Perla15d72182011-03-21 20:49:26 +00001564 }
Sathya Perla12004ae2011-08-02 19:57:46 +00001565 rxcp->port = AMAP_GET_BITS(struct amap_eth_rx_compl_v1, port, compl);
Sathya Perla2e588f82011-03-11 02:49:26 +00001566}
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001567
Sathya Perla10ef9ab2012-02-09 18:05:27 +00001568static void be_parse_rx_compl_v0(struct be_eth_rx_compl *compl,
1569 struct be_rx_compl_info *rxcp)
Sathya Perla2e588f82011-03-11 02:49:26 +00001570{
1571 rxcp->pkt_size =
1572 AMAP_GET_BITS(struct amap_eth_rx_compl_v0, pktsize, compl);
1573 rxcp->vlanf = AMAP_GET_BITS(struct amap_eth_rx_compl_v0, vtp, compl);
1574 rxcp->err = AMAP_GET_BITS(struct amap_eth_rx_compl_v0, err, compl);
1575 rxcp->tcpf = AMAP_GET_BITS(struct amap_eth_rx_compl_v0, tcpf, compl);
Padmanabh Ratnakar9ecb42f2011-03-15 14:57:09 -07001576 rxcp->udpf = AMAP_GET_BITS(struct amap_eth_rx_compl_v0, udpf, compl);
Sathya Perla2e588f82011-03-11 02:49:26 +00001577 rxcp->ip_csum =
1578 AMAP_GET_BITS(struct amap_eth_rx_compl_v0, ipcksm, compl);
1579 rxcp->l4_csum =
1580 AMAP_GET_BITS(struct amap_eth_rx_compl_v0, l4_cksm, compl);
1581 rxcp->ipv6 =
1582 AMAP_GET_BITS(struct amap_eth_rx_compl_v0, ip_version, compl);
1583 rxcp->rxq_idx =
1584 AMAP_GET_BITS(struct amap_eth_rx_compl_v0, fragndx, compl);
1585 rxcp->num_rcvd =
1586 AMAP_GET_BITS(struct amap_eth_rx_compl_v0, numfrags, compl);
1587 rxcp->pkt_type =
1588 AMAP_GET_BITS(struct amap_eth_rx_compl_v0, cast_enc, compl);
Ajit Khaparde4b972912011-04-06 18:07:43 +00001589 rxcp->rss_hash =
Sarveshwar Bandic2979772012-07-25 21:29:50 +00001590 AMAP_GET_BITS(struct amap_eth_rx_compl_v0, rsshash, compl);
Sathya Perla15d72182011-03-21 20:49:26 +00001591 if (rxcp->vlanf) {
1592 rxcp->vtm = AMAP_GET_BITS(struct amap_eth_rx_compl_v0, vtm,
David S. Miller3c709f82011-05-11 14:26:15 -04001593 compl);
1594 rxcp->vlan_tag = AMAP_GET_BITS(struct amap_eth_rx_compl_v0, vlan_tag,
1595 compl);
Sathya Perla15d72182011-03-21 20:49:26 +00001596 }
Sathya Perla12004ae2011-08-02 19:57:46 +00001597 rxcp->port = AMAP_GET_BITS(struct amap_eth_rx_compl_v0, port, compl);
Sathya Perla2e588f82011-03-11 02:49:26 +00001598}
1599
/* Return the next valid RX completion from rxo->cq, parsed into
 * rxo->rxcp, or NULL if none is pending.  The HW entry (v0 or v1
 * layout depending on be3_native) is byte-swapped, parsed, and then
 * invalidated so it cannot be consumed twice.
 */
static struct be_rx_compl_info *be_rx_compl_get(struct be_rx_obj *rxo)
{
	struct be_eth_rx_compl *compl = queue_tail_node(&rxo->cq);
	struct be_rx_compl_info *rxcp = &rxo->rxcp;
	struct be_adapter *adapter = rxo->adapter;

	/* For checking the valid bit it is Ok to use either definition as the
	 * valid bit is at the same position in both v0 and v1 Rx compl */
	if (compl->dw[offsetof(struct amap_eth_rx_compl_v1, valid) / 32] == 0)
		return NULL;

	/* read the rest of the entry only after seeing the valid bit */
	rmb();
	be_dws_le_to_cpu(compl, sizeof(*compl));

	if (adapter->be3_native)
		be_parse_rx_compl_v1(compl, rxcp);
	else
		be_parse_rx_compl_v0(compl, rxcp);

	if (rxcp->vlanf) {
		/* vlanf could be wrongly set in some cards.
		 * ignore if vtm is not set */
		if ((adapter->function_mode & FLEX10_MODE) && !rxcp->vtm)
			rxcp->vlanf = 0;

		/* non-Lancer chips report the tag byte-swapped */
		if (!lancer_chip(adapter))
			rxcp->vlan_tag = swab16(rxcp->vlan_tag);

		/* drop the vlan indication for frames carrying the port's
		 * pvid when that vlan is not configured on the interface */
		if (adapter->pvid == (rxcp->vlan_tag & VLAN_VID_MASK) &&
		    !adapter->vlan_tag[rxcp->vlan_tag])
			rxcp->vlanf = 0;
	}

	/* As the compl has been parsed, reset it; we wont touch it again */
	compl->dw[offsetof(struct amap_eth_rx_compl_v1, valid) / 32] = 0;

	queue_tail_inc(&rxo->cq);
	return rxcp;
}
1639
Eric Dumazet1829b082011-03-01 05:48:12 +00001640static inline struct page *be_alloc_pages(u32 size, gfp_t gfp)
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001641{
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001642 u32 order = get_order(size);
Eric Dumazet1829b082011-03-01 05:48:12 +00001643
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001644 if (order > 0)
Eric Dumazet1829b082011-03-01 05:48:12 +00001645 gfp |= __GFP_COMP;
1646 return alloc_pages(gfp, order);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001647}
1648
1649/*
1650 * Allocate a page, split it to fragments of size rx_frag_size and post as
1651 * receive buffers to BE
1652 */
Eric Dumazet1829b082011-03-01 05:48:12 +00001653static void be_post_rx_frags(struct be_rx_obj *rxo, gfp_t gfp)
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001654{
Sathya Perla3abcded2010-10-03 22:12:27 -07001655 struct be_adapter *adapter = rxo->adapter;
Sathya Perla26d92f92010-01-21 22:52:08 -08001656 struct be_rx_page_info *page_info = NULL, *prev_page_info = NULL;
Sathya Perla3abcded2010-10-03 22:12:27 -07001657 struct be_queue_info *rxq = &rxo->q;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001658 struct page *pagep = NULL;
1659 struct be_eth_rx_d *rxd;
1660 u64 page_dmaaddr = 0, frag_dmaaddr;
1661 u32 posted, page_offset = 0;
1662
Sathya Perla3abcded2010-10-03 22:12:27 -07001663 page_info = &rxo->page_info_tbl[rxq->head];
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001664 for (posted = 0; posted < MAX_RX_POST && !page_info->page; posted++) {
1665 if (!pagep) {
Eric Dumazet1829b082011-03-01 05:48:12 +00001666 pagep = be_alloc_pages(adapter->big_page_size, gfp);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001667 if (unlikely(!pagep)) {
Sathya Perlaac124ff2011-07-25 19:10:14 +00001668 rx_stats(rxo)->rx_post_fail++;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001669 break;
1670 }
Ivan Vecera2b7bceb2011-02-02 08:05:12 +00001671 page_dmaaddr = dma_map_page(&adapter->pdev->dev, pagep,
1672 0, adapter->big_page_size,
1673 DMA_FROM_DEVICE);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001674 page_info->page_offset = 0;
1675 } else {
1676 get_page(pagep);
1677 page_info->page_offset = page_offset + rx_frag_size;
1678 }
1679 page_offset = page_info->page_offset;
1680 page_info->page = pagep;
FUJITA Tomonorifac6da52010-04-01 16:53:22 +00001681 dma_unmap_addr_set(page_info, bus, page_dmaaddr);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001682 frag_dmaaddr = page_dmaaddr + page_info->page_offset;
1683
1684 rxd = queue_head_node(rxq);
1685 rxd->fragpa_lo = cpu_to_le32(frag_dmaaddr & 0xFFFFFFFF);
1686 rxd->fragpa_hi = cpu_to_le32(upper_32_bits(frag_dmaaddr));
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001687
1688 /* Any space left in the current big page for another frag? */
1689 if ((page_offset + rx_frag_size + rx_frag_size) >
1690 adapter->big_page_size) {
1691 pagep = NULL;
1692 page_info->last_page_user = true;
1693 }
Sathya Perla26d92f92010-01-21 22:52:08 -08001694
1695 prev_page_info = page_info;
1696 queue_head_inc(rxq);
Sathya Perla10ef9ab2012-02-09 18:05:27 +00001697 page_info = &rxo->page_info_tbl[rxq->head];
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001698 }
1699 if (pagep)
Sathya Perla26d92f92010-01-21 22:52:08 -08001700 prev_page_info->last_page_user = true;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001701
1702 if (posted) {
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001703 atomic_add(posted, &rxq->used);
Sathya Perla8788fdc2009-07-27 22:52:03 +00001704 be_rxq_notify(adapter, rxq->id, posted);
Sathya Perlaea1dae12009-03-19 23:56:20 -07001705 } else if (atomic_read(&rxq->used) == 0) {
1706 /* Let be_worker replenish when memory is available */
Sathya Perla3abcded2010-10-03 22:12:27 -07001707 rxo->rx_post_starved = true;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001708 }
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001709}
1710
/* Return the next valid TX completion from @tx_cq, or NULL if none.
 * The entry is byte-swapped in place, its valid word cleared so it is
 * not seen again, and the CQ tail advanced.
 */
static struct be_eth_tx_compl *be_tx_compl_get(struct be_queue_info *tx_cq)
{
	struct be_eth_tx_compl *txcp = queue_tail_node(tx_cq);

	if (txcp->dw[offsetof(struct amap_eth_tx_compl, valid) / 32] == 0)
		return NULL;

	/* read the rest of the entry only after seeing the valid bit */
	rmb();
	be_dws_le_to_cpu(txcp, sizeof(*txcp));

	txcp->dw[offsetof(struct amap_eth_tx_compl, valid) / 32] = 0;

	queue_tail_inc(tx_cq);
	return txcp;
}
1726
/* Reclaim one transmitted skb.  Walks the TX ring from its tail through
 * @last_index, unmapping each data wrb (the header is unmapped together
 * with the first data wrb) and freeing the skb.  Returns the number of
 * wrbs consumed, including the header wrb, so the caller can credit
 * txq->used.
 */
static u16 be_tx_compl_process(struct be_adapter *adapter,
		struct be_tx_obj *txo, u16 last_index)
{
	struct be_queue_info *txq = &txo->q;
	struct be_eth_wrb *wrb;
	struct sk_buff **sent_skbs = txo->sent_skb_list;
	struct sk_buff *sent_skb;
	u16 cur_index, num_wrbs = 1; /* account for hdr wrb */
	bool unmap_skb_hdr = true;

	sent_skb = sent_skbs[txq->tail];
	BUG_ON(!sent_skb);
	sent_skbs[txq->tail] = NULL;

	/* skip header wrb */
	queue_tail_inc(txq);

	do {
		cur_index = txq->tail;
		wrb = queue_tail_node(txq);
		/* the linear (header) part is unmapped only once, with the
		 * first data wrb */
		unmap_tx_frag(&adapter->pdev->dev, wrb,
			      (unmap_skb_hdr && skb_headlen(sent_skb)));
		unmap_skb_hdr = false;

		num_wrbs++;
		queue_tail_inc(txq);
	} while (cur_index != last_index);

	kfree_skb(sent_skb);
	return num_wrbs;
}
1758
Sathya Perla10ef9ab2012-02-09 18:05:27 +00001759/* Return the number of events in the event queue */
1760static inline int events_get(struct be_eq_obj *eqo)
Sathya Perla859b1e42009-08-10 03:43:51 +00001761{
1762 struct be_eq_entry *eqe;
Sathya Perla10ef9ab2012-02-09 18:05:27 +00001763 int num = 0;
Sathya Perla859b1e42009-08-10 03:43:51 +00001764
Sathya Perla10ef9ab2012-02-09 18:05:27 +00001765 do {
1766 eqe = queue_tail_node(&eqo->q);
1767 if (eqe->evt == 0)
1768 break;
1769
1770 rmb();
Sathya Perla859b1e42009-08-10 03:43:51 +00001771 eqe->evt = 0;
1772 num++;
Sathya Perla10ef9ab2012-02-09 18:05:27 +00001773 queue_tail_inc(&eqo->q);
1774 } while (true);
Sathya Perla859b1e42009-08-10 03:43:51 +00001775
1776 return num;
1777}
1778
Sathya Perla10ef9ab2012-02-09 18:05:27 +00001779/* Leaves the EQ is disarmed state */
1780static void be_eq_clean(struct be_eq_obj *eqo)
1781{
1782 int num = events_get(eqo);
1783
1784 be_eq_notify(eqo->adapter, eqo->q.id, false, true, num);
1785}
1786
/* Drain the RX completion queue and free all RX buffers still posted
 * in the ring.  On BE chips this waits (up to ~10ms) for the HW flush
 * completion, identified by num_rcvd == 0; Lancer needs no such wait.
 */
static void be_rx_cq_clean(struct be_rx_obj *rxo)
{
	struct be_rx_page_info *page_info;
	struct be_queue_info *rxq = &rxo->q;
	struct be_queue_info *rx_cq = &rxo->cq;
	struct be_rx_compl_info *rxcp;
	struct be_adapter *adapter = rxo->adapter;
	int flush_wait = 0;
	u16 tail;

	/* Consume pending rx completions.
	 * Wait for the flush completion (identified by zero num_rcvd)
	 * to arrive. Notify CQ even when there are no more CQ entries
	 * for HW to flush partially coalesced CQ entries.
	 * In Lancer, there is no need to wait for flush compl.
	 */
	for (;;) {
		rxcp = be_rx_compl_get(rxo);
		if (rxcp == NULL) {
			if (lancer_chip(adapter))
				break;

			/* give up after ~10ms or on a detected HW error */
			if (flush_wait++ > 10 || be_hw_error(adapter)) {
				dev_warn(&adapter->pdev->dev,
					 "did not receive flush compl\n");
				break;
			}
			be_cq_notify(adapter, rx_cq->id, true, 0);
			mdelay(1);
		} else {
			be_rx_compl_discard(rxo, rxcp);
			be_cq_notify(adapter, rx_cq->id, true, 1);
			if (rxcp->num_rcvd == 0)
				break;
		}
	}

	/* After cleanup, leave the CQ in unarmed state */
	be_cq_notify(adapter, rx_cq->id, false, 0);

	/* Then free posted rx buffers that were not used */
	tail = (rxq->head + rxq->len - atomic_read(&rxq->used)) % rxq->len;
	for (; atomic_read(&rxq->used) > 0; index_inc(&tail, rxq->len)) {
		page_info = get_rx_page_info(rxo, tail);
		put_page(page_info->page);
		memset(page_info, 0, sizeof(*page_info));
	}
	BUG_ON(atomic_read(&rxq->used));
	rxq->tail = rxq->head = 0;
}
1837
/* Reclaim all TX buffers on teardown.  First polls every TX CQ for up
 * to ~200ms so in-flight completions can arrive and be processed; any
 * wrbs still posted after that are unmapped and freed without ever
 * having received a completion.
 */
static void be_tx_compl_clean(struct be_adapter *adapter)
{
	struct be_tx_obj *txo;
	struct be_queue_info *txq;
	struct be_eth_tx_compl *txcp;
	u16 end_idx, cmpl = 0, timeo = 0, num_wrbs = 0;
	struct sk_buff *sent_skb;
	bool dummy_wrb;
	int i, pending_txqs;

	/* Wait for a max of 200ms for all the tx-completions to arrive. */
	do {
		pending_txqs = adapter->num_tx_qs;

		for_all_tx_queues(adapter, txo, i) {
			txq = &txo->q;
			while ((txcp = be_tx_compl_get(&txo->cq))) {
				end_idx =
					AMAP_GET_BITS(struct amap_eth_tx_compl,
						wrb_index, txcp);
				num_wrbs += be_tx_compl_process(adapter, txo,
						end_idx);
				cmpl++;
			}
			if (cmpl) {
				/* ack the compls and release wrb credits */
				be_cq_notify(adapter, txo->cq.id, false, cmpl);
				atomic_sub(num_wrbs, &txq->used);
				cmpl = 0;
				num_wrbs = 0;
			}
			if (atomic_read(&txq->used) == 0)
				pending_txqs--;
		}

		if (pending_txqs == 0 || ++timeo > 200)
			break;

		mdelay(1);
	} while (true);

	for_all_tx_queues(adapter, txo, i) {
		txq = &txo->q;
		if (atomic_read(&txq->used))
			dev_err(&adapter->pdev->dev, "%d pending tx-compls\n",
				atomic_read(&txq->used));

		/* free posted tx for which compls will never arrive */
		while (atomic_read(&txq->used)) {
			sent_skb = txo->sent_skb_list[txq->tail];
			end_idx = txq->tail;
			/* advance to the skb's last wrb and reclaim the
			 * whole wrb chain */
			num_wrbs = wrb_cnt_for_skb(adapter, sent_skb,
						   &dummy_wrb);
			index_adv(&end_idx, num_wrbs - 1, txq->len);
			num_wrbs = be_tx_compl_process(adapter, txo, end_idx);
			atomic_sub(num_wrbs, &txq->used);
		}
	}
}
1896
/* Tear down every event queue: drain pending events and destroy the EQ
 * in FW (only if it was actually created), then free the queue memory.
 */
static void be_evt_queues_destroy(struct be_adapter *adapter)
{
	struct be_eq_obj *eqo;
	int i;

	for_all_evt_queues(adapter, eqo, i) {
		if (eqo->q.created) {
			/* consume pending events before destroying the EQ */
			be_eq_clean(eqo);
			be_cmd_q_destroy(adapter, &eqo->q, QTYPE_EQ);
		}
		be_queue_free(adapter, &eqo->q);
	}
}
1910
/* Allocate and create one event queue per IRQ vector (num_irqs()).
 * Returns 0 on success or the first failing rc.  NOTE(review): on
 * failure, EQs created so far are left in place — presumably the
 * caller cleans up via be_evt_queues_destroy(); confirm.
 */
static int be_evt_queues_create(struct be_adapter *adapter)
{
	struct be_queue_info *eq;
	struct be_eq_obj *eqo;
	int i, rc;

	adapter->num_evt_qs = num_irqs(adapter);

	for_all_evt_queues(adapter, eqo, i) {
		eqo->adapter = adapter;
		eqo->tx_budget = BE_TX_BUDGET;
		eqo->idx = i;
		eqo->max_eqd = BE_MAX_EQD;
		eqo->enable_aic = true;

		eq = &eqo->q;
		rc = be_queue_alloc(adapter, eq, EVNT_Q_LEN,
					sizeof(struct be_eq_entry));
		if (rc)
			return rc;

		rc = be_cmd_eq_create(adapter, eq, eqo->cur_eqd);
		if (rc)
			return rc;
	}
	return 0;
}
1938
Sathya Perla5fb379e2009-06-18 00:02:59 +00001939static void be_mcc_queues_destroy(struct be_adapter *adapter)
1940{
1941 struct be_queue_info *q;
Sathya Perla5fb379e2009-06-18 00:02:59 +00001942
Sathya Perla8788fdc2009-07-27 22:52:03 +00001943 q = &adapter->mcc_obj.q;
Sathya Perla5fb379e2009-06-18 00:02:59 +00001944 if (q->created)
Sathya Perla8788fdc2009-07-27 22:52:03 +00001945 be_cmd_q_destroy(adapter, q, QTYPE_MCCQ);
Sathya Perla5fb379e2009-06-18 00:02:59 +00001946 be_queue_free(adapter, q);
1947
Sathya Perla8788fdc2009-07-27 22:52:03 +00001948 q = &adapter->mcc_obj.cq;
Sathya Perla5fb379e2009-06-18 00:02:59 +00001949 if (q->created)
Sathya Perla8788fdc2009-07-27 22:52:03 +00001950 be_cmd_q_destroy(adapter, q, QTYPE_CQ);
Sathya Perla5fb379e2009-06-18 00:02:59 +00001951 be_queue_free(adapter, q);
1952}
1953
1954/* Must be called only after TX qs are created as MCC shares TX EQ */
1955static int be_mcc_queues_create(struct be_adapter *adapter)
1956{
1957 struct be_queue_info *q, *cq;
Sathya Perla5fb379e2009-06-18 00:02:59 +00001958
Sathya Perla8788fdc2009-07-27 22:52:03 +00001959 cq = &adapter->mcc_obj.cq;
Sathya Perla5fb379e2009-06-18 00:02:59 +00001960 if (be_queue_alloc(adapter, cq, MCC_CQ_LEN,
Sathya Perlaefd2e402009-07-27 22:53:10 +00001961 sizeof(struct be_mcc_compl)))
Sathya Perla5fb379e2009-06-18 00:02:59 +00001962 goto err;
1963
Sathya Perla10ef9ab2012-02-09 18:05:27 +00001964 /* Use the default EQ for MCC completions */
1965 if (be_cmd_cq_create(adapter, cq, &mcc_eqo(adapter)->q, true, 0))
Sathya Perla5fb379e2009-06-18 00:02:59 +00001966 goto mcc_cq_free;
1967
Sathya Perla8788fdc2009-07-27 22:52:03 +00001968 q = &adapter->mcc_obj.q;
Sathya Perla5fb379e2009-06-18 00:02:59 +00001969 if (be_queue_alloc(adapter, q, MCC_Q_LEN, sizeof(struct be_mcc_wrb)))
1970 goto mcc_cq_destroy;
1971
Sathya Perla8788fdc2009-07-27 22:52:03 +00001972 if (be_cmd_mccq_create(adapter, q, cq))
Sathya Perla5fb379e2009-06-18 00:02:59 +00001973 goto mcc_q_free;
1974
1975 return 0;
1976
1977mcc_q_free:
1978 be_queue_free(adapter, q);
1979mcc_cq_destroy:
Sathya Perla8788fdc2009-07-27 22:52:03 +00001980 be_cmd_q_destroy(adapter, cq, QTYPE_CQ);
Sathya Perla5fb379e2009-06-18 00:02:59 +00001981mcc_cq_free:
1982 be_queue_free(adapter, cq);
1983err:
1984 return -1;
1985}
1986
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001987static void be_tx_queues_destroy(struct be_adapter *adapter)
1988{
1989 struct be_queue_info *q;
Sathya Perla3c8def92011-06-12 20:01:58 +00001990 struct be_tx_obj *txo;
1991 u8 i;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001992
Sathya Perla3c8def92011-06-12 20:01:58 +00001993 for_all_tx_queues(adapter, txo, i) {
1994 q = &txo->q;
1995 if (q->created)
1996 be_cmd_q_destroy(adapter, q, QTYPE_TXQ);
1997 be_queue_free(adapter, q);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001998
Sathya Perla3c8def92011-06-12 20:01:58 +00001999 q = &txo->cq;
2000 if (q->created)
2001 be_cmd_q_destroy(adapter, q, QTYPE_CQ);
2002 be_queue_free(adapter, q);
2003 }
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002004}
2005
Sathya Perladafc0fe2011-10-24 02:45:02 +00002006static int be_num_txqs_want(struct be_adapter *adapter)
2007{
Padmanabh Ratnakarabb93952012-10-20 06:01:41 +00002008 if ((!lancer_chip(adapter) && sriov_want(adapter)) ||
2009 be_is_mc(adapter) ||
2010 (!lancer_chip(adapter) && !be_physfn(adapter)) ||
Sathya Perlaca34fe32012-11-06 17:48:56 +00002011 BE2_chip(adapter))
Sathya Perladafc0fe2011-10-24 02:45:02 +00002012 return 1;
2013 else
Padmanabh Ratnakarabb93952012-10-20 06:01:41 +00002014 return adapter->max_tx_queues;
Sathya Perladafc0fe2011-10-24 02:45:02 +00002015}
2016
Sathya Perla10ef9ab2012-02-09 18:05:27 +00002017static int be_tx_cqs_create(struct be_adapter *adapter)
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002018{
Sathya Perla10ef9ab2012-02-09 18:05:27 +00002019 struct be_queue_info *cq, *eq;
2020 int status;
Sathya Perla3c8def92011-06-12 20:01:58 +00002021 struct be_tx_obj *txo;
2022 u8 i;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002023
Sathya Perladafc0fe2011-10-24 02:45:02 +00002024 adapter->num_tx_qs = be_num_txqs_want(adapter);
Padmanabh Ratnakar3bb62f42011-11-25 05:48:06 +00002025 if (adapter->num_tx_qs != MAX_TX_QS) {
2026 rtnl_lock();
Sathya Perladafc0fe2011-10-24 02:45:02 +00002027 netif_set_real_num_tx_queues(adapter->netdev,
2028 adapter->num_tx_qs);
Padmanabh Ratnakar3bb62f42011-11-25 05:48:06 +00002029 rtnl_unlock();
2030 }
Sathya Perladafc0fe2011-10-24 02:45:02 +00002031
Sathya Perla3c8def92011-06-12 20:01:58 +00002032 for_all_tx_queues(adapter, txo, i) {
2033 cq = &txo->cq;
Sathya Perla10ef9ab2012-02-09 18:05:27 +00002034 status = be_queue_alloc(adapter, cq, TX_CQ_LEN,
2035 sizeof(struct be_eth_tx_compl));
2036 if (status)
2037 return status;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002038
Sathya Perla10ef9ab2012-02-09 18:05:27 +00002039 /* If num_evt_qs is less than num_tx_qs, then more than
2040 * one txq share an eq
2041 */
2042 eq = &adapter->eq_obj[i % adapter->num_evt_qs].q;
2043 status = be_cmd_cq_create(adapter, cq, eq, false, 3);
2044 if (status)
2045 return status;
Sathya Perla3c8def92011-06-12 20:01:58 +00002046 }
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002047 return 0;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002048}
2049
Sathya Perla10ef9ab2012-02-09 18:05:27 +00002050static int be_tx_qs_create(struct be_adapter *adapter)
2051{
2052 struct be_tx_obj *txo;
2053 int i, status;
2054
2055 for_all_tx_queues(adapter, txo, i) {
2056 status = be_queue_alloc(adapter, &txo->q, TX_Q_LEN,
2057 sizeof(struct be_eth_wrb));
2058 if (status)
2059 return status;
2060
Vasundhara Volam94d73aa2013-04-21 23:28:14 +00002061 status = be_cmd_txq_create(adapter, txo);
Sathya Perla10ef9ab2012-02-09 18:05:27 +00002062 if (status)
2063 return status;
2064 }
2065
Sathya Perlad3791422012-09-28 04:39:44 +00002066 dev_info(&adapter->pdev->dev, "created %d TX queue(s)\n",
2067 adapter->num_tx_qs);
Sathya Perla10ef9ab2012-02-09 18:05:27 +00002068 return 0;
2069}
2070
2071static void be_rx_cqs_destroy(struct be_adapter *adapter)
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002072{
2073 struct be_queue_info *q;
Sathya Perla3abcded2010-10-03 22:12:27 -07002074 struct be_rx_obj *rxo;
2075 int i;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002076
Sathya Perla3abcded2010-10-03 22:12:27 -07002077 for_all_rx_queues(adapter, rxo, i) {
Sathya Perla3abcded2010-10-03 22:12:27 -07002078 q = &rxo->cq;
2079 if (q->created)
2080 be_cmd_q_destroy(adapter, q, QTYPE_CQ);
2081 be_queue_free(adapter, q);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002082 }
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002083}
2084
Sathya Perla10ef9ab2012-02-09 18:05:27 +00002085static int be_rx_cqs_create(struct be_adapter *adapter)
Sathya Perlaac6a0c42011-03-21 20:49:25 +00002086{
Sathya Perla10ef9ab2012-02-09 18:05:27 +00002087 struct be_queue_info *eq, *cq;
Sathya Perla3abcded2010-10-03 22:12:27 -07002088 struct be_rx_obj *rxo;
2089 int rc, i;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002090
Sathya Perla10ef9ab2012-02-09 18:05:27 +00002091 /* We'll create as many RSS rings as there are irqs.
2092 * But when there's only one irq there's no use creating RSS rings
2093 */
2094 adapter->num_rx_qs = (num_irqs(adapter) > 1) ?
2095 num_irqs(adapter) + 1 : 1;
Sathya Perla7f640062012-06-05 19:37:20 +00002096 if (adapter->num_rx_qs != MAX_RX_QS) {
2097 rtnl_lock();
2098 netif_set_real_num_rx_queues(adapter->netdev,
2099 adapter->num_rx_qs);
2100 rtnl_unlock();
2101 }
Sathya Perlaac6a0c42011-03-21 20:49:25 +00002102
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002103 adapter->big_page_size = (1 << get_order(rx_frag_size)) * PAGE_SIZE;
Sathya Perla3abcded2010-10-03 22:12:27 -07002104 for_all_rx_queues(adapter, rxo, i) {
2105 rxo->adapter = adapter;
Sathya Perla3abcded2010-10-03 22:12:27 -07002106 cq = &rxo->cq;
2107 rc = be_queue_alloc(adapter, cq, RX_CQ_LEN,
2108 sizeof(struct be_eth_rx_compl));
2109 if (rc)
Sathya Perla10ef9ab2012-02-09 18:05:27 +00002110 return rc;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002111
Sathya Perla10ef9ab2012-02-09 18:05:27 +00002112 eq = &adapter->eq_obj[i % adapter->num_evt_qs].q;
2113 rc = be_cmd_cq_create(adapter, cq, eq, false, 3);
Sathya Perla3abcded2010-10-03 22:12:27 -07002114 if (rc)
Sathya Perla10ef9ab2012-02-09 18:05:27 +00002115 return rc;
Sathya Perla3abcded2010-10-03 22:12:27 -07002116 }
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002117
Sathya Perlad3791422012-09-28 04:39:44 +00002118 dev_info(&adapter->pdev->dev,
2119 "created %d RSS queue(s) and 1 default RX queue\n",
2120 adapter->num_rx_qs - 1);
Sathya Perla10ef9ab2012-02-09 18:05:27 +00002121 return 0;
Sathya Perlab628bde2009-08-17 00:58:26 +00002122}
2123
/* Legacy INTx interrupt handler.  The handler cookie (dev) is the
 * first event-queue object; only EQ0 is used in INTx mode.
 */
static irqreturn_t be_intx(int irq, void *dev)
{
	struct be_eq_obj *eqo = dev;
	struct be_adapter *adapter = eqo->adapter;
	int num_evts = 0;

	/* IRQ is not expected when NAPI is scheduled as the EQ
	 * will not be armed.
	 * But, this can happen on Lancer INTx where it takes
	 * a while to de-assert INTx or in BE2 where occasionally
	 * an interrupt may be raised even when EQ is unarmed.
	 * If NAPI is already scheduled, then counting & notifying
	 * events will orphan them.
	 */
	if (napi_schedule_prep(&eqo->napi)) {
		num_evts = events_get(eqo);
		__napi_schedule(&eqo->napi);
		/* A valid (event-carrying) intr resets the spurious count */
		if (num_evts)
			eqo->spurious_intr = 0;
	}
	be_eq_notify(adapter, eqo->q.id, false, true, num_evts);

	/* Return IRQ_HANDLED only for the first spurious intr
	 * after a valid intr to stop the kernel from branding
	 * this irq as a bad one!
	 */
	if (num_evts || eqo->spurious_intr++ == 0)
		return IRQ_HANDLED;
	else
		return IRQ_NONE;
}
2155
Sathya Perla10ef9ab2012-02-09 18:05:27 +00002156static irqreturn_t be_msix(int irq, void *dev)
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002157{
Sathya Perla10ef9ab2012-02-09 18:05:27 +00002158 struct be_eq_obj *eqo = dev;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002159
Sathya Perla0b545a62012-11-23 00:27:18 +00002160 be_eq_notify(eqo->adapter, eqo->q.id, false, true, 0);
2161 napi_schedule(&eqo->napi);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002162 return IRQ_HANDLED;
2163}
2164
Sathya Perla2e588f82011-03-11 02:49:26 +00002165static inline bool do_gro(struct be_rx_compl_info *rxcp)
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002166{
Sathya Perla2e588f82011-03-11 02:49:26 +00002167 return (rxcp->tcpf && !rxcp->err) ? true : false;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002168}
2169
/* Service up to @budget RX completions from this RX object's CQ.
 *
 * Flush completions (num_rcvd == 0) are skipped; zero-length
 * completions (partial DMA on Lancer B0) and, on BE chips, packets
 * tagged for another port are discarded.  Everything else goes up
 * the stack via GRO or the regular receive path.
 *
 * Returns the number of completions processed.  Also re-posts RX
 * buffers when the ring runs below the refill watermark.
 */
static int be_process_rx(struct be_rx_obj *rxo, struct napi_struct *napi,
			int budget)
{
	struct be_adapter *adapter = rxo->adapter;
	struct be_queue_info *rx_cq = &rxo->cq;
	struct be_rx_compl_info *rxcp;
	u32 work_done;

	for (work_done = 0; work_done < budget; work_done++) {
		rxcp = be_rx_compl_get(rxo);
		if (!rxcp)
			break;

		/* Is it a flush compl that has no data */
		if (unlikely(rxcp->num_rcvd == 0))
			goto loop_continue;

		/* Discard compl with partial DMA Lancer B0 */
		if (unlikely(!rxcp->pkt_size)) {
			be_rx_compl_discard(rxo, rxcp);
			goto loop_continue;
		}

		/* On BE drop pkts that arrive due to imperfect filtering in
		 * promiscuous mode on some skews
		 */
		if (unlikely(rxcp->port != adapter->port_num &&
				!lancer_chip(adapter))) {
			be_rx_compl_discard(rxo, rxcp);
			goto loop_continue;
		}

		if (do_gro(rxcp))
			be_rx_compl_process_gro(rxo, napi, rxcp);
		else
			be_rx_compl_process(rxo, rxcp);
loop_continue:
		/* Stats are updated even for skipped/discarded compls */
		be_rx_stats_update(rxo, rxcp);
	}

	if (work_done) {
		be_cq_notify(adapter, rx_cq->id, true, work_done);

		/* Replenish the RX ring if it has drained too low */
		if (atomic_read(&rxo->q.used) < RX_FRAGS_REFILL_WM)
			be_post_rx_frags(rxo, GFP_ATOMIC);
	}

	return work_done;
}
2219
/* Service up to @budget TX completions for the given TX object.
 *
 * Frees the wrbs of completed skbs, acks the CQ, and re-wakes netdev
 * subqueue @idx if it was stopped and at least half the TX ring is
 * free again.
 *
 * Returns true when fewer than @budget completions were found, i.e.
 * TX work is fully drained for this poll cycle.
 */
static bool be_process_tx(struct be_adapter *adapter, struct be_tx_obj *txo,
			int budget, int idx)
{
	struct be_eth_tx_compl *txcp;
	int num_wrbs = 0, work_done;

	for (work_done = 0; work_done < budget; work_done++) {
		txcp = be_tx_compl_get(&txo->cq);
		if (!txcp)
			break;
		num_wrbs += be_tx_compl_process(adapter, txo,
				AMAP_GET_BITS(struct amap_eth_tx_compl,
					wrb_index, txcp));
	}

	if (work_done) {
		be_cq_notify(adapter, txo->cq.id, true, work_done);
		atomic_sub(num_wrbs, &txo->q.used);

		/* As Tx wrbs have been freed up, wake up netdev queue
		 * if it was stopped due to lack of tx wrbs. */
		if (__netif_subqueue_stopped(adapter->netdev, idx) &&
			atomic_read(&txo->q.used) < txo->q.len / 2) {
			netif_wake_subqueue(adapter->netdev, idx);
		}

		/* Stats updated under the seqcount for consistent reads */
		u64_stats_update_begin(&tx_stats(txo)->sync_compl);
		tx_stats(txo)->tx_compl += work_done;
		u64_stats_update_end(&tx_stats(txo)->sync_compl);
	}
	return (work_done < budget); /* Done */
}
Sathya Perla3c8def92011-06-12 20:01:58 +00002252
/* NAPI poll handler; one instance runs per event queue.
 *
 * Counts pending EQ events up front, services every TXQ and RXQ that
 * maps onto this EQ (queues are striped across EQs by index), handles
 * MCC completions on the MCC EQ, then either completes NAPI and
 * re-arms the EQ (all work done) or leaves the EQ un-armed so polling
 * continues.
 */
int be_poll(struct napi_struct *napi, int budget)
{
	struct be_eq_obj *eqo = container_of(napi, struct be_eq_obj, napi);
	struct be_adapter *adapter = eqo->adapter;
	int max_work = 0, work, i, num_evts;
	bool tx_done;

	num_evts = events_get(eqo);

	/* Process all TXQs serviced by this EQ */
	for (i = eqo->idx; i < adapter->num_tx_qs; i += adapter->num_evt_qs) {
		tx_done = be_process_tx(adapter, &adapter->tx_obj[i],
					eqo->tx_budget, i);
		/* Unfinished TX forces another poll round */
		if (!tx_done)
			max_work = budget;
	}

	/* This loop will iterate twice for EQ0 in which
	 * completions of the last RXQ (default one) are also processed
	 * For other EQs the loop iterates only once
	 */
	for (i = eqo->idx; i < adapter->num_rx_qs; i += adapter->num_evt_qs) {
		work = be_process_rx(&adapter->rx_obj[i], napi, budget);
		max_work = max(work, max_work);
	}

	if (is_mcc_eqo(eqo))
		be_process_mcc(adapter);

	if (max_work < budget) {
		napi_complete(napi);
		/* All done: ack counted events and re-arm the EQ */
		be_eq_notify(adapter, eqo->q.id, true, false, num_evts);
	} else {
		/* As we'll continue in polling mode, count and clear events */
		be_eq_notify(adapter, eqo->q.id, false, false, num_evts);
	}
	return max_work;
}
2291
Padmanabh Ratnakarf67ef7b2012-07-12 03:57:09 +00002292void be_detect_error(struct be_adapter *adapter)
Ajit Khaparde7c185272010-07-29 06:16:33 +00002293{
Padmanabh Ratnakare1cfb672011-11-03 01:50:08 +00002294 u32 ue_lo = 0, ue_hi = 0, ue_lo_mask = 0, ue_hi_mask = 0;
2295 u32 sliport_status = 0, sliport_err1 = 0, sliport_err2 = 0;
Ajit Khaparde7c185272010-07-29 06:16:33 +00002296 u32 i;
2297
Sathya Perlad23e9462012-12-17 19:38:51 +00002298 if (be_hw_error(adapter))
Sathya Perla72f02482011-11-10 19:17:58 +00002299 return;
2300
Padmanabh Ratnakare1cfb672011-11-03 01:50:08 +00002301 if (lancer_chip(adapter)) {
2302 sliport_status = ioread32(adapter->db + SLIPORT_STATUS_OFFSET);
2303 if (sliport_status & SLIPORT_STATUS_ERR_MASK) {
2304 sliport_err1 = ioread32(adapter->db +
2305 SLIPORT_ERROR1_OFFSET);
2306 sliport_err2 = ioread32(adapter->db +
2307 SLIPORT_ERROR2_OFFSET);
2308 }
2309 } else {
2310 pci_read_config_dword(adapter->pdev,
2311 PCICFG_UE_STATUS_LOW, &ue_lo);
2312 pci_read_config_dword(adapter->pdev,
2313 PCICFG_UE_STATUS_HIGH, &ue_hi);
2314 pci_read_config_dword(adapter->pdev,
2315 PCICFG_UE_STATUS_LOW_MASK, &ue_lo_mask);
2316 pci_read_config_dword(adapter->pdev,
2317 PCICFG_UE_STATUS_HI_MASK, &ue_hi_mask);
Ajit Khaparde7c185272010-07-29 06:16:33 +00002318
Padmanabh Ratnakarf67ef7b2012-07-12 03:57:09 +00002319 ue_lo = (ue_lo & ~ue_lo_mask);
2320 ue_hi = (ue_hi & ~ue_hi_mask);
Padmanabh Ratnakare1cfb672011-11-03 01:50:08 +00002321 }
Ajit Khaparde7c185272010-07-29 06:16:33 +00002322
Ajit Khaparde1451ae62012-10-08 18:18:21 +00002323 /* On certain platforms BE hardware can indicate spurious UEs.
2324 * Allow the h/w to stop working completely in case of a real UE.
2325 * Hence not setting the hw_error for UE detection.
2326 */
2327 if (sliport_status & SLIPORT_STATUS_ERR_MASK) {
Padmanabh Ratnakarf67ef7b2012-07-12 03:57:09 +00002328 adapter->hw_error = true;
Sathya Perla434b3642011-11-10 19:17:59 +00002329 dev_err(&adapter->pdev->dev,
Padmanabh Ratnakarf67ef7b2012-07-12 03:57:09 +00002330 "Error detected in the card\n");
2331 }
2332
2333 if (sliport_status & SLIPORT_STATUS_ERR_MASK) {
2334 dev_err(&adapter->pdev->dev,
2335 "ERR: sliport status 0x%x\n", sliport_status);
2336 dev_err(&adapter->pdev->dev,
2337 "ERR: sliport error1 0x%x\n", sliport_err1);
2338 dev_err(&adapter->pdev->dev,
2339 "ERR: sliport error2 0x%x\n", sliport_err2);
Ajit Khaparded053de92010-09-03 06:23:30 +00002340 }
2341
Padmanabh Ratnakare1cfb672011-11-03 01:50:08 +00002342 if (ue_lo) {
2343 for (i = 0; ue_lo; ue_lo >>= 1, i++) {
2344 if (ue_lo & 1)
Ajit Khaparde7c185272010-07-29 06:16:33 +00002345 dev_err(&adapter->pdev->dev,
2346 "UE: %s bit set\n", ue_status_low_desc[i]);
2347 }
2348 }
Padmanabh Ratnakarf67ef7b2012-07-12 03:57:09 +00002349
Padmanabh Ratnakare1cfb672011-11-03 01:50:08 +00002350 if (ue_hi) {
2351 for (i = 0; ue_hi; ue_hi >>= 1, i++) {
2352 if (ue_hi & 1)
Ajit Khaparde7c185272010-07-29 06:16:33 +00002353 dev_err(&adapter->pdev->dev,
2354 "UE: %s bit set\n", ue_status_hi_desc[i]);
2355 }
2356 }
2357
2358}
2359
Sathya Perla8d56ff12009-11-22 22:02:26 +00002360static void be_msix_disable(struct be_adapter *adapter)
2361{
Sathya Perlaac6a0c42011-03-21 20:49:25 +00002362 if (msix_enabled(adapter)) {
Sathya Perla8d56ff12009-11-22 22:02:26 +00002363 pci_disable_msix(adapter->pdev);
Sathya Perlaac6a0c42011-03-21 20:49:25 +00002364 adapter->num_msix_vec = 0;
Sathya Perla3abcded2010-10-03 22:12:27 -07002365 }
2366}
2367
Sathya Perla10ef9ab2012-02-09 18:05:27 +00002368static uint be_num_rss_want(struct be_adapter *adapter)
2369{
Yuval Mintz30e80b52012-07-01 03:19:00 +00002370 u32 num = 0;
Padmanabh Ratnakarabb93952012-10-20 06:01:41 +00002371
Sathya Perla10ef9ab2012-02-09 18:05:27 +00002372 if ((adapter->function_caps & BE_FUNCTION_CAPS_RSS) &&
Padmanabh Ratnakarabb93952012-10-20 06:01:41 +00002373 (lancer_chip(adapter) ||
2374 (!sriov_want(adapter) && be_physfn(adapter)))) {
2375 num = adapter->max_rss_queues;
Yuval Mintz30e80b52012-07-01 03:19:00 +00002376 num = min_t(u32, num, (u32)netif_get_num_default_rss_queues());
2377 }
2378 return num;
Sathya Perla10ef9ab2012-02-09 18:05:27 +00002379}
2380
/* Try to enable MSI-x with one vector per desired RSS ring (plus
 * RoCE vectors when supported).  If the PCI core grants fewer
 * vectors, retry with the granted count.  On complete failure the
 * adapter stays on INTx (num_msix_vec remains 0).
 */
static void be_msix_enable(struct be_adapter *adapter)
{
#define BE_MIN_MSIX_VECTORS 1
	int i, status, num_vec, num_roce_vec = 0;
	struct device *dev = &adapter->pdev->dev;

	/* If RSS queues are not used, need a vec for default RX Q */
	num_vec = min(be_num_rss_want(adapter), num_online_cpus());
	if (be_roce_supported(adapter)) {
		num_roce_vec = min_t(u32, MAX_ROCE_MSIX_VECTORS,
					(num_online_cpus() + 1));
		num_roce_vec = min(num_roce_vec, MAX_ROCE_EQS);
		num_vec += num_roce_vec;
		num_vec = min(num_vec, MAX_MSIX_VECTORS);
	}
	num_vec = max(num_vec, BE_MIN_MSIX_VECTORS);

	for (i = 0; i < num_vec; i++)
		adapter->msix_entries[i].entry = i;

	/* pci_enable_msix() returns 0 on success or, when fewer vectors
	 * are available than requested, the number available
	 */
	status = pci_enable_msix(adapter->pdev, adapter->msix_entries, num_vec);
	if (status == 0) {
		goto done;
	} else if (status >= BE_MIN_MSIX_VECTORS) {
		/* Retry with the reduced vector count */
		num_vec = status;
		if (pci_enable_msix(adapter->pdev, adapter->msix_entries,
				num_vec) == 0)
			goto done;
	}

	dev_warn(dev, "MSIx enable failed\n");
	return;
done:
	/* Split the granted vectors between the NIC and RoCE halves */
	if (be_roce_supported(adapter)) {
		if (num_vec > num_roce_vec) {
			adapter->num_msix_vec = num_vec - num_roce_vec;
			adapter->num_msix_roce_vec =
				num_vec - adapter->num_msix_vec;
		} else {
			adapter->num_msix_vec = num_vec;
			adapter->num_msix_roce_vec = 0;
		}
	} else
		adapter->num_msix_vec = num_vec;
	dev_info(dev, "enabled %d MSI-x vector(s)\n", adapter->num_msix_vec);
	return;
}
2428
Sathya Perlafe6d2a32010-11-21 23:25:50 +00002429static inline int be_msix_vec_get(struct be_adapter *adapter,
Sathya Perla10ef9ab2012-02-09 18:05:27 +00002430 struct be_eq_obj *eqo)
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002431{
Sathya Perla10ef9ab2012-02-09 18:05:27 +00002432 return adapter->msix_entries[eqo->idx].vector;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002433}
2434
/* Request one MSI-x IRQ per event queue, named "<netdev>-q<N>".
 * On failure, unwinds the vectors already requested and disables
 * MSI-x so the caller can fall back to INTx.
 *
 * Returns 0 on success or the request_irq() error code.
 */
static int be_msix_register(struct be_adapter *adapter)
{
	struct net_device *netdev = adapter->netdev;
	struct be_eq_obj *eqo;
	int status, i, vec;

	for_all_evt_queues(adapter, eqo, i) {
		sprintf(eqo->desc, "%s-q%d", netdev->name, i);
		vec = be_msix_vec_get(adapter, eqo);
		status = request_irq(vec, be_msix, 0, eqo->desc, eqo);
		if (status)
			goto err_msix;
	}

	return 0;
err_msix:
	/* Free only the IRQs requested before the failing index */
	for (i--, eqo = &adapter->eq_obj[i]; i >= 0; i--, eqo--)
		free_irq(be_msix_vec_get(adapter, eqo), eqo);
	dev_warn(&adapter->pdev->dev, "MSIX Request IRQ failed - err %d\n",
		status);
	be_msix_disable(adapter);
	return status;
}
2458
2459static int be_irq_register(struct be_adapter *adapter)
2460{
2461 struct net_device *netdev = adapter->netdev;
2462 int status;
2463
Sathya Perlaac6a0c42011-03-21 20:49:25 +00002464 if (msix_enabled(adapter)) {
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002465 status = be_msix_register(adapter);
2466 if (status == 0)
2467 goto done;
Sarveshwar Bandiba343c72010-03-31 02:56:12 +00002468 /* INTx is not supported for VF */
2469 if (!be_physfn(adapter))
2470 return status;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002471 }
2472
Sathya Perlae49cc342012-11-27 19:50:02 +00002473 /* INTx: only the first EQ is used */
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002474 netdev->irq = adapter->pdev->irq;
2475 status = request_irq(netdev->irq, be_intx, IRQF_SHARED, netdev->name,
Sathya Perlae49cc342012-11-27 19:50:02 +00002476 &adapter->eq_obj[0]);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002477 if (status) {
2478 dev_err(&adapter->pdev->dev,
2479 "INTx request IRQ failed - err %d\n", status);
2480 return status;
2481 }
2482done:
2483 adapter->isr_registered = true;
2484 return 0;
2485}
2486
2487static void be_irq_unregister(struct be_adapter *adapter)
2488{
2489 struct net_device *netdev = adapter->netdev;
Sathya Perla10ef9ab2012-02-09 18:05:27 +00002490 struct be_eq_obj *eqo;
Sathya Perla3abcded2010-10-03 22:12:27 -07002491 int i;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002492
2493 if (!adapter->isr_registered)
2494 return;
2495
2496 /* INTx */
Sathya Perlaac6a0c42011-03-21 20:49:25 +00002497 if (!msix_enabled(adapter)) {
Sathya Perlae49cc342012-11-27 19:50:02 +00002498 free_irq(netdev->irq, &adapter->eq_obj[0]);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002499 goto done;
2500 }
2501
2502 /* MSIx */
Sathya Perla10ef9ab2012-02-09 18:05:27 +00002503 for_all_evt_queues(adapter, eqo, i)
2504 free_irq(be_msix_vec_get(adapter, eqo), eqo);
Sathya Perla3abcded2010-10-03 22:12:27 -07002505
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002506done:
2507 adapter->isr_registered = false;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002508}
2509
Sathya Perla10ef9ab2012-02-09 18:05:27 +00002510static void be_rx_qs_destroy(struct be_adapter *adapter)
Sathya Perla482c9e72011-06-29 23:33:17 +00002511{
2512 struct be_queue_info *q;
2513 struct be_rx_obj *rxo;
2514 int i;
2515
2516 for_all_rx_queues(adapter, rxo, i) {
2517 q = &rxo->q;
2518 if (q->created) {
2519 be_cmd_rxq_destroy(adapter, q);
2520 /* After the rxq is invalidated, wait for a grace time
2521 * of 1ms for all dma to end and the flush compl to
2522 * arrive
2523 */
2524 mdelay(1);
Sathya Perla10ef9ab2012-02-09 18:05:27 +00002525 be_rx_cq_clean(rxo);
Sathya Perla482c9e72011-06-29 23:33:17 +00002526 }
Sathya Perla10ef9ab2012-02-09 18:05:27 +00002527 be_queue_free(adapter, q);
Sathya Perla482c9e72011-06-29 23:33:17 +00002528 }
2529}
2530
/* ndo_stop handler: quiesce the interface.
 *
 * Ordering matters: NAPI and async MCC are disabled first so no new
 * completions get processed, pending TX completions are drained, RX
 * queues are destroyed, and then each EQ is synchronized against its
 * IRQ and cleaned before the IRQs are released.
 */
static int be_close(struct net_device *netdev)
{
	struct be_adapter *adapter = netdev_priv(netdev);
	struct be_eq_obj *eqo;
	int i;

	be_roce_dev_close(adapter);

	for_all_evt_queues(adapter, eqo, i)
		napi_disable(&eqo->napi);

	be_async_mcc_disable(adapter);

	/* Wait for all pending tx completions to arrive so that
	 * all tx skbs are freed.
	 */
	be_tx_compl_clean(adapter);

	be_rx_qs_destroy(adapter);

	for_all_evt_queues(adapter, eqo, i) {
		/* Make sure no in-flight interrupt handler still
		 * references the EQ before cleaning it
		 */
		if (msix_enabled(adapter))
			synchronize_irq(be_msix_vec_get(adapter, eqo));
		else
			synchronize_irq(netdev->irq);
		be_eq_clean(eqo);
	}

	be_irq_unregister(adapter);

	return 0;
}
2563
/* Allocate and create all RX queues.  The default (non-RSS) RXQ is
 * created first per FW requirement.  With multiple rings, the
 * 128-entry RSS indirection table is filled round-robin with the RSS
 * ring ids and hashing is configured (UDP hashing only on chips newer
 * than BEx).  Finally every ring is seeded with RX buffers.
 */
static int be_rx_qs_create(struct be_adapter *adapter)
{
	struct be_rx_obj *rxo;
	int rc, i, j;
	u8 rsstable[128];

	for_all_rx_queues(adapter, rxo, i) {
		rc = be_queue_alloc(adapter, &rxo->q, RX_Q_LEN,
				sizeof(struct be_eth_rx_d));
		if (rc)
			return rc;
	}

	/* The FW would like the default RXQ to be created first */
	rxo = default_rxo(adapter);
	rc = be_cmd_rxq_create(adapter, &rxo->q, rxo->cq.id, rx_frag_size,
			adapter->if_handle, false, &rxo->rss_id);
	if (rc)
		return rc;

	for_all_rss_queues(adapter, rxo, i) {
		rc = be_cmd_rxq_create(adapter, &rxo->q, rxo->cq.id,
				rx_frag_size, adapter->if_handle,
				true, &rxo->rss_id);
		if (rc)
			return rc;
	}

	if (be_multi_rxq(adapter)) {
		/* Stripe the RSS ring ids across the 128-slot table */
		for (j = 0; j < 128; j += adapter->num_rx_qs - 1) {
			for_all_rss_queues(adapter, rxo, i) {
				if ((j + i) >= 128)
					break;
				rsstable[j + i] = rxo->rss_id;
			}
		}
		adapter->rss_flags = RSS_ENABLE_TCP_IPV4 | RSS_ENABLE_IPV4 |
					RSS_ENABLE_TCP_IPV6 | RSS_ENABLE_IPV6;

		/* UDP RSS hashing is not available on BEx chips */
		if (!BEx_chip(adapter))
			adapter->rss_flags |= RSS_ENABLE_UDP_IPV4 |
						RSS_ENABLE_UDP_IPV6;

		rc = be_cmd_rss_config(adapter, rsstable, adapter->rss_flags,
				       128);
		if (rc) {
			/* Record that RSS is off so readers of rss_flags
			 * see a consistent state
			 */
			adapter->rss_flags = 0;
			return rc;
		}
	}

	/* First time posting */
	for_all_rx_queues(adapter, rxo, i)
		be_post_rx_frags(rxo, GFP_KERNEL);
	return 0;
}
2620
/* ndo_open handler: bring the interface up.
 *
 * Creates the RX queues, registers IRQs, arms all RX/TX completion
 * queues, enables async MCC processing, enables NAPI and arms the
 * EQs, then reports the current link state and opens the RoCE side.
 */
static int be_open(struct net_device *netdev)
{
	struct be_adapter *adapter = netdev_priv(netdev);
	struct be_eq_obj *eqo;
	struct be_rx_obj *rxo;
	struct be_tx_obj *txo;
	u8 link_status;
	int status, i;

	status = be_rx_qs_create(adapter);
	if (status)
		goto err;

	/* NOTE(review): be_irq_register()'s return value is ignored
	 * here — presumably failures surface later; confirm intent.
	 */
	be_irq_register(adapter);

	for_all_rx_queues(adapter, rxo, i)
		be_cq_notify(adapter, rxo->cq.id, true, 0);

	for_all_tx_queues(adapter, txo, i)
		be_cq_notify(adapter, txo->cq.id, true, 0);

	be_async_mcc_enable(adapter);

	for_all_evt_queues(adapter, eqo, i) {
		napi_enable(&eqo->napi);
		be_eq_notify(adapter, eqo->q.id, true, false, 0);
	}

	/* Link status failure is non-fatal; just skip the update */
	status = be_cmd_link_status_query(adapter, NULL, &link_status, 0);
	if (!status)
		be_link_status_update(adapter, link_status);

	be_roce_dev_open(adapter);
	return 0;
err:
	be_close(adapter->netdev);
	return -EIO;
}
2659
Ajit Khaparde71d8d1b2009-12-03 06:16:59 +00002660static int be_setup_wol(struct be_adapter *adapter, bool enable)
2661{
2662 struct be_dma_mem cmd;
2663 int status = 0;
2664 u8 mac[ETH_ALEN];
2665
2666 memset(mac, 0, ETH_ALEN);
2667
2668 cmd.size = sizeof(struct be_cmd_req_acpi_wol_magic_config);
Ivan Vecera2b7bceb2011-02-02 08:05:12 +00002669 cmd.va = dma_alloc_coherent(&adapter->pdev->dev, cmd.size, &cmd.dma,
Joe Perches1f9061d22013-03-15 07:23:58 +00002670 GFP_KERNEL | __GFP_ZERO);
Ajit Khaparde71d8d1b2009-12-03 06:16:59 +00002671 if (cmd.va == NULL)
2672 return -1;
Ajit Khaparde71d8d1b2009-12-03 06:16:59 +00002673
2674 if (enable) {
2675 status = pci_write_config_dword(adapter->pdev,
2676 PCICFG_PM_CONTROL_OFFSET, PCICFG_PM_CONTROL_MASK);
2677 if (status) {
2678 dev_err(&adapter->pdev->dev,
Frans Pop2381a552010-03-24 07:57:36 +00002679 "Could not enable Wake-on-lan\n");
Ivan Vecera2b7bceb2011-02-02 08:05:12 +00002680 dma_free_coherent(&adapter->pdev->dev, cmd.size, cmd.va,
2681 cmd.dma);
Ajit Khaparde71d8d1b2009-12-03 06:16:59 +00002682 return status;
2683 }
2684 status = be_cmd_enable_magic_wol(adapter,
2685 adapter->netdev->dev_addr, &cmd);
2686 pci_enable_wake(adapter->pdev, PCI_D3hot, 1);
2687 pci_enable_wake(adapter->pdev, PCI_D3cold, 1);
2688 } else {
2689 status = be_cmd_enable_magic_wol(adapter, mac, &cmd);
2690 pci_enable_wake(adapter->pdev, PCI_D3hot, 0);
2691 pci_enable_wake(adapter->pdev, PCI_D3cold, 0);
2692 }
2693
Ivan Vecera2b7bceb2011-02-02 08:05:12 +00002694 dma_free_coherent(&adapter->pdev->dev, cmd.size, cmd.va, cmd.dma);
Ajit Khaparde71d8d1b2009-12-03 06:16:59 +00002695 return status;
2696}
2697
Ajit Khaparde6d87f5c2010-08-25 00:32:33 +00002698/*
2699 * Generate a seed MAC address from the PF MAC Address using jhash.
2700 * MAC Address for VFs are assigned incrementally starting from the seed.
2701 * These addresses are programmed in the ASIC by the PF and the VF driver
2702 * queries for the MAC address during its probe.
2703 */
Sathya Perla4c876612013-02-03 20:30:11 +00002704static int be_vf_eth_addr_config(struct be_adapter *adapter)
Ajit Khaparde6d87f5c2010-08-25 00:32:33 +00002705{
Sathya Perlaf9449ab2011-10-24 02:45:01 +00002706 u32 vf;
Sathya Perla3abcded2010-10-03 22:12:27 -07002707 int status = 0;
Ajit Khaparde6d87f5c2010-08-25 00:32:33 +00002708 u8 mac[ETH_ALEN];
Sathya Perla11ac75e2011-12-13 00:58:50 +00002709 struct be_vf_cfg *vf_cfg;
Ajit Khaparde6d87f5c2010-08-25 00:32:33 +00002710
2711 be_vf_eth_addr_generate(adapter, mac);
2712
Sathya Perla11ac75e2011-12-13 00:58:50 +00002713 for_all_vfs(adapter, vf_cfg, vf) {
Padmanabh Ratnakar590c3912011-11-25 05:47:26 +00002714 if (lancer_chip(adapter)) {
2715 status = be_cmd_set_mac_list(adapter, mac, 1, vf + 1);
2716 } else {
2717 status = be_cmd_pmac_add(adapter, mac,
Sathya Perla11ac75e2011-12-13 00:58:50 +00002718 vf_cfg->if_handle,
2719 &vf_cfg->pmac_id, vf + 1);
Padmanabh Ratnakar590c3912011-11-25 05:47:26 +00002720 }
2721
Ajit Khaparde6d87f5c2010-08-25 00:32:33 +00002722 if (status)
2723 dev_err(&adapter->pdev->dev,
Padmanabh Ratnakar590c3912011-11-25 05:47:26 +00002724 "Mac address assignment failed for VF %d\n", vf);
Ajit Khaparde6d87f5c2010-08-25 00:32:33 +00002725 else
Sathya Perla11ac75e2011-12-13 00:58:50 +00002726 memcpy(vf_cfg->mac_addr, mac, ETH_ALEN);
Ajit Khaparde6d87f5c2010-08-25 00:32:33 +00002727
2728 mac[5] += 1;
2729 }
2730 return status;
2731}
2732
Sathya Perla4c876612013-02-03 20:30:11 +00002733static int be_vfs_mac_query(struct be_adapter *adapter)
2734{
2735 int status, vf;
2736 u8 mac[ETH_ALEN];
2737 struct be_vf_cfg *vf_cfg;
2738 bool active;
2739
2740 for_all_vfs(adapter, vf_cfg, vf) {
2741 be_cmd_get_mac_from_list(adapter, mac, &active,
2742 &vf_cfg->pmac_id, 0);
2743
2744 status = be_cmd_mac_addr_query(adapter, mac, false,
2745 vf_cfg->if_handle, 0);
2746 if (status)
2747 return status;
2748 memcpy(vf_cfg->mac_addr, mac, ETH_ALEN);
2749 }
2750 return 0;
2751}
2752
Sathya Perlaf9449ab2011-10-24 02:45:01 +00002753static void be_vf_clear(struct be_adapter *adapter)
Ajit Khaparde6d87f5c2010-08-25 00:32:33 +00002754{
Sathya Perla11ac75e2011-12-13 00:58:50 +00002755 struct be_vf_cfg *vf_cfg;
Ajit Khaparde6d87f5c2010-08-25 00:32:33 +00002756 u32 vf;
2757
Sathya Perla39f1d942012-05-08 19:41:24 +00002758 if (be_find_vfs(adapter, ASSIGNED)) {
Sathya Perla4c876612013-02-03 20:30:11 +00002759 dev_warn(&adapter->pdev->dev,
2760 "VFs are assigned to VMs: not disabling VFs\n");
Sathya Perla39f1d942012-05-08 19:41:24 +00002761 goto done;
2762 }
2763
Sathya Perla11ac75e2011-12-13 00:58:50 +00002764 for_all_vfs(adapter, vf_cfg, vf) {
Padmanabh Ratnakar590c3912011-11-25 05:47:26 +00002765 if (lancer_chip(adapter))
2766 be_cmd_set_mac_list(adapter, NULL, 0, vf + 1);
2767 else
Sathya Perla11ac75e2011-12-13 00:58:50 +00002768 be_cmd_pmac_del(adapter, vf_cfg->if_handle,
2769 vf_cfg->pmac_id, vf + 1);
Sathya Perlaf9449ab2011-10-24 02:45:01 +00002770
Sathya Perla11ac75e2011-12-13 00:58:50 +00002771 be_cmd_if_destroy(adapter, vf_cfg->if_handle, vf + 1);
2772 }
Sathya Perla39f1d942012-05-08 19:41:24 +00002773 pci_disable_sriov(adapter->pdev);
2774done:
2775 kfree(adapter->vf_cfg);
2776 adapter->num_vfs = 0;
Ajit Khaparde6d87f5c2010-08-25 00:32:33 +00002777}
2778
Sathya Perlaa54769f2011-10-24 02:45:00 +00002779static int be_clear(struct be_adapter *adapter)
2780{
Ajit Khapardefbc13f02012-03-18 06:23:21 +00002781 int i = 1;
2782
Sathya Perla191eb752012-02-23 18:50:13 +00002783 if (adapter->flags & BE_FLAGS_WORKER_SCHEDULED) {
2784 cancel_delayed_work_sync(&adapter->work);
2785 adapter->flags &= ~BE_FLAGS_WORKER_SCHEDULED;
2786 }
2787
Sathya Perla11ac75e2011-12-13 00:58:50 +00002788 if (sriov_enabled(adapter))
Sathya Perlaf9449ab2011-10-24 02:45:01 +00002789 be_vf_clear(adapter);
2790
Ajit Khapardefbc13f02012-03-18 06:23:21 +00002791 for (; adapter->uc_macs > 0; adapter->uc_macs--, i++)
2792 be_cmd_pmac_del(adapter, adapter->if_handle,
2793 adapter->pmac_id[i], 0);
2794
Sathya Perlaf9449ab2011-10-24 02:45:01 +00002795 be_cmd_if_destroy(adapter, adapter->if_handle, 0);
Sathya Perlaa54769f2011-10-24 02:45:00 +00002796
2797 be_mcc_queues_destroy(adapter);
Sathya Perla10ef9ab2012-02-09 18:05:27 +00002798 be_rx_cqs_destroy(adapter);
Sathya Perlaa54769f2011-10-24 02:45:00 +00002799 be_tx_queues_destroy(adapter);
Sathya Perla10ef9ab2012-02-09 18:05:27 +00002800 be_evt_queues_destroy(adapter);
Sathya Perlaa54769f2011-10-24 02:45:00 +00002801
Padmanabh Ratnakarabb93952012-10-20 06:01:41 +00002802 kfree(adapter->pmac_id);
2803 adapter->pmac_id = NULL;
2804
Sathya Perla10ef9ab2012-02-09 18:05:27 +00002805 be_msix_disable(adapter);
Sathya Perlaa54769f2011-10-24 02:45:00 +00002806 return 0;
2807}
2808
Sathya Perla4c876612013-02-03 20:30:11 +00002809static int be_vfs_if_create(struct be_adapter *adapter)
Padmanabh Ratnakarabb93952012-10-20 06:01:41 +00002810{
Sathya Perla4c876612013-02-03 20:30:11 +00002811 struct be_vf_cfg *vf_cfg;
2812 u32 cap_flags, en_flags, vf;
Padmanabh Ratnakarabb93952012-10-20 06:01:41 +00002813 int status;
2814
Sathya Perla4c876612013-02-03 20:30:11 +00002815 cap_flags = BE_IF_FLAGS_UNTAGGED | BE_IF_FLAGS_BROADCAST |
2816 BE_IF_FLAGS_MULTICAST;
Padmanabh Ratnakarabb93952012-10-20 06:01:41 +00002817
Sathya Perla4c876612013-02-03 20:30:11 +00002818 for_all_vfs(adapter, vf_cfg, vf) {
2819 if (!BE3_chip(adapter))
Vasundhara Volama05f99d2013-04-21 23:28:17 +00002820 be_cmd_get_profile_config(adapter, &cap_flags,
2821 NULL, vf + 1);
Sathya Perla4c876612013-02-03 20:30:11 +00002822
2823 /* If a FW profile exists, then cap_flags are updated */
2824 en_flags = cap_flags & (BE_IF_FLAGS_UNTAGGED |
2825 BE_IF_FLAGS_BROADCAST | BE_IF_FLAGS_MULTICAST);
2826 status = be_cmd_if_create(adapter, cap_flags, en_flags,
2827 &vf_cfg->if_handle, vf + 1);
2828 if (status)
2829 goto err;
2830 }
2831err:
2832 return status;
Padmanabh Ratnakarabb93952012-10-20 06:01:41 +00002833}
2834
Sathya Perla39f1d942012-05-08 19:41:24 +00002835static int be_vf_setup_init(struct be_adapter *adapter)
Sathya Perla30128032011-11-10 19:17:57 +00002836{
Sathya Perla11ac75e2011-12-13 00:58:50 +00002837 struct be_vf_cfg *vf_cfg;
Sathya Perla30128032011-11-10 19:17:57 +00002838 int vf;
2839
Sathya Perla39f1d942012-05-08 19:41:24 +00002840 adapter->vf_cfg = kcalloc(adapter->num_vfs, sizeof(*vf_cfg),
2841 GFP_KERNEL);
2842 if (!adapter->vf_cfg)
2843 return -ENOMEM;
2844
Sathya Perla11ac75e2011-12-13 00:58:50 +00002845 for_all_vfs(adapter, vf_cfg, vf) {
2846 vf_cfg->if_handle = -1;
2847 vf_cfg->pmac_id = -1;
Sathya Perla30128032011-11-10 19:17:57 +00002848 }
Sathya Perla39f1d942012-05-08 19:41:24 +00002849 return 0;
Sathya Perla30128032011-11-10 19:17:57 +00002850}
2851
Sathya Perlaf9449ab2011-10-24 02:45:01 +00002852static int be_vf_setup(struct be_adapter *adapter)
2853{
Sathya Perla11ac75e2011-12-13 00:58:50 +00002854 struct be_vf_cfg *vf_cfg;
Ajit Khapardef1f3ee12012-03-18 06:23:41 +00002855 u16 def_vlan, lnk_speed;
Sathya Perla4c876612013-02-03 20:30:11 +00002856 int status, old_vfs, vf;
2857 struct device *dev = &adapter->pdev->dev;
Sathya Perlaf9449ab2011-10-24 02:45:01 +00002858
Sathya Perla4c876612013-02-03 20:30:11 +00002859 old_vfs = be_find_vfs(adapter, ENABLED);
2860 if (old_vfs) {
2861 dev_info(dev, "%d VFs are already enabled\n", old_vfs);
2862 if (old_vfs != num_vfs)
2863 dev_warn(dev, "Ignoring num_vfs=%d setting\n", num_vfs);
2864 adapter->num_vfs = old_vfs;
Sathya Perla39f1d942012-05-08 19:41:24 +00002865 } else {
Sathya Perla4c876612013-02-03 20:30:11 +00002866 if (num_vfs > adapter->dev_num_vfs)
2867 dev_info(dev, "Device supports %d VFs and not %d\n",
2868 adapter->dev_num_vfs, num_vfs);
2869 adapter->num_vfs = min_t(u16, num_vfs, adapter->dev_num_vfs);
2870
2871 status = pci_enable_sriov(adapter->pdev, num_vfs);
2872 if (status) {
2873 dev_err(dev, "SRIOV enable failed\n");
2874 adapter->num_vfs = 0;
2875 return 0;
2876 }
Sathya Perla39f1d942012-05-08 19:41:24 +00002877 }
2878
2879 status = be_vf_setup_init(adapter);
2880 if (status)
2881 goto err;
Sathya Perla30128032011-11-10 19:17:57 +00002882
Sathya Perla4c876612013-02-03 20:30:11 +00002883 if (old_vfs) {
2884 for_all_vfs(adapter, vf_cfg, vf) {
2885 status = be_cmd_get_if_id(adapter, vf_cfg, vf);
2886 if (status)
2887 goto err;
2888 }
2889 } else {
2890 status = be_vfs_if_create(adapter);
Sathya Perlaf9449ab2011-10-24 02:45:01 +00002891 if (status)
2892 goto err;
Sathya Perlaf9449ab2011-10-24 02:45:01 +00002893 }
2894
Sathya Perla4c876612013-02-03 20:30:11 +00002895 if (old_vfs) {
2896 status = be_vfs_mac_query(adapter);
2897 if (status)
2898 goto err;
2899 } else {
Sathya Perla39f1d942012-05-08 19:41:24 +00002900 status = be_vf_eth_addr_config(adapter);
2901 if (status)
2902 goto err;
2903 }
Sathya Perlaf9449ab2011-10-24 02:45:01 +00002904
Sathya Perla11ac75e2011-12-13 00:58:50 +00002905 for_all_vfs(adapter, vf_cfg, vf) {
Sathya Perla4c876612013-02-03 20:30:11 +00002906 /* BE3 FW, by default, caps VF TX-rate to 100mbps.
2907 * Allow full available bandwidth
2908 */
2909 if (BE3_chip(adapter) && !old_vfs)
2910 be_cmd_set_qos(adapter, 1000, vf+1);
2911
2912 status = be_cmd_link_status_query(adapter, &lnk_speed,
2913 NULL, vf + 1);
2914 if (!status)
2915 vf_cfg->tx_rate = lnk_speed;
Ajit Khapardef1f3ee12012-03-18 06:23:41 +00002916
2917 status = be_cmd_get_hsw_config(adapter, &def_vlan,
Sathya Perla4c876612013-02-03 20:30:11 +00002918 vf + 1, vf_cfg->if_handle);
Ajit Khapardef1f3ee12012-03-18 06:23:41 +00002919 if (status)
2920 goto err;
2921 vf_cfg->def_vid = def_vlan;
Padmanabh Ratnakardcf7ebb2012-10-20 06:03:49 +00002922
2923 be_cmd_enable_vf(adapter, vf + 1);
Sathya Perlaf9449ab2011-10-24 02:45:01 +00002924 }
2925 return 0;
2926err:
Sathya Perla4c876612013-02-03 20:30:11 +00002927 dev_err(dev, "VF setup failed\n");
2928 be_vf_clear(adapter);
Sathya Perlaf9449ab2011-10-24 02:45:01 +00002929 return status;
2930}
2931
Sathya Perla30128032011-11-10 19:17:57 +00002932static void be_setup_init(struct be_adapter *adapter)
2933{
2934 adapter->vlan_prio_bmap = 0xff;
Ajit Khaparde42f11cf2012-04-21 18:53:22 +00002935 adapter->phy.link_speed = -1;
Sathya Perla30128032011-11-10 19:17:57 +00002936 adapter->if_handle = -1;
2937 adapter->be3_native = false;
2938 adapter->promiscuous = false;
Padmanabh Ratnakarf25b1192012-10-20 06:02:52 +00002939 if (be_physfn(adapter))
2940 adapter->cmd_privileges = MAX_PRIVILEGES;
2941 else
2942 adapter->cmd_privileges = MIN_PRIVILEGES;
Sathya Perla30128032011-11-10 19:17:57 +00002943}
2944
Padmanabh Ratnakar1578e772012-06-07 04:37:08 +00002945static int be_get_mac_addr(struct be_adapter *adapter, u8 *mac, u32 if_handle,
2946 bool *active_mac, u32 *pmac_id)
Padmanabh Ratnakar590c3912011-11-25 05:47:26 +00002947{
Padmanabh Ratnakar1578e772012-06-07 04:37:08 +00002948 int status = 0;
Padmanabh Ratnakare5e1ee82012-02-03 09:50:17 +00002949
Padmanabh Ratnakar1578e772012-06-07 04:37:08 +00002950 if (!is_zero_ether_addr(adapter->netdev->perm_addr)) {
2951 memcpy(mac, adapter->netdev->dev_addr, ETH_ALEN);
2952 if (!lancer_chip(adapter) && !be_physfn(adapter))
2953 *active_mac = true;
2954 else
2955 *active_mac = false;
Padmanabh Ratnakare5e1ee82012-02-03 09:50:17 +00002956
Padmanabh Ratnakar1578e772012-06-07 04:37:08 +00002957 return status;
Padmanabh Ratnakare5e1ee82012-02-03 09:50:17 +00002958 }
Padmanabh Ratnakar1578e772012-06-07 04:37:08 +00002959
2960 if (lancer_chip(adapter)) {
2961 status = be_cmd_get_mac_from_list(adapter, mac,
2962 active_mac, pmac_id, 0);
2963 if (*active_mac) {
Sathya Perla5ee49792012-09-28 04:39:41 +00002964 status = be_cmd_mac_addr_query(adapter, mac, false,
2965 if_handle, *pmac_id);
Padmanabh Ratnakar1578e772012-06-07 04:37:08 +00002966 }
2967 } else if (be_physfn(adapter)) {
2968 /* For BE3, for PF get permanent MAC */
Sathya Perla5ee49792012-09-28 04:39:41 +00002969 status = be_cmd_mac_addr_query(adapter, mac, true, 0, 0);
Padmanabh Ratnakar1578e772012-06-07 04:37:08 +00002970 *active_mac = false;
2971 } else {
2972 /* For BE3, for VF get soft MAC assigned by PF*/
Sathya Perla5ee49792012-09-28 04:39:41 +00002973 status = be_cmd_mac_addr_query(adapter, mac, false,
Padmanabh Ratnakar1578e772012-06-07 04:37:08 +00002974 if_handle, 0);
2975 *active_mac = true;
2976 }
Padmanabh Ratnakar590c3912011-11-25 05:47:26 +00002977 return status;
2978}
2979
Padmanabh Ratnakarabb93952012-10-20 06:01:41 +00002980static void be_get_resources(struct be_adapter *adapter)
2981{
Sathya Perla4c876612013-02-03 20:30:11 +00002982 u16 dev_num_vfs;
2983 int pos, status;
Padmanabh Ratnakarabb93952012-10-20 06:01:41 +00002984 bool profile_present = false;
Vasundhara Volama05f99d2013-04-21 23:28:17 +00002985 u16 txq_count = 0;
Padmanabh Ratnakarabb93952012-10-20 06:01:41 +00002986
Sathya Perla4c876612013-02-03 20:30:11 +00002987 if (!BEx_chip(adapter)) {
Padmanabh Ratnakarabb93952012-10-20 06:01:41 +00002988 status = be_cmd_get_func_config(adapter);
Padmanabh Ratnakarabb93952012-10-20 06:01:41 +00002989 if (!status)
2990 profile_present = true;
Vasundhara Volama05f99d2013-04-21 23:28:17 +00002991 } else if (BE3_chip(adapter) && be_physfn(adapter)) {
2992 be_cmd_get_profile_config(adapter, NULL, &txq_count, 0);
Padmanabh Ratnakarabb93952012-10-20 06:01:41 +00002993 }
2994
2995 if (profile_present) {
2996 /* Sanity fixes for Lancer */
2997 adapter->max_pmac_cnt = min_t(u16, adapter->max_pmac_cnt,
2998 BE_UC_PMAC_COUNT);
2999 adapter->max_vlans = min_t(u16, adapter->max_vlans,
3000 BE_NUM_VLANS_SUPPORTED);
3001 adapter->max_mcast_mac = min_t(u16, adapter->max_mcast_mac,
3002 BE_MAX_MC);
3003 adapter->max_tx_queues = min_t(u16, adapter->max_tx_queues,
3004 MAX_TX_QS);
3005 adapter->max_rss_queues = min_t(u16, adapter->max_rss_queues,
3006 BE3_MAX_RSS_QS);
3007 adapter->max_event_queues = min_t(u16,
3008 adapter->max_event_queues,
3009 BE3_MAX_RSS_QS);
3010
3011 if (adapter->max_rss_queues &&
3012 adapter->max_rss_queues == adapter->max_rx_queues)
3013 adapter->max_rss_queues -= 1;
3014
3015 if (adapter->max_event_queues < adapter->max_rss_queues)
3016 adapter->max_rss_queues = adapter->max_event_queues;
3017
3018 } else {
3019 if (be_physfn(adapter))
3020 adapter->max_pmac_cnt = BE_UC_PMAC_COUNT;
3021 else
3022 adapter->max_pmac_cnt = BE_VF_UC_PMAC_COUNT;
3023
3024 if (adapter->function_mode & FLEX10_MODE)
3025 adapter->max_vlans = BE_NUM_VLANS_SUPPORTED/8;
3026 else
3027 adapter->max_vlans = BE_NUM_VLANS_SUPPORTED;
3028
3029 adapter->max_mcast_mac = BE_MAX_MC;
Vasundhara Volama05f99d2013-04-21 23:28:17 +00003030 adapter->max_tx_queues = txq_count ? txq_count : MAX_TX_QS;
3031 adapter->max_tx_queues = min_t(u16, adapter->max_tx_queues,
3032 MAX_TX_QS);
Padmanabh Ratnakarabb93952012-10-20 06:01:41 +00003033 adapter->max_rss_queues = (adapter->be3_native) ?
3034 BE3_MAX_RSS_QS : BE2_MAX_RSS_QS;
3035 adapter->max_event_queues = BE3_MAX_RSS_QS;
3036
3037 adapter->if_cap_flags = BE_IF_FLAGS_UNTAGGED |
3038 BE_IF_FLAGS_BROADCAST |
3039 BE_IF_FLAGS_MULTICAST |
3040 BE_IF_FLAGS_PASS_L3L4_ERRORS |
3041 BE_IF_FLAGS_MCAST_PROMISCUOUS |
3042 BE_IF_FLAGS_VLAN_PROMISCUOUS |
3043 BE_IF_FLAGS_PROMISCUOUS;
3044
3045 if (adapter->function_caps & BE_FUNCTION_CAPS_RSS)
3046 adapter->if_cap_flags |= BE_IF_FLAGS_RSS;
3047 }
Sathya Perla4c876612013-02-03 20:30:11 +00003048
3049 pos = pci_find_ext_capability(adapter->pdev, PCI_EXT_CAP_ID_SRIOV);
3050 if (pos) {
3051 pci_read_config_word(adapter->pdev, pos + PCI_SRIOV_TOTAL_VF,
3052 &dev_num_vfs);
3053 if (BE3_chip(adapter))
3054 dev_num_vfs = min_t(u16, dev_num_vfs, MAX_VFS);
3055 adapter->dev_num_vfs = dev_num_vfs;
3056 }
Padmanabh Ratnakarabb93952012-10-20 06:01:41 +00003057}
3058
Sathya Perla39f1d942012-05-08 19:41:24 +00003059/* Routine to query per function resource limits */
3060static int be_get_config(struct be_adapter *adapter)
3061{
Sathya Perla4c876612013-02-03 20:30:11 +00003062 int status;
Sathya Perla39f1d942012-05-08 19:41:24 +00003063
Padmanabh Ratnakarabb93952012-10-20 06:01:41 +00003064 status = be_cmd_query_fw_cfg(adapter, &adapter->port_num,
3065 &adapter->function_mode,
Vasundhara Volam0ad31572013-04-21 23:28:16 +00003066 &adapter->function_caps,
3067 &adapter->asic_rev);
Padmanabh Ratnakarabb93952012-10-20 06:01:41 +00003068 if (status)
3069 goto err;
3070
3071 be_get_resources(adapter);
3072
3073 /* primary mac needs 1 pmac entry */
3074 adapter->pmac_id = kcalloc(adapter->max_pmac_cnt + 1,
3075 sizeof(u32), GFP_KERNEL);
3076 if (!adapter->pmac_id) {
3077 status = -ENOMEM;
3078 goto err;
3079 }
3080
Padmanabh Ratnakarabb93952012-10-20 06:01:41 +00003081err:
3082 return status;
Sathya Perla39f1d942012-05-08 19:41:24 +00003083}
3084
Sathya Perla5fb379e2009-06-18 00:02:59 +00003085static int be_setup(struct be_adapter *adapter)
3086{
Sathya Perla39f1d942012-05-08 19:41:24 +00003087 struct device *dev = &adapter->pdev->dev;
Padmanabh Ratnakarabb93952012-10-20 06:01:41 +00003088 u32 en_flags;
Sathya Perlaa54769f2011-10-24 02:45:00 +00003089 u32 tx_fc, rx_fc;
Sathya Perla10ef9ab2012-02-09 18:05:27 +00003090 int status;
Sarveshwar Bandiba343c72010-03-31 02:56:12 +00003091 u8 mac[ETH_ALEN];
Padmanabh Ratnakar1578e772012-06-07 04:37:08 +00003092 bool active_mac;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07003093
Sathya Perla30128032011-11-10 19:17:57 +00003094 be_setup_init(adapter);
Sathya Perlaf9449ab2011-10-24 02:45:01 +00003095
Padmanabh Ratnakarabb93952012-10-20 06:01:41 +00003096 if (!lancer_chip(adapter))
3097 be_cmd_req_native_mode(adapter);
Sathya Perla39f1d942012-05-08 19:41:24 +00003098
Padmanabh Ratnakarabb93952012-10-20 06:01:41 +00003099 status = be_get_config(adapter);
3100 if (status)
3101 goto err;
Sathya Perla2dc1deb2011-07-19 19:52:33 +00003102
Sathya Perla10ef9ab2012-02-09 18:05:27 +00003103 be_msix_enable(adapter);
3104
3105 status = be_evt_queues_create(adapter);
3106 if (status)
Sathya Perlaa54769f2011-10-24 02:45:00 +00003107 goto err;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07003108
Sathya Perla10ef9ab2012-02-09 18:05:27 +00003109 status = be_tx_cqs_create(adapter);
3110 if (status)
3111 goto err;
3112
3113 status = be_rx_cqs_create(adapter);
3114 if (status)
Sathya Perlaa54769f2011-10-24 02:45:00 +00003115 goto err;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07003116
Sathya Perla5fb379e2009-06-18 00:02:59 +00003117 status = be_mcc_queues_create(adapter);
Sathya Perla10ef9ab2012-02-09 18:05:27 +00003118 if (status)
Sathya Perlaa54769f2011-10-24 02:45:00 +00003119 goto err;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07003120
Padmanabh Ratnakarf25b1192012-10-20 06:02:52 +00003121 be_cmd_get_fn_privileges(adapter, &adapter->cmd_privileges, 0);
3122 /* In UMC mode FW does not return right privileges.
3123 * Override with correct privilege equivalent to PF.
3124 */
3125 if (be_is_mc(adapter))
3126 adapter->cmd_privileges = MAX_PRIVILEGES;
3127
Sathya Perlaf9449ab2011-10-24 02:45:01 +00003128 en_flags = BE_IF_FLAGS_UNTAGGED | BE_IF_FLAGS_BROADCAST |
3129 BE_IF_FLAGS_MULTICAST | BE_IF_FLAGS_PASS_L3L4_ERRORS;
Padmanabh Ratnakar5d5adb92011-11-16 02:03:32 +00003130
Padmanabh Ratnakarabb93952012-10-20 06:01:41 +00003131 if (adapter->function_caps & BE_FUNCTION_CAPS_RSS)
Sathya Perlaf9449ab2011-10-24 02:45:01 +00003132 en_flags |= BE_IF_FLAGS_RSS;
Padmanabh Ratnakar1578e772012-06-07 04:37:08 +00003133
Padmanabh Ratnakarabb93952012-10-20 06:01:41 +00003134 en_flags = en_flags & adapter->if_cap_flags;
Padmanabh Ratnakar0b13fb42012-07-18 02:51:58 +00003135
Padmanabh Ratnakarabb93952012-10-20 06:01:41 +00003136 status = be_cmd_if_create(adapter, adapter->if_cap_flags, en_flags,
Padmanabh Ratnakar1578e772012-06-07 04:37:08 +00003137 &adapter->if_handle, 0);
Sathya Perlaf9449ab2011-10-24 02:45:01 +00003138 if (status != 0)
3139 goto err;
3140
Padmanabh Ratnakar1578e772012-06-07 04:37:08 +00003141 memset(mac, 0, ETH_ALEN);
3142 active_mac = false;
3143 status = be_get_mac_addr(adapter, mac, adapter->if_handle,
3144 &active_mac, &adapter->pmac_id[0]);
3145 if (status != 0)
3146 goto err;
3147
3148 if (!active_mac) {
3149 status = be_cmd_pmac_add(adapter, mac, adapter->if_handle,
3150 &adapter->pmac_id[0], 0);
3151 if (status != 0)
3152 goto err;
3153 }
3154
3155 if (is_zero_ether_addr(adapter->netdev->dev_addr)) {
3156 memcpy(adapter->netdev->dev_addr, mac, ETH_ALEN);
3157 memcpy(adapter->netdev->perm_addr, mac, ETH_ALEN);
Sathya Perlaf9449ab2011-10-24 02:45:01 +00003158 }
Ajit Khaparde0dffc832009-11-29 17:57:46 +00003159
Sathya Perla10ef9ab2012-02-09 18:05:27 +00003160 status = be_tx_qs_create(adapter);
3161 if (status)
3162 goto err;
3163
Sathya Perla04b71172011-09-27 13:30:27 -04003164 be_cmd_get_fw_ver(adapter, adapter->fw_ver, NULL);
Somnath Kotur5a56eb12011-09-30 07:24:28 +00003165
Sathya Perla1d1e9a42012-06-05 19:37:17 +00003166 if (adapter->vlans_added)
Sathya Perla10329df2012-06-05 19:37:18 +00003167 be_vid_config(adapter);
Sathya Perlaa54769f2011-10-24 02:45:00 +00003168
3169 be_set_rx_mode(adapter->netdev);
3170
Ajit Khapardeddc3f5c2012-04-26 15:42:31 +00003171 be_cmd_get_flow_control(adapter, &tx_fc, &rx_fc);
Padmanabh Ratnakar590c3912011-11-25 05:47:26 +00003172
Ajit Khapardeddc3f5c2012-04-26 15:42:31 +00003173 if (rx_fc != adapter->rx_fc || tx_fc != adapter->tx_fc)
3174 be_cmd_set_flow_control(adapter, adapter->tx_fc,
Sathya Perlaa54769f2011-10-24 02:45:00 +00003175 adapter->rx_fc);
Sathya Perlaa54769f2011-10-24 02:45:00 +00003176
Sathya Perla39f1d942012-05-08 19:41:24 +00003177 if (be_physfn(adapter) && num_vfs) {
3178 if (adapter->dev_num_vfs)
3179 be_vf_setup(adapter);
3180 else
3181 dev_warn(dev, "device doesn't support SRIOV\n");
Sathya Perlaf9449ab2011-10-24 02:45:01 +00003182 }
3183
Padmanabh Ratnakarf25b1192012-10-20 06:02:52 +00003184 status = be_cmd_get_phy_info(adapter);
3185 if (!status && be_pause_supported(adapter))
Ajit Khaparde42f11cf2012-04-21 18:53:22 +00003186 adapter->phy.fc_autoneg = 1;
3187
Sathya Perla191eb752012-02-23 18:50:13 +00003188 schedule_delayed_work(&adapter->work, msecs_to_jiffies(1000));
3189 adapter->flags |= BE_FLAGS_WORKER_SCHEDULED;
Sathya Perlaf9449ab2011-10-24 02:45:01 +00003190 return 0;
Sathya Perlaa54769f2011-10-24 02:45:00 +00003191err:
3192 be_clear(adapter);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07003193 return status;
3194}
3195
Ivan Vecera66268732011-12-08 01:31:21 +00003196#ifdef CONFIG_NET_POLL_CONTROLLER
3197static void be_netpoll(struct net_device *netdev)
3198{
3199 struct be_adapter *adapter = netdev_priv(netdev);
Sathya Perla10ef9ab2012-02-09 18:05:27 +00003200 struct be_eq_obj *eqo;
Ivan Vecera66268732011-12-08 01:31:21 +00003201 int i;
3202
Sathya Perlae49cc342012-11-27 19:50:02 +00003203 for_all_evt_queues(adapter, eqo, i) {
3204 be_eq_notify(eqo->adapter, eqo->q.id, false, true, 0);
3205 napi_schedule(&eqo->napi);
3206 }
Sathya Perla10ef9ab2012-02-09 18:05:27 +00003207
3208 return;
Ivan Vecera66268732011-12-08 01:31:21 +00003209}
3210#endif
3211
Ajit Khaparde84517482009-09-04 03:12:16 +00003212#define FW_FILE_HDR_SIGN "ServerEngines Corp. "
Padmanabh Ratnakarc165541e2012-04-25 01:47:15 +00003213char flash_cookie[2][16] = {"*** SE FLAS", "H DIRECTORY *** "};
3214
Sarveshwar Bandifa9a6fe2009-11-20 14:23:47 -08003215static bool be_flash_redboot(struct be_adapter *adapter,
Ajit Khaparde3f0d4562010-02-09 01:30:35 +00003216 const u8 *p, u32 img_start, int image_size,
3217 int hdr_size)
Sarveshwar Bandifa9a6fe2009-11-20 14:23:47 -08003218{
3219 u32 crc_offset;
3220 u8 flashed_crc[4];
3221 int status;
Ajit Khaparde3f0d4562010-02-09 01:30:35 +00003222
3223 crc_offset = hdr_size + img_start + image_size - 4;
3224
Sarveshwar Bandifa9a6fe2009-11-20 14:23:47 -08003225 p += crc_offset;
Ajit Khaparde3f0d4562010-02-09 01:30:35 +00003226
3227 status = be_cmd_get_flash_crc(adapter, flashed_crc,
Ajit Khapardef510fc62010-03-31 01:47:45 +00003228 (image_size - 4));
Sarveshwar Bandifa9a6fe2009-11-20 14:23:47 -08003229 if (status) {
3230 dev_err(&adapter->pdev->dev,
3231 "could not get crc from flash, not flashing redboot\n");
3232 return false;
3233 }
3234
3235 /*update redboot only if crc does not match*/
3236 if (!memcmp(flashed_crc, p, 4))
3237 return false;
3238 else
3239 return true;
Sarveshwar Bandifa9a6fe2009-11-20 14:23:47 -08003240}
3241
Sathya Perla306f1342011-08-02 19:57:45 +00003242static bool phy_flashing_required(struct be_adapter *adapter)
3243{
Ajit Khaparde42f11cf2012-04-21 18:53:22 +00003244 return (adapter->phy.phy_type == TN_8022 &&
3245 adapter->phy.interface_type == PHY_TYPE_BASET_10GB);
Sathya Perla306f1342011-08-02 19:57:45 +00003246}
3247
Padmanabh Ratnakarc165541e2012-04-25 01:47:15 +00003248static bool is_comp_in_ufi(struct be_adapter *adapter,
3249 struct flash_section_info *fsec, int type)
3250{
3251 int i = 0, img_type = 0;
3252 struct flash_section_info_g2 *fsec_g2 = NULL;
3253
Sathya Perlaca34fe32012-11-06 17:48:56 +00003254 if (BE2_chip(adapter))
Padmanabh Ratnakarc165541e2012-04-25 01:47:15 +00003255 fsec_g2 = (struct flash_section_info_g2 *)fsec;
3256
3257 for (i = 0; i < MAX_FLASH_COMP; i++) {
3258 if (fsec_g2)
3259 img_type = le32_to_cpu(fsec_g2->fsec_entry[i].type);
3260 else
3261 img_type = le32_to_cpu(fsec->fsec_entry[i].type);
3262
3263 if (img_type == type)
3264 return true;
3265 }
3266 return false;
3267
3268}
3269
3270struct flash_section_info *get_fsec_info(struct be_adapter *adapter,
3271 int header_size,
3272 const struct firmware *fw)
3273{
3274 struct flash_section_info *fsec = NULL;
3275 const u8 *p = fw->data;
3276
3277 p += header_size;
3278 while (p < (fw->data + fw->size)) {
3279 fsec = (struct flash_section_info *)p;
3280 if (!memcmp(flash_cookie, fsec->cookie, sizeof(flash_cookie)))
3281 return fsec;
3282 p += 32;
3283 }
3284 return NULL;
3285}
3286
Padmanabh Ratnakar773a2d72012-10-20 06:04:16 +00003287static int be_flash(struct be_adapter *adapter, const u8 *img,
3288 struct be_dma_mem *flash_cmd, int optype, int img_size)
3289{
3290 u32 total_bytes = 0, flash_op, num_bytes = 0;
3291 int status = 0;
3292 struct be_cmd_write_flashrom *req = flash_cmd->va;
3293
3294 total_bytes = img_size;
3295 while (total_bytes) {
3296 num_bytes = min_t(u32, 32*1024, total_bytes);
3297
3298 total_bytes -= num_bytes;
3299
3300 if (!total_bytes) {
3301 if (optype == OPTYPE_PHY_FW)
3302 flash_op = FLASHROM_OPER_PHY_FLASH;
3303 else
3304 flash_op = FLASHROM_OPER_FLASH;
3305 } else {
3306 if (optype == OPTYPE_PHY_FW)
3307 flash_op = FLASHROM_OPER_PHY_SAVE;
3308 else
3309 flash_op = FLASHROM_OPER_SAVE;
3310 }
3311
Padmanabh Ratnakarbe716442012-10-22 23:02:44 +00003312 memcpy(req->data_buf, img, num_bytes);
Padmanabh Ratnakar773a2d72012-10-20 06:04:16 +00003313 img += num_bytes;
3314 status = be_cmd_write_flashrom(adapter, flash_cmd, optype,
3315 flash_op, num_bytes);
3316 if (status) {
3317 if (status == ILLEGAL_IOCTL_REQ &&
3318 optype == OPTYPE_PHY_FW)
3319 break;
3320 dev_err(&adapter->pdev->dev,
3321 "cmd to write to flash rom failed.\n");
3322 return status;
3323 }
3324 }
3325 return 0;
3326}
3327
/* For BE2, BE3 and BE3-R */
static int be_flash_BEx(struct be_adapter *adapter,
			 const struct firmware *fw,
			 struct be_dma_mem *flash_cmd,
			 int num_of_images)

{
	int status = 0, i, filehdr_size = 0;
	/* The flash data follows the file header and the per-image headers;
	 * their combined size is the base offset of each component. */
	int img_hdrs_size = (num_of_images * sizeof(struct image_hdr));
	const u8 *p = fw->data;
	const struct flash_comp *pflashcomp;
	int num_comp, redboot;
	struct flash_section_info *fsec = NULL;

	/* Flashable components of gen3 (BE3) controllers:
	 * { flash offset, flashrom op type, max size, UFI image type } */
	struct flash_comp gen3_flash_types[] = {
		{ FLASH_iSCSI_PRIMARY_IMAGE_START_g3, OPTYPE_ISCSI_ACTIVE,
			FLASH_IMAGE_MAX_SIZE_g3, IMAGE_FIRMWARE_iSCSI},
		{ FLASH_REDBOOT_START_g3, OPTYPE_REDBOOT,
			FLASH_REDBOOT_IMAGE_MAX_SIZE_g3, IMAGE_BOOT_CODE},
		{ FLASH_iSCSI_BIOS_START_g3, OPTYPE_BIOS,
			FLASH_BIOS_IMAGE_MAX_SIZE_g3, IMAGE_OPTION_ROM_ISCSI},
		{ FLASH_PXE_BIOS_START_g3, OPTYPE_PXE_BIOS,
			FLASH_BIOS_IMAGE_MAX_SIZE_g3, IMAGE_OPTION_ROM_PXE},
		{ FLASH_FCoE_BIOS_START_g3, OPTYPE_FCOE_BIOS,
			FLASH_BIOS_IMAGE_MAX_SIZE_g3, IMAGE_OPTION_ROM_FCoE},
		{ FLASH_iSCSI_BACKUP_IMAGE_START_g3, OPTYPE_ISCSI_BACKUP,
			FLASH_IMAGE_MAX_SIZE_g3, IMAGE_FIRMWARE_BACKUP_iSCSI},
		{ FLASH_FCoE_PRIMARY_IMAGE_START_g3, OPTYPE_FCOE_FW_ACTIVE,
			FLASH_IMAGE_MAX_SIZE_g3, IMAGE_FIRMWARE_FCoE},
		{ FLASH_FCoE_BACKUP_IMAGE_START_g3, OPTYPE_FCOE_FW_BACKUP,
			FLASH_IMAGE_MAX_SIZE_g3, IMAGE_FIRMWARE_BACKUP_FCoE},
		{ FLASH_NCSI_START_g3, OPTYPE_NCSI_FW,
			FLASH_NCSI_IMAGE_MAX_SIZE_g3, IMAGE_NCSI},
		{ FLASH_PHY_FW_START_g3, OPTYPE_PHY_FW,
			FLASH_PHY_FW_IMAGE_MAX_SIZE_g3, IMAGE_FIRMWARE_PHY}
	};

	/* Same table for gen2 (BE2) controllers; no NCSI or PHY entries */
	struct flash_comp gen2_flash_types[] = {
		{ FLASH_iSCSI_PRIMARY_IMAGE_START_g2, OPTYPE_ISCSI_ACTIVE,
			FLASH_IMAGE_MAX_SIZE_g2, IMAGE_FIRMWARE_iSCSI},
		{ FLASH_REDBOOT_START_g2, OPTYPE_REDBOOT,
			FLASH_REDBOOT_IMAGE_MAX_SIZE_g2, IMAGE_BOOT_CODE},
		{ FLASH_iSCSI_BIOS_START_g2, OPTYPE_BIOS,
			FLASH_BIOS_IMAGE_MAX_SIZE_g2, IMAGE_OPTION_ROM_ISCSI},
		{ FLASH_PXE_BIOS_START_g2, OPTYPE_PXE_BIOS,
			FLASH_BIOS_IMAGE_MAX_SIZE_g2, IMAGE_OPTION_ROM_PXE},
		{ FLASH_FCoE_BIOS_START_g2, OPTYPE_FCOE_BIOS,
			FLASH_BIOS_IMAGE_MAX_SIZE_g2, IMAGE_OPTION_ROM_FCoE},
		{ FLASH_iSCSI_BACKUP_IMAGE_START_g2, OPTYPE_ISCSI_BACKUP,
			FLASH_IMAGE_MAX_SIZE_g2, IMAGE_FIRMWARE_BACKUP_iSCSI},
		{ FLASH_FCoE_PRIMARY_IMAGE_START_g2, OPTYPE_FCOE_FW_ACTIVE,
			FLASH_IMAGE_MAX_SIZE_g2, IMAGE_FIRMWARE_FCoE},
		{ FLASH_FCoE_BACKUP_IMAGE_START_g2, OPTYPE_FCOE_FW_BACKUP,
			FLASH_IMAGE_MAX_SIZE_g2, IMAGE_FIRMWARE_BACKUP_FCoE}
	};

	if (BE3_chip(adapter)) {
		pflashcomp = gen3_flash_types;
		filehdr_size = sizeof(struct flash_file_hdr_g3);
		num_comp = ARRAY_SIZE(gen3_flash_types);
	} else {
		pflashcomp = gen2_flash_types;
		filehdr_size = sizeof(struct flash_file_hdr_g2);
		num_comp = ARRAY_SIZE(gen2_flash_types);
	}

	/* Get flash section info*/
	fsec = get_fsec_info(adapter, filehdr_size + img_hdrs_size, fw);
	if (!fsec) {
		dev_err(&adapter->pdev->dev,
			"Invalid Cookie. UFI corrupted ?\n");
		return -1;
	}
	for (i = 0; i < num_comp; i++) {
		/* Skip component types not present in this UFI file */
		if (!is_comp_in_ufi(adapter, fsec, pflashcomp[i].img_type))
			continue;

		/* NCSI FW may only be flashed by FW versions >= 3.102.148.0 */
		if ((pflashcomp[i].optype == OPTYPE_NCSI_FW) &&
		    memcmp(adapter->fw_ver, "3.102.148.0", 11) < 0)
			continue;

		if (pflashcomp[i].optype == OPTYPE_PHY_FW &&
		    !phy_flashing_required(adapter))
			continue;

		if (pflashcomp[i].optype == OPTYPE_REDBOOT) {
			/* be_flash_redboot() returns 0 when the on-flash
			 * boot code already matches; skip flashing then */
			redboot = be_flash_redboot(adapter, fw->data,
				pflashcomp[i].offset, pflashcomp[i].size,
				filehdr_size + img_hdrs_size);
			if (!redboot)
				continue;
		}

		p = fw->data;
		p += filehdr_size + pflashcomp[i].offset + img_hdrs_size;
		/* Reject a component that would run past the end of the file */
		if (p + pflashcomp[i].size > fw->data + fw->size)
			return -1;

		status = be_flash(adapter, p, flash_cmd, pflashcomp[i].optype,
					pflashcomp[i].size);
		if (status) {
			dev_err(&adapter->pdev->dev,
				"Flashing section type %d failed.\n",
				pflashcomp[i].img_type);
			return status;
		}
	}
	return 0;
}
3437
Padmanabh Ratnakar773a2d72012-10-20 06:04:16 +00003438static int be_flash_skyhawk(struct be_adapter *adapter,
3439 const struct firmware *fw,
3440 struct be_dma_mem *flash_cmd, int num_of_images)
Ajit Khaparde3f0d4562010-02-09 01:30:35 +00003441{
Padmanabh Ratnakar773a2d72012-10-20 06:04:16 +00003442 int status = 0, i, filehdr_size = 0;
3443 int img_offset, img_size, img_optype, redboot;
3444 int img_hdrs_size = num_of_images * sizeof(struct image_hdr);
3445 const u8 *p = fw->data;
3446 struct flash_section_info *fsec = NULL;
3447
3448 filehdr_size = sizeof(struct flash_file_hdr_g3);
3449 fsec = get_fsec_info(adapter, filehdr_size + img_hdrs_size, fw);
3450 if (!fsec) {
3451 dev_err(&adapter->pdev->dev,
3452 "Invalid Cookie. UFI corrupted ?\n");
3453 return -1;
3454 }
3455
3456 for (i = 0; i < le32_to_cpu(fsec->fsec_hdr.num_images); i++) {
3457 img_offset = le32_to_cpu(fsec->fsec_entry[i].offset);
3458 img_size = le32_to_cpu(fsec->fsec_entry[i].pad_size);
3459
3460 switch (le32_to_cpu(fsec->fsec_entry[i].type)) {
3461 case IMAGE_FIRMWARE_iSCSI:
3462 img_optype = OPTYPE_ISCSI_ACTIVE;
3463 break;
3464 case IMAGE_BOOT_CODE:
3465 img_optype = OPTYPE_REDBOOT;
3466 break;
3467 case IMAGE_OPTION_ROM_ISCSI:
3468 img_optype = OPTYPE_BIOS;
3469 break;
3470 case IMAGE_OPTION_ROM_PXE:
3471 img_optype = OPTYPE_PXE_BIOS;
3472 break;
3473 case IMAGE_OPTION_ROM_FCoE:
3474 img_optype = OPTYPE_FCOE_BIOS;
3475 break;
3476 case IMAGE_FIRMWARE_BACKUP_iSCSI:
3477 img_optype = OPTYPE_ISCSI_BACKUP;
3478 break;
3479 case IMAGE_NCSI:
3480 img_optype = OPTYPE_NCSI_FW;
3481 break;
3482 default:
3483 continue;
3484 }
3485
3486 if (img_optype == OPTYPE_REDBOOT) {
3487 redboot = be_flash_redboot(adapter, fw->data,
3488 img_offset, img_size,
3489 filehdr_size + img_hdrs_size);
3490 if (!redboot)
3491 continue;
3492 }
3493
3494 p = fw->data;
3495 p += filehdr_size + img_offset + img_hdrs_size;
3496 if (p + img_size > fw->data + fw->size)
3497 return -1;
3498
3499 status = be_flash(adapter, p, flash_cmd, img_optype, img_size);
3500 if (status) {
3501 dev_err(&adapter->pdev->dev,
3502 "Flashing section type %d failed.\n",
3503 fsec->fsec_entry[i].type);
3504 return status;
3505 }
3506 }
3507 return 0;
Ajit Khaparde3f0d4562010-02-09 01:30:35 +00003508}
3509
Padmanabh Ratnakarf67ef7b2012-07-12 03:57:09 +00003510static int lancer_wait_idle(struct be_adapter *adapter)
3511{
3512#define SLIPORT_IDLE_TIMEOUT 30
3513 u32 reg_val;
3514 int status = 0, i;
3515
3516 for (i = 0; i < SLIPORT_IDLE_TIMEOUT; i++) {
3517 reg_val = ioread32(adapter->db + PHYSDEV_CONTROL_OFFSET);
3518 if ((reg_val & PHYSDEV_CONTROL_INP_MASK) == 0)
3519 break;
3520
3521 ssleep(1);
3522 }
3523
3524 if (i == SLIPORT_IDLE_TIMEOUT)
3525 status = -1;
3526
3527 return status;
3528}
3529
3530static int lancer_fw_reset(struct be_adapter *adapter)
3531{
3532 int status = 0;
3533
3534 status = lancer_wait_idle(adapter);
3535 if (status)
3536 return status;
3537
3538 iowrite32(PHYSDEV_CONTROL_FW_RESET_MASK, adapter->db +
3539 PHYSDEV_CONTROL_OFFSET);
3540
3541 return status;
3542}
3543
/* Download a firmware image to a Lancer adapter.
 * The image is streamed to the "/prg" flash object in 32KB chunks via
 * WRITE_OBJECT commands and then committed with a zero-length write.
 * Depending on the change_status returned by the commit, a FW reset is
 * issued so the new image becomes active without a system reboot.
 * Returns 0 on success or a negative/FW error code.
 */
static int lancer_fw_download(struct be_adapter *adapter,
				const struct firmware *fw)
{
#define LANCER_FW_DOWNLOAD_CHUNK      (32 * 1024)
#define LANCER_FW_DOWNLOAD_LOCATION   "/prg"
	struct be_dma_mem flash_cmd;
	const u8 *data_ptr = NULL;
	u8 *dest_image_ptr = NULL;
	size_t image_size = 0;
	u32 chunk_size = 0;
	u32 data_written = 0;
	u32 offset = 0;
	int status = 0;
	u8 add_status = 0;
	u8 change_status;

	/* FW requires a 4-byte-aligned image length */
	if (!IS_ALIGNED(fw->size, sizeof(u32))) {
		dev_err(&adapter->pdev->dev,
			"FW Image not properly aligned. "
			"Length must be 4 byte aligned.\n");
		status = -EINVAL;
		goto lancer_fw_exit;
	}

	/* One DMA buffer holds the cmd header plus one chunk of image data */
	flash_cmd.size = sizeof(struct lancer_cmd_req_write_object)
				+ LANCER_FW_DOWNLOAD_CHUNK;
	flash_cmd.va = dma_alloc_coherent(&adapter->pdev->dev, flash_cmd.size,
					  &flash_cmd.dma, GFP_KERNEL);
	if (!flash_cmd.va) {
		status = -ENOMEM;
		goto lancer_fw_exit;
	}

	dest_image_ptr = flash_cmd.va +
				sizeof(struct lancer_cmd_req_write_object);
	image_size = fw->size;
	data_ptr = fw->data;

	while (image_size) {
		chunk_size = min_t(u32, image_size, LANCER_FW_DOWNLOAD_CHUNK);

		/* Copy the image chunk content. */
		memcpy(dest_image_ptr, data_ptr, chunk_size);

		status = lancer_cmd_write_object(adapter, &flash_cmd,
						 chunk_size, offset,
						 LANCER_FW_DOWNLOAD_LOCATION,
						 &data_written, &change_status,
						 &add_status);
		if (status)
			break;

		/* FW may accept fewer bytes than sent; advance by the
		 * amount actually written */
		offset += data_written;
		data_ptr += data_written;
		image_size -= data_written;
	}

	if (!status) {
		/* Commit the FW written */
		status = lancer_cmd_write_object(adapter, &flash_cmd,
						 0, offset,
						 LANCER_FW_DOWNLOAD_LOCATION,
						 &data_written, &change_status,
						 &add_status);
	}

	dma_free_coherent(&adapter->pdev->dev, flash_cmd.size, flash_cmd.va,
			  flash_cmd.dma);
	if (status) {
		dev_err(&adapter->pdev->dev,
			"Firmware load error. "
			"Status code: 0x%x Additional Status: 0x%x\n",
			status, add_status);
		goto lancer_fw_exit;
	}

	if (change_status == LANCER_FW_RESET_NEEDED) {
		status = lancer_fw_reset(adapter);
		if (status) {
			dev_err(&adapter->pdev->dev,
				"Adapter busy for FW reset.\n"
				"New FW will not be active.\n");
			goto lancer_fw_exit;
		}
	} else if (change_status != LANCER_NO_RESET_NEEDED) {
		dev_err(&adapter->pdev->dev,
			"System reboot required for new FW"
			" to be active\n");
	}

	dev_info(&adapter->pdev->dev, "Firmware flashed successfully\n");
lancer_fw_exit:
	return status;
}
3638
Sathya Perlaca34fe32012-11-06 17:48:56 +00003639#define UFI_TYPE2 2
3640#define UFI_TYPE3 3
Vasundhara Volam0ad31572013-04-21 23:28:16 +00003641#define UFI_TYPE3R 10
Sathya Perlaca34fe32012-11-06 17:48:56 +00003642#define UFI_TYPE4 4
3643static int be_get_ufi_type(struct be_adapter *adapter,
Vasundhara Volam0ad31572013-04-21 23:28:16 +00003644 struct flash_file_hdr_g3 *fhdr)
Padmanabh Ratnakar773a2d72012-10-20 06:04:16 +00003645{
3646 if (fhdr == NULL)
3647 goto be_get_ufi_exit;
3648
Sathya Perlaca34fe32012-11-06 17:48:56 +00003649 if (skyhawk_chip(adapter) && fhdr->build[0] == '4')
3650 return UFI_TYPE4;
Vasundhara Volam0ad31572013-04-21 23:28:16 +00003651 else if (BE3_chip(adapter) && fhdr->build[0] == '3') {
3652 if (fhdr->asic_type_rev == 0x10)
3653 return UFI_TYPE3R;
3654 else
3655 return UFI_TYPE3;
3656 } else if (BE2_chip(adapter) && fhdr->build[0] == '2')
Sathya Perlaca34fe32012-11-06 17:48:56 +00003657 return UFI_TYPE2;
Padmanabh Ratnakar773a2d72012-10-20 06:04:16 +00003658
3659be_get_ufi_exit:
3660 dev_err(&adapter->pdev->dev,
3661 "UFI and Interface are not compatible for flashing\n");
3662 return -1;
3663}
3664
/* Flash a BE2/BE3/BE3-R/Skyhawk UFI file.
 * Determines the UFI type from the file header, validates it against
 * the adapter generation, and dispatches to the generation-specific
 * flashing routine. Returns 0 on success or an error code.
 */
static int be_fw_download(struct be_adapter *adapter, const struct firmware* fw)
{
	struct flash_file_hdr_g3 *fhdr3;
	struct image_hdr *img_hdr_ptr = NULL;
	struct be_dma_mem flash_cmd;
	const u8 *p;
	int status = 0, i = 0, num_imgs = 0, ufi_type = 0;

	/* DMA buffer reused for each write-flashrom command */
	flash_cmd.size = sizeof(struct be_cmd_write_flashrom);
	flash_cmd.va = dma_alloc_coherent(&adapter->pdev->dev, flash_cmd.size,
					  &flash_cmd.dma, GFP_KERNEL);
	if (!flash_cmd.va) {
		status = -ENOMEM;
		goto be_fw_exit;
	}

	p = fw->data;
	fhdr3 = (struct flash_file_hdr_g3 *)p;

	ufi_type = be_get_ufi_type(adapter, fhdr3);

	/* Flash only for the image header whose imageid is 1 */
	num_imgs = le32_to_cpu(fhdr3->num_imgs);
	for (i = 0; i < num_imgs; i++) {
		img_hdr_ptr = (struct image_hdr *)(fw->data +
				(sizeof(struct flash_file_hdr_g3) +
				 i * sizeof(struct image_hdr)));
		if (le32_to_cpu(img_hdr_ptr->imageid) == 1) {
			switch (ufi_type) {
			case UFI_TYPE4:
				status = be_flash_skyhawk(adapter, fw,
							&flash_cmd, num_imgs);
				break;
			case UFI_TYPE3R:
				status = be_flash_BEx(adapter, fw, &flash_cmd,
						      num_imgs);
				break;
			case UFI_TYPE3:
				/* Do not flash this ufi on BE3-R cards */
				if (adapter->asic_rev < 0x10)
					status = be_flash_BEx(adapter, fw,
							      &flash_cmd,
							      num_imgs);
				else {
					status = -1;
					dev_err(&adapter->pdev->dev,
						"Can't load BE3 UFI on BE3R\n");
				}
			}
		}
	}

	/* TYPE2 (BE2) UFIs are flashed outside the loop, with zero
	 * image headers */
	if (ufi_type == UFI_TYPE2)
		status = be_flash_BEx(adapter, fw, &flash_cmd, 0);
	else if (ufi_type == -1)
		status = -1;

	dma_free_coherent(&adapter->pdev->dev, flash_cmd.size, flash_cmd.va,
			  flash_cmd.dma);
	if (status) {
		dev_err(&adapter->pdev->dev, "Firmware load error\n");
		goto be_fw_exit;
	}

	dev_info(&adapter->pdev->dev, "Firmware flashed successfully\n");

be_fw_exit:
	return status;
}
3733
3734int be_load_fw(struct be_adapter *adapter, u8 *fw_file)
3735{
3736 const struct firmware *fw;
3737 int status;
3738
3739 if (!netif_running(adapter->netdev)) {
3740 dev_err(&adapter->pdev->dev,
3741 "Firmware load not allowed (interface is down)\n");
3742 return -1;
3743 }
3744
3745 status = request_firmware(&fw, fw_file, &adapter->pdev->dev);
3746 if (status)
3747 goto fw_exit;
3748
3749 dev_info(&adapter->pdev->dev, "Flashing firmware file %s\n", fw_file);
3750
3751 if (lancer_chip(adapter))
3752 status = lancer_fw_download(adapter, fw);
3753 else
3754 status = be_fw_download(adapter, fw);
3755
Ajit Khaparde84517482009-09-04 03:12:16 +00003756fw_exit:
3757 release_firmware(fw);
3758 return status;
3759}
3760
/* net_device callbacks implemented by this driver, including the
 * SR-IOV VF configuration hooks. */
static const struct net_device_ops be_netdev_ops = {
	.ndo_open = be_open,
	.ndo_stop = be_close,
	.ndo_start_xmit = be_xmit,
	.ndo_set_rx_mode = be_set_rx_mode,
	.ndo_set_mac_address = be_mac_addr_set,
	.ndo_change_mtu = be_change_mtu,
	.ndo_get_stats64 = be_get_stats64,
	.ndo_validate_addr = eth_validate_addr,
	.ndo_vlan_rx_add_vid = be_vlan_add_vid,
	.ndo_vlan_rx_kill_vid = be_vlan_rem_vid,
	.ndo_set_vf_mac = be_set_vf_mac,
	.ndo_set_vf_vlan = be_set_vf_vlan,
	.ndo_set_vf_tx_rate = be_set_vf_tx_rate,
	.ndo_get_vf_config = be_get_vf_config,
#ifdef CONFIG_NET_POLL_CONTROLLER
	.ndo_poll_controller = be_netpoll,
#endif
};
3780
/* Initialize the net_device: offload feature flags, device flags,
 * netdev/ethtool ops and one NAPI context per event queue. */
static void be_netdev_init(struct net_device *netdev)
{
	struct be_adapter *adapter = netdev_priv(netdev);
	struct be_eq_obj *eqo;
	int i;

	/* Offloads the user may toggle via ethtool */
	netdev->hw_features |= NETIF_F_SG | NETIF_F_TSO | NETIF_F_TSO6 |
		NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM | NETIF_F_RXCSUM |
		NETIF_F_HW_VLAN_CTAG_TX;
	if (be_multi_rxq(adapter))
		netdev->hw_features |= NETIF_F_RXHASH;

	/* All hw_features are enabled by default, plus VLAN RX offloads */
	netdev->features |= netdev->hw_features |
		NETIF_F_HW_VLAN_CTAG_RX | NETIF_F_HW_VLAN_CTAG_FILTER;

	netdev->vlan_features |= NETIF_F_SG | NETIF_F_TSO | NETIF_F_TSO6 |
		NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM;

	netdev->priv_flags |= IFF_UNICAST_FLT;

	netdev->flags |= IFF_MULTICAST;

	netif_set_gso_max_size(netdev, 65535 - ETH_HLEN);

	netdev->netdev_ops = &be_netdev_ops;

	SET_ETHTOOL_OPS(netdev, &be_ethtool_ops);

	for_all_evt_queues(adapter, eqo, i)
		netif_napi_add(netdev, &eqo->napi, be_poll, BE_NAPI_WEIGHT);
}
3812
3813static void be_unmap_pci_bars(struct be_adapter *adapter)
3814{
Sathya Perlac5b3ad42013-03-05 22:23:20 +00003815 if (adapter->csr)
3816 pci_iounmap(adapter->pdev, adapter->csr);
Sathya Perla8788fdc2009-07-27 22:52:03 +00003817 if (adapter->db)
Sathya Perlace66f782012-11-06 17:48:58 +00003818 pci_iounmap(adapter->pdev, adapter->db);
Parav Pandit045508a2012-03-26 14:27:13 +00003819}
3820
/* Return the PCI BAR number that holds the doorbell registers:
 * BAR 0 on Lancer and on virtual functions, BAR 4 otherwise. */
static int db_bar(struct be_adapter *adapter)
{
	return (lancer_chip(adapter) || !be_physfn(adapter)) ? 0 : 4;
}
3828
3829static int be_roce_map_pci_bars(struct be_adapter *adapter)
Parav Pandit045508a2012-03-26 14:27:13 +00003830{
Sathya Perladbf0f2a2012-11-06 17:49:00 +00003831 if (skyhawk_chip(adapter)) {
Sathya Perlace66f782012-11-06 17:48:58 +00003832 adapter->roce_db.size = 4096;
3833 adapter->roce_db.io_addr = pci_resource_start(adapter->pdev,
3834 db_bar(adapter));
3835 adapter->roce_db.total_size = pci_resource_len(adapter->pdev,
3836 db_bar(adapter));
3837 }
Parav Pandit045508a2012-03-26 14:27:13 +00003838 return 0;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07003839}
3840
/* Read the SLI interface register and map the CSR BAR (BE2/BE3 PFs
 * only) and the doorbell BAR. Returns 0 or -ENOMEM; any mapping done
 * before a failure is released. */
static int be_map_pci_bars(struct be_adapter *adapter)
{
	u8 __iomem *addr;
	u32 sli_intf;

	pci_read_config_dword(adapter->pdev, SLI_INTF_REG_OFFSET, &sli_intf);
	adapter->if_type = (sli_intf & SLI_INTF_IF_TYPE_MASK) >>
				SLI_INTF_IF_TYPE_SHIFT;

	/* The CSR BAR (2) is present only on BEx physical functions */
	if (BEx_chip(adapter) && be_physfn(adapter)) {
		adapter->csr = pci_iomap(adapter->pdev, 2, 0);
		if (adapter->csr == NULL)
			return -ENOMEM;
	}

	addr = pci_iomap(adapter->pdev, db_bar(adapter), 0);
	if (addr == NULL)
		goto pci_map_err;
	adapter->db = addr;

	be_roce_map_pci_bars(adapter);
	return 0;

pci_map_err:
	be_unmap_pci_bars(adapter);
	return -ENOMEM;
}
3868
Sathya Perla6b7c5b92009-03-11 23:32:03 -07003869static void be_ctrl_cleanup(struct be_adapter *adapter)
3870{
Sathya Perla8788fdc2009-07-27 22:52:03 +00003871 struct be_dma_mem *mem = &adapter->mbox_mem_alloced;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07003872
3873 be_unmap_pci_bars(adapter);
3874
3875 if (mem->va)
Ivan Vecera2b7bceb2011-02-02 08:05:12 +00003876 dma_free_coherent(&adapter->pdev->dev, mem->size, mem->va,
3877 mem->dma);
Sathya Perlae7b909a2009-11-22 22:01:10 +00003878
Sathya Perla5b8821b2011-08-02 19:57:44 +00003879 mem = &adapter->rx_filter;
Sathya Perlae7b909a2009-11-22 22:01:10 +00003880 if (mem->va)
Ivan Vecera2b7bceb2011-02-02 08:05:12 +00003881 dma_free_coherent(&adapter->pdev->dev, mem->size, mem->va,
3882 mem->dma);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07003883}
3884
/* Map the PCI BARs and allocate the DMA buffers and locks needed to
 * issue FW commands (the MCC mailbox and the rx-filter cmd buffer).
 * Returns 0 or a negative errno; partially acquired resources are
 * released on failure (goto-based unwind). */
static int be_ctrl_init(struct be_adapter *adapter)
{
	struct be_dma_mem *mbox_mem_alloc = &adapter->mbox_mem_alloced;
	struct be_dma_mem *mbox_mem_align = &adapter->mbox_mem;
	struct be_dma_mem *rx_filter = &adapter->rx_filter;
	u32 sli_intf;
	int status;

	pci_read_config_dword(adapter->pdev, SLI_INTF_REG_OFFSET, &sli_intf);
	adapter->sli_family = (sli_intf & SLI_INTF_FAMILY_MASK) >>
				SLI_INTF_FAMILY_SHIFT;
	adapter->virtfn = (sli_intf & SLI_INTF_FT_MASK) ? 1 : 0;

	status = be_map_pci_bars(adapter);
	if (status)
		goto done;

	/* Over-allocate by 16 bytes so the mailbox itself can be aligned
	 * to a 16-byte boundary below */
	mbox_mem_alloc->size = sizeof(struct be_mcc_mailbox) + 16;
	mbox_mem_alloc->va = dma_alloc_coherent(&adapter->pdev->dev,
						mbox_mem_alloc->size,
						&mbox_mem_alloc->dma,
						GFP_KERNEL);
	if (!mbox_mem_alloc->va) {
		status = -ENOMEM;
		goto unmap_pci_bars;
	}
	mbox_mem_align->size = sizeof(struct be_mcc_mailbox);
	mbox_mem_align->va = PTR_ALIGN(mbox_mem_alloc->va, 16);
	mbox_mem_align->dma = PTR_ALIGN(mbox_mem_alloc->dma, 16);
	memset(mbox_mem_align->va, 0, sizeof(struct be_mcc_mailbox));

	rx_filter->size = sizeof(struct be_cmd_req_rx_filter);
	rx_filter->va = dma_alloc_coherent(&adapter->pdev->dev, rx_filter->size,
					   &rx_filter->dma,
					   GFP_KERNEL | __GFP_ZERO);
	if (rx_filter->va == NULL) {
		status = -ENOMEM;
		goto free_mbox;
	}

	mutex_init(&adapter->mbox_lock);
	spin_lock_init(&adapter->mcc_lock);
	spin_lock_init(&adapter->mcc_cq_lock);

	init_completion(&adapter->flash_compl);
	pci_save_state(adapter->pdev);
	return 0;

free_mbox:
	dma_free_coherent(&adapter->pdev->dev, mbox_mem_alloc->size,
			  mbox_mem_alloc->va, mbox_mem_alloc->dma);

unmap_pci_bars:
	be_unmap_pci_bars(adapter);

done:
	return status;
}
3943
3944static void be_stats_cleanup(struct be_adapter *adapter)
3945{
Sathya Perla3abcded2010-10-03 22:12:27 -07003946 struct be_dma_mem *cmd = &adapter->stats_cmd;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07003947
3948 if (cmd->va)
Ivan Vecera2b7bceb2011-02-02 08:05:12 +00003949 dma_free_coherent(&adapter->pdev->dev, cmd->size,
3950 cmd->va, cmd->dma);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07003951}
3952
3953static int be_stats_init(struct be_adapter *adapter)
3954{
Sathya Perla3abcded2010-10-03 22:12:27 -07003955 struct be_dma_mem *cmd = &adapter->stats_cmd;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07003956
Sathya Perlaca34fe32012-11-06 17:48:56 +00003957 if (lancer_chip(adapter))
3958 cmd->size = sizeof(struct lancer_cmd_req_pport_stats);
3959 else if (BE2_chip(adapter))
Ajit Khaparde89a88ab2011-05-16 07:36:18 +00003960 cmd->size = sizeof(struct be_cmd_req_get_stats_v0);
Sathya Perlaca34fe32012-11-06 17:48:56 +00003961 else
3962 /* BE3 and Skyhawk */
3963 cmd->size = sizeof(struct be_cmd_req_get_stats_v1);
3964
Ivan Vecera2b7bceb2011-02-02 08:05:12 +00003965 cmd->va = dma_alloc_coherent(&adapter->pdev->dev, cmd->size, &cmd->dma,
Joe Perches1f9061d22013-03-15 07:23:58 +00003966 GFP_KERNEL | __GFP_ZERO);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07003967 if (cmd->va == NULL)
3968 return -1;
3969 return 0;
3970}
3971
/* PCI remove callback: tears down the adapter in the reverse order of
 * probe — RoCE, interrupts, recovery worker, netdev, queues/FW state,
 * DMA buffers, BAR mappings, then the PCI device itself. */
static void be_remove(struct pci_dev *pdev)
{
	struct be_adapter *adapter = pci_get_drvdata(pdev);

	if (!adapter)
		return;

	be_roce_dev_remove(adapter);
	be_intr_set(adapter, false);

	/* Stop the periodic recovery worker before the netdev goes away */
	cancel_delayed_work_sync(&adapter->func_recovery_work);

	unregister_netdev(adapter->netdev);

	be_clear(adapter);

	/* tell fw we're done with firing cmds */
	be_cmd_fw_clean(adapter);

	be_stats_cleanup(adapter);

	be_ctrl_cleanup(adapter);

	pci_disable_pcie_error_reporting(pdev);

	pci_set_drvdata(pdev, NULL);
	pci_release_regions(pdev);
	pci_disable_device(pdev);

	free_netdev(adapter->netdev);
}
4003
Ajit Khaparde4762f6c2012-03-18 06:23:11 +00004004bool be_is_wol_supported(struct be_adapter *adapter)
4005{
4006 return ((adapter->wol_cap & BE_WOL_CAP) &&
4007 !be_is_wol_excluded(adapter)) ? true : false;
4008}
4009
/* Query the FW's extended FAT capabilities and return the UART-mode
 * trace level configured in module 0. Returns 0 on Lancer (feature
 * not used there) and on any allocation or FW-command failure. */
u32 be_get_fw_log_level(struct be_adapter *adapter)
{
	struct be_dma_mem extfat_cmd;
	struct be_fat_conf_params *cfgs;
	int status;
	u32 level = 0;
	int j;

	if (lancer_chip(adapter))
		return 0;

	memset(&extfat_cmd, 0, sizeof(struct be_dma_mem));
	extfat_cmd.size = sizeof(struct be_cmd_resp_get_ext_fat_caps);
	extfat_cmd.va = pci_alloc_consistent(adapter->pdev, extfat_cmd.size,
					     &extfat_cmd.dma);

	if (!extfat_cmd.va) {
		dev_err(&adapter->pdev->dev, "%s: Memory allocation failure\n",
			__func__);
		goto err;
	}

	status = be_cmd_get_ext_fat_capabilites(adapter, &extfat_cmd);
	if (!status) {
		/* Config params follow the common response header */
		cfgs = (struct be_fat_conf_params *)(extfat_cmd.va +
						sizeof(struct be_cmd_resp_hdr));
		/* Pick the debug level of the UART trace mode, if present */
		for (j = 0; j < le32_to_cpu(cfgs->module[0].num_modes); j++) {
			if (cfgs->module[0].trace_lvl[j].mode == MODE_UART)
				level = cfgs->module[0].trace_lvl[j].dbg_lvl;
		}
	}
	pci_free_consistent(adapter->pdev, extfat_cmd.size, extfat_cmd.va,
			    extfat_cmd.dma);
err:
	return level;
}
Padmanabh Ratnakarabb93952012-10-20 06:01:41 +00004046
/* Fetch one-time configuration from the FW at probe: controller
 * attributes, WoL capability and the FW log level (used to seed
 * msg_enable). Returns 0 or the FW-command error. */
static int be_get_initial_config(struct be_adapter *adapter)
{
	int status;
	u32 level;

	status = be_cmd_get_cntl_attributes(adapter);
	if (status)
		return status;

	status = be_cmd_get_acpi_wol_cap(adapter);
	if (status) {
		/* in case of a failure to get wol capabillities
		 * check the exclusion list to determine WOL capability */
		if (!be_is_wol_excluded(adapter))
			adapter->wol_cap |= BE_WOL_CAP;
	}

	if (be_is_wol_supported(adapter))
		adapter->wol = true;

	/* Must be a power of 2 or else MODULO will BUG_ON */
	adapter->be_get_temp_freq = 64;

	/* Enable HW-level driver logging only when the FW's own log
	 * level is at or below the default */
	level = be_get_fw_log_level(adapter);
	adapter->msg_enable = level <= FW_LOG_LEVEL_DEFAULT ? NETIF_MSG_HW : 0;

	return 0;
}
4075
/* Attempt SLIPORT error recovery on a Lancer adapter: wait for the FW
 * to report ready, then tear down and fully re-create the adapter
 * setup, re-opening the interface if it was running. */
static int lancer_recover_func(struct be_adapter *adapter)
{
	int status;

	status = lancer_test_and_set_rdy_state(adapter);
	if (status)
		goto err;

	if (netif_running(adapter->netdev))
		be_close(adapter->netdev);

	be_clear(adapter);

	/* Clear the error state before re-initializing, so FW cmds
	 * issued during be_setup() are not rejected */
	adapter->hw_error = false;
	adapter->fw_timeout = false;

	status = be_setup(adapter);
	if (status)
		goto err;

	if (netif_running(adapter->netdev)) {
		status = be_open(adapter->netdev);
		if (status)
			goto err;
	}

	dev_err(&adapter->pdev->dev,
		"Adapter SLIPORT recovery succeeded\n");
	return 0;
err:
	if (adapter->eeh_error)
		dev_err(&adapter->pdev->dev,
			"Adapter SLIPORT recovery failed\n");

	return status;
}
4112
4113static void be_func_recovery_task(struct work_struct *work)
4114{
4115 struct be_adapter *adapter =
4116 container_of(work, struct be_adapter, func_recovery_work.work);
4117 int status;
4118
4119 be_detect_error(adapter);
4120
4121 if (adapter->hw_error && lancer_chip(adapter)) {
4122
4123 if (adapter->eeh_error)
4124 goto out;
4125
4126 rtnl_lock();
4127 netif_device_detach(adapter->netdev);
4128 rtnl_unlock();
4129
4130 status = lancer_recover_func(adapter);
4131
4132 if (!status)
4133 netif_device_attach(adapter->netdev);
4134 }
4135
4136out:
4137 schedule_delayed_work(&adapter->func_recovery_work,
4138 msecs_to_jiffies(1000));
Padmanabh Ratnakard8110f62011-11-25 05:48:23 +00004139}
4140
4141static void be_worker(struct work_struct *work)
4142{
4143 struct be_adapter *adapter =
4144 container_of(work, struct be_adapter, work.work);
4145 struct be_rx_obj *rxo;
Sathya Perla10ef9ab2012-02-09 18:05:27 +00004146 struct be_eq_obj *eqo;
Padmanabh Ratnakard8110f62011-11-25 05:48:23 +00004147 int i;
4148
Padmanabh Ratnakard8110f62011-11-25 05:48:23 +00004149 /* when interrupts are not yet enabled, just reap any pending
4150 * mcc completions */
4151 if (!netif_running(adapter->netdev)) {
Amerigo Wang072a9c42012-08-24 21:41:11 +00004152 local_bh_disable();
Sathya Perla10ef9ab2012-02-09 18:05:27 +00004153 be_process_mcc(adapter);
Amerigo Wang072a9c42012-08-24 21:41:11 +00004154 local_bh_enable();
Padmanabh Ratnakard8110f62011-11-25 05:48:23 +00004155 goto reschedule;
4156 }
4157
4158 if (!adapter->stats_cmd_sent) {
4159 if (lancer_chip(adapter))
4160 lancer_cmd_get_pport_stats(adapter,
4161 &adapter->stats_cmd);
4162 else
4163 be_cmd_get_stats(adapter, &adapter->stats_cmd);
4164 }
4165
Padmanabh Ratnakar7aeb2152012-07-12 03:56:46 +00004166 if (MODULO(adapter->work_counter, adapter->be_get_temp_freq) == 0)
4167 be_cmd_get_die_temperature(adapter);
4168
Padmanabh Ratnakard8110f62011-11-25 05:48:23 +00004169 for_all_rx_queues(adapter, rxo, i) {
Padmanabh Ratnakard8110f62011-11-25 05:48:23 +00004170 if (rxo->rx_post_starved) {
4171 rxo->rx_post_starved = false;
4172 be_post_rx_frags(rxo, GFP_KERNEL);
4173 }
4174 }
4175
Sathya Perla10ef9ab2012-02-09 18:05:27 +00004176 for_all_evt_queues(adapter, eqo, i)
4177 be_eqd_update(adapter, eqo);
4178
Padmanabh Ratnakard8110f62011-11-25 05:48:23 +00004179reschedule:
4180 adapter->work_counter++;
4181 schedule_delayed_work(&adapter->work, msecs_to_jiffies(1000));
4182}
4183
Sathya Perla39f1d942012-05-08 19:41:24 +00004184static bool be_reset_required(struct be_adapter *adapter)
4185{
Sathya Perlad79c0a22012-06-05 19:37:22 +00004186 return be_find_vfs(adapter, ENABLED) > 0 ? false : true;
Sathya Perla39f1d942012-05-08 19:41:24 +00004187}
4188
Sathya Perlad3791422012-09-28 04:39:44 +00004189static char *mc_name(struct be_adapter *adapter)
4190{
4191 if (adapter->function_mode & FLEX10_MODE)
4192 return "FLEX10";
4193 else if (adapter->function_mode & VNIC_MODE)
4194 return "vNIC";
4195 else if (adapter->function_mode & UMC_ENABLED)
4196 return "UMC";
4197 else
4198 return "";
4199}
4200
/* Printable name for the PCI function type: physical or virtual */
static inline char *func_name(struct be_adapter *adapter)
{
	if (be_physfn(adapter))
		return "PF";
	return "VF";
}
4205
Greg Kroah-Hartman1dd06ae2012-12-06 14:30:56 +00004206static int be_probe(struct pci_dev *pdev, const struct pci_device_id *pdev_id)
Sathya Perla6b7c5b92009-03-11 23:32:03 -07004207{
4208 int status = 0;
4209 struct be_adapter *adapter;
4210 struct net_device *netdev;
Padmanabh Ratnakarb4e32a72012-07-12 03:57:35 +00004211 char port_name;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07004212
4213 status = pci_enable_device(pdev);
4214 if (status)
4215 goto do_none;
4216
4217 status = pci_request_regions(pdev, DRV_NAME);
4218 if (status)
4219 goto disable_dev;
4220 pci_set_master(pdev);
4221
Sathya Perla7f640062012-06-05 19:37:20 +00004222 netdev = alloc_etherdev_mqs(sizeof(*adapter), MAX_TX_QS, MAX_RX_QS);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07004223 if (netdev == NULL) {
4224 status = -ENOMEM;
4225 goto rel_reg;
4226 }
4227 adapter = netdev_priv(netdev);
4228 adapter->pdev = pdev;
4229 pci_set_drvdata(pdev, adapter);
4230 adapter->netdev = netdev;
Sathya Perla2243e2e2009-11-22 22:02:03 +00004231 SET_NETDEV_DEV(netdev, &pdev->dev);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07004232
Ivan Vecera2b7bceb2011-02-02 08:05:12 +00004233 status = dma_set_mask(&pdev->dev, DMA_BIT_MASK(64));
Sathya Perla6b7c5b92009-03-11 23:32:03 -07004234 if (!status) {
Craig Hada2bd92cd2013-04-21 23:28:18 +00004235 status = dma_set_coherent_mask(&pdev->dev, DMA_BIT_MASK(64));
4236 if (status < 0) {
4237 dev_err(&pdev->dev, "dma_set_coherent_mask failed\n");
4238 goto free_netdev;
4239 }
Sathya Perla6b7c5b92009-03-11 23:32:03 -07004240 netdev->features |= NETIF_F_HIGHDMA;
4241 } else {
Ivan Vecera2b7bceb2011-02-02 08:05:12 +00004242 status = dma_set_mask(&pdev->dev, DMA_BIT_MASK(32));
Sathya Perla6b7c5b92009-03-11 23:32:03 -07004243 if (status) {
4244 dev_err(&pdev->dev, "Could not set PCI DMA Mask\n");
4245 goto free_netdev;
4246 }
4247 }
4248
Sathya Perlad6b6d982012-09-05 01:56:48 +00004249 status = pci_enable_pcie_error_reporting(pdev);
4250 if (status)
4251 dev_err(&pdev->dev, "Could not use PCIe error reporting\n");
4252
Sathya Perla6b7c5b92009-03-11 23:32:03 -07004253 status = be_ctrl_init(adapter);
4254 if (status)
Sathya Perla39f1d942012-05-08 19:41:24 +00004255 goto free_netdev;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07004256
Sathya Perla2243e2e2009-11-22 22:02:03 +00004257 /* sync up with fw's ready state */
Sarveshwar Bandiba343c72010-03-31 02:56:12 +00004258 if (be_physfn(adapter)) {
Padmanabh Ratnakarbf99e502012-07-12 03:56:58 +00004259 status = be_fw_wait_ready(adapter);
Sarveshwar Bandiba343c72010-03-31 02:56:12 +00004260 if (status)
4261 goto ctrl_clean;
Sarveshwar Bandiba343c72010-03-31 02:56:12 +00004262 }
Sathya Perla2243e2e2009-11-22 22:02:03 +00004263
Sathya Perla39f1d942012-05-08 19:41:24 +00004264 if (be_reset_required(adapter)) {
4265 status = be_cmd_reset_function(adapter);
4266 if (status)
4267 goto ctrl_clean;
Sarveshwar Bandi556ae192010-05-24 18:38:25 -07004268
Kalesh AP2d177be2013-04-28 22:22:29 +00004269 /* Wait for interrupts to quiesce after an FLR */
4270 msleep(100);
4271 }
Somnath Kotur8cef7a72013-03-14 02:41:51 +00004272
4273 /* Allow interrupts for other ULPs running on NIC function */
4274 be_intr_set(adapter, true);
Sathya Perla10ef9ab2012-02-09 18:05:27 +00004275
Kalesh AP2d177be2013-04-28 22:22:29 +00004276 /* tell fw we're ready to fire cmds */
4277 status = be_cmd_fw_init(adapter);
4278 if (status)
4279 goto ctrl_clean;
4280
Sathya Perla6b7c5b92009-03-11 23:32:03 -07004281 status = be_stats_init(adapter);
4282 if (status)
4283 goto ctrl_clean;
4284
Sathya Perla39f1d942012-05-08 19:41:24 +00004285 status = be_get_initial_config(adapter);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07004286 if (status)
4287 goto stats_clean;
4288
Sathya Perla6b7c5b92009-03-11 23:32:03 -07004289 INIT_DELAYED_WORK(&adapter->work, be_worker);
Padmanabh Ratnakarf67ef7b2012-07-12 03:57:09 +00004290 INIT_DELAYED_WORK(&adapter->func_recovery_work, be_func_recovery_task);
Sathya Perlaa54769f2011-10-24 02:45:00 +00004291 adapter->rx_fc = adapter->tx_fc = true;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07004292
Sathya Perla5fb379e2009-06-18 00:02:59 +00004293 status = be_setup(adapter);
4294 if (status)
Sathya Perla55f5c3c2012-09-28 04:39:42 +00004295 goto stats_clean;
Sathya Perla2243e2e2009-11-22 22:02:03 +00004296
Sathya Perla3abcded2010-10-03 22:12:27 -07004297 be_netdev_init(netdev);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07004298 status = register_netdev(netdev);
4299 if (status != 0)
Sathya Perla5fb379e2009-06-18 00:02:59 +00004300 goto unsetup;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07004301
Parav Pandit045508a2012-03-26 14:27:13 +00004302 be_roce_dev_add(adapter);
4303
Padmanabh Ratnakarf67ef7b2012-07-12 03:57:09 +00004304 schedule_delayed_work(&adapter->func_recovery_work,
4305 msecs_to_jiffies(1000));
Padmanabh Ratnakarb4e32a72012-07-12 03:57:35 +00004306
4307 be_cmd_query_port_name(adapter, &port_name);
4308
Sathya Perlad3791422012-09-28 04:39:44 +00004309 dev_info(&pdev->dev, "%s: %s %s port %c\n", nic_name(pdev),
4310 func_name(adapter), mc_name(adapter), port_name);
Somnath Kotur34b1ef02011-06-01 00:33:22 +00004311
Sathya Perla6b7c5b92009-03-11 23:32:03 -07004312 return 0;
4313
Sathya Perla5fb379e2009-06-18 00:02:59 +00004314unsetup:
4315 be_clear(adapter);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07004316stats_clean:
4317 be_stats_cleanup(adapter);
4318ctrl_clean:
4319 be_ctrl_cleanup(adapter);
Sathya Perlaf9449ab2011-10-24 02:45:01 +00004320free_netdev:
Sathya Perlafe6d2a32010-11-21 23:25:50 +00004321 free_netdev(netdev);
Sathya Perla8d56ff12009-11-22 22:02:26 +00004322 pci_set_drvdata(pdev, NULL);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07004323rel_reg:
4324 pci_release_regions(pdev);
4325disable_dev:
4326 pci_disable_device(pdev);
4327do_none:
Ajit Khapardec4ca2372009-05-18 15:38:55 -07004328 dev_err(&pdev->dev, "%s initialization failed\n", nic_name(pdev));
Sathya Perla6b7c5b92009-03-11 23:32:03 -07004329 return status;
4330}
4331
/* PM suspend hook: arm wake-on-LAN if enabled, stop the recovery
 * worker, detach and close the netdev, tear down HW resources and put
 * the device into the requested low-power state.  Mirrored by
 * be_resume().
 */
static int be_suspend(struct pci_dev *pdev, pm_message_t state)
{
	struct be_adapter *adapter = pci_get_drvdata(pdev);
	struct net_device *netdev = adapter->netdev;

	if (adapter->wol)
		be_setup_wol(adapter, true);

	/* Recovery worker must not run while resources are torn down */
	cancel_delayed_work_sync(&adapter->func_recovery_work);

	netif_device_detach(netdev);
	if (netif_running(netdev)) {
		rtnl_lock();
		be_close(netdev);
		rtnl_unlock();
	}
	be_clear(adapter);

	pci_save_state(pdev);
	pci_disable_device(pdev);
	pci_set_power_state(pdev, pci_choose_state(pdev, state));
	return 0;
}
4355
/* PM resume hook: power the device back up, restore PCI state, re-init
 * FW, rebuild HW resources, reopen the interface if it was running and
 * restart the recovery worker.  Counterpart of be_suspend().
 */
static int be_resume(struct pci_dev *pdev)
{
	int status = 0;
	struct be_adapter *adapter = pci_get_drvdata(pdev);
	struct net_device *netdev = adapter->netdev;

	netif_device_detach(netdev);

	status = pci_enable_device(pdev);
	if (status)
		return status;

	pci_set_power_state(pdev, 0);
	pci_restore_state(pdev);

	/* tell fw we're ready to fire cmds */
	status = be_cmd_fw_init(adapter);
	if (status)
		return status;

	/* NOTE(review): be_setup()/be_open() return values are ignored
	 * here — presumably a failure surfaces via a dead interface
	 * rather than a failed resume; confirm this is intentional.
	 */
	be_setup(adapter);
	if (netif_running(netdev)) {
		rtnl_lock();
		be_open(netdev);
		rtnl_unlock();
	}

	schedule_delayed_work(&adapter->func_recovery_work,
			      msecs_to_jiffies(1000));
	netif_device_attach(netdev);

	if (adapter->wol)
		be_setup_wol(adapter, false);

	return 0;
}
4392
Sathya Perla82456b02010-02-17 01:35:37 +00004393/*
4394 * An FLR will stop BE from DMAing any data.
4395 */
4396static void be_shutdown(struct pci_dev *pdev)
4397{
4398 struct be_adapter *adapter = pci_get_drvdata(pdev);
Sathya Perla82456b02010-02-17 01:35:37 +00004399
Ajit Khaparde2d5d4152011-04-06 05:53:13 +00004400 if (!adapter)
4401 return;
Sathya Perla82456b02010-02-17 01:35:37 +00004402
Sathya Perla0f4a6822011-03-21 20:49:28 +00004403 cancel_delayed_work_sync(&adapter->work);
Padmanabh Ratnakarf67ef7b2012-07-12 03:57:09 +00004404 cancel_delayed_work_sync(&adapter->func_recovery_work);
Ajit Khapardea4ca0552011-02-11 13:38:03 +00004405
Ajit Khaparde2d5d4152011-04-06 05:53:13 +00004406 netif_device_detach(adapter->netdev);
Sathya Perla82456b02010-02-17 01:35:37 +00004407
Ajit Khaparde57841862011-04-06 18:08:43 +00004408 be_cmd_reset_function(adapter);
4409
Sathya Perla82456b02010-02-17 01:35:37 +00004410 pci_disable_device(pdev);
Sathya Perla82456b02010-02-17 01:35:37 +00004411}
4412
Sathya Perlacf588472010-02-14 21:22:01 +00004413static pci_ers_result_t be_eeh_err_detected(struct pci_dev *pdev,
4414 pci_channel_state_t state)
4415{
4416 struct be_adapter *adapter = pci_get_drvdata(pdev);
4417 struct net_device *netdev = adapter->netdev;
4418
4419 dev_err(&adapter->pdev->dev, "EEH error detected\n");
4420
Padmanabh Ratnakarf67ef7b2012-07-12 03:57:09 +00004421 adapter->eeh_error = true;
Sathya Perlacf588472010-02-14 21:22:01 +00004422
Padmanabh Ratnakarf67ef7b2012-07-12 03:57:09 +00004423 cancel_delayed_work_sync(&adapter->func_recovery_work);
4424
4425 rtnl_lock();
Sathya Perlacf588472010-02-14 21:22:01 +00004426 netif_device_detach(netdev);
Padmanabh Ratnakarf67ef7b2012-07-12 03:57:09 +00004427 rtnl_unlock();
Sathya Perlacf588472010-02-14 21:22:01 +00004428
4429 if (netif_running(netdev)) {
4430 rtnl_lock();
4431 be_close(netdev);
4432 rtnl_unlock();
4433 }
4434 be_clear(adapter);
4435
4436 if (state == pci_channel_io_perm_failure)
4437 return PCI_ERS_RESULT_DISCONNECT;
4438
4439 pci_disable_device(pdev);
4440
Somnath Kotureeb7fc72012-05-02 03:41:01 +00004441 /* The error could cause the FW to trigger a flash debug dump.
4442 * Resetting the card while flash dump is in progress
Padmanabh Ratnakarc8a54162012-10-20 06:03:37 +00004443 * can cause it not to recover; wait for it to finish.
4444 * Wait only for first function as it is needed only once per
4445 * adapter.
Somnath Kotureeb7fc72012-05-02 03:41:01 +00004446 */
Padmanabh Ratnakarc8a54162012-10-20 06:03:37 +00004447 if (pdev->devfn == 0)
4448 ssleep(30);
4449
Sathya Perlacf588472010-02-14 21:22:01 +00004450 return PCI_ERS_RESULT_NEED_RESET;
4451}
4452
/* PCI error-recovery: the slot reset has completed.  Re-enable the
 * device, restore its config space and wait for FW readiness before
 * reporting whether the slot recovered.
 */
static pci_ers_result_t be_eeh_reset(struct pci_dev *pdev)
{
	struct be_adapter *adapter = pci_get_drvdata(pdev);
	int status;

	dev_info(&adapter->pdev->dev, "EEH reset\n");
	/* Sticky error flags must be cleared before talking to FW again */
	be_clear_all_error(adapter);

	status = pci_enable_device(pdev);
	if (status)
		return PCI_ERS_RESULT_DISCONNECT;

	pci_set_master(pdev);
	pci_set_power_state(pdev, 0);
	pci_restore_state(pdev);

	/* Check if card is ok and fw is ready */
	dev_info(&adapter->pdev->dev,
		 "Waiting for FW to be ready after EEH reset\n");
	status = be_fw_wait_ready(adapter);
	if (status)
		return PCI_ERS_RESULT_DISCONNECT;

	/* Clear any logged AER uncorrectable status from the error */
	pci_cleanup_aer_uncorrect_error_status(pdev);
	return PCI_ERS_RESULT_RECOVERED;
}
4479
/* PCI error-recovery: traffic may flow again.  Re-initialize the
 * function (function reset + FW init + be_setup), reopen the interface
 * if it was running and restart the recovery worker.
 */
static void be_eeh_resume(struct pci_dev *pdev)
{
	int status = 0;
	struct be_adapter *adapter = pci_get_drvdata(pdev);
	struct net_device *netdev = adapter->netdev;

	dev_info(&adapter->pdev->dev, "EEH resume\n");

	pci_save_state(pdev);

	status = be_cmd_reset_function(adapter);
	if (status)
		goto err;

	/* tell fw we're ready to fire cmds */
	status = be_cmd_fw_init(adapter);
	if (status)
		goto err;

	status = be_setup(adapter);
	if (status)
		goto err;

	if (netif_running(netdev)) {
		status = be_open(netdev);
		if (status)
			goto err;
	}

	schedule_delayed_work(&adapter->func_recovery_work,
			      msecs_to_jiffies(1000));
	netif_device_attach(netdev);
	return;
err:
	dev_err(&adapter->pdev->dev, "EEH resume failed\n");
}
4516
Stephen Hemminger3646f0e2012-09-07 09:33:15 -07004517static const struct pci_error_handlers be_eeh_handlers = {
Sathya Perlacf588472010-02-14 21:22:01 +00004518 .error_detected = be_eeh_err_detected,
4519 .slot_reset = be_eeh_reset,
4520 .resume = be_eeh_resume,
4521};
4522
Sathya Perla6b7c5b92009-03-11 23:32:03 -07004523static struct pci_driver be_driver = {
4524 .name = DRV_NAME,
4525 .id_table = be_dev_ids,
4526 .probe = be_probe,
4527 .remove = be_remove,
4528 .suspend = be_suspend,
Sathya Perlacf588472010-02-14 21:22:01 +00004529 .resume = be_resume,
Sathya Perla82456b02010-02-17 01:35:37 +00004530 .shutdown = be_shutdown,
Sathya Perlacf588472010-02-14 21:22:01 +00004531 .err_handler = &be_eeh_handlers
Sathya Perla6b7c5b92009-03-11 23:32:03 -07004532};
4533
4534static int __init be_init_module(void)
4535{
Joe Perches8e95a202009-12-03 07:58:21 +00004536 if (rx_frag_size != 8192 && rx_frag_size != 4096 &&
4537 rx_frag_size != 2048) {
Sathya Perla6b7c5b92009-03-11 23:32:03 -07004538 printk(KERN_WARNING DRV_NAME
4539 " : Module param rx_frag_size must be 2048/4096/8192."
4540 " Using 2048\n");
4541 rx_frag_size = 2048;
4542 }
Sathya Perla6b7c5b92009-03-11 23:32:03 -07004543
Sathya Perla6b7c5b92009-03-11 23:32:03 -07004544 return pci_register_driver(&be_driver);
4545}
4546module_init(be_init_module);
4547
/* Module exit: unregister the driver (be_remove runs for each device) */
static void __exit be_exit_module(void)
{
	pci_unregister_driver(&be_driver);
}
module_exit(be_exit_module);