blob: 84f419fcde267407c89b1c67bc0454f7bf21c4bd [file] [log] [blame]
Eilon Greenstein34f80b02008-06-23 20:33:01 -07001/* bnx2x_main.c: Broadcom Everest network driver.
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02002 *
Dmitry Kravkov5de92402011-05-04 23:51:13 +00003 * Copyright (c) 2007-2011 Broadcom Corporation
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02004 *
5 * This program is free software; you can redistribute it and/or modify
6 * it under the terms of the GNU General Public License as published by
7 * the Free Software Foundation.
8 *
Eilon Greenstein24e3fce2008-06-12 14:30:28 -07009 * Maintained by: Eilon Greenstein <eilong@broadcom.com>
10 * Written by: Eliezer Tamir
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +020011 * Based on code from Michael Chan's bnx2 driver
12 * UDP CSUM errata workaround by Arik Gendelman
Eilon Greensteinca003922009-08-12 22:53:28 -070013 * Slowpath and fastpath rework by Vladislav Zolotarov
Eliezer Tamirc14423f2008-02-28 11:49:42 -080014 * Statistics and Link management by Yitchak Gertner
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +020015 *
16 */
17
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +020018#include <linux/module.h>
19#include <linux/moduleparam.h>
20#include <linux/kernel.h>
21#include <linux/device.h> /* for dev_info() */
22#include <linux/timer.h>
23#include <linux/errno.h>
24#include <linux/ioport.h>
25#include <linux/slab.h>
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +020026#include <linux/interrupt.h>
27#include <linux/pci.h>
28#include <linux/init.h>
29#include <linux/netdevice.h>
30#include <linux/etherdevice.h>
31#include <linux/skbuff.h>
32#include <linux/dma-mapping.h>
33#include <linux/bitops.h>
34#include <linux/irq.h>
35#include <linux/delay.h>
36#include <asm/byteorder.h>
37#include <linux/time.h>
38#include <linux/ethtool.h>
39#include <linux/mii.h>
Eilon Greenstein0c6671b2009-01-14 21:26:51 -080040#include <linux/if_vlan.h>
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +020041#include <net/ip.h>
42#include <net/tcp.h>
43#include <net/checksum.h>
Eilon Greenstein34f80b02008-06-23 20:33:01 -070044#include <net/ip6_checksum.h>
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +020045#include <linux/workqueue.h>
46#include <linux/crc32.h>
Eilon Greenstein34f80b02008-06-23 20:33:01 -070047#include <linux/crc32c.h>
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +020048#include <linux/prefetch.h>
49#include <linux/zlib.h>
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +020050#include <linux/io.h>
Ben Hutchings45229b42009-11-07 11:53:39 +000051#include <linux/stringify.h>
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +020052
Dmitry Kravkovb0efbb92010-07-27 12:33:43 +000053#define BNX2X_MAIN
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +020054#include "bnx2x.h"
55#include "bnx2x_init.h"
Vladislav Zolotarov94a78b72009-04-27 03:27:43 -070056#include "bnx2x_init_ops.h"
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +000057#include "bnx2x_cmn.h"
Vladislav Zolotarove4901dd2010-12-13 05:44:18 +000058#include "bnx2x_dcb.h"
Vladislav Zolotarov042181f2011-06-14 01:33:39 +000059#include "bnx2x_sp.h"
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +020060
Vladislav Zolotarov94a78b72009-04-27 03:27:43 -070061#include <linux/firmware.h>
62#include "bnx2x_fw_file_hdr.h"
63/* FW files */
Ben Hutchings45229b42009-11-07 11:53:39 +000064#define FW_FILE_VERSION \
65 __stringify(BCM_5710_FW_MAJOR_VERSION) "." \
66 __stringify(BCM_5710_FW_MINOR_VERSION) "." \
67 __stringify(BCM_5710_FW_REVISION_VERSION) "." \
68 __stringify(BCM_5710_FW_ENGINEERING_VERSION)
Dmitry Kravkov560131f2010-10-06 03:18:47 +000069#define FW_FILE_NAME_E1 "bnx2x/bnx2x-e1-" FW_FILE_VERSION ".fw"
70#define FW_FILE_NAME_E1H "bnx2x/bnx2x-e1h-" FW_FILE_VERSION ".fw"
Dmitry Kravkovf2e08992010-10-06 03:28:26 +000071#define FW_FILE_NAME_E2 "bnx2x/bnx2x-e2-" FW_FILE_VERSION ".fw"
Vladislav Zolotarov94a78b72009-04-27 03:27:43 -070072
Eilon Greenstein34f80b02008-06-23 20:33:01 -070073/* Time in jiffies before concluding the transmitter is hung */
74#define TX_TIMEOUT (5*HZ)
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +020075
Andrew Morton53a10562008-02-09 23:16:41 -080076static char version[] __devinitdata =
Eilon Greenstein34f80b02008-06-23 20:33:01 -070077 "Broadcom NetXtreme II 5771x 10Gigabit Ethernet Driver "
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +020078 DRV_MODULE_NAME " " DRV_MODULE_VERSION " (" DRV_MODULE_RELDATE ")\n";
79
Eilon Greenstein24e3fce2008-06-12 14:30:28 -070080MODULE_AUTHOR("Eliezer Tamir");
Dmitry Kravkovf2e08992010-10-06 03:28:26 +000081MODULE_DESCRIPTION("Broadcom NetXtreme II "
82 "BCM57710/57711/57711E/57712/57712E Driver");
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +020083MODULE_LICENSE("GPL");
84MODULE_VERSION(DRV_MODULE_VERSION);
Ben Hutchings45229b42009-11-07 11:53:39 +000085MODULE_FIRMWARE(FW_FILE_NAME_E1);
86MODULE_FIRMWARE(FW_FILE_NAME_E1H);
Dmitry Kravkovf2e08992010-10-06 03:28:26 +000087MODULE_FIRMWARE(FW_FILE_NAME_E2);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +020088
/* Module parameters.  All are read-only after load (perm 0). */

/* 0 - single queue mode; 1 - multi (RSS) queue mode (default) */
static int multi_mode = 1;
module_param(multi_mode, int, 0);
MODULE_PARM_DESC(multi_mode, " Multi queue mode "
			     "(0 Disable; 1 Enable (default))");

/* Number of RSS queues; 0 means "one per CPU".  Non-static: also read
 * from bnx2x_cmn.c.
 */
int num_queues;
module_param(num_queues, int, 0);
MODULE_PARM_DESC(num_queues, " Number of queues for multi_mode=1"
				" (default is as a number of CPUs)");

/* Non-zero disables TPA (HW LRO aggregation) */
static int disable_tpa;
module_param(disable_tpa, int, 0);
MODULE_PARM_DESC(disable_tpa, " Disable the TPA (LRO) feature");

/* Values accepted by the int_mode parameter below */
#define INT_MODE_INTx			1
#define INT_MODE_MSI			2

/* Force a non-MSI-X interrupt mode (0 = use MSI-X when available) */
static int int_mode;
module_param(int_mode, int, 0);
MODULE_PARM_DESC(int_mode, " Force interrupt mode other then MSI-X "
				"(1 INT#x; 2 MSI)");

/* Non-zero enables pause-frame generation when host rings run out */
static int dropless_fc;
module_param(dropless_fc, int, 0);
MODULE_PARM_DESC(dropless_fc, " Pause on exhausted host ring");

/* Debug aid: service the device by timer polling instead of interrupts */
static int poll;
module_param(poll, int, 0);
MODULE_PARM_DESC(poll, " Use polling (for debug)");

/* Debug aid: force PCIe Max Read Request Size (-1 = leave HW default) */
static int mrrs = -1;
module_param(mrrs, int, 0);
MODULE_PARM_DESC(mrrs, " Force Max Read Req Size (0..3) (for debug)");

/* Initial netif debug message level mask */
static int debug;
module_param(debug, int, 0);
MODULE_PARM_DESC(debug, " Default debug msglevel");

/* Driver-private workqueue for slow-path work items */
static struct workqueue_struct *bnx2x_wq;

#ifdef BCM_CNIC
/* FCoE "All ENode" well-known multicast MAC address */
static u8 ALL_ENODE_MACS[] = {0x01, 0x10, 0x18, 0x01, 0x00, 0x01};
#endif
131
/* Supported board variants; values index board_info[] below */
enum bnx2x_board_type {
	BCM57710 = 0,
	BCM57711 = 1,
	BCM57711E = 2,
	BCM57712 = 3,
	BCM57712E = 4
};

/* indexed by board_type, above */
static struct {
	char *name;	/* human-readable board name for probe messages */
} board_info[] __devinitdata = {
	{ "Broadcom NetXtreme II BCM57710 XGb" },
	{ "Broadcom NetXtreme II BCM57711 XGb" },
	{ "Broadcom NetXtreme II BCM57711E XGb" },
	{ "Broadcom NetXtreme II BCM57712 XGb" },
	{ "Broadcom NetXtreme II BCM57712E XGb" }
};

/* PCI IDs this driver binds to; driver_data is the bnx2x_board_type */
static DEFINE_PCI_DEVICE_TABLE(bnx2x_pci_tbl) = {
	{ PCI_VDEVICE(BROADCOM, PCI_DEVICE_ID_NX2_57710), BCM57710 },
	{ PCI_VDEVICE(BROADCOM, PCI_DEVICE_ID_NX2_57711), BCM57711 },
	{ PCI_VDEVICE(BROADCOM, PCI_DEVICE_ID_NX2_57711E), BCM57711E },
	{ PCI_VDEVICE(BROADCOM, PCI_DEVICE_ID_NX2_57712), BCM57712 },
	{ PCI_VDEVICE(BROADCOM, PCI_DEVICE_ID_NX2_57712E), BCM57712E },
	{ 0 }
};

MODULE_DEVICE_TABLE(pci, bnx2x_pci_tbl);
161
162/****************************************************************************
163* General service functions
164****************************************************************************/
165
Dmitry Kravkov523224a2010-10-06 03:23:26 +0000166static inline void storm_memset_ov(struct bnx2x *bp, u16 ov, u16 abs_fid)
167{
168 REG_WR16(bp, BAR_XSTRORM_INTMEM + XSTORM_E1HOV_OFFSET(abs_fid), ov);
169}
170
Dmitry Kravkov523224a2010-10-06 03:23:26 +0000171
172static inline void storm_memset_eq_data(struct bnx2x *bp,
173 struct event_ring_data *eq_data,
174 u16 pfid)
175{
176 size_t size = sizeof(struct event_ring_data);
177
178 u32 addr = BAR_CSTRORM_INTMEM + CSTORM_EVENT_RING_DATA_OFFSET(pfid);
179
180 __storm_memset_struct(bp, addr, size, (u32 *)eq_data);
181}
182
183static inline void storm_memset_eq_prod(struct bnx2x *bp, u16 eq_prod,
184 u16 pfid)
185{
186 u32 addr = BAR_CSTRORM_INTMEM + CSTORM_EVENT_RING_PROD_OFFSET(pfid);
187 REG_WR16(bp, addr, eq_prod);
188}
189
190static inline void storm_memset_hc_timeout(struct bnx2x *bp, u8 port,
191 u16 fw_sb_id, u8 sb_index,
192 u8 ticks)
193{
194
Dmitry Kravkovf2e08992010-10-06 03:28:26 +0000195 int index_offset = CHIP_IS_E2(bp) ?
196 offsetof(struct hc_status_block_data_e2, index_data) :
Dmitry Kravkov523224a2010-10-06 03:23:26 +0000197 offsetof(struct hc_status_block_data_e1x, index_data);
198 u32 addr = BAR_CSTRORM_INTMEM +
199 CSTORM_STATUS_BLOCK_DATA_OFFSET(fw_sb_id) +
200 index_offset +
201 sizeof(struct hc_index_data)*sb_index +
202 offsetof(struct hc_index_data, timeout);
203 REG_WR8(bp, addr, ticks);
204 DP(NETIF_MSG_HW, "port %x fw_sb_id %d sb_index %d ticks %d\n",
205 port, fw_sb_id, sb_index, ticks);
206}
207static inline void storm_memset_hc_disable(struct bnx2x *bp, u8 port,
208 u16 fw_sb_id, u8 sb_index,
209 u8 disable)
210{
211 u32 enable_flag = disable ? 0 : (1 << HC_INDEX_DATA_HC_ENABLED_SHIFT);
Dmitry Kravkovf2e08992010-10-06 03:28:26 +0000212 int index_offset = CHIP_IS_E2(bp) ?
213 offsetof(struct hc_status_block_data_e2, index_data) :
Dmitry Kravkov523224a2010-10-06 03:23:26 +0000214 offsetof(struct hc_status_block_data_e1x, index_data);
215 u32 addr = BAR_CSTRORM_INTMEM +
216 CSTORM_STATUS_BLOCK_DATA_OFFSET(fw_sb_id) +
217 index_offset +
218 sizeof(struct hc_index_data)*sb_index +
219 offsetof(struct hc_index_data, flags);
220 u16 flags = REG_RD16(bp, addr);
221 /* clear and set */
222 flags &= ~HC_INDEX_DATA_HC_ENABLED;
223 flags |= enable_flag;
224 REG_WR16(bp, addr, flags);
225 DP(NETIF_MSG_HW, "port %x fw_sb_id %d sb_index %d disable %d\n",
226 port, fw_sb_id, sb_index, disable);
227}
228
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +0200229/* used only at init
230 * locking is done by mcp
231 */
stephen hemminger8d962862010-10-21 07:50:56 +0000232static void bnx2x_reg_wr_ind(struct bnx2x *bp, u32 addr, u32 val)
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +0200233{
234 pci_write_config_dword(bp->pdev, PCICFG_GRC_ADDRESS, addr);
235 pci_write_config_dword(bp->pdev, PCICFG_GRC_DATA, val);
236 pci_write_config_dword(bp->pdev, PCICFG_GRC_ADDRESS,
237 PCICFG_VENDOR_ID_OFFSET);
238}
239
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +0200240static u32 bnx2x_reg_rd_ind(struct bnx2x *bp, u32 addr)
241{
242 u32 val;
243
244 pci_write_config_dword(bp->pdev, PCICFG_GRC_ADDRESS, addr);
245 pci_read_config_dword(bp->pdev, PCICFG_GRC_DATA, &val);
246 pci_write_config_dword(bp->pdev, PCICFG_GRC_ADDRESS,
247 PCICFG_VENDOR_ID_OFFSET);
248
249 return val;
250}
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +0200251
Dmitry Kravkovf2e08992010-10-06 03:28:26 +0000252#define DMAE_DP_SRC_GRC "grc src_addr [%08x]"
253#define DMAE_DP_SRC_PCI "pci src_addr [%x:%08x]"
254#define DMAE_DP_DST_GRC "grc dst_addr [%08x]"
255#define DMAE_DP_DST_PCI "pci dst_addr [%x:%08x]"
256#define DMAE_DP_DST_NONE "dst_addr [none]"
257
stephen hemminger8d962862010-10-21 07:50:56 +0000258static void bnx2x_dp_dmae(struct bnx2x *bp, struct dmae_command *dmae,
259 int msglvl)
Dmitry Kravkovf2e08992010-10-06 03:28:26 +0000260{
261 u32 src_type = dmae->opcode & DMAE_COMMAND_SRC;
262
263 switch (dmae->opcode & DMAE_COMMAND_DST) {
264 case DMAE_CMD_DST_PCI:
265 if (src_type == DMAE_CMD_SRC_PCI)
266 DP(msglvl, "DMAE: opcode 0x%08x\n"
267 "src [%x:%08x], len [%d*4], dst [%x:%08x]\n"
268 "comp_addr [%x:%08x], comp_val 0x%08x\n",
269 dmae->opcode, dmae->src_addr_hi, dmae->src_addr_lo,
270 dmae->len, dmae->dst_addr_hi, dmae->dst_addr_lo,
271 dmae->comp_addr_hi, dmae->comp_addr_lo,
272 dmae->comp_val);
273 else
274 DP(msglvl, "DMAE: opcode 0x%08x\n"
275 "src [%08x], len [%d*4], dst [%x:%08x]\n"
276 "comp_addr [%x:%08x], comp_val 0x%08x\n",
277 dmae->opcode, dmae->src_addr_lo >> 2,
278 dmae->len, dmae->dst_addr_hi, dmae->dst_addr_lo,
279 dmae->comp_addr_hi, dmae->comp_addr_lo,
280 dmae->comp_val);
281 break;
282 case DMAE_CMD_DST_GRC:
283 if (src_type == DMAE_CMD_SRC_PCI)
284 DP(msglvl, "DMAE: opcode 0x%08x\n"
285 "src [%x:%08x], len [%d*4], dst_addr [%08x]\n"
286 "comp_addr [%x:%08x], comp_val 0x%08x\n",
287 dmae->opcode, dmae->src_addr_hi, dmae->src_addr_lo,
288 dmae->len, dmae->dst_addr_lo >> 2,
289 dmae->comp_addr_hi, dmae->comp_addr_lo,
290 dmae->comp_val);
291 else
292 DP(msglvl, "DMAE: opcode 0x%08x\n"
293 "src [%08x], len [%d*4], dst [%08x]\n"
294 "comp_addr [%x:%08x], comp_val 0x%08x\n",
295 dmae->opcode, dmae->src_addr_lo >> 2,
296 dmae->len, dmae->dst_addr_lo >> 2,
297 dmae->comp_addr_hi, dmae->comp_addr_lo,
298 dmae->comp_val);
299 break;
300 default:
301 if (src_type == DMAE_CMD_SRC_PCI)
302 DP(msglvl, "DMAE: opcode 0x%08x\n"
303 DP_LEVEL "src_addr [%x:%08x] len [%d * 4] "
304 "dst_addr [none]\n"
305 DP_LEVEL "comp_addr [%x:%08x] comp_val 0x%08x\n",
306 dmae->opcode, dmae->src_addr_hi, dmae->src_addr_lo,
307 dmae->len, dmae->comp_addr_hi, dmae->comp_addr_lo,
308 dmae->comp_val);
309 else
310 DP(msglvl, "DMAE: opcode 0x%08x\n"
311 DP_LEVEL "src_addr [%08x] len [%d * 4] "
312 "dst_addr [none]\n"
313 DP_LEVEL "comp_addr [%x:%08x] comp_val 0x%08x\n",
314 dmae->opcode, dmae->src_addr_lo >> 2,
315 dmae->len, dmae->comp_addr_hi, dmae->comp_addr_lo,
316 dmae->comp_val);
317 break;
318 }
319
320}
321
Dmitry Kravkov6c719d02010-07-27 12:36:15 +0000322const u32 dmae_reg_go_c[] = {
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +0200323 DMAE_REG_GO_C0, DMAE_REG_GO_C1, DMAE_REG_GO_C2, DMAE_REG_GO_C3,
324 DMAE_REG_GO_C4, DMAE_REG_GO_C5, DMAE_REG_GO_C6, DMAE_REG_GO_C7,
325 DMAE_REG_GO_C8, DMAE_REG_GO_C9, DMAE_REG_GO_C10, DMAE_REG_GO_C11,
326 DMAE_REG_GO_C12, DMAE_REG_GO_C13, DMAE_REG_GO_C14, DMAE_REG_GO_C15
327};
328
329/* copy command into DMAE command memory and set DMAE command go */
Dmitry Kravkov6c719d02010-07-27 12:36:15 +0000330void bnx2x_post_dmae(struct bnx2x *bp, struct dmae_command *dmae, int idx)
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +0200331{
332 u32 cmd_offset;
333 int i;
334
335 cmd_offset = (DMAE_REG_CMD_MEM + sizeof(struct dmae_command) * idx);
336 for (i = 0; i < (sizeof(struct dmae_command)/4); i++) {
337 REG_WR(bp, cmd_offset + i*4, *(((u32 *)dmae) + i));
338
Eilon Greensteinad8d3942008-06-23 20:29:02 -0700339 DP(BNX2X_MSG_OFF, "DMAE cmd[%d].%d (0x%08x) : 0x%08x\n",
340 idx, i, cmd_offset + i*4, *(((u32 *)dmae) + i));
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +0200341 }
342 REG_WR(bp, dmae_reg_go_c[idx], 1);
343}
344
Dmitry Kravkovf2e08992010-10-06 03:28:26 +0000345u32 bnx2x_dmae_opcode_add_comp(u32 opcode, u8 comp_type)
346{
347 return opcode | ((comp_type << DMAE_COMMAND_C_DST_SHIFT) |
348 DMAE_CMD_C_ENABLE);
349}
350
351u32 bnx2x_dmae_opcode_clr_src_reset(u32 opcode)
352{
353 return opcode & ~DMAE_CMD_SRC_RESET;
354}
355
356u32 bnx2x_dmae_opcode(struct bnx2x *bp, u8 src_type, u8 dst_type,
357 bool with_comp, u8 comp_type)
358{
359 u32 opcode = 0;
360
361 opcode |= ((src_type << DMAE_COMMAND_SRC_SHIFT) |
362 (dst_type << DMAE_COMMAND_DST_SHIFT));
363
364 opcode |= (DMAE_CMD_SRC_RESET | DMAE_CMD_DST_RESET);
365
366 opcode |= (BP_PORT(bp) ? DMAE_CMD_PORT_1 : DMAE_CMD_PORT_0);
367 opcode |= ((BP_E1HVN(bp) << DMAE_CMD_E1HVN_SHIFT) |
368 (BP_E1HVN(bp) << DMAE_COMMAND_DST_VN_SHIFT));
369 opcode |= (DMAE_COM_SET_ERR << DMAE_COMMAND_ERR_POLICY_SHIFT);
370
371#ifdef __BIG_ENDIAN
372 opcode |= DMAE_CMD_ENDIANITY_B_DW_SWAP;
373#else
374 opcode |= DMAE_CMD_ENDIANITY_DW_SWAP;
375#endif
376 if (with_comp)
377 opcode = bnx2x_dmae_opcode_add_comp(opcode, comp_type);
378 return opcode;
379}
380
stephen hemminger8d962862010-10-21 07:50:56 +0000381static void bnx2x_prep_dmae_with_comp(struct bnx2x *bp,
382 struct dmae_command *dmae,
383 u8 src_type, u8 dst_type)
Dmitry Kravkovf2e08992010-10-06 03:28:26 +0000384{
385 memset(dmae, 0, sizeof(struct dmae_command));
386
387 /* set the opcode */
388 dmae->opcode = bnx2x_dmae_opcode(bp, src_type, dst_type,
389 true, DMAE_COMP_PCI);
390
391 /* fill in the completion parameters */
392 dmae->comp_addr_lo = U64_LO(bnx2x_sp_mapping(bp, wb_comp));
393 dmae->comp_addr_hi = U64_HI(bnx2x_sp_mapping(bp, wb_comp));
394 dmae->comp_val = DMAE_COMP_VAL;
395}
396
397/* issue a dmae command over the init-channel and wailt for completion */
stephen hemminger8d962862010-10-21 07:50:56 +0000398static int bnx2x_issue_dmae_with_comp(struct bnx2x *bp,
399 struct dmae_command *dmae)
Dmitry Kravkovf2e08992010-10-06 03:28:26 +0000400{
401 u32 *wb_comp = bnx2x_sp(bp, wb_comp);
Dmitry Kravkov5e374b52011-05-22 10:09:19 +0000402 int cnt = CHIP_REV_IS_SLOW(bp) ? (400000) : 4000;
Dmitry Kravkovf2e08992010-10-06 03:28:26 +0000403 int rc = 0;
404
405 DP(BNX2X_MSG_OFF, "data before [0x%08x 0x%08x 0x%08x 0x%08x]\n",
406 bp->slowpath->wb_data[0], bp->slowpath->wb_data[1],
407 bp->slowpath->wb_data[2], bp->slowpath->wb_data[3]);
408
409 /* lock the dmae channel */
Vladislav Zolotarov6e30dd42011-02-06 11:25:41 -0800410 spin_lock_bh(&bp->dmae_lock);
Dmitry Kravkovf2e08992010-10-06 03:28:26 +0000411
412 /* reset completion */
413 *wb_comp = 0;
414
415 /* post the command on the channel used for initializations */
416 bnx2x_post_dmae(bp, dmae, INIT_DMAE_C(bp));
417
418 /* wait for completion */
419 udelay(5);
420 while ((*wb_comp & ~DMAE_PCI_ERR_FLAG) != DMAE_COMP_VAL) {
421 DP(BNX2X_MSG_OFF, "wb_comp 0x%08x\n", *wb_comp);
422
423 if (!cnt) {
424 BNX2X_ERR("DMAE timeout!\n");
425 rc = DMAE_TIMEOUT;
426 goto unlock;
427 }
428 cnt--;
429 udelay(50);
430 }
431 if (*wb_comp & DMAE_PCI_ERR_FLAG) {
432 BNX2X_ERR("DMAE PCI error!\n");
433 rc = DMAE_PCI_ERROR;
434 }
435
436 DP(BNX2X_MSG_OFF, "data after [0x%08x 0x%08x 0x%08x 0x%08x]\n",
437 bp->slowpath->wb_data[0], bp->slowpath->wb_data[1],
438 bp->slowpath->wb_data[2], bp->slowpath->wb_data[3]);
439
440unlock:
Vladislav Zolotarov6e30dd42011-02-06 11:25:41 -0800441 spin_unlock_bh(&bp->dmae_lock);
Dmitry Kravkovf2e08992010-10-06 03:28:26 +0000442 return rc;
443}
444
Eilon Greensteinad8d3942008-06-23 20:29:02 -0700445void bnx2x_write_dmae(struct bnx2x *bp, dma_addr_t dma_addr, u32 dst_addr,
446 u32 len32)
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +0200447{
Eilon Greenstein5ff7b6d2009-08-12 08:23:44 +0000448 struct dmae_command dmae;
Eilon Greensteinad8d3942008-06-23 20:29:02 -0700449
450 if (!bp->dmae_ready) {
451 u32 *data = bnx2x_sp(bp, wb_data[0]);
452
453 DP(BNX2X_MSG_OFF, "DMAE is not ready (dst_addr %08x len32 %d)"
454 " using indirect\n", dst_addr, len32);
455 bnx2x_init_ind_wr(bp, dst_addr, data, len32);
456 return;
457 }
458
Dmitry Kravkovf2e08992010-10-06 03:28:26 +0000459 /* set opcode and fixed command fields */
460 bnx2x_prep_dmae_with_comp(bp, &dmae, DMAE_SRC_PCI, DMAE_DST_GRC);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +0200461
Dmitry Kravkovf2e08992010-10-06 03:28:26 +0000462 /* fill in addresses and len */
Eilon Greenstein5ff7b6d2009-08-12 08:23:44 +0000463 dmae.src_addr_lo = U64_LO(dma_addr);
464 dmae.src_addr_hi = U64_HI(dma_addr);
465 dmae.dst_addr_lo = dst_addr >> 2;
466 dmae.dst_addr_hi = 0;
467 dmae.len = len32;
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +0200468
Dmitry Kravkovf2e08992010-10-06 03:28:26 +0000469 bnx2x_dp_dmae(bp, &dmae, BNX2X_MSG_OFF);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +0200470
Dmitry Kravkovf2e08992010-10-06 03:28:26 +0000471 /* issue the command and wait for completion */
472 bnx2x_issue_dmae_with_comp(bp, &dmae);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +0200473}
474
Yaniv Rosnerc18487e2008-06-23 20:27:52 -0700475void bnx2x_read_dmae(struct bnx2x *bp, u32 src_addr, u32 len32)
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +0200476{
Eilon Greenstein5ff7b6d2009-08-12 08:23:44 +0000477 struct dmae_command dmae;
Eilon Greensteinad8d3942008-06-23 20:29:02 -0700478
479 if (!bp->dmae_ready) {
480 u32 *data = bnx2x_sp(bp, wb_data[0]);
481 int i;
482
483 DP(BNX2X_MSG_OFF, "DMAE is not ready (src_addr %08x len32 %d)"
484 " using indirect\n", src_addr, len32);
485 for (i = 0; i < len32; i++)
486 data[i] = bnx2x_reg_rd_ind(bp, src_addr + i*4);
487 return;
488 }
489
Dmitry Kravkovf2e08992010-10-06 03:28:26 +0000490 /* set opcode and fixed command fields */
491 bnx2x_prep_dmae_with_comp(bp, &dmae, DMAE_SRC_GRC, DMAE_DST_PCI);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +0200492
Dmitry Kravkovf2e08992010-10-06 03:28:26 +0000493 /* fill in addresses and len */
Eilon Greenstein5ff7b6d2009-08-12 08:23:44 +0000494 dmae.src_addr_lo = src_addr >> 2;
495 dmae.src_addr_hi = 0;
496 dmae.dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, wb_data));
497 dmae.dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, wb_data));
498 dmae.len = len32;
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +0200499
Dmitry Kravkovf2e08992010-10-06 03:28:26 +0000500 bnx2x_dp_dmae(bp, &dmae, BNX2X_MSG_OFF);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +0200501
Dmitry Kravkovf2e08992010-10-06 03:28:26 +0000502 /* issue the command and wait for completion */
503 bnx2x_issue_dmae_with_comp(bp, &dmae);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +0200504}
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +0200505
stephen hemminger8d962862010-10-21 07:50:56 +0000506static void bnx2x_write_dmae_phys_len(struct bnx2x *bp, dma_addr_t phys_addr,
507 u32 addr, u32 len)
Eilon Greenstein573f2032009-08-12 08:24:14 +0000508{
Vladislav Zolotarov02e3c6c2010-04-19 01:13:33 +0000509 int dmae_wr_max = DMAE_LEN32_WR_MAX(bp);
Eilon Greenstein573f2032009-08-12 08:24:14 +0000510 int offset = 0;
511
Vladislav Zolotarov02e3c6c2010-04-19 01:13:33 +0000512 while (len > dmae_wr_max) {
Eilon Greenstein573f2032009-08-12 08:24:14 +0000513 bnx2x_write_dmae(bp, phys_addr + offset,
Vladislav Zolotarov02e3c6c2010-04-19 01:13:33 +0000514 addr + offset, dmae_wr_max);
515 offset += dmae_wr_max * 4;
516 len -= dmae_wr_max;
Eilon Greenstein573f2032009-08-12 08:24:14 +0000517 }
518
519 bnx2x_write_dmae(bp, phys_addr + offset, addr + offset, len);
520}
521
Eilon Greensteinad8d3942008-06-23 20:29:02 -0700522/* used only for slowpath so not inlined */
523static void bnx2x_wb_wr(struct bnx2x *bp, int reg, u32 val_hi, u32 val_lo)
524{
525 u32 wb_write[2];
526
527 wb_write[0] = val_hi;
528 wb_write[1] = val_lo;
529 REG_WR_DMAE(bp, reg, wb_write, 2);
530}
531
#ifdef USE_WB_RD
/* Write-back read of a 64-bit value (hi/lo dword pair). */
static u64 bnx2x_wb_rd(struct bnx2x *bp, int reg)
{
	u32 wb_data[2];

	REG_RD_DMAE(bp, reg, wb_data, 2);

	return HILO_U64(wb_data[0], wb_data[1]);
}
#endif
542
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +0200543static int bnx2x_mc_assert(struct bnx2x *bp)
544{
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +0200545 char last_idx;
Eilon Greenstein34f80b02008-06-23 20:33:01 -0700546 int i, rc = 0;
547 u32 row0, row1, row2, row3;
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +0200548
Eilon Greenstein34f80b02008-06-23 20:33:01 -0700549 /* XSTORM */
550 last_idx = REG_RD8(bp, BAR_XSTRORM_INTMEM +
551 XSTORM_ASSERT_LIST_INDEX_OFFSET);
552 if (last_idx)
553 BNX2X_ERR("XSTORM_ASSERT_LIST_INDEX 0x%x\n", last_idx);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +0200554
Eilon Greenstein34f80b02008-06-23 20:33:01 -0700555 /* print the asserts */
556 for (i = 0; i < STROM_ASSERT_ARRAY_SIZE; i++) {
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +0200557
Eilon Greenstein34f80b02008-06-23 20:33:01 -0700558 row0 = REG_RD(bp, BAR_XSTRORM_INTMEM +
559 XSTORM_ASSERT_LIST_OFFSET(i));
560 row1 = REG_RD(bp, BAR_XSTRORM_INTMEM +
561 XSTORM_ASSERT_LIST_OFFSET(i) + 4);
562 row2 = REG_RD(bp, BAR_XSTRORM_INTMEM +
563 XSTORM_ASSERT_LIST_OFFSET(i) + 8);
564 row3 = REG_RD(bp, BAR_XSTRORM_INTMEM +
565 XSTORM_ASSERT_LIST_OFFSET(i) + 12);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +0200566
Eilon Greenstein34f80b02008-06-23 20:33:01 -0700567 if (row0 != COMMON_ASM_INVALID_ASSERT_OPCODE) {
568 BNX2X_ERR("XSTORM_ASSERT_INDEX 0x%x = 0x%08x"
569 " 0x%08x 0x%08x 0x%08x\n",
570 i, row3, row2, row1, row0);
571 rc++;
572 } else {
573 break;
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +0200574 }
575 }
Eilon Greenstein34f80b02008-06-23 20:33:01 -0700576
577 /* TSTORM */
578 last_idx = REG_RD8(bp, BAR_TSTRORM_INTMEM +
579 TSTORM_ASSERT_LIST_INDEX_OFFSET);
580 if (last_idx)
581 BNX2X_ERR("TSTORM_ASSERT_LIST_INDEX 0x%x\n", last_idx);
582
583 /* print the asserts */
584 for (i = 0; i < STROM_ASSERT_ARRAY_SIZE; i++) {
585
586 row0 = REG_RD(bp, BAR_TSTRORM_INTMEM +
587 TSTORM_ASSERT_LIST_OFFSET(i));
588 row1 = REG_RD(bp, BAR_TSTRORM_INTMEM +
589 TSTORM_ASSERT_LIST_OFFSET(i) + 4);
590 row2 = REG_RD(bp, BAR_TSTRORM_INTMEM +
591 TSTORM_ASSERT_LIST_OFFSET(i) + 8);
592 row3 = REG_RD(bp, BAR_TSTRORM_INTMEM +
593 TSTORM_ASSERT_LIST_OFFSET(i) + 12);
594
595 if (row0 != COMMON_ASM_INVALID_ASSERT_OPCODE) {
596 BNX2X_ERR("TSTORM_ASSERT_INDEX 0x%x = 0x%08x"
597 " 0x%08x 0x%08x 0x%08x\n",
598 i, row3, row2, row1, row0);
599 rc++;
600 } else {
601 break;
602 }
603 }
604
605 /* CSTORM */
606 last_idx = REG_RD8(bp, BAR_CSTRORM_INTMEM +
607 CSTORM_ASSERT_LIST_INDEX_OFFSET);
608 if (last_idx)
609 BNX2X_ERR("CSTORM_ASSERT_LIST_INDEX 0x%x\n", last_idx);
610
611 /* print the asserts */
612 for (i = 0; i < STROM_ASSERT_ARRAY_SIZE; i++) {
613
614 row0 = REG_RD(bp, BAR_CSTRORM_INTMEM +
615 CSTORM_ASSERT_LIST_OFFSET(i));
616 row1 = REG_RD(bp, BAR_CSTRORM_INTMEM +
617 CSTORM_ASSERT_LIST_OFFSET(i) + 4);
618 row2 = REG_RD(bp, BAR_CSTRORM_INTMEM +
619 CSTORM_ASSERT_LIST_OFFSET(i) + 8);
620 row3 = REG_RD(bp, BAR_CSTRORM_INTMEM +
621 CSTORM_ASSERT_LIST_OFFSET(i) + 12);
622
623 if (row0 != COMMON_ASM_INVALID_ASSERT_OPCODE) {
624 BNX2X_ERR("CSTORM_ASSERT_INDEX 0x%x = 0x%08x"
625 " 0x%08x 0x%08x 0x%08x\n",
626 i, row3, row2, row1, row0);
627 rc++;
628 } else {
629 break;
630 }
631 }
632
633 /* USTORM */
634 last_idx = REG_RD8(bp, BAR_USTRORM_INTMEM +
635 USTORM_ASSERT_LIST_INDEX_OFFSET);
636 if (last_idx)
637 BNX2X_ERR("USTORM_ASSERT_LIST_INDEX 0x%x\n", last_idx);
638
639 /* print the asserts */
640 for (i = 0; i < STROM_ASSERT_ARRAY_SIZE; i++) {
641
642 row0 = REG_RD(bp, BAR_USTRORM_INTMEM +
643 USTORM_ASSERT_LIST_OFFSET(i));
644 row1 = REG_RD(bp, BAR_USTRORM_INTMEM +
645 USTORM_ASSERT_LIST_OFFSET(i) + 4);
646 row2 = REG_RD(bp, BAR_USTRORM_INTMEM +
647 USTORM_ASSERT_LIST_OFFSET(i) + 8);
648 row3 = REG_RD(bp, BAR_USTRORM_INTMEM +
649 USTORM_ASSERT_LIST_OFFSET(i) + 12);
650
651 if (row0 != COMMON_ASM_INVALID_ASSERT_OPCODE) {
652 BNX2X_ERR("USTORM_ASSERT_INDEX 0x%x = 0x%08x"
653 " 0x%08x 0x%08x 0x%08x\n",
654 i, row3, row2, row1, row0);
655 rc++;
656 } else {
657 break;
658 }
659 }
660
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +0200661 return rc;
662}
Eliezer Tamirc14423f2008-02-28 11:49:42 -0800663
Dmitry Kravkov7a25cc72011-06-14 01:33:25 +0000664void bnx2x_fw_dump_lvl(struct bnx2x *bp, const char *lvl)
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +0200665{
Dmitry Kravkov7a25cc72011-06-14 01:33:25 +0000666 u32 addr, val;
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +0200667 u32 mark, offset;
Eilon Greenstein4781bfa2009-02-12 08:38:17 +0000668 __be32 data[9];
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +0200669 int word;
Dmitry Kravkovf2e08992010-10-06 03:28:26 +0000670 u32 trace_shmem_base;
Vladislav Zolotarov2145a922010-04-19 01:13:49 +0000671 if (BP_NOMCP(bp)) {
672 BNX2X_ERR("NO MCP - can not dump\n");
673 return;
674 }
Dmitry Kravkov7a25cc72011-06-14 01:33:25 +0000675 netdev_printk(lvl, bp->dev, "bc %d.%d.%d\n",
676 (bp->common.bc_ver & 0xff0000) >> 16,
677 (bp->common.bc_ver & 0xff00) >> 8,
678 (bp->common.bc_ver & 0xff));
679
680 val = REG_RD(bp, MCP_REG_MCPR_CPU_PROGRAM_COUNTER);
681 if (val == REG_RD(bp, MCP_REG_MCPR_CPU_PROGRAM_COUNTER))
682 printk("%s" "MCP PC at 0x%x\n", lvl, val);
Vladislav Zolotarovcdaa7cb2010-04-19 01:13:57 +0000683
Dmitry Kravkovf2e08992010-10-06 03:28:26 +0000684 if (BP_PATH(bp) == 0)
685 trace_shmem_base = bp->common.shmem_base;
686 else
687 trace_shmem_base = SHMEM2_RD(bp, other_shmem_base_addr);
688 addr = trace_shmem_base - 0x0800 + 4;
Vladislav Zolotarovcdaa7cb2010-04-19 01:13:57 +0000689 mark = REG_RD(bp, addr);
Dmitry Kravkovf2e08992010-10-06 03:28:26 +0000690 mark = (CHIP_IS_E1x(bp) ? MCP_REG_MCPR_SCRATCH : MCP_A_REG_MCPR_SCRATCH)
691 + ((mark + 0x3) & ~0x3) - 0x08000000;
Dmitry Kravkov7a25cc72011-06-14 01:33:25 +0000692 printk("%s" "begin fw dump (mark 0x%x)\n", lvl, mark);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +0200693
Dmitry Kravkov7a25cc72011-06-14 01:33:25 +0000694 printk("%s", lvl);
Dmitry Kravkovf2e08992010-10-06 03:28:26 +0000695 for (offset = mark; offset <= trace_shmem_base; offset += 0x8*4) {
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +0200696 for (word = 0; word < 8; word++)
Vladislav Zolotarovcdaa7cb2010-04-19 01:13:57 +0000697 data[word] = htonl(REG_RD(bp, offset + 4*word));
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +0200698 data[8] = 0x0;
Joe Perches7995c642010-02-17 15:01:52 +0000699 pr_cont("%s", (char *)data);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +0200700 }
Vladislav Zolotarovcdaa7cb2010-04-19 01:13:57 +0000701 for (offset = addr + 4; offset <= mark; offset += 0x8*4) {
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +0200702 for (word = 0; word < 8; word++)
Vladislav Zolotarovcdaa7cb2010-04-19 01:13:57 +0000703 data[word] = htonl(REG_RD(bp, offset + 4*word));
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +0200704 data[8] = 0x0;
Joe Perches7995c642010-02-17 15:01:52 +0000705 pr_cont("%s", (char *)data);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +0200706 }
Dmitry Kravkov7a25cc72011-06-14 01:33:25 +0000707 printk("%s" "end of fw dump\n", lvl);
708}
709
710static inline void bnx2x_fw_dump(struct bnx2x *bp)
711{
712 bnx2x_fw_dump_lvl(bp, KERN_ERR);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +0200713}
714
Dmitry Kravkov6c719d02010-07-27 12:36:15 +0000715void bnx2x_panic_dump(struct bnx2x *bp)
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +0200716{
717 int i;
Dmitry Kravkov523224a2010-10-06 03:23:26 +0000718 u16 j;
719 struct hc_sp_status_block_data sp_sb_data;
720 int func = BP_FUNC(bp);
721#ifdef BNX2X_STOP_ON_ERROR
722 u16 start = 0, end = 0;
723#endif
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +0200724
Yitchak Gertner66e855f2008-08-13 15:49:05 -0700725 bp->stats_state = STATS_STATE_DISABLED;
726 DP(BNX2X_MSG_STATS, "stats_state - DISABLED\n");
727
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +0200728 BNX2X_ERR("begin crash dump -----------------\n");
729
Eilon Greenstein8440d2b2009-02-12 08:38:22 +0000730 /* Indices */
731 /* Common */
Dmitry Kravkov523224a2010-10-06 03:23:26 +0000732 BNX2X_ERR("def_idx(0x%x) def_att_idx(0x%x) attn_state(0x%x)"
Vladislav Zolotarovcdaa7cb2010-04-19 01:13:57 +0000733 " spq_prod_idx(0x%x)\n",
Dmitry Kravkov523224a2010-10-06 03:23:26 +0000734 bp->def_idx, bp->def_att_idx,
735 bp->attn_state, bp->spq_prod_idx);
736 BNX2X_ERR("DSB: attn bits(0x%x) ack(0x%x) id(0x%x) idx(0x%x)\n",
737 bp->def_status_blk->atten_status_block.attn_bits,
738 bp->def_status_blk->atten_status_block.attn_bits_ack,
739 bp->def_status_blk->atten_status_block.status_block_id,
740 bp->def_status_blk->atten_status_block.attn_bits_index);
741 BNX2X_ERR(" def (");
742 for (i = 0; i < HC_SP_SB_MAX_INDICES; i++)
743 pr_cont("0x%x%s",
744 bp->def_status_blk->sp_sb.index_values[i],
745 (i == HC_SP_SB_MAX_INDICES - 1) ? ") " : " ");
Eilon Greenstein8440d2b2009-02-12 08:38:22 +0000746
Dmitry Kravkov523224a2010-10-06 03:23:26 +0000747 for (i = 0; i < sizeof(struct hc_sp_status_block_data)/sizeof(u32); i++)
748 *((u32 *)&sp_sb_data + i) = REG_RD(bp, BAR_CSTRORM_INTMEM +
749 CSTORM_SP_STATUS_BLOCK_DATA_OFFSET(func) +
750 i*sizeof(u32));
751
752 pr_cont("igu_sb_id(0x%x) igu_seg_id (0x%x) "
753 "pf_id(0x%x) vnic_id(0x%x) "
754 "vf_id(0x%x) vf_valid (0x%x)\n",
755 sp_sb_data.igu_sb_id,
756 sp_sb_data.igu_seg_id,
757 sp_sb_data.p_func.pf_id,
758 sp_sb_data.p_func.vnic_id,
759 sp_sb_data.p_func.vf_id,
760 sp_sb_data.p_func.vf_valid);
761
762
Vladislav Zolotarovec6ba942010-12-13 05:44:01 +0000763 for_each_eth_queue(bp, i) {
Eilon Greenstein8440d2b2009-02-12 08:38:22 +0000764 struct bnx2x_fastpath *fp = &bp->fp[i];
Dmitry Kravkov523224a2010-10-06 03:23:26 +0000765 int loop;
Dmitry Kravkovf2e08992010-10-06 03:28:26 +0000766 struct hc_status_block_data_e2 sb_data_e2;
Dmitry Kravkov523224a2010-10-06 03:23:26 +0000767 struct hc_status_block_data_e1x sb_data_e1x;
768 struct hc_status_block_sm *hc_sm_p =
Dmitry Kravkovf2e08992010-10-06 03:28:26 +0000769 CHIP_IS_E2(bp) ?
770 sb_data_e2.common.state_machine :
Dmitry Kravkov523224a2010-10-06 03:23:26 +0000771 sb_data_e1x.common.state_machine;
772 struct hc_index_data *hc_index_p =
Dmitry Kravkovf2e08992010-10-06 03:28:26 +0000773 CHIP_IS_E2(bp) ?
774 sb_data_e2.index_data :
Dmitry Kravkov523224a2010-10-06 03:23:26 +0000775 sb_data_e1x.index_data;
776 int data_size;
777 u32 *sb_data_p;
Eilon Greenstein8440d2b2009-02-12 08:38:22 +0000778
Dmitry Kravkov523224a2010-10-06 03:23:26 +0000779 /* Rx */
Vladislav Zolotarovcdaa7cb2010-04-19 01:13:57 +0000780 BNX2X_ERR("fp%d: rx_bd_prod(0x%x) rx_bd_cons(0x%x)"
Dmitry Kravkov523224a2010-10-06 03:23:26 +0000781 " rx_comp_prod(0x%x)"
Vladislav Zolotarovcdaa7cb2010-04-19 01:13:57 +0000782 " rx_comp_cons(0x%x) *rx_cons_sb(0x%x)\n",
Eilon Greenstein8440d2b2009-02-12 08:38:22 +0000783 i, fp->rx_bd_prod, fp->rx_bd_cons,
Dmitry Kravkov523224a2010-10-06 03:23:26 +0000784 fp->rx_comp_prod,
Eilon Greenstein8440d2b2009-02-12 08:38:22 +0000785 fp->rx_comp_cons, le16_to_cpu(*fp->rx_cons_sb));
Vladislav Zolotarovcdaa7cb2010-04-19 01:13:57 +0000786 BNX2X_ERR(" rx_sge_prod(0x%x) last_max_sge(0x%x)"
Dmitry Kravkov523224a2010-10-06 03:23:26 +0000787 " fp_hc_idx(0x%x)\n",
Eilon Greenstein8440d2b2009-02-12 08:38:22 +0000788 fp->rx_sge_prod, fp->last_max_sge,
Dmitry Kravkov523224a2010-10-06 03:23:26 +0000789 le16_to_cpu(fp->fp_hc_idx));
Eilon Greenstein8440d2b2009-02-12 08:38:22 +0000790
Dmitry Kravkov523224a2010-10-06 03:23:26 +0000791 /* Tx */
Vladislav Zolotarovcdaa7cb2010-04-19 01:13:57 +0000792 BNX2X_ERR("fp%d: tx_pkt_prod(0x%x) tx_pkt_cons(0x%x)"
793 " tx_bd_prod(0x%x) tx_bd_cons(0x%x)"
794 " *tx_cons_sb(0x%x)\n",
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +0200795 i, fp->tx_pkt_prod, fp->tx_pkt_cons, fp->tx_bd_prod,
Eilon Greenstein34f80b02008-06-23 20:33:01 -0700796 fp->tx_bd_cons, le16_to_cpu(*fp->tx_cons_sb));
Dmitry Kravkov523224a2010-10-06 03:23:26 +0000797
Dmitry Kravkovf2e08992010-10-06 03:28:26 +0000798 loop = CHIP_IS_E2(bp) ?
799 HC_SB_MAX_INDICES_E2 : HC_SB_MAX_INDICES_E1X;
Dmitry Kravkov523224a2010-10-06 03:23:26 +0000800
801 /* host sb data */
802
Vladislav Zolotarovec6ba942010-12-13 05:44:01 +0000803#ifdef BCM_CNIC
804 if (IS_FCOE_FP(fp))
805 continue;
806#endif
Dmitry Kravkov523224a2010-10-06 03:23:26 +0000807 BNX2X_ERR(" run indexes (");
808 for (j = 0; j < HC_SB_MAX_SM; j++)
809 pr_cont("0x%x%s",
810 fp->sb_running_index[j],
811 (j == HC_SB_MAX_SM - 1) ? ")" : " ");
812
813 BNX2X_ERR(" indexes (");
814 for (j = 0; j < loop; j++)
815 pr_cont("0x%x%s",
816 fp->sb_index_values[j],
817 (j == loop - 1) ? ")" : " ");
818 /* fw sb data */
Dmitry Kravkovf2e08992010-10-06 03:28:26 +0000819 data_size = CHIP_IS_E2(bp) ?
820 sizeof(struct hc_status_block_data_e2) :
Dmitry Kravkov523224a2010-10-06 03:23:26 +0000821 sizeof(struct hc_status_block_data_e1x);
822 data_size /= sizeof(u32);
Dmitry Kravkovf2e08992010-10-06 03:28:26 +0000823 sb_data_p = CHIP_IS_E2(bp) ?
824 (u32 *)&sb_data_e2 :
825 (u32 *)&sb_data_e1x;
Dmitry Kravkov523224a2010-10-06 03:23:26 +0000826 /* copy sb data in here */
827 for (j = 0; j < data_size; j++)
828 *(sb_data_p + j) = REG_RD(bp, BAR_CSTRORM_INTMEM +
829 CSTORM_STATUS_BLOCK_DATA_OFFSET(fp->fw_sb_id) +
830 j * sizeof(u32));
831
Dmitry Kravkovf2e08992010-10-06 03:28:26 +0000832 if (CHIP_IS_E2(bp)) {
833 pr_cont("pf_id(0x%x) vf_id (0x%x) vf_valid(0x%x) "
834 "vnic_id(0x%x) same_igu_sb_1b(0x%x)\n",
835 sb_data_e2.common.p_func.pf_id,
836 sb_data_e2.common.p_func.vf_id,
837 sb_data_e2.common.p_func.vf_valid,
838 sb_data_e2.common.p_func.vnic_id,
839 sb_data_e2.common.same_igu_sb_1b);
840 } else {
841 pr_cont("pf_id(0x%x) vf_id (0x%x) vf_valid(0x%x) "
842 "vnic_id(0x%x) same_igu_sb_1b(0x%x)\n",
843 sb_data_e1x.common.p_func.pf_id,
844 sb_data_e1x.common.p_func.vf_id,
845 sb_data_e1x.common.p_func.vf_valid,
846 sb_data_e1x.common.p_func.vnic_id,
847 sb_data_e1x.common.same_igu_sb_1b);
848 }
Dmitry Kravkov523224a2010-10-06 03:23:26 +0000849
850 /* SB_SMs data */
851 for (j = 0; j < HC_SB_MAX_SM; j++) {
852 pr_cont("SM[%d] __flags (0x%x) "
853 "igu_sb_id (0x%x) igu_seg_id(0x%x) "
854 "time_to_expire (0x%x) "
855 "timer_value(0x%x)\n", j,
856 hc_sm_p[j].__flags,
857 hc_sm_p[j].igu_sb_id,
858 hc_sm_p[j].igu_seg_id,
859 hc_sm_p[j].time_to_expire,
860 hc_sm_p[j].timer_value);
861 }
862
863 /* Indecies data */
864 for (j = 0; j < loop; j++) {
865 pr_cont("INDEX[%d] flags (0x%x) "
866 "timeout (0x%x)\n", j,
867 hc_index_p[j].flags,
868 hc_index_p[j].timeout);
869 }
Eilon Greenstein8440d2b2009-02-12 08:38:22 +0000870 }
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +0200871
Dmitry Kravkov523224a2010-10-06 03:23:26 +0000872#ifdef BNX2X_STOP_ON_ERROR
Eilon Greenstein8440d2b2009-02-12 08:38:22 +0000873 /* Rings */
874 /* Rx */
Vladislav Zolotarovec6ba942010-12-13 05:44:01 +0000875 for_each_rx_queue(bp, i) {
Eilon Greenstein8440d2b2009-02-12 08:38:22 +0000876 struct bnx2x_fastpath *fp = &bp->fp[i];
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +0200877
878 start = RX_BD(le16_to_cpu(*fp->rx_cons_sb) - 10);
879 end = RX_BD(le16_to_cpu(*fp->rx_cons_sb) + 503);
Eilon Greenstein8440d2b2009-02-12 08:38:22 +0000880 for (j = start; j != end; j = RX_BD(j + 1)) {
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +0200881 u32 *rx_bd = (u32 *)&fp->rx_desc_ring[j];
882 struct sw_rx_bd *sw_bd = &fp->rx_buf_ring[j];
883
Eilon Greensteinc3eefaf2009-03-02 08:01:09 +0000884 BNX2X_ERR("fp%d: rx_bd[%x]=[%x:%x] sw_bd=[%p]\n",
885 i, j, rx_bd[1], rx_bd[0], sw_bd->skb);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +0200886 }
887
Eilon Greenstein3196a882008-08-13 15:58:49 -0700888 start = RX_SGE(fp->rx_sge_prod);
889 end = RX_SGE(fp->last_max_sge);
Eilon Greenstein8440d2b2009-02-12 08:38:22 +0000890 for (j = start; j != end; j = RX_SGE(j + 1)) {
Vladislav Zolotarov7a9b2552008-06-23 20:34:36 -0700891 u32 *rx_sge = (u32 *)&fp->rx_sge_ring[j];
892 struct sw_rx_page *sw_page = &fp->rx_page_ring[j];
893
Eilon Greensteinc3eefaf2009-03-02 08:01:09 +0000894 BNX2X_ERR("fp%d: rx_sge[%x]=[%x:%x] sw_page=[%p]\n",
895 i, j, rx_sge[1], rx_sge[0], sw_page->page);
Vladislav Zolotarov7a9b2552008-06-23 20:34:36 -0700896 }
897
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +0200898 start = RCQ_BD(fp->rx_comp_cons - 10);
899 end = RCQ_BD(fp->rx_comp_cons + 503);
Eilon Greenstein8440d2b2009-02-12 08:38:22 +0000900 for (j = start; j != end; j = RCQ_BD(j + 1)) {
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +0200901 u32 *cqe = (u32 *)&fp->rx_comp_ring[j];
902
Eilon Greensteinc3eefaf2009-03-02 08:01:09 +0000903 BNX2X_ERR("fp%d: cqe[%x]=[%x:%x:%x:%x]\n",
904 i, j, cqe[0], cqe[1], cqe[2], cqe[3]);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +0200905 }
906 }
907
Eilon Greenstein8440d2b2009-02-12 08:38:22 +0000908 /* Tx */
Vladislav Zolotarovec6ba942010-12-13 05:44:01 +0000909 for_each_tx_queue(bp, i) {
Eilon Greenstein8440d2b2009-02-12 08:38:22 +0000910 struct bnx2x_fastpath *fp = &bp->fp[i];
911
912 start = TX_BD(le16_to_cpu(*fp->tx_cons_sb) - 10);
913 end = TX_BD(le16_to_cpu(*fp->tx_cons_sb) + 245);
914 for (j = start; j != end; j = TX_BD(j + 1)) {
915 struct sw_tx_bd *sw_bd = &fp->tx_buf_ring[j];
916
Eilon Greensteinc3eefaf2009-03-02 08:01:09 +0000917 BNX2X_ERR("fp%d: packet[%x]=[%p,%x]\n",
918 i, j, sw_bd->skb, sw_bd->first_bd);
Eilon Greenstein8440d2b2009-02-12 08:38:22 +0000919 }
920
921 start = TX_BD(fp->tx_bd_cons - 10);
922 end = TX_BD(fp->tx_bd_cons + 254);
923 for (j = start; j != end; j = TX_BD(j + 1)) {
924 u32 *tx_bd = (u32 *)&fp->tx_desc_ring[j];
925
Eilon Greensteinc3eefaf2009-03-02 08:01:09 +0000926 BNX2X_ERR("fp%d: tx_bd[%x]=[%x:%x:%x:%x]\n",
927 i, j, tx_bd[0], tx_bd[1], tx_bd[2], tx_bd[3]);
Eilon Greenstein8440d2b2009-02-12 08:38:22 +0000928 }
929 }
Dmitry Kravkov523224a2010-10-06 03:23:26 +0000930#endif
Eilon Greenstein34f80b02008-06-23 20:33:01 -0700931 bnx2x_fw_dump(bp);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +0200932 bnx2x_mc_assert(bp);
933 BNX2X_ERR("end crash dump -----------------\n");
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +0200934}
935
Dmitry Kravkovf2e08992010-10-06 03:28:26 +0000936static void bnx2x_hc_int_enable(struct bnx2x *bp)
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +0200937{
Eilon Greenstein34f80b02008-06-23 20:33:01 -0700938 int port = BP_PORT(bp);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +0200939 u32 addr = port ? HC_REG_CONFIG_1 : HC_REG_CONFIG_0;
940 u32 val = REG_RD(bp, addr);
941 int msix = (bp->flags & USING_MSIX_FLAG) ? 1 : 0;
Eilon Greenstein8badd272009-02-12 08:36:15 +0000942 int msi = (bp->flags & USING_MSI_FLAG) ? 1 : 0;
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +0200943
944 if (msix) {
Eilon Greenstein8badd272009-02-12 08:36:15 +0000945 val &= ~(HC_CONFIG_0_REG_SINGLE_ISR_EN_0 |
946 HC_CONFIG_0_REG_INT_LINE_EN_0);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +0200947 val |= (HC_CONFIG_0_REG_MSI_MSIX_INT_EN_0 |
948 HC_CONFIG_0_REG_ATTN_BIT_EN_0);
Eilon Greenstein8badd272009-02-12 08:36:15 +0000949 } else if (msi) {
950 val &= ~HC_CONFIG_0_REG_INT_LINE_EN_0;
951 val |= (HC_CONFIG_0_REG_SINGLE_ISR_EN_0 |
952 HC_CONFIG_0_REG_MSI_MSIX_INT_EN_0 |
953 HC_CONFIG_0_REG_ATTN_BIT_EN_0);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +0200954 } else {
955 val |= (HC_CONFIG_0_REG_SINGLE_ISR_EN_0 |
Eliezer Tamir615f8fd2008-02-28 11:54:54 -0800956 HC_CONFIG_0_REG_MSI_MSIX_INT_EN_0 |
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +0200957 HC_CONFIG_0_REG_INT_LINE_EN_0 |
958 HC_CONFIG_0_REG_ATTN_BIT_EN_0);
Eliezer Tamir615f8fd2008-02-28 11:54:54 -0800959
Dmitry Kravkova0fd0652010-10-19 05:13:05 +0000960 if (!CHIP_IS_E1(bp)) {
961 DP(NETIF_MSG_INTR, "write %x to HC %d (addr 0x%x)\n",
962 val, port, addr);
Eliezer Tamir615f8fd2008-02-28 11:54:54 -0800963
Dmitry Kravkova0fd0652010-10-19 05:13:05 +0000964 REG_WR(bp, addr, val);
Eliezer Tamir615f8fd2008-02-28 11:54:54 -0800965
Dmitry Kravkova0fd0652010-10-19 05:13:05 +0000966 val &= ~HC_CONFIG_0_REG_MSI_MSIX_INT_EN_0;
967 }
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +0200968 }
969
Dmitry Kravkova0fd0652010-10-19 05:13:05 +0000970 if (CHIP_IS_E1(bp))
971 REG_WR(bp, HC_REG_INT_MASK + port*4, 0x1FFFF);
972
Eilon Greenstein8badd272009-02-12 08:36:15 +0000973 DP(NETIF_MSG_INTR, "write %x to HC %d (addr 0x%x) mode %s\n",
974 val, port, addr, (msix ? "MSI-X" : (msi ? "MSI" : "INTx")));
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +0200975
976 REG_WR(bp, addr, val);
Eilon Greenstein37dbbf32009-07-21 05:47:33 +0000977 /*
978 * Ensure that HC_CONFIG is written before leading/trailing edge config
979 */
980 mmiowb();
981 barrier();
Eilon Greenstein34f80b02008-06-23 20:33:01 -0700982
Dmitry Kravkovf2e08992010-10-06 03:28:26 +0000983 if (!CHIP_IS_E1(bp)) {
Eilon Greenstein34f80b02008-06-23 20:33:01 -0700984 /* init leading/trailing edge */
Dmitry Kravkovfb3bff12010-10-06 03:26:40 +0000985 if (IS_MF(bp)) {
Eilon Greenstein8badd272009-02-12 08:36:15 +0000986 val = (0xee0f | (1 << (BP_E1HVN(bp) + 4)));
Eilon Greenstein34f80b02008-06-23 20:33:01 -0700987 if (bp->port.pmf)
Eilon Greenstein4acac6a2009-02-12 08:36:52 +0000988 /* enable nig and gpio3 attention */
989 val |= 0x1100;
Eilon Greenstein34f80b02008-06-23 20:33:01 -0700990 } else
991 val = 0xffff;
992
993 REG_WR(bp, HC_REG_TRAILING_EDGE_0 + port*8, val);
994 REG_WR(bp, HC_REG_LEADING_EDGE_0 + port*8, val);
995 }
Eilon Greenstein37dbbf32009-07-21 05:47:33 +0000996
997 /* Make sure that interrupts are indeed enabled from here on */
998 mmiowb();
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +0200999}
1000
Dmitry Kravkovf2e08992010-10-06 03:28:26 +00001001static void bnx2x_igu_int_enable(struct bnx2x *bp)
1002{
1003 u32 val;
1004 int msix = (bp->flags & USING_MSIX_FLAG) ? 1 : 0;
1005 int msi = (bp->flags & USING_MSI_FLAG) ? 1 : 0;
1006
1007 val = REG_RD(bp, IGU_REG_PF_CONFIGURATION);
1008
1009 if (msix) {
1010 val &= ~(IGU_PF_CONF_INT_LINE_EN |
1011 IGU_PF_CONF_SINGLE_ISR_EN);
1012 val |= (IGU_PF_CONF_FUNC_EN |
1013 IGU_PF_CONF_MSI_MSIX_EN |
1014 IGU_PF_CONF_ATTN_BIT_EN);
1015 } else if (msi) {
1016 val &= ~IGU_PF_CONF_INT_LINE_EN;
1017 val |= (IGU_PF_CONF_FUNC_EN |
1018 IGU_PF_CONF_MSI_MSIX_EN |
1019 IGU_PF_CONF_ATTN_BIT_EN |
1020 IGU_PF_CONF_SINGLE_ISR_EN);
1021 } else {
1022 val &= ~IGU_PF_CONF_MSI_MSIX_EN;
1023 val |= (IGU_PF_CONF_FUNC_EN |
1024 IGU_PF_CONF_INT_LINE_EN |
1025 IGU_PF_CONF_ATTN_BIT_EN |
1026 IGU_PF_CONF_SINGLE_ISR_EN);
1027 }
1028
1029 DP(NETIF_MSG_INTR, "write 0x%x to IGU mode %s\n",
1030 val, (msix ? "MSI-X" : (msi ? "MSI" : "INTx")));
1031
1032 REG_WR(bp, IGU_REG_PF_CONFIGURATION, val);
1033
1034 barrier();
1035
1036 /* init leading/trailing edge */
1037 if (IS_MF(bp)) {
1038 val = (0xee0f | (1 << (BP_E1HVN(bp) + 4)));
1039 if (bp->port.pmf)
1040 /* enable nig and gpio3 attention */
1041 val |= 0x1100;
1042 } else
1043 val = 0xffff;
1044
1045 REG_WR(bp, IGU_REG_TRAILING_EDGE_LATCH, val);
1046 REG_WR(bp, IGU_REG_LEADING_EDGE_LATCH, val);
1047
1048 /* Make sure that interrupts are indeed enabled from here on */
1049 mmiowb();
1050}
1051
1052void bnx2x_int_enable(struct bnx2x *bp)
1053{
1054 if (bp->common.int_block == INT_BLOCK_HC)
1055 bnx2x_hc_int_enable(bp);
1056 else
1057 bnx2x_igu_int_enable(bp);
1058}
1059
/*
 * Disable interrupts through the HC (Host Coalescing) block for this port.
 *
 * Clears the interrupt-enable bits in HC_REG_CONFIG_0/1 and reads the
 * register back to verify the write landed.  On E1 the MSI/MSI-X enable
 * bit must stay set (see comment below), so masking is done via
 * HC_REG_INT_MASK instead.
 */
static void bnx2x_hc_int_disable(struct bnx2x *bp)
{
	int port = BP_PORT(bp);
	u32 addr = port ? HC_REG_CONFIG_1 : HC_REG_CONFIG_0;
	u32 val = REG_RD(bp, addr);

	/*
	 * in E1 we must use only PCI configuration space to disable
	 * MSI/MSIX capability
	 * It's forbidden to disable IGU_PF_CONF_MSI_MSIX_EN in HC block
	 */
	if (CHIP_IS_E1(bp)) {
		/* Since IGU_PF_CONF_MSI_MSIX_EN still always on
		 * Use mask register to prevent from HC sending interrupts
		 * after we exit the function
		 */
		REG_WR(bp, HC_REG_INT_MASK + port*4, 0);

		val &= ~(HC_CONFIG_0_REG_SINGLE_ISR_EN_0 |
			 HC_CONFIG_0_REG_INT_LINE_EN_0 |
			 HC_CONFIG_0_REG_ATTN_BIT_EN_0);
	} else
		val &= ~(HC_CONFIG_0_REG_SINGLE_ISR_EN_0 |
			 HC_CONFIG_0_REG_MSI_MSIX_INT_EN_0 |
			 HC_CONFIG_0_REG_INT_LINE_EN_0 |
			 HC_CONFIG_0_REG_ATTN_BIT_EN_0);

	DP(NETIF_MSG_INTR, "write %x to HC %d (addr 0x%x)\n",
	   val, port, addr);

	/* flush all outstanding writes */
	mmiowb();

	REG_WR(bp, addr, val);
	/* read back to verify the disable took effect */
	if (REG_RD(bp, addr) != val)
		BNX2X_ERR("BUG! proper val not read from IGU!\n");
}
1097
Dmitry Kravkovf2e08992010-10-06 03:28:26 +00001098static void bnx2x_igu_int_disable(struct bnx2x *bp)
1099{
1100 u32 val = REG_RD(bp, IGU_REG_PF_CONFIGURATION);
1101
1102 val &= ~(IGU_PF_CONF_MSI_MSIX_EN |
1103 IGU_PF_CONF_INT_LINE_EN |
1104 IGU_PF_CONF_ATTN_BIT_EN);
1105
1106 DP(NETIF_MSG_INTR, "write %x to IGU\n", val);
1107
1108 /* flush all outstanding writes */
1109 mmiowb();
1110
1111 REG_WR(bp, IGU_REG_PF_CONFIGURATION, val);
1112 if (REG_RD(bp, IGU_REG_PF_CONFIGURATION) != val)
1113 BNX2X_ERR("BUG! proper val not read from IGU!\n");
1114}
1115
stephen hemminger8d962862010-10-21 07:50:56 +00001116static void bnx2x_int_disable(struct bnx2x *bp)
Dmitry Kravkovf2e08992010-10-06 03:28:26 +00001117{
1118 if (bp->common.int_block == INT_BLOCK_HC)
1119 bnx2x_hc_int_disable(bp);
1120 else
1121 bnx2x_igu_int_disable(bp);
1122}
1123
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00001124void bnx2x_int_disable_sync(struct bnx2x *bp, int disable_hw)
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02001125{
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02001126 int msix = (bp->flags & USING_MSIX_FLAG) ? 1 : 0;
Eilon Greenstein8badd272009-02-12 08:36:15 +00001127 int i, offset;
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02001128
Yitchak Gertnerf8ef6e42008-09-09 05:07:25 -07001129 if (disable_hw)
1130 /* prevent the HW from sending interrupts */
1131 bnx2x_int_disable(bp);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02001132
1133 /* make sure all ISRs are done */
1134 if (msix) {
Eilon Greenstein8badd272009-02-12 08:36:15 +00001135 synchronize_irq(bp->msix_table[0].vector);
1136 offset = 1;
Michael Chan37b091b2009-10-10 13:46:55 +00001137#ifdef BCM_CNIC
1138 offset++;
1139#endif
Vladislav Zolotarovec6ba942010-12-13 05:44:01 +00001140 for_each_eth_queue(bp, i)
Eilon Greenstein8badd272009-02-12 08:36:15 +00001141 synchronize_irq(bp->msix_table[i + offset].vector);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02001142 } else
1143 synchronize_irq(bp->pdev->irq);
1144
1145 /* make sure sp_task is not running */
Eilon Greenstein1cf167f2009-01-14 21:22:18 -08001146 cancel_delayed_work(&bp->sp_task);
1147 flush_workqueue(bnx2x_wq);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02001148}
1149
Eilon Greenstein34f80b02008-06-23 20:33:01 -07001150/* fast path */
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02001151
1152/*
Eilon Greenstein34f80b02008-06-23 20:33:01 -07001153 * General service functions
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02001154 */
1155
Vladislav Zolotarov72fd0712010-04-19 01:13:12 +00001156/* Return true if succeeded to acquire the lock */
1157static bool bnx2x_trylock_hw_lock(struct bnx2x *bp, u32 resource)
1158{
1159 u32 lock_status;
1160 u32 resource_bit = (1 << resource);
1161 int func = BP_FUNC(bp);
1162 u32 hw_lock_control_reg;
1163
1164 DP(NETIF_MSG_HW, "Trying to take a lock on resource %d\n", resource);
1165
1166 /* Validating that the resource is within range */
1167 if (resource > HW_LOCK_MAX_RESOURCE_VALUE) {
1168 DP(NETIF_MSG_HW,
1169 "resource(0x%x) > HW_LOCK_MAX_RESOURCE_VALUE(0x%x)\n",
1170 resource, HW_LOCK_MAX_RESOURCE_VALUE);
Eric Dumazet0fdf4d02010-08-26 22:03:53 -07001171 return false;
Vladislav Zolotarov72fd0712010-04-19 01:13:12 +00001172 }
1173
1174 if (func <= 5)
1175 hw_lock_control_reg = (MISC_REG_DRIVER_CONTROL_1 + func*8);
1176 else
1177 hw_lock_control_reg =
1178 (MISC_REG_DRIVER_CONTROL_7 + (func - 6)*8);
1179
1180 /* Try to acquire the lock */
1181 REG_WR(bp, hw_lock_control_reg + 4, resource_bit);
1182 lock_status = REG_RD(bp, hw_lock_control_reg);
1183 if (lock_status & resource_bit)
1184 return true;
1185
1186 DP(NETIF_MSG_HW, "Failed to get a lock on resource %d\n", resource);
1187 return false;
1188}
1189
Michael Chan993ac7b2009-10-10 13:46:56 +00001190#ifdef BCM_CNIC
1191static void bnx2x_cnic_cfc_comp(struct bnx2x *bp, int cid);
1192#endif
Eilon Greenstein3196a882008-08-13 15:58:49 -07001193
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00001194void bnx2x_sp_event(struct bnx2x_fastpath *fp,
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02001195 union eth_rx_cqe *rr_cqe)
1196{
1197 struct bnx2x *bp = fp->bp;
1198 int cid = SW_CID(rr_cqe->ramrod_cqe.conn_and_cmd_data);
1199 int command = CQE_CMD(rr_cqe->ramrod_cqe.conn_and_cmd_data);
1200
Eilon Greenstein34f80b02008-06-23 20:33:01 -07001201 DP(BNX2X_MSG_SP,
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02001202 "fp %d cid %d got ramrod #%d state is %x type is %d\n",
Eilon Greenstein0626b892009-02-12 08:38:14 +00001203 fp->index, cid, command, bp->state,
Eilon Greenstein34f80b02008-06-23 20:33:01 -07001204 rr_cqe->ramrod_cqe.ramrod_type);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02001205
Dmitry Kravkov523224a2010-10-06 03:23:26 +00001206 switch (command | fp->state) {
1207 case (RAMROD_CMD_ID_ETH_CLIENT_SETUP | BNX2X_FP_STATE_OPENING):
1208 DP(NETIF_MSG_IFUP, "got MULTI[%d] setup ramrod\n", cid);
1209 fp->state = BNX2X_FP_STATE_OPEN;
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02001210 break;
1211
Dmitry Kravkov523224a2010-10-06 03:23:26 +00001212 case (RAMROD_CMD_ID_ETH_HALT | BNX2X_FP_STATE_HALTING):
1213 DP(NETIF_MSG_IFDOWN, "got MULTI[%d] halt ramrod\n", cid);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02001214 fp->state = BNX2X_FP_STATE_HALTED;
1215 break;
1216
Dmitry Kravkov523224a2010-10-06 03:23:26 +00001217 case (RAMROD_CMD_ID_ETH_TERMINATE | BNX2X_FP_STATE_TERMINATING):
1218 DP(NETIF_MSG_IFDOWN, "got MULTI[%d] teminate ramrod\n", cid);
1219 fp->state = BNX2X_FP_STATE_TERMINATED;
Eliezer Tamir49d66772008-02-28 11:53:13 -08001220 break;
1221
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02001222 default:
Dmitry Kravkov523224a2010-10-06 03:23:26 +00001223 BNX2X_ERR("unexpected MC reply (%d) "
1224 "fp[%d] state is %x\n",
1225 command, fp->index, fp->state);
Eilon Greenstein34f80b02008-06-23 20:33:01 -07001226 break;
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02001227 }
Dmitry Kravkov523224a2010-10-06 03:23:26 +00001228
Dmitry Kravkov8fe23fb2010-10-06 03:27:41 +00001229 smp_mb__before_atomic_inc();
Vladislav Zolotarov6e30dd42011-02-06 11:25:41 -08001230 atomic_inc(&bp->cq_spq_left);
Dmitry Kravkov523224a2010-10-06 03:23:26 +00001231 /* push the change in fp->state and towards the memory */
1232 smp_wmb();
1233
1234 return;
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02001235}
1236
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00001237irqreturn_t bnx2x_interrupt(int irq, void *dev_instance)
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02001238{
Eilon Greenstein555f6c72009-02-12 08:36:11 +00001239 struct bnx2x *bp = netdev_priv(dev_instance);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02001240 u16 status = bnx2x_ack_int(bp);
Eilon Greenstein34f80b02008-06-23 20:33:01 -07001241 u16 mask;
Eilon Greensteinca003922009-08-12 22:53:28 -07001242 int i;
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02001243
Eilon Greenstein34f80b02008-06-23 20:33:01 -07001244 /* Return here if interrupt is shared and it's not for us */
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02001245 if (unlikely(status == 0)) {
1246 DP(NETIF_MSG_INTR, "not our interrupt!\n");
1247 return IRQ_NONE;
1248 }
Eilon Greensteinf5372252009-02-12 08:38:30 +00001249 DP(NETIF_MSG_INTR, "got an interrupt status 0x%x\n", status);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02001250
Eilon Greenstein3196a882008-08-13 15:58:49 -07001251#ifdef BNX2X_STOP_ON_ERROR
1252 if (unlikely(bp->panic))
1253 return IRQ_HANDLED;
1254#endif
1255
Vladislav Zolotarovec6ba942010-12-13 05:44:01 +00001256 for_each_eth_queue(bp, i) {
Eilon Greensteinca003922009-08-12 22:53:28 -07001257 struct bnx2x_fastpath *fp = &bp->fp[i];
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02001258
Dmitry Kravkov523224a2010-10-06 03:23:26 +00001259 mask = 0x2 << (fp->index + CNIC_CONTEXT_USE);
Eilon Greensteinca003922009-08-12 22:53:28 -07001260 if (status & mask) {
Vladislav Zolotarov54b9dda2009-11-16 06:05:58 +00001261 /* Handle Rx and Tx according to SB id */
1262 prefetch(fp->rx_cons_sb);
Vladislav Zolotarov54b9dda2009-11-16 06:05:58 +00001263 prefetch(fp->tx_cons_sb);
Dmitry Kravkov523224a2010-10-06 03:23:26 +00001264 prefetch(&fp->sb_running_index[SM_RX_ID]);
Vladislav Zolotarov54b9dda2009-11-16 06:05:58 +00001265 napi_schedule(&bnx2x_fp(bp, fp->index, napi));
Eilon Greensteinca003922009-08-12 22:53:28 -07001266 status &= ~mask;
1267 }
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02001268 }
1269
Michael Chan993ac7b2009-10-10 13:46:56 +00001270#ifdef BCM_CNIC
Dmitry Kravkov523224a2010-10-06 03:23:26 +00001271 mask = 0x2;
Michael Chan993ac7b2009-10-10 13:46:56 +00001272 if (status & (mask | 0x1)) {
1273 struct cnic_ops *c_ops = NULL;
1274
1275 rcu_read_lock();
1276 c_ops = rcu_dereference(bp->cnic_ops);
1277 if (c_ops)
1278 c_ops->cnic_handler(bp->cnic_data, NULL);
1279 rcu_read_unlock();
1280
1281 status &= ~mask;
1282 }
1283#endif
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02001284
Eilon Greenstein34f80b02008-06-23 20:33:01 -07001285 if (unlikely(status & 0x1)) {
Eilon Greenstein1cf167f2009-01-14 21:22:18 -08001286 queue_delayed_work(bnx2x_wq, &bp->sp_task, 0);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02001287
1288 status &= ~0x1;
1289 if (!status)
1290 return IRQ_HANDLED;
1291 }
1292
Vladislav Zolotarovcdaa7cb2010-04-19 01:13:57 +00001293 if (unlikely(status))
1294 DP(NETIF_MSG_INTR, "got an unknown interrupt! (status 0x%x)\n",
Eilon Greenstein34f80b02008-06-23 20:33:01 -07001295 status);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02001296
1297 return IRQ_HANDLED;
1298}
1299
1300/* end of fast path */
1301
Yaniv Rosnerc18487e2008-06-23 20:27:52 -07001302
1303/* Link */
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02001304
1305/*
1306 * General service functions
1307 */
1308
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00001309int bnx2x_acquire_hw_lock(struct bnx2x *bp, u32 resource)
Eliezer Tamirf1410642008-02-28 11:51:50 -08001310{
Eliezer Tamirf1410642008-02-28 11:51:50 -08001311 u32 lock_status;
1312 u32 resource_bit = (1 << resource);
Yitchak Gertner4a37fb62008-08-13 15:50:23 -07001313 int func = BP_FUNC(bp);
1314 u32 hw_lock_control_reg;
Yaniv Rosnerc18487e2008-06-23 20:27:52 -07001315 int cnt;
Eliezer Tamirf1410642008-02-28 11:51:50 -08001316
1317 /* Validating that the resource is within range */
1318 if (resource > HW_LOCK_MAX_RESOURCE_VALUE) {
1319 DP(NETIF_MSG_HW,
1320 "resource(0x%x) > HW_LOCK_MAX_RESOURCE_VALUE(0x%x)\n",
1321 resource, HW_LOCK_MAX_RESOURCE_VALUE);
1322 return -EINVAL;
1323 }
1324
Yitchak Gertner4a37fb62008-08-13 15:50:23 -07001325 if (func <= 5) {
1326 hw_lock_control_reg = (MISC_REG_DRIVER_CONTROL_1 + func*8);
1327 } else {
1328 hw_lock_control_reg =
1329 (MISC_REG_DRIVER_CONTROL_7 + (func - 6)*8);
1330 }
1331
Eliezer Tamirf1410642008-02-28 11:51:50 -08001332 /* Validating that the resource is not already taken */
Yitchak Gertner4a37fb62008-08-13 15:50:23 -07001333 lock_status = REG_RD(bp, hw_lock_control_reg);
Eliezer Tamirf1410642008-02-28 11:51:50 -08001334 if (lock_status & resource_bit) {
1335 DP(NETIF_MSG_HW, "lock_status 0x%x resource_bit 0x%x\n",
1336 lock_status, resource_bit);
1337 return -EEXIST;
1338 }
1339
Eilon Greenstein46230476b2008-08-25 15:23:30 -07001340 /* Try for 5 second every 5ms */
1341 for (cnt = 0; cnt < 1000; cnt++) {
Eliezer Tamirf1410642008-02-28 11:51:50 -08001342 /* Try to acquire the lock */
Yitchak Gertner4a37fb62008-08-13 15:50:23 -07001343 REG_WR(bp, hw_lock_control_reg + 4, resource_bit);
1344 lock_status = REG_RD(bp, hw_lock_control_reg);
Eliezer Tamirf1410642008-02-28 11:51:50 -08001345 if (lock_status & resource_bit)
1346 return 0;
1347
1348 msleep(5);
1349 }
1350 DP(NETIF_MSG_HW, "Timeout\n");
1351 return -EAGAIN;
1352}
1353
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00001354int bnx2x_release_hw_lock(struct bnx2x *bp, u32 resource)
Eliezer Tamirf1410642008-02-28 11:51:50 -08001355{
1356 u32 lock_status;
1357 u32 resource_bit = (1 << resource);
Yitchak Gertner4a37fb62008-08-13 15:50:23 -07001358 int func = BP_FUNC(bp);
1359 u32 hw_lock_control_reg;
Eliezer Tamirf1410642008-02-28 11:51:50 -08001360
Vladislav Zolotarov72fd0712010-04-19 01:13:12 +00001361 DP(NETIF_MSG_HW, "Releasing a lock on resource %d\n", resource);
1362
Eliezer Tamirf1410642008-02-28 11:51:50 -08001363 /* Validating that the resource is within range */
1364 if (resource > HW_LOCK_MAX_RESOURCE_VALUE) {
1365 DP(NETIF_MSG_HW,
1366 "resource(0x%x) > HW_LOCK_MAX_RESOURCE_VALUE(0x%x)\n",
1367 resource, HW_LOCK_MAX_RESOURCE_VALUE);
1368 return -EINVAL;
1369 }
1370
Yitchak Gertner4a37fb62008-08-13 15:50:23 -07001371 if (func <= 5) {
1372 hw_lock_control_reg = (MISC_REG_DRIVER_CONTROL_1 + func*8);
1373 } else {
1374 hw_lock_control_reg =
1375 (MISC_REG_DRIVER_CONTROL_7 + (func - 6)*8);
1376 }
1377
Eliezer Tamirf1410642008-02-28 11:51:50 -08001378 /* Validating that the resource is currently taken */
Yitchak Gertner4a37fb62008-08-13 15:50:23 -07001379 lock_status = REG_RD(bp, hw_lock_control_reg);
Eliezer Tamirf1410642008-02-28 11:51:50 -08001380 if (!(lock_status & resource_bit)) {
1381 DP(NETIF_MSG_HW, "lock_status 0x%x resource_bit 0x%x\n",
1382 lock_status, resource_bit);
1383 return -EFAULT;
1384 }
1385
Yitchak Gertner4a37fb62008-08-13 15:50:23 -07001386 REG_WR(bp, hw_lock_control_reg, resource_bit);
Eliezer Tamirf1410642008-02-28 11:51:50 -08001387 return 0;
1388}
1389
Yaniv Rosnerc18487e2008-06-23 20:27:52 -07001390
Eilon Greenstein4acac6a2009-02-12 08:36:52 +00001391int bnx2x_get_gpio(struct bnx2x *bp, int gpio_num, u8 port)
1392{
1393 /* The GPIO should be swapped if swap register is set and active */
1394 int gpio_port = (REG_RD(bp, NIG_REG_PORT_SWAP) &&
1395 REG_RD(bp, NIG_REG_STRAP_OVERRIDE)) ^ port;
1396 int gpio_shift = gpio_num +
1397 (gpio_port ? MISC_REGISTERS_GPIO_PORT_SHIFT : 0);
1398 u32 gpio_mask = (1 << gpio_shift);
1399 u32 gpio_reg;
1400 int value;
1401
1402 if (gpio_num > MISC_REGISTERS_GPIO_3) {
1403 BNX2X_ERR("Invalid GPIO %d\n", gpio_num);
1404 return -EINVAL;
1405 }
1406
1407 /* read GPIO value */
1408 gpio_reg = REG_RD(bp, MISC_REG_GPIO);
1409
1410 /* get the requested pin value */
1411 if ((gpio_reg & gpio_mask) == gpio_mask)
1412 value = 1;
1413 else
1414 value = 0;
1415
1416 DP(NETIF_MSG_LINK, "pin %d value 0x%x\n", gpio_num, value);
1417
1418 return value;
1419}
1420
Eilon Greenstein17de50b2008-08-13 15:56:59 -07001421int bnx2x_set_gpio(struct bnx2x *bp, int gpio_num, u32 mode, u8 port)
Eliezer Tamirf1410642008-02-28 11:51:50 -08001422{
1423 /* The GPIO should be swapped if swap register is set and active */
1424 int gpio_port = (REG_RD(bp, NIG_REG_PORT_SWAP) &&
Eilon Greenstein17de50b2008-08-13 15:56:59 -07001425 REG_RD(bp, NIG_REG_STRAP_OVERRIDE)) ^ port;
Eliezer Tamirf1410642008-02-28 11:51:50 -08001426 int gpio_shift = gpio_num +
1427 (gpio_port ? MISC_REGISTERS_GPIO_PORT_SHIFT : 0);
1428 u32 gpio_mask = (1 << gpio_shift);
1429 u32 gpio_reg;
1430
1431 if (gpio_num > MISC_REGISTERS_GPIO_3) {
1432 BNX2X_ERR("Invalid GPIO %d\n", gpio_num);
1433 return -EINVAL;
1434 }
1435
Yitchak Gertner4a37fb62008-08-13 15:50:23 -07001436 bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_GPIO);
Eliezer Tamirf1410642008-02-28 11:51:50 -08001437 /* read GPIO and mask except the float bits */
1438 gpio_reg = (REG_RD(bp, MISC_REG_GPIO) & MISC_REGISTERS_GPIO_FLOAT);
1439
1440 switch (mode) {
1441 case MISC_REGISTERS_GPIO_OUTPUT_LOW:
1442 DP(NETIF_MSG_LINK, "Set GPIO %d (shift %d) -> output low\n",
1443 gpio_num, gpio_shift);
1444 /* clear FLOAT and set CLR */
1445 gpio_reg &= ~(gpio_mask << MISC_REGISTERS_GPIO_FLOAT_POS);
1446 gpio_reg |= (gpio_mask << MISC_REGISTERS_GPIO_CLR_POS);
1447 break;
1448
1449 case MISC_REGISTERS_GPIO_OUTPUT_HIGH:
1450 DP(NETIF_MSG_LINK, "Set GPIO %d (shift %d) -> output high\n",
1451 gpio_num, gpio_shift);
1452 /* clear FLOAT and set SET */
1453 gpio_reg &= ~(gpio_mask << MISC_REGISTERS_GPIO_FLOAT_POS);
1454 gpio_reg |= (gpio_mask << MISC_REGISTERS_GPIO_SET_POS);
1455 break;
1456
Eilon Greenstein17de50b2008-08-13 15:56:59 -07001457 case MISC_REGISTERS_GPIO_INPUT_HI_Z:
Eliezer Tamirf1410642008-02-28 11:51:50 -08001458 DP(NETIF_MSG_LINK, "Set GPIO %d (shift %d) -> input\n",
1459 gpio_num, gpio_shift);
1460 /* set FLOAT */
1461 gpio_reg |= (gpio_mask << MISC_REGISTERS_GPIO_FLOAT_POS);
1462 break;
1463
1464 default:
1465 break;
1466 }
1467
1468 REG_WR(bp, MISC_REG_GPIO, gpio_reg);
Yitchak Gertner4a37fb62008-08-13 15:50:23 -07001469 bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_GPIO);
Eliezer Tamirf1410642008-02-28 11:51:50 -08001470
1471 return 0;
1472}
1473
Eilon Greenstein4acac6a2009-02-12 08:36:52 +00001474int bnx2x_set_gpio_int(struct bnx2x *bp, int gpio_num, u32 mode, u8 port)
1475{
1476 /* The GPIO should be swapped if swap register is set and active */
1477 int gpio_port = (REG_RD(bp, NIG_REG_PORT_SWAP) &&
1478 REG_RD(bp, NIG_REG_STRAP_OVERRIDE)) ^ port;
1479 int gpio_shift = gpio_num +
1480 (gpio_port ? MISC_REGISTERS_GPIO_PORT_SHIFT : 0);
1481 u32 gpio_mask = (1 << gpio_shift);
1482 u32 gpio_reg;
1483
1484 if (gpio_num > MISC_REGISTERS_GPIO_3) {
1485 BNX2X_ERR("Invalid GPIO %d\n", gpio_num);
1486 return -EINVAL;
1487 }
1488
1489 bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_GPIO);
1490 /* read GPIO int */
1491 gpio_reg = REG_RD(bp, MISC_REG_GPIO_INT);
1492
1493 switch (mode) {
1494 case MISC_REGISTERS_GPIO_INT_OUTPUT_CLR:
1495 DP(NETIF_MSG_LINK, "Clear GPIO INT %d (shift %d) -> "
1496 "output low\n", gpio_num, gpio_shift);
1497 /* clear SET and set CLR */
1498 gpio_reg &= ~(gpio_mask << MISC_REGISTERS_GPIO_INT_SET_POS);
1499 gpio_reg |= (gpio_mask << MISC_REGISTERS_GPIO_INT_CLR_POS);
1500 break;
1501
1502 case MISC_REGISTERS_GPIO_INT_OUTPUT_SET:
1503 DP(NETIF_MSG_LINK, "Set GPIO INT %d (shift %d) -> "
1504 "output high\n", gpio_num, gpio_shift);
1505 /* clear CLR and set SET */
1506 gpio_reg &= ~(gpio_mask << MISC_REGISTERS_GPIO_INT_CLR_POS);
1507 gpio_reg |= (gpio_mask << MISC_REGISTERS_GPIO_INT_SET_POS);
1508 break;
1509
1510 default:
1511 break;
1512 }
1513
1514 REG_WR(bp, MISC_REG_GPIO_INT, gpio_reg);
1515 bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_GPIO);
1516
1517 return 0;
1518}
1519
Eliezer Tamirf1410642008-02-28 11:51:50 -08001520static int bnx2x_set_spio(struct bnx2x *bp, int spio_num, u32 mode)
1521{
1522 u32 spio_mask = (1 << spio_num);
1523 u32 spio_reg;
1524
1525 if ((spio_num < MISC_REGISTERS_SPIO_4) ||
1526 (spio_num > MISC_REGISTERS_SPIO_7)) {
1527 BNX2X_ERR("Invalid SPIO %d\n", spio_num);
1528 return -EINVAL;
1529 }
1530
Yitchak Gertner4a37fb62008-08-13 15:50:23 -07001531 bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_SPIO);
Eliezer Tamirf1410642008-02-28 11:51:50 -08001532 /* read SPIO and mask except the float bits */
1533 spio_reg = (REG_RD(bp, MISC_REG_SPIO) & MISC_REGISTERS_SPIO_FLOAT);
1534
1535 switch (mode) {
Eilon Greenstein6378c022008-08-13 15:59:25 -07001536 case MISC_REGISTERS_SPIO_OUTPUT_LOW:
Eliezer Tamirf1410642008-02-28 11:51:50 -08001537 DP(NETIF_MSG_LINK, "Set SPIO %d -> output low\n", spio_num);
1538 /* clear FLOAT and set CLR */
1539 spio_reg &= ~(spio_mask << MISC_REGISTERS_SPIO_FLOAT_POS);
1540 spio_reg |= (spio_mask << MISC_REGISTERS_SPIO_CLR_POS);
1541 break;
1542
Eilon Greenstein6378c022008-08-13 15:59:25 -07001543 case MISC_REGISTERS_SPIO_OUTPUT_HIGH:
Eliezer Tamirf1410642008-02-28 11:51:50 -08001544 DP(NETIF_MSG_LINK, "Set SPIO %d -> output high\n", spio_num);
1545 /* clear FLOAT and set SET */
1546 spio_reg &= ~(spio_mask << MISC_REGISTERS_SPIO_FLOAT_POS);
1547 spio_reg |= (spio_mask << MISC_REGISTERS_SPIO_SET_POS);
1548 break;
1549
1550 case MISC_REGISTERS_SPIO_INPUT_HI_Z:
1551 DP(NETIF_MSG_LINK, "Set SPIO %d -> input\n", spio_num);
1552 /* set FLOAT */
1553 spio_reg |= (spio_mask << MISC_REGISTERS_SPIO_FLOAT_POS);
1554 break;
1555
1556 default:
1557 break;
1558 }
1559
1560 REG_WR(bp, MISC_REG_SPIO, spio_reg);
Yitchak Gertner4a37fb62008-08-13 15:50:23 -07001561 bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_SPIO);
Eliezer Tamirf1410642008-02-28 11:51:50 -08001562
1563 return 0;
1564}
1565
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00001566void bnx2x_calc_fc_adv(struct bnx2x *bp)
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02001567{
Yaniv Rosnera22f0782010-09-07 11:41:20 +00001568 u8 cfg_idx = bnx2x_get_link_cfg_idx(bp);
Eilon Greensteinad33ea32009-01-14 21:24:57 -08001569 switch (bp->link_vars.ieee_fc &
1570 MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_MASK) {
Yaniv Rosnerc18487e2008-06-23 20:27:52 -07001571 case MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_NONE:
Yaniv Rosnera22f0782010-09-07 11:41:20 +00001572 bp->port.advertising[cfg_idx] &= ~(ADVERTISED_Asym_Pause |
Dmitry Kravkovf85582f2010-10-06 03:34:21 +00001573 ADVERTISED_Pause);
Eliezer Tamirf1410642008-02-28 11:51:50 -08001574 break;
Eilon Greenstein356e2382009-02-12 08:38:32 +00001575
Yaniv Rosnerc18487e2008-06-23 20:27:52 -07001576 case MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_BOTH:
Yaniv Rosnera22f0782010-09-07 11:41:20 +00001577 bp->port.advertising[cfg_idx] |= (ADVERTISED_Asym_Pause |
Dmitry Kravkovf85582f2010-10-06 03:34:21 +00001578 ADVERTISED_Pause);
Eliezer Tamirf1410642008-02-28 11:51:50 -08001579 break;
Eilon Greenstein356e2382009-02-12 08:38:32 +00001580
Yaniv Rosnerc18487e2008-06-23 20:27:52 -07001581 case MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_ASYMMETRIC:
Yaniv Rosnera22f0782010-09-07 11:41:20 +00001582 bp->port.advertising[cfg_idx] |= ADVERTISED_Asym_Pause;
Eliezer Tamirf1410642008-02-28 11:51:50 -08001583 break;
Eilon Greenstein356e2382009-02-12 08:38:32 +00001584
Eliezer Tamirf1410642008-02-28 11:51:50 -08001585 default:
Yaniv Rosnera22f0782010-09-07 11:41:20 +00001586 bp->port.advertising[cfg_idx] &= ~(ADVERTISED_Asym_Pause |
Dmitry Kravkovf85582f2010-10-06 03:34:21 +00001587 ADVERTISED_Pause);
Eliezer Tamirf1410642008-02-28 11:51:50 -08001588 break;
1589 }
1590}
1591
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00001592u8 bnx2x_initial_phy_init(struct bnx2x *bp, int load_mode)
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02001593{
Eilon Greenstein19680c42008-08-13 15:47:33 -07001594 if (!BP_NOMCP(bp)) {
1595 u8 rc;
Yaniv Rosnera22f0782010-09-07 11:41:20 +00001596 int cfx_idx = bnx2x_get_link_cfg_idx(bp);
1597 u16 req_line_speed = bp->link_params.req_line_speed[cfx_idx];
Eilon Greenstein19680c42008-08-13 15:47:33 -07001598 /* Initialize link parameters structure variables */
Yaniv Rosner8c99e7b2008-08-13 15:56:17 -07001599 /* It is recommended to turn off RX FC for jumbo frames
1600 for better performance */
Dmitry Kravkovf2e08992010-10-06 03:28:26 +00001601 if ((CHIP_IS_E1x(bp)) && (bp->dev->mtu > 5000))
David S. Millerc0700f92008-12-16 23:53:20 -08001602 bp->link_params.req_fc_auto_adv = BNX2X_FLOW_CTRL_TX;
Yaniv Rosner8c99e7b2008-08-13 15:56:17 -07001603 else
David S. Millerc0700f92008-12-16 23:53:20 -08001604 bp->link_params.req_fc_auto_adv = BNX2X_FLOW_CTRL_BOTH;
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02001605
Yitchak Gertner4a37fb62008-08-13 15:50:23 -07001606 bnx2x_acquire_phy_lock(bp);
Eilon Greensteinb5bf9062009-02-12 08:38:08 +00001607
Yaniv Rosnera22f0782010-09-07 11:41:20 +00001608 if (load_mode == LOAD_DIAG) {
Yaniv Rosnerde6eae12010-09-07 11:41:13 +00001609 bp->link_params.loopback_mode = LOOPBACK_XGXS;
Yaniv Rosnera22f0782010-09-07 11:41:20 +00001610 bp->link_params.req_line_speed[cfx_idx] = SPEED_10000;
1611 }
Eilon Greensteinb5bf9062009-02-12 08:38:08 +00001612
Eilon Greenstein19680c42008-08-13 15:47:33 -07001613 rc = bnx2x_phy_init(&bp->link_params, &bp->link_vars);
Eilon Greensteinb5bf9062009-02-12 08:38:08 +00001614
Yitchak Gertner4a37fb62008-08-13 15:50:23 -07001615 bnx2x_release_phy_lock(bp);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02001616
Eilon Greenstein3c96c682009-01-14 21:25:31 -08001617 bnx2x_calc_fc_adv(bp);
1618
Eilon Greensteinb5bf9062009-02-12 08:38:08 +00001619 if (CHIP_REV_IS_SLOW(bp) && bp->link_vars.link_up) {
1620 bnx2x_stats_handle(bp, STATS_EVENT_LINK_UP);
Eilon Greenstein19680c42008-08-13 15:47:33 -07001621 bnx2x_link_report(bp);
Eilon Greensteinb5bf9062009-02-12 08:38:08 +00001622 }
Yaniv Rosnera22f0782010-09-07 11:41:20 +00001623 bp->link_params.req_line_speed[cfx_idx] = req_line_speed;
Eilon Greenstein19680c42008-08-13 15:47:33 -07001624 return rc;
1625 }
Eilon Greensteinf5372252009-02-12 08:38:30 +00001626 BNX2X_ERR("Bootcode is missing - can not initialize link\n");
Eilon Greenstein19680c42008-08-13 15:47:33 -07001627 return -EINVAL;
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02001628}
1629
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00001630void bnx2x_link_set(struct bnx2x *bp)
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02001631{
Eilon Greenstein19680c42008-08-13 15:47:33 -07001632 if (!BP_NOMCP(bp)) {
Yitchak Gertner4a37fb62008-08-13 15:50:23 -07001633 bnx2x_acquire_phy_lock(bp);
Yaniv Rosner54c2fb72010-09-01 09:51:23 +00001634 bnx2x_link_reset(&bp->link_params, &bp->link_vars, 1);
Eilon Greenstein19680c42008-08-13 15:47:33 -07001635 bnx2x_phy_init(&bp->link_params, &bp->link_vars);
Yitchak Gertner4a37fb62008-08-13 15:50:23 -07001636 bnx2x_release_phy_lock(bp);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02001637
Eilon Greenstein19680c42008-08-13 15:47:33 -07001638 bnx2x_calc_fc_adv(bp);
1639 } else
Eilon Greensteinf5372252009-02-12 08:38:30 +00001640 BNX2X_ERR("Bootcode is missing - can not set link\n");
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02001641}
1642
Yaniv Rosnerc18487e2008-06-23 20:27:52 -07001643static void bnx2x__link_reset(struct bnx2x *bp)
1644{
Eilon Greenstein19680c42008-08-13 15:47:33 -07001645 if (!BP_NOMCP(bp)) {
Yitchak Gertner4a37fb62008-08-13 15:50:23 -07001646 bnx2x_acquire_phy_lock(bp);
Eilon Greenstein589abe32009-02-12 08:36:55 +00001647 bnx2x_link_reset(&bp->link_params, &bp->link_vars, 1);
Yitchak Gertner4a37fb62008-08-13 15:50:23 -07001648 bnx2x_release_phy_lock(bp);
Eilon Greenstein19680c42008-08-13 15:47:33 -07001649 } else
Eilon Greensteinf5372252009-02-12 08:38:30 +00001650 BNX2X_ERR("Bootcode is missing - can not reset link\n");
Yaniv Rosnerc18487e2008-06-23 20:27:52 -07001651}
1652
Yaniv Rosnera22f0782010-09-07 11:41:20 +00001653u8 bnx2x_link_test(struct bnx2x *bp, u8 is_serdes)
Yaniv Rosnerc18487e2008-06-23 20:27:52 -07001654{
Vladislav Zolotarov2145a922010-04-19 01:13:49 +00001655 u8 rc = 0;
Yaniv Rosnerc18487e2008-06-23 20:27:52 -07001656
Vladislav Zolotarov2145a922010-04-19 01:13:49 +00001657 if (!BP_NOMCP(bp)) {
1658 bnx2x_acquire_phy_lock(bp);
Yaniv Rosnera22f0782010-09-07 11:41:20 +00001659 rc = bnx2x_test_link(&bp->link_params, &bp->link_vars,
1660 is_serdes);
Vladislav Zolotarov2145a922010-04-19 01:13:49 +00001661 bnx2x_release_phy_lock(bp);
1662 } else
1663 BNX2X_ERR("Bootcode is missing - can not test link\n");
Yaniv Rosnerc18487e2008-06-23 20:27:52 -07001664
1665 return rc;
1666}
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02001667
Eilon Greenstein8a1c38d2009-02-12 08:36:40 +00001668static void bnx2x_init_port_minmax(struct bnx2x *bp)
Eilon Greenstein34f80b02008-06-23 20:33:01 -07001669{
Eilon Greenstein8a1c38d2009-02-12 08:36:40 +00001670 u32 r_param = bp->link_vars.line_speed / 8;
1671 u32 fair_periodic_timeout_usec;
1672 u32 t_fair;
Eilon Greenstein34f80b02008-06-23 20:33:01 -07001673
Eilon Greenstein8a1c38d2009-02-12 08:36:40 +00001674 memset(&(bp->cmng.rs_vars), 0,
1675 sizeof(struct rate_shaping_vars_per_port));
1676 memset(&(bp->cmng.fair_vars), 0, sizeof(struct fairness_vars_per_port));
Eilon Greenstein34f80b02008-06-23 20:33:01 -07001677
Eilon Greenstein8a1c38d2009-02-12 08:36:40 +00001678 /* 100 usec in SDM ticks = 25 since each tick is 4 usec */
1679 bp->cmng.rs_vars.rs_periodic_timeout = RS_PERIODIC_TIMEOUT_USEC / 4;
Eilon Greenstein34f80b02008-06-23 20:33:01 -07001680
Eilon Greenstein8a1c38d2009-02-12 08:36:40 +00001681 /* this is the threshold below which no timer arming will occur
1682 1.25 coefficient is for the threshold to be a little bigger
1683 than the real time, to compensate for timer in-accuracy */
1684 bp->cmng.rs_vars.rs_threshold =
Eilon Greenstein34f80b02008-06-23 20:33:01 -07001685 (RS_PERIODIC_TIMEOUT_USEC * r_param * 5) / 4;
1686
Eilon Greenstein8a1c38d2009-02-12 08:36:40 +00001687 /* resolution of fairness timer */
1688 fair_periodic_timeout_usec = QM_ARB_BYTES / r_param;
1689 /* for 10G it is 1000usec. for 1G it is 10000usec. */
1690 t_fair = T_FAIR_COEF / bp->link_vars.line_speed;
Eilon Greenstein34f80b02008-06-23 20:33:01 -07001691
Eilon Greenstein8a1c38d2009-02-12 08:36:40 +00001692 /* this is the threshold below which we won't arm the timer anymore */
1693 bp->cmng.fair_vars.fair_threshold = QM_ARB_BYTES;
Eilon Greenstein34f80b02008-06-23 20:33:01 -07001694
Eilon Greenstein8a1c38d2009-02-12 08:36:40 +00001695 /* we multiply by 1e3/8 to get bytes/msec.
1696 We don't want the credits to pass a credit
1697 of the t_fair*FAIR_MEM (algorithm resolution) */
1698 bp->cmng.fair_vars.upper_bound = r_param * t_fair * FAIR_MEM;
1699 /* since each tick is 4 usec */
1700 bp->cmng.fair_vars.fairness_timeout = fair_periodic_timeout_usec / 4;
Eilon Greenstein34f80b02008-06-23 20:33:01 -07001701}
1702
/* Calculates the sum of vn_min_rates.
   It's needed for further normalizing of the min_rates.
   Returns:
     sum of vn_min_rates.
       or
     0 - if all the min_rates are 0.
   In the latter case the fairness algorithm should be deactivated.
   If not all min_rates are zero then those that are zeroes will be
   set to the default minimum.
 */
static void bnx2x_calc_vn_weight_sum(struct bnx2x *bp)
{
	int all_zero = 1;
	int vn;

	bp->vn_weight_sum = 0;
	for (vn = VN_0; vn < E1HVN_MAX; vn++) {
		u32 vn_cfg = bp->mf_config[vn];
		/* min bandwidth is stored in 100Mb units in the MF config */
		u32 vn_min_rate = ((vn_cfg & FUNC_MF_CFG_MIN_BW_MASK) >>
				   FUNC_MF_CFG_MIN_BW_SHIFT) * 100;

		/* Skip hidden vns */
		if (vn_cfg & FUNC_MF_CFG_FUNC_HIDE)
			continue;

		/* If min rate is zero - raise it to the default minimum;
		   the fairness algorithm requires non-zero weights */
		if (!vn_min_rate)
			vn_min_rate = DEF_MIN_RATE;
		else
			all_zero = 0;

		bp->vn_weight_sum += vn_min_rate;
	}

	/* if ETS or all min rates are zeros - disable fairness */
	if (BNX2X_IS_ETS_ENABLED(bp)) {
		bp->cmng.flags.cmng_enables &=
					~CMNG_FLAGS_PER_PORT_FAIRNESS_VN;
		DP(NETIF_MSG_IFUP, "Fairness will be disabled due to ETS\n");
	} else if (all_zero) {
		bp->cmng.flags.cmng_enables &=
					~CMNG_FLAGS_PER_PORT_FAIRNESS_VN;
		DP(NETIF_MSG_IFUP, "All MIN values are zeroes"
				   " fairness will be disabled\n");
	} else
		bp->cmng.flags.cmng_enables |=
					CMNG_FLAGS_PER_PORT_FAIRNESS_VN;
}
1750
/*
 * bnx2x_init_vn_minmax - program per-VN min/max bandwidth into the
 * XSTORM internal memory.
 *
 * Derives the VN's min and max rates from its MF configuration
 * (zero for hidden functions), builds the rate-shaping and fairness
 * structures and writes them word-by-word to storm memory.
 * Assumes bnx2x_calc_vn_weight_sum() already ran (vn_weight_sum and
 * fair_threshold are read here).
 */
static void bnx2x_init_vn_minmax(struct bnx2x *bp, int vn)
{
	struct rate_shaping_vars_per_vn m_rs_vn;
	struct fairness_vars_per_vn m_fair_vn;
	u32 vn_cfg = bp->mf_config[vn];
	/* absolute function number for the storm-memory offsets */
	int func = 2*vn + BP_PORT(bp);
	u16 vn_min_rate, vn_max_rate;
	int i;

	/* If function is hidden - set min and max to zeroes */
	if (vn_cfg & FUNC_MF_CFG_FUNC_HIDE) {
		vn_min_rate = 0;
		vn_max_rate = 0;

	} else {
		u32 maxCfg = bnx2x_extract_max_cfg(bp, vn_cfg);

		/* min bandwidth is stored in 100Mb units */
		vn_min_rate = ((vn_cfg & FUNC_MF_CFG_MIN_BW_MASK) >>
				FUNC_MF_CFG_MIN_BW_SHIFT) * 100;
		/* If fairness is enabled (not all min rates are zeroes) and
		   if current min rate is zero - set it to 1.
		   This is a requirement of the algorithm. */
		if (bp->vn_weight_sum && (vn_min_rate == 0))
			vn_min_rate = DEF_MIN_RATE;

		if (IS_MF_SI(bp))
			/* maxCfg in percents of linkspeed */
			vn_max_rate = (bp->link_vars.line_speed * maxCfg) / 100;
		else
			/* maxCfg is absolute in 100Mb units */
			vn_max_rate = maxCfg * 100;
	}

	DP(NETIF_MSG_IFUP,
	   "func %d: vn_min_rate %d vn_max_rate %d vn_weight_sum %d\n",
	   func, vn_min_rate, vn_max_rate, bp->vn_weight_sum);

	memset(&m_rs_vn, 0, sizeof(struct rate_shaping_vars_per_vn));
	memset(&m_fair_vn, 0, sizeof(struct fairness_vars_per_vn));

	/* global vn counter - maximal Mbps for this vn */
	m_rs_vn.vn_counter.rate = vn_max_rate;

	/* quota - number of bytes transmitted in this period */
	m_rs_vn.vn_counter.quota =
		(vn_max_rate * RS_PERIODIC_TIMEOUT_USEC) / 8;

	/* fairness credit only when fairness is active (non-zero sum) */
	if (bp->vn_weight_sum) {
		/* credit for each period of the fairness algorithm:
		   number of bytes in T_FAIR (the vn share the port rate).
		   vn_weight_sum should not be larger than 10000, thus
		   T_FAIR_COEF / (8 * vn_weight_sum) will always be greater
		   than zero */
		m_fair_vn.vn_credit_delta =
			max_t(u32, (vn_min_rate * (T_FAIR_COEF /
						   (8 * bp->vn_weight_sum))),
			      (bp->cmng.fair_vars.fair_threshold +
							MIN_ABOVE_THRESH));
		DP(NETIF_MSG_IFUP, "m_fair_vn.vn_credit_delta %d\n",
		   m_fair_vn.vn_credit_delta);
	}

	/* Store it to internal memory, one 32-bit word at a time */
	for (i = 0; i < sizeof(struct rate_shaping_vars_per_vn)/4; i++)
		REG_WR(bp, BAR_XSTRORM_INTMEM +
		       XSTORM_RATE_SHAPING_PER_VN_VARS_OFFSET(func) + i * 4,
		       ((u32 *)(&m_rs_vn))[i]);

	for (i = 0; i < sizeof(struct fairness_vars_per_vn)/4; i++)
		REG_WR(bp, BAR_XSTRORM_INTMEM +
		       XSTORM_FAIRNESS_PER_VN_VARS_OFFSET(func) + i * 4,
		       ((u32 *)(&m_fair_vn))[i]);
}
Dmitry Kravkovf85582f2010-10-06 03:34:21 +00001824
Dmitry Kravkov523224a2010-10-06 03:23:26 +00001825static int bnx2x_get_cmng_fns_mode(struct bnx2x *bp)
1826{
1827 if (CHIP_REV_IS_SLOW(bp))
1828 return CMNG_FNS_NONE;
Dmitry Kravkovfb3bff12010-10-06 03:26:40 +00001829 if (IS_MF(bp))
Dmitry Kravkov523224a2010-10-06 03:23:26 +00001830 return CMNG_FNS_MINMAX;
Eilon Greenstein34f80b02008-06-23 20:33:01 -07001831
Dmitry Kravkov523224a2010-10-06 03:23:26 +00001832 return CMNG_FNS_NONE;
1833}
1834
/*
 * bnx2x_read_mf_cfg - cache the per-VN multi-function configuration.
 *
 * Copies func_mf_config[].config from shared memory into
 * bp->mf_config[vn] for every VN.  Does nothing when the bootcode
 * (MCP) is absent.
 */
void bnx2x_read_mf_cfg(struct bnx2x *bp)
{
	/* n doubles the per-VN stride on 4-port devices */
	int vn, n = (CHIP_MODE_IS_4_PORT(bp) ? 2 : 1);

	if (BP_NOMCP(bp))
		return; /* what should be the default value in this case */

	/* For 2 port configuration the absolute function number formula
	 * is:
	 * abs_func = 2 * vn + BP_PORT + BP_PATH
	 *
	 * and there are 4 functions per port
	 *
	 * For 4 port configuration it is
	 * abs_func = 4 * vn + 2 * BP_PORT + BP_PATH
	 *
	 * and there are 2 functions per port
	 */
	for (vn = VN_0; vn < E1HVN_MAX; vn++) {
		int /*abs*/func = n * (2 * vn + BP_PORT(bp)) + BP_PATH(bp);

		if (func >= E1H_FUNC_MAX)
			break;

		bp->mf_config[vn] =
			MF_CFG_RD(bp, func_mf_config[func].config);
	}
}
1863
1864static void bnx2x_cmng_fns_init(struct bnx2x *bp, u8 read_cfg, u8 cmng_type)
1865{
1866
1867 if (cmng_type == CMNG_FNS_MINMAX) {
1868 int vn;
1869
1870 /* clear cmng_enables */
1871 bp->cmng.flags.cmng_enables = 0;
1872
1873 /* read mf conf from shmem */
1874 if (read_cfg)
1875 bnx2x_read_mf_cfg(bp);
1876
1877 /* Init rate shaping and fairness contexts */
1878 bnx2x_init_port_minmax(bp);
1879
1880 /* vn_weight_sum and enable fairness if not 0 */
1881 bnx2x_calc_vn_weight_sum(bp);
1882
1883 /* calculate and set min-max rate for each vn */
Dmitry Kravkovc4154f22011-03-06 10:49:25 +00001884 if (bp->port.pmf)
1885 for (vn = VN_0; vn < E1HVN_MAX; vn++)
1886 bnx2x_init_vn_minmax(bp, vn);
Dmitry Kravkov523224a2010-10-06 03:23:26 +00001887
1888 /* always enable rate shaping and fairness */
1889 bp->cmng.flags.cmng_enables |=
1890 CMNG_FLAGS_PER_PORT_RATE_SHAPING_VN;
1891 if (!bp->vn_weight_sum)
1892 DP(NETIF_MSG_IFUP, "All MIN values are zeroes"
1893 " fairness will be disabled\n");
1894 return;
1895 }
1896
1897 /* rate shaping and fairness are disabled */
1898 DP(NETIF_MSG_IFUP,
1899 "rate shaping and fairness are disabled\n");
1900}
1901
1902static inline void bnx2x_link_sync_notify(struct bnx2x *bp)
1903{
1904 int port = BP_PORT(bp);
1905 int func;
1906 int vn;
1907
1908 /* Set the attention towards other drivers on the same port */
1909 for (vn = VN_0; vn < E1HVN_MAX; vn++) {
1910 if (vn == BP_E1HVN(bp))
1911 continue;
1912
1913 func = ((vn << 1) | port);
1914 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_0 +
1915 (LINK_SYNC_ATTENTION_BIT_FUNC_0 + func)*4, 1);
1916 }
1917}
Eilon Greenstein8a1c38d2009-02-12 08:36:40 +00001918
/* This function is called upon link interrupt: refresh the link state,
 * reprogram dropless flow control and congestion management for the new
 * speed, report the link and notify sibling VN drivers.
 */
static void bnx2x_link_attn(struct bnx2x *bp)
{
	/* Make sure that we are synced with the current statistics */
	bnx2x_stats_handle(bp, STATS_EVENT_STOP);

	bnx2x_link_update(&bp->link_params, &bp->link_vars);

	if (bp->link_vars.link_up) {

		/* dropless flow control: tell the ustorm FW whether TX
		 * pause is active on this port (not supported on E1)
		 */
		if (!CHIP_IS_E1(bp) && bp->dropless_fc) {
			int port = BP_PORT(bp);
			u32 pause_enabled = 0;

			if (bp->link_vars.flow_ctrl & BNX2X_FLOW_CTRL_TX)
				pause_enabled = 1;

			REG_WR(bp, BAR_USTRORM_INTMEM +
			       USTORM_ETH_PAUSE_ENABLED_OFFSET(port),
			       pause_enabled);
		}

		if (bp->link_vars.mac_type == MAC_TYPE_BMAC) {
			struct host_port_stats *pstats;

			pstats = bnx2x_sp(bp, port_stats);
			/* reset old bmac stats */
			memset(&(pstats->mac_stx[0]), 0,
			       sizeof(struct mac_stx));
		}
		if (bp->state == BNX2X_STATE_OPEN)
			bnx2x_stats_handle(bp, STATS_EVENT_LINK_UP);
	}

	/* reprogram rate shaping/fairness; the non-zero line_speed check
	 * also guards the divisions inside bnx2x_init_port_minmax()
	 */
	if (bp->link_vars.link_up && bp->link_vars.line_speed) {
		int cmng_fns = bnx2x_get_cmng_fns_mode(bp);

		if (cmng_fns != CMNG_FNS_NONE) {
			bnx2x_cmng_fns_init(bp, false, cmng_fns);
			storm_memset_cmng(bp, &bp->cmng, BP_PORT(bp));
		} else
			/* rate shaping and fairness are disabled */
			DP(NETIF_MSG_IFUP,
			   "single function mode without fairness\n");
	}

	__bnx2x_link_report(bp);

	if (IS_MF(bp))
		bnx2x_link_sync_notify(bp);
}
1971
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00001972void bnx2x__link_status_update(struct bnx2x *bp)
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02001973{
Vladislav Zolotarov2ae17f62011-05-04 23:48:23 +00001974 if (bp->state != BNX2X_STATE_OPEN)
Yaniv Rosnerc18487e2008-06-23 20:27:52 -07001975 return;
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02001976
Yaniv Rosnerc18487e2008-06-23 20:27:52 -07001977 bnx2x_link_status_update(&bp->link_params, &bp->link_vars);
1978
Yitchak Gertnerbb2a0f72008-06-23 20:33:36 -07001979 if (bp->link_vars.link_up)
1980 bnx2x_stats_handle(bp, STATS_EVENT_LINK_UP);
1981 else
1982 bnx2x_stats_handle(bp, STATS_EVENT_STOP);
1983
Yaniv Rosnerc18487e2008-06-23 20:27:52 -07001984 /* indicate link status */
1985 bnx2x_link_report(bp);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02001986}
1987
Eilon Greenstein34f80b02008-06-23 20:33:01 -07001988static void bnx2x_pmf_update(struct bnx2x *bp)
1989{
1990 int port = BP_PORT(bp);
1991 u32 val;
1992
1993 bp->port.pmf = 1;
1994 DP(NETIF_MSG_LINK, "pmf %d\n", bp->port.pmf);
1995
1996 /* enable nig attention */
1997 val = (0xff0f | (1 << (BP_E1HVN(bp) + 4)));
Dmitry Kravkovf2e08992010-10-06 03:28:26 +00001998 if (bp->common.int_block == INT_BLOCK_HC) {
1999 REG_WR(bp, HC_REG_TRAILING_EDGE_0 + port*8, val);
2000 REG_WR(bp, HC_REG_LEADING_EDGE_0 + port*8, val);
2001 } else if (CHIP_IS_E2(bp)) {
2002 REG_WR(bp, IGU_REG_TRAILING_EDGE_LATCH, val);
2003 REG_WR(bp, IGU_REG_LEADING_EDGE_LATCH, val);
2004 }
Yitchak Gertnerbb2a0f72008-06-23 20:33:36 -07002005
2006 bnx2x_stats_handle(bp, STATS_EVENT_PMF);
Eilon Greenstein34f80b02008-06-23 20:33:01 -07002007}
2008
Yaniv Rosnerc18487e2008-06-23 20:27:52 -07002009/* end of Link */
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02002010
2011/* slow path */
2012
2013/*
2014 * General service functions
2015 */
2016
/* send the MCP a request, block until there is a reply */
/*
 * bnx2x_fw_command - synchronous mailbox exchange with the bootcode.
 *
 * Writes @param and (@command | seq) to the driver mailbox, then polls
 * the firmware mailbox until its sequence number matches ours (up to
 * 500 iterations of @delay ms each).  Serialized by fw_mb_mutex.
 *
 * Returns the firmware reply masked with FW_MSG_CODE_MASK, or 0 if the
 * firmware never answered.
 */
u32 bnx2x_fw_command(struct bnx2x *bp, u32 command, u32 param)
{
	int mb_idx = BP_FW_MB_IDX(bp);
	u32 seq;
	u32 rc = 0;
	u32 cnt = 1;
	/* emulation/FPGA ("slow") chips need a longer poll interval */
	u8 delay = CHIP_REV_IS_SLOW(bp) ? 100 : 10;

	mutex_lock(&bp->fw_mb_mutex);
	/* a fresh sequence number distinguishes this exchange */
	seq = ++bp->fw_seq;
	SHMEM_WR(bp, func_mb[mb_idx].drv_mb_param, param);
	SHMEM_WR(bp, func_mb[mb_idx].drv_mb_header, (command | seq));

	DP(BNX2X_MSG_MCP, "wrote command (%x) to FW MB\n", (command | seq));

	do {
		/* let the FW do it's magic ... */
		msleep(delay);

		rc = SHMEM_RD(bp, func_mb[mb_idx].fw_mb_header);

		/* Give the FW up to 5 second (500*10ms) */
	} while ((seq != (rc & FW_MSG_SEQ_NUMBER_MASK)) && (cnt++ < 500));

	DP(BNX2X_MSG_MCP, "[after %d ms] read (%x) seq is (%x) from FW MB\n",
	   cnt*delay, rc, seq);

	/* is this a reply to our command? */
	if (seq == (rc & FW_MSG_SEQ_NUMBER_MASK))
		rc &= FW_MSG_CODE_MASK;
	else {
		/* FW BUG! */
		BNX2X_ERR("FW failed to respond!\n");
		bnx2x_fw_dump(bp);
		rc = 0;
	}
	mutex_unlock(&bp->fw_mb_mutex);

	return rc;
}
2058
Vladislav Zolotarovec6ba942010-12-13 05:44:01 +00002059static u8 stat_counter_valid(struct bnx2x *bp, struct bnx2x_fastpath *fp)
2060{
2061#ifdef BCM_CNIC
2062 if (IS_FCOE_FP(fp) && IS_MF(bp))
2063 return false;
2064#endif
2065 return true;
2066}
2067
Dmitry Kravkov523224a2010-10-06 03:23:26 +00002068static inline u16 bnx2x_get_cl_flags(struct bnx2x *bp,
2069 struct bnx2x_fastpath *fp)
2070{
2071 u16 flags = 0;
2072
2073 /* calculate queue flags */
2074 flags |= QUEUE_FLG_CACHE_ALIGN;
2075 flags |= QUEUE_FLG_HC;
Dmitry Kravkov0793f83f2010-12-01 12:39:28 -08002076 flags |= IS_MF_SD(bp) ? QUEUE_FLG_OV : 0;
Dmitry Kravkov523224a2010-10-06 03:23:26 +00002077
Dmitry Kravkov523224a2010-10-06 03:23:26 +00002078 flags |= QUEUE_FLG_VLAN;
2079 DP(NETIF_MSG_IFUP, "vlan removal enabled\n");
Dmitry Kravkov523224a2010-10-06 03:23:26 +00002080
2081 if (!fp->disable_tpa)
2082 flags |= QUEUE_FLG_TPA;
2083
Vladislav Zolotarovec6ba942010-12-13 05:44:01 +00002084 flags = stat_counter_valid(bp, fp) ?
2085 (flags | QUEUE_FLG_STATS) : (flags & ~QUEUE_FLG_STATS);
Dmitry Kravkov523224a2010-10-06 03:23:26 +00002086
2087 return flags;
2088}
2089
/*
 * bnx2x_pf_rx_cl_prep - fill the RX queue and pause parameters for a
 * fastpath client before sending the queue-setup ramrod.
 *
 * @pause: output, pause thresholds for the queue.
 * @rxq_init: output, RX queue init parameters (rings, sizes, ids).
 */
static void bnx2x_pf_rx_cl_prep(struct bnx2x *bp,
	struct bnx2x_fastpath *fp, struct rxq_pause_params *pause,
	struct bnx2x_rxq_init_params *rxq_init)
{
	u16 max_sge = 0;
	u16 sge_sz = 0;
	u16 tpa_agg_size = 0;

	/* calculate queue flags */
	u16 flags = bnx2x_get_cl_flags(bp, fp);

	/* TPA: size the SGE ring and aggregation limits from the MTU */
	if (!fp->disable_tpa) {
		pause->sge_th_hi = 250;
		pause->sge_th_lo = 150;
		tpa_agg_size = min_t(u32,
			(min_t(u32, 8, MAX_SKB_FRAGS) *
			SGE_PAGE_SIZE * PAGES_PER_SGE), 0xffff);
		/* pages needed for one max-MTU frame, rounded up to a
		   whole number of SGE page groups */
		max_sge = SGE_PAGE_ALIGN(bp->dev->mtu) >>
			SGE_PAGE_SHIFT;
		max_sge = ((max_sge + PAGES_PER_SGE - 1) &
			(~(PAGES_PER_SGE-1))) >> PAGES_PER_SGE_SHIFT;
		sge_sz = (u16)min_t(u32, SGE_PAGE_SIZE * PAGES_PER_SGE,
			0xffff);
	}

	/* pause - not for e1 */
	/* NOTE(review): this branch zeroes sge_th_hi/lo that the TPA
	 * branch above may have just set on non-E1 chips - confirm this
	 * ordering is intentional before relying on those thresholds.
	 */
	if (!CHIP_IS_E1(bp)) {
		pause->bd_th_hi = 350;
		pause->bd_th_lo = 250;
		pause->rcq_th_hi = 350;
		pause->rcq_th_lo = 250;
		pause->sge_th_hi = 0;
		pause->sge_th_lo = 0;
		pause->pri_map = 1;
	}

	/* rxq setup */
	rxq_init->flags = flags;
	rxq_init->cxt = &bp->context.vcxt[fp->cid].eth;
	rxq_init->dscr_map = fp->rx_desc_mapping;
	rxq_init->sge_map = fp->rx_sge_mapping;
	rxq_init->rcq_map = fp->rx_comp_mapping;
	/* next-page portion of the RCQ sits one page after its base */
	rxq_init->rcq_np_map = fp->rx_comp_mapping + BCM_PAGE_SIZE;

	/* Always use mini-jumbo MTU for FCoE L2 ring */
	if (IS_FCOE_FP(fp))
		rxq_init->mtu = BNX2X_FCOE_MINI_JUMBO_MTU;
	else
		rxq_init->mtu = bp->dev->mtu;

	rxq_init->buf_sz = fp->rx_buf_size;
	rxq_init->cl_qzone_id = fp->cl_qzone_id;
	rxq_init->cl_id = fp->cl_id;
	rxq_init->spcl_id = fp->cl_id;
	rxq_init->stat_id = fp->cl_id;
	rxq_init->tpa_agg_sz = tpa_agg_size;
	rxq_init->sge_buf_sz = sge_sz;
	rxq_init->max_sges_pkt = max_sge;
	rxq_init->cache_line_log = BNX2X_RX_ALIGN_SHIFT;
	rxq_init->fw_sb_id = fp->fw_sb_id;

	/* FCoE uses the dedicated slow-path RX CQ consumer index */
	if (IS_FCOE_FP(fp))
		rxq_init->sb_cq_index = HC_SP_INDEX_ETH_FCOE_RX_CQ_CONS;
	else
		rxq_init->sb_cq_index = U_SB_ETH_RX_CQ_INDEX;

	rxq_init->cid = HW_CID(bp, fp->cid);

	/* interrupt coalescing rate: ticks are in usec, 0 disables */
	rxq_init->hc_rate = bp->rx_ticks ? (1000000 / bp->rx_ticks) : 0;
}
2160
2161static void bnx2x_pf_tx_cl_prep(struct bnx2x *bp,
2162 struct bnx2x_fastpath *fp, struct bnx2x_txq_init_params *txq_init)
2163{
2164 u16 flags = bnx2x_get_cl_flags(bp, fp);
2165
2166 txq_init->flags = flags;
2167 txq_init->cxt = &bp->context.vcxt[fp->cid].eth;
2168 txq_init->dscr_map = fp->tx_desc_mapping;
2169 txq_init->stat_id = fp->cl_id;
2170 txq_init->cid = HW_CID(bp, fp->cid);
2171 txq_init->sb_cq_index = C_SB_ETH_TX_CQ_INDEX;
2172 txq_init->traffic_type = LLFC_TRAFFIC_TYPE_NW;
2173 txq_init->fw_sb_id = fp->fw_sb_id;
Vladislav Zolotarovec6ba942010-12-13 05:44:01 +00002174
2175 if (IS_FCOE_FP(fp)) {
2176 txq_init->sb_cq_index = HC_SP_INDEX_ETH_FCOE_TX_CQ_CONS;
2177 txq_init->traffic_type = LLFC_TRAFFIC_TYPE_FCOE;
2178 }
2179
Dmitry Kravkov523224a2010-10-06 03:23:26 +00002180 txq_init->hc_rate = bp->tx_ticks ? (1000000 / bp->tx_ticks) : 0;
2181}
2182
/* One-time per-PF firmware/HW initialization: OV setup, IGU statistics
 * reset (E2), function-level flags (stats/leading/SPQ/TPA), RSS params,
 * congestion-management defaults and event-queue registration.
 * NOTE(review): register-write order here mirrors HW init requirements —
 * do not reorder.
 */
static void bnx2x_pf_init(struct bnx2x *bp)
{
	struct bnx2x_func_init_params func_init = {0};
	struct bnx2x_rss_params rss = {0};
	struct event_ring_data eq_data = { {0} };
	u16 flags;

	/* pf specific setups */
	if (!CHIP_IS_E1(bp))
		storm_memset_ov(bp, bp->mf_ov, BP_FUNC(bp));

	if (CHIP_IS_E2(bp)) {
		/* reset IGU PF statistics: MSIX + ATTN */
		/* PF */
		REG_WR(bp, IGU_REG_STATISTIC_NUM_MESSAGE_SENT +
			   BNX2X_IGU_STAS_MSG_VF_CNT*4 +
			   (CHIP_MODE_IS_4_PORT(bp) ?
				BP_FUNC(bp) : BP_VN(bp))*4, 0);
		/* ATTN */
		REG_WR(bp, IGU_REG_STATISTIC_NUM_MESSAGE_SENT +
			   BNX2X_IGU_STAS_MSG_VF_CNT*4 +
			   BNX2X_IGU_STAS_MSG_PF_CNT*4 +
			   (CHIP_MODE_IS_4_PORT(bp) ?
				BP_FUNC(bp) : BP_VN(bp))*4, 0);
	}

	/* function setup flags */
	flags = (FUNC_FLG_STATS | FUNC_FLG_LEADING | FUNC_FLG_SPQ);

	/* On E1x TPA follows the driver flag; newer chips always enable it */
	if (CHIP_IS_E1x(bp))
		flags |= (bp->flags & TPA_ENABLE_FLAG) ? FUNC_FLG_TPA : 0;
	else
		flags |= FUNC_FLG_TPA;

	/* function setup */

	/**
	 * Although RSS is meaningless when there is a single HW queue we
	 * still need it enabled in order to have HW Rx hash generated.
	 */
	rss.cap = (RSS_IPV4_CAP | RSS_IPV4_TCP_CAP |
		   RSS_IPV6_CAP | RSS_IPV6_TCP_CAP);
	rss.mode = bp->multi_mode;
	rss.result_mask = MULTI_MASK;
	func_init.rss = &rss;

	func_init.func_flgs = flags;
	func_init.pf_id = BP_FUNC(bp);
	func_init.func_id = BP_FUNC(bp);
	func_init.fw_stat_map = bnx2x_sp_mapping(bp, fw_stats);
	func_init.spq_map = bp->spq_mapping;
	func_init.spq_prod = bp->spq_prod_idx;

	bnx2x_func_init(bp, &func_init);

	memset(&(bp->cmng), 0, sizeof(struct cmng_struct_per_port));

	/*
	Congestion management values depend on the link rate
	There is no active link so initial link rate is set to 10 Gbps.
	When the link comes up The congestion management values are
	re-calculated according to the actual link rate.
	*/
	bp->link_vars.line_speed = SPEED_10000;
	bnx2x_cmng_fns_init(bp, true, bnx2x_get_cmng_fns_mode(bp));

	/* Only the PMF sets the HW */
	if (bp->port.pmf)
		storm_memset_cmng(bp, &bp->cmng, BP_PORT(bp));

	/* no rx until link is up */
	bp->rx_mode = BNX2X_RX_MODE_NONE;
	bnx2x_set_storm_rx_mode(bp);

	/* init Event Queue */
	eq_data.base_addr.hi = U64_HI(bp->eq_mapping);
	eq_data.base_addr.lo = U64_LO(bp->eq_mapping);
	eq_data.producer = bp->eq_prod;
	eq_data.index_id = HC_SP_INDEX_EQ_CONS;
	eq_data.sb_id = DEF_SB_ID;
	storm_memset_eq_data(bp, &eq_data, BP_FUNC(bp));
}
2265
2266
/* Quiesce an E1H function: stop the Tx queues, then disable this port's
 * LLH in the NIG so no more traffic reaches the function, and drop the
 * carrier indication.
 */
static void bnx2x_e1h_disable(struct bnx2x *bp)
{
	int port = BP_PORT(bp);

	netif_tx_disable(bp->dev);

	/* Disable this function's entry in the NIG LLH (per-port, stride 8) */
	REG_WR(bp, NIG_REG_LLH0_FUNC_EN + port*8, 0);

	netif_carrier_off(bp->dev);
}
2277
/* Re-enable an E1H function previously stopped by bnx2x_e1h_disable():
 * re-open the NIG LLH for this port and wake the Tx queues.
 */
static void bnx2x_e1h_enable(struct bnx2x *bp)
{
	int port = BP_PORT(bp);

	REG_WR(bp, NIG_REG_LLH0_FUNC_EN + port*8, 1);

	/* Tx queue should be only reenabled */
	netif_tx_wake_all_queues(bp->dev);

	/*
	 * Should not call netif_carrier_on since it will be called if the link
	 * is up when checking for link state
	 */
}
2292
/* called due to MCP event (on pmf):
 * reread new bandwidth configuration
 * configure FW
 * notify others function about the change
 */
static inline void bnx2x_config_mf_bw(struct bnx2x *bp)
{
	/* Only recompute min/max fairness and notify peers when a link is
	 * actually up; the cmng struct is pushed to the storm either way.
	 */
	if (bp->link_vars.link_up) {
		bnx2x_cmng_fns_init(bp, true, CMNG_FNS_MINMAX);
		bnx2x_link_sync_notify(bp);
	}
	storm_memset_cmng(bp, &bp->cmng, BP_PORT(bp));
}
2306
/* Apply a new multi-function bandwidth configuration and ACK the
 * request back to the MCP firmware.
 */
static inline void bnx2x_set_mf_bw(struct bnx2x *bp)
{
	bnx2x_config_mf_bw(bp);
	bnx2x_fw_command(bp, DRV_MSG_CODE_SET_MF_BW_ACK, 0);
}
2312
/* Handle a DCC (Dynamic Configuration Change) event from the MCP:
 * enable/disable this PF and/or apply a new bandwidth allocation.
 * Any event bits left unhandled are reported back as a failure.
 */
static void bnx2x_dcc_event(struct bnx2x *bp, u32 dcc_event)
{
	DP(BNX2X_MSG_MCP, "dcc_event 0x%x\n", dcc_event);

	if (dcc_event & DRV_STATUS_DCC_DISABLE_ENABLE_PF) {

		/*
		 * This is the only place besides the function initialization
		 * where the bp->flags can change so it is done without any
		 * locks
		 */
		if (bp->mf_config[BP_VN(bp)] & FUNC_MF_CFG_FUNC_DISABLED) {
			DP(NETIF_MSG_IFDOWN, "mf_cfg function disabled\n");
			bp->flags |= MF_FUNC_DIS;

			bnx2x_e1h_disable(bp);
		} else {
			DP(NETIF_MSG_IFUP, "mf_cfg function enabled\n");
			bp->flags &= ~MF_FUNC_DIS;

			bnx2x_e1h_enable(bp);
		}
		/* Clear each event bit once it has been serviced */
		dcc_event &= ~DRV_STATUS_DCC_DISABLE_ENABLE_PF;
	}
	if (dcc_event & DRV_STATUS_DCC_BANDWIDTH_ALLOCATION) {
		bnx2x_config_mf_bw(bp);
		dcc_event &= ~DRV_STATUS_DCC_BANDWIDTH_ALLOCATION;
	}

	/* Report results to MCP */
	if (dcc_event)
		bnx2x_fw_command(bp, DRV_MSG_CODE_DCC_FAILURE, 0);
	else
		bnx2x_fw_command(bp, DRV_MSG_CODE_DCC_OK, 0);
}
2348
Michael Chan28912902009-10-10 13:46:53 +00002349/* must be called under the spq lock */
2350static inline struct eth_spe *bnx2x_sp_get_next(struct bnx2x *bp)
2351{
2352 struct eth_spe *next_spe = bp->spq_prod_bd;
2353
2354 if (bp->spq_prod_bd == bp->spq_last_bd) {
2355 bp->spq_prod_bd = bp->spq;
2356 bp->spq_prod_idx = 0;
2357 DP(NETIF_MSG_TIMER, "end of spq\n");
2358 } else {
2359 bp->spq_prod_bd++;
2360 bp->spq_prod_idx++;
2361 }
2362 return next_spe;
2363}
2364
/* must be called under the spq lock */
/* Publish the new slow-path producer index to the XSTORM. The wmb()
 * before the write and the mmiowb() after are ordering-critical.
 */
static inline void bnx2x_sp_prod_update(struct bnx2x *bp)
{
	int func = BP_FUNC(bp);

	/* Make sure that BD data is updated before writing the producer */
	wmb();

	REG_WR16(bp, BAR_XSTRORM_INTMEM + XSTORM_SPQ_PROD_OFFSET(func),
		 bp->spq_prod_idx);
	mmiowb();
}
2377
/* the slow path queue is odd since completions arrive on the fastpath ring */
/* Post a slow-path element (ramrod) on the SPQ.
 * @command: ramrod command id
 * @cid: connection id (encoded into the SPE header)
 * @data_hi/@data_lo: DMA address of the ramrod data
 * @common: non-zero for common (EQ-accounted) ramrods, zero for ETH ones
 * Returns 0 on success, -EBUSY if the relevant ring is full (panics),
 * -EIO if the driver already panicked (BNX2X_STOP_ON_ERROR builds).
 */
int bnx2x_sp_post(struct bnx2x *bp, int command, int cid,
		  u32 data_hi, u32 data_lo, int common)
{
	struct eth_spe *spe;
	u16 type;

#ifdef BNX2X_STOP_ON_ERROR
	if (unlikely(bp->panic))
		return -EIO;
#endif

	spin_lock_bh(&bp->spq_lock);

	/* Common and ETH ramrods are accounted on separate credit counters */
	if (common) {
		if (!atomic_read(&bp->eq_spq_left)) {
			BNX2X_ERR("BUG! EQ ring full!\n");
			spin_unlock_bh(&bp->spq_lock);
			bnx2x_panic();
			return -EBUSY;
		}
	} else if (!atomic_read(&bp->cq_spq_left)) {
		BNX2X_ERR("BUG! SPQ ring full!\n");
		spin_unlock_bh(&bp->spq_lock);
		bnx2x_panic();
		return -EBUSY;
	}

	spe = bnx2x_sp_get_next(bp);

	/* CID needs port number to be encoded int it */
	spe->hdr.conn_and_cmd_data =
			cpu_to_le32((command << SPE_HDR_CMD_ID_SHIFT) |
				    HW_CID(bp, cid));

	if (common)
		/* Common ramrods:
		 * FUNC_START, FUNC_STOP, CFC_DEL, STATS, SET_MAC
		 * TRAFFIC_STOP, TRAFFIC_START
		 */
		type = (NONE_CONNECTION_TYPE << SPE_HDR_CONN_TYPE_SHIFT)
			& SPE_HDR_CONN_TYPE;
	else
		/* ETH ramrods: SETUP, HALT */
		type = (ETH_CONNECTION_TYPE << SPE_HDR_CONN_TYPE_SHIFT)
			& SPE_HDR_CONN_TYPE;

	type |= ((BP_FUNC(bp) << SPE_HDR_FUNCTION_ID_SHIFT) &
		 SPE_HDR_FUNCTION_ID);

	spe->hdr.type = cpu_to_le16(type);

	spe->data.update_data_addr.hi = cpu_to_le32(data_hi);
	spe->data.update_data_addr.lo = cpu_to_le32(data_lo);

	/* stats ramrod has it's own slot on the spq */
	if (command != RAMROD_CMD_ID_COMMON_STAT_QUERY) {
		/* It's ok if the actual decrement is issued towards the memory
		 * somewhere between the spin_lock and spin_unlock. Thus no
		 * more explict memory barrier is needed.
		 */
		if (common)
			atomic_dec(&bp->eq_spq_left);
		else
			atomic_dec(&bp->cq_spq_left);
	}


	DP(BNX2X_MSG_SP/*NETIF_MSG_TIMER*/,
	   "SPQE[%x] (%x:%x) command %d hw_cid %x data (%x:%x) "
	   "type(0x%x) left (ETH, COMMON) (%x,%x)\n",
	   bp->spq_prod_idx, (u32)U64_HI(bp->spq_mapping),
	   (u32)(U64_LO(bp->spq_mapping) +
	   (void *)bp->spq_prod_bd - (void *)bp->spq), command,
	   HW_CID(bp, cid), data_hi, data_lo, type,
	   atomic_read(&bp->cq_spq_left), atomic_read(&bp->eq_spq_left));

	bnx2x_sp_prod_update(bp);
	spin_unlock_bh(&bp->spq_lock);
	return 0;
}
2459
2460/* acquire split MCP access lock register */
Yitchak Gertner4a37fb62008-08-13 15:50:23 -07002461static int bnx2x_acquire_alr(struct bnx2x *bp)
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02002462{
Vladislav Zolotarov72fd0712010-04-19 01:13:12 +00002463 u32 j, val;
Eilon Greenstein34f80b02008-06-23 20:33:01 -07002464 int rc = 0;
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02002465
2466 might_sleep();
Vladislav Zolotarov72fd0712010-04-19 01:13:12 +00002467 for (j = 0; j < 1000; j++) {
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02002468 val = (1UL << 31);
2469 REG_WR(bp, GRCBASE_MCP + 0x9c, val);
2470 val = REG_RD(bp, GRCBASE_MCP + 0x9c);
2471 if (val & (1L << 31))
2472 break;
2473
2474 msleep(5);
2475 }
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02002476 if (!(val & (1L << 31))) {
Eilon Greenstein19680c42008-08-13 15:47:33 -07002477 BNX2X_ERR("Cannot acquire MCP access lock register\n");
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02002478 rc = -EBUSY;
2479 }
2480
2481 return rc;
2482}
2483
/* release split MCP access lock register */
/* Clearing the register releases the lock taken by bnx2x_acquire_alr() */
static void bnx2x_release_alr(struct bnx2x *bp)
{
	REG_WR(bp, GRCBASE_MCP + 0x9c, 0);
}
2489
/* Return-value flags for bnx2x_update_dsb_idx() */
#define BNX2X_DEF_SB_ATT_IDX	0x0001
#define BNX2X_DEF_SB_IDX	0x0002

/* Sample the default status block and record which indices changed.
 * Returns a bitmask of BNX2X_DEF_SB_ATT_IDX (attention index moved)
 * and/or BNX2X_DEF_SB_IDX (slow-path index moved).
 */
static inline u16 bnx2x_update_dsb_idx(struct bnx2x *bp)
{
	struct host_sp_status_block *def_sb = bp->def_status_blk;
	u16 rc = 0;

	barrier(); /* status block is written to by the chip */
	if (bp->def_att_idx != def_sb->atten_status_block.attn_bits_index) {
		bp->def_att_idx = def_sb->atten_status_block.attn_bits_index;
		rc |= BNX2X_DEF_SB_ATT_IDX;
	}

	if (bp->def_idx != def_sb->sp_sb.running_index) {
		bp->def_idx = def_sb->sp_sb.running_index;
		rc |= BNX2X_DEF_SB_IDX;
	}

	/* Do not reorder: indecies reading should complete before handling */
	barrier();
	return rc;
}
2513
2514/*
2515 * slow path service functions
2516 */
2517
/* Handle newly-asserted attention bits: mask them in the AEU, record
 * them in bp->attn_state, service the hard-wired sources (NIG link,
 * GPIOs, general attentions) and finally ack the bits towards HC/IGU.
 * NOTE(review): the acquire/release ordering of the HW lock and the PHY
 * lock around the NIG mask save/restore is deliberate — do not reorder.
 */
static void bnx2x_attn_int_asserted(struct bnx2x *bp, u32 asserted)
{
	int port = BP_PORT(bp);
	u32 aeu_addr = port ? MISC_REG_AEU_MASK_ATTN_FUNC_1 :
			      MISC_REG_AEU_MASK_ATTN_FUNC_0;
	u32 nig_int_mask_addr = port ? NIG_REG_MASK_INTERRUPT_PORT1 :
				       NIG_REG_MASK_INTERRUPT_PORT0;
	u32 aeu_mask;
	u32 nig_mask = 0;
	u32 reg_addr;

	/* A bit already marked asserted should never assert again */
	if (bp->attn_state & asserted)
		BNX2X_ERR("IGU ERROR\n");

	bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_PORT0_ATT_MASK + port);
	aeu_mask = REG_RD(bp, aeu_addr);

	DP(NETIF_MSG_HW, "aeu_mask %x newly asserted %x\n",
	   aeu_mask, asserted);
	aeu_mask &= ~(asserted & 0x3ff);
	DP(NETIF_MSG_HW, "new mask %x\n", aeu_mask);

	REG_WR(bp, aeu_addr, aeu_mask);
	bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_PORT0_ATT_MASK + port);

	DP(NETIF_MSG_HW, "attn_state %x\n", bp->attn_state);
	bp->attn_state |= asserted;
	DP(NETIF_MSG_HW, "new state %x\n", bp->attn_state);

	if (asserted & ATTN_HARD_WIRED_MASK) {
		if (asserted & ATTN_NIG_FOR_FUNC) {

			bnx2x_acquire_phy_lock(bp);

			/* save nig interrupt mask */
			nig_mask = REG_RD(bp, nig_int_mask_addr);

			/* If nig_mask is not set, no need to call the update
			 * function.
			 */
			if (nig_mask) {
				REG_WR(bp, nig_int_mask_addr, 0);

				bnx2x_link_attn(bp);
			}

			/* handle unicore attn? */
		}
		if (asserted & ATTN_SW_TIMER_4_FUNC)
			DP(NETIF_MSG_HW, "ATTN_SW_TIMER_4_FUNC!\n");

		if (asserted & GPIO_2_FUNC)
			DP(NETIF_MSG_HW, "GPIO_2_FUNC!\n");

		if (asserted & GPIO_3_FUNC)
			DP(NETIF_MSG_HW, "GPIO_3_FUNC!\n");

		if (asserted & GPIO_4_FUNC)
			DP(NETIF_MSG_HW, "GPIO_4_FUNC!\n");

		/* General attentions 1-3 belong to port 0, 4-6 to port 1;
		 * clear each one that fired.
		 */
		if (port == 0) {
			if (asserted & ATTN_GENERAL_ATTN_1) {
				DP(NETIF_MSG_HW, "ATTN_GENERAL_ATTN_1!\n");
				REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_1, 0x0);
			}
			if (asserted & ATTN_GENERAL_ATTN_2) {
				DP(NETIF_MSG_HW, "ATTN_GENERAL_ATTN_2!\n");
				REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_2, 0x0);
			}
			if (asserted & ATTN_GENERAL_ATTN_3) {
				DP(NETIF_MSG_HW, "ATTN_GENERAL_ATTN_3!\n");
				REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_3, 0x0);
			}
		} else {
			if (asserted & ATTN_GENERAL_ATTN_4) {
				DP(NETIF_MSG_HW, "ATTN_GENERAL_ATTN_4!\n");
				REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_4, 0x0);
			}
			if (asserted & ATTN_GENERAL_ATTN_5) {
				DP(NETIF_MSG_HW, "ATTN_GENERAL_ATTN_5!\n");
				REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_5, 0x0);
			}
			if (asserted & ATTN_GENERAL_ATTN_6) {
				DP(NETIF_MSG_HW, "ATTN_GENERAL_ATTN_6!\n");
				REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_6, 0x0);
			}
		}

	} /* if hardwired */

	/* Ack the asserted bits via the HC or the IGU depending on the
	 * interrupt block in use.
	 */
	if (bp->common.int_block == INT_BLOCK_HC)
		reg_addr = (HC_REG_COMMAND_REG + port*32 +
			    COMMAND_REG_ATTN_BITS_SET);
	else
		reg_addr = (BAR_IGU_INTMEM + IGU_CMD_ATTN_BIT_SET_UPPER*8);

	DP(NETIF_MSG_HW, "about to mask 0x%08x at %s addr 0x%x\n", asserted,
	   (bp->common.int_block == INT_BLOCK_HC) ? "HC" : "IGU", reg_addr);
	REG_WR(bp, reg_addr, asserted);

	/* now set back the mask */
	if (asserted & ATTN_NIG_FOR_FUNC) {
		REG_WR(bp, nig_int_mask_addr, nig_mask);
		bnx2x_release_phy_lock(bp);
	}
}
2624
/* Record a fan failure in shared memory (so other drivers/FW see the
 * failed-PHY marker) and log a fatal warning to the user.
 */
static inline void bnx2x_fan_failure(struct bnx2x *bp)
{
	int port = BP_PORT(bp);
	u32 ext_phy_config;
	/* mark the failure */
	ext_phy_config =
		SHMEM_RD(bp,
			 dev_info.port_hw_config[port].external_phy_config);

	/* Replace the ext PHY type with the dedicated FAILURE marker */
	ext_phy_config &= ~PORT_HW_CFG_XGXS_EXT_PHY_TYPE_MASK;
	ext_phy_config |= PORT_HW_CFG_XGXS_EXT_PHY_TYPE_FAILURE;
	SHMEM_WR(bp, dev_info.port_hw_config[port].external_phy_config,
		 ext_phy_config);

	/* log the failure */
	netdev_err(bp->dev, "Fan Failure on Network Controller has caused"
	       " the driver to shutdown the card to prevent permanent"
	       " damage. Please contact OEM Support for assistance\n");
}
Eilon Greensteinab6ad5a2009-08-12 08:24:29 +00002644
Eliezer Tamir877e9aa2008-02-28 11:55:53 -08002645static inline void bnx2x_attn_int_deasserted0(struct bnx2x *bp, u32 attn)
2646{
Eilon Greenstein34f80b02008-06-23 20:33:01 -07002647 int port = BP_PORT(bp);
Eliezer Tamir877e9aa2008-02-28 11:55:53 -08002648 int reg_offset;
Yaniv Rosnerd90d96b2010-09-07 11:41:04 +00002649 u32 val;
Eliezer Tamir877e9aa2008-02-28 11:55:53 -08002650
Eilon Greenstein34f80b02008-06-23 20:33:01 -07002651 reg_offset = (port ? MISC_REG_AEU_ENABLE1_FUNC_1_OUT_0 :
2652 MISC_REG_AEU_ENABLE1_FUNC_0_OUT_0);
Eliezer Tamir877e9aa2008-02-28 11:55:53 -08002653
Eilon Greenstein34f80b02008-06-23 20:33:01 -07002654 if (attn & AEU_INPUTS_ATTN_BITS_SPIO5) {
Eliezer Tamir877e9aa2008-02-28 11:55:53 -08002655
2656 val = REG_RD(bp, reg_offset);
2657 val &= ~AEU_INPUTS_ATTN_BITS_SPIO5;
2658 REG_WR(bp, reg_offset, val);
2659
2660 BNX2X_ERR("SPIO5 hw attention\n");
2661
Eilon Greensteinfd4ef402009-07-21 05:47:27 +00002662 /* Fan failure attention */
Yaniv Rosnerd90d96b2010-09-07 11:41:04 +00002663 bnx2x_hw_reset_phy(&bp->link_params);
Eilon Greensteinfd4ef402009-07-21 05:47:27 +00002664 bnx2x_fan_failure(bp);
Eliezer Tamir877e9aa2008-02-28 11:55:53 -08002665 }
Eilon Greenstein34f80b02008-06-23 20:33:01 -07002666
Eilon Greenstein589abe32009-02-12 08:36:55 +00002667 if (attn & (AEU_INPUTS_ATTN_BITS_GPIO3_FUNCTION_0 |
2668 AEU_INPUTS_ATTN_BITS_GPIO3_FUNCTION_1)) {
2669 bnx2x_acquire_phy_lock(bp);
2670 bnx2x_handle_module_detect_int(&bp->link_params);
2671 bnx2x_release_phy_lock(bp);
2672 }
2673
Eilon Greenstein34f80b02008-06-23 20:33:01 -07002674 if (attn & HW_INTERRUT_ASSERT_SET_0) {
2675
2676 val = REG_RD(bp, reg_offset);
2677 val &= ~(attn & HW_INTERRUT_ASSERT_SET_0);
2678 REG_WR(bp, reg_offset, val);
2679
2680 BNX2X_ERR("FATAL HW block attention set0 0x%x\n",
Eilon Greenstein0fc5d002009-08-12 08:24:05 +00002681 (u32)(attn & HW_INTERRUT_ASSERT_SET_0));
Eilon Greenstein34f80b02008-06-23 20:33:01 -07002682 bnx2x_panic();
2683 }
Eliezer Tamir877e9aa2008-02-28 11:55:53 -08002684}
2685
/* Service group-1 deasserted attentions: doorbell queue (DORQ) errors
 * and fatal HW-block attentions (panics).
 */
static inline void bnx2x_attn_int_deasserted1(struct bnx2x *bp, u32 attn)
{
	u32 val;

	if (attn & AEU_INPUTS_ATTN_BITS_DOORBELLQ_HW_INTERRUPT) {

		val = REG_RD(bp, DORQ_REG_DORQ_INT_STS_CLR);
		BNX2X_ERR("DB hw attention 0x%x\n", val);
		/* DORQ discard attention */
		if (val & 0x2)
			BNX2X_ERR("FATAL error from DORQ\n");
	}

	if (attn & HW_INTERRUT_ASSERT_SET_1) {

		int port = BP_PORT(bp);
		int reg_offset;

		reg_offset = (port ? MISC_REG_AEU_ENABLE1_FUNC_1_OUT_1 :
				     MISC_REG_AEU_ENABLE1_FUNC_0_OUT_1);

		/* Mask the fatal bits in the AEU before panicking */
		val = REG_RD(bp, reg_offset);
		val &= ~(attn & HW_INTERRUT_ASSERT_SET_1);
		REG_WR(bp, reg_offset, val);

		BNX2X_ERR("FATAL HW block attention set1 0x%x\n",
			  (u32)(attn & HW_INTERRUT_ASSERT_SET_1));
		bnx2x_panic();
	}
}
2716
/* Service group-2 deasserted attentions: CFC errors, PXP errors
 * (including the E2-only second status register) and fatal HW-block
 * attentions (panics).
 */
static inline void bnx2x_attn_int_deasserted2(struct bnx2x *bp, u32 attn)
{
	u32 val;

	if (attn & AEU_INPUTS_ATTN_BITS_CFC_HW_INTERRUPT) {

		val = REG_RD(bp, CFC_REG_CFC_INT_STS_CLR);
		BNX2X_ERR("CFC hw attention 0x%x\n", val);
		/* CFC error attention */
		if (val & 0x2)
			BNX2X_ERR("FATAL error from CFC\n");
	}

	if (attn & AEU_INPUTS_ATTN_BITS_PXP_HW_INTERRUPT) {

		val = REG_RD(bp, PXP_REG_PXP_INT_STS_CLR_0);
		BNX2X_ERR("PXP hw attention 0x%x\n", val);
		/* RQ_USDMDP_FIFO_OVERFLOW */
		if (val & 0x18000)
			BNX2X_ERR("FATAL error from PXP\n");
		if (CHIP_IS_E2(bp)) {
			val = REG_RD(bp, PXP_REG_PXP_INT_STS_CLR_1);
			BNX2X_ERR("PXP hw attention-1 0x%x\n", val);
		}
	}

	if (attn & HW_INTERRUT_ASSERT_SET_2) {

		int port = BP_PORT(bp);
		int reg_offset;

		reg_offset = (port ? MISC_REG_AEU_ENABLE1_FUNC_1_OUT_2 :
				     MISC_REG_AEU_ENABLE1_FUNC_0_OUT_2);

		val = REG_RD(bp, reg_offset);
		val &= ~(attn & HW_INTERRUT_ASSERT_SET_2);
		REG_WR(bp, reg_offset, val);

		BNX2X_ERR("FATAL HW block attention set2 0x%x\n",
			  (u32)(attn & HW_INTERRUT_ASSERT_SET_2));
		bnx2x_panic();
	}
}
2760
/* Service group-3 deasserted attentions: PMF link assert (MCP driver
 * status events: DCC, MF bandwidth, PMF handover, DCBX), MC/MCP
 * asserts, and latched GRC attentions.
 */
static inline void bnx2x_attn_int_deasserted3(struct bnx2x *bp, u32 attn)
{
	u32 val;

	if (attn & EVEREST_GEN_ATTN_IN_USE_MASK) {

		if (attn & BNX2X_PMF_LINK_ASSERT) {
			int func = BP_FUNC(bp);

			REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_12 + func*4, 0);
			/* Refresh the MF config and drv_status from SHMEM */
			bp->mf_config[BP_VN(bp)] = MF_CFG_RD(bp,
					func_mf_config[BP_ABS_FUNC(bp)].config);
			val = SHMEM_RD(bp,
				       func_mb[BP_FW_MB_IDX(bp)].drv_status);
			if (val & DRV_STATUS_DCC_EVENT_MASK)
				bnx2x_dcc_event(bp,
					    (val & DRV_STATUS_DCC_EVENT_MASK));

			if (val & DRV_STATUS_SET_MF_BW)
				bnx2x_set_mf_bw(bp);

			if ((bp->port.pmf == 0) && (val & DRV_STATUS_PMF))
				bnx2x_pmf_update(bp);

			/* Always call it here: bnx2x_link_report() will
			 * prevent the link indication duplication.
			 */
			bnx2x__link_status_update(bp);

			if (bp->port.pmf &&
			    (val & DRV_STATUS_DCBX_NEGOTIATION_RESULTS) &&
				bp->dcbx_enabled > 0)
				/* start dcbx state machine */
				bnx2x_dcbx_set_params(bp,
					BNX2X_DCBX_STATE_NEG_RECEIVED);
		} else if (attn & BNX2X_MC_ASSERT_BITS) {

			BNX2X_ERR("MC assert!\n");
			REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_10, 0);
			REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_9, 0);
			REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_8, 0);
			REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_7, 0);
			bnx2x_panic();

		} else if (attn & BNX2X_MCP_ASSERT) {

			BNX2X_ERR("MCP assert!\n");
			REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_11, 0);
			bnx2x_fw_dump(bp);

		} else
			BNX2X_ERR("Unknown HW assert! (attn 0x%x)\n", attn);
	}

	if (attn & EVEREST_LATCHED_ATTN_IN_USE_MASK) {
		BNX2X_ERR("LATCHED attention 0x%08x (masked)\n", attn);
		/* GRC timeout/reserved registers do not exist on E1 */
		if (attn & BNX2X_GRC_TIMEOUT) {
			val = CHIP_IS_E1(bp) ? 0 :
					REG_RD(bp, MISC_REG_GRC_TIMEOUT_ATTN);
			BNX2X_ERR("GRC time-out 0x%08x\n", val);
		}
		if (attn & BNX2X_GRC_RSV) {
			val = CHIP_IS_E1(bp) ? 0 :
					REG_RD(bp, MISC_REG_GRC_RSV_ATTN);
			BNX2X_ERR("GRC reserved 0x%08x\n", val);
		}
		REG_WR(bp, MISC_REG_AEU_CLR_LATCH_SIGNAL, 0x7ff);
	}
}
2830
/* The generic MISC register holds a 16-bit load counter in the low bits
 * and a reset-in-progress flag above it.
 */
#define BNX2X_MISC_GEN_REG	MISC_REG_GENERIC_POR_1
#define LOAD_COUNTER_BITS	16 /* Number of bits for load counter */
#define LOAD_COUNTER_MASK	(((u32)0x1 << LOAD_COUNTER_BITS) - 1)
#define RESET_DONE_FLAG_MASK	(~LOAD_COUNTER_MASK)
#define RESET_DONE_FLAG_SHIFT	LOAD_COUNTER_BITS

/*
 * should be run under rtnl lock
 */
/* Clear the reset-in-progress flag, preserving the load counter */
static inline void bnx2x_set_reset_done(struct bnx2x *bp)
{
	u32 val = REG_RD(bp, BNX2X_MISC_GEN_REG);
	val &= ~(1 << RESET_DONE_FLAG_SHIFT);
	REG_WR(bp, BNX2X_MISC_GEN_REG, val);
	barrier();
	mmiowb();
}
2848
2849/*
2850 * should be run under rtnl lock
2851 */
2852static inline void bnx2x_set_reset_in_progress(struct bnx2x *bp)
2853{
2854 u32 val = REG_RD(bp, BNX2X_MISC_GEN_REG);
2855 val |= (1 << 16);
2856 REG_WR(bp, BNX2X_MISC_GEN_REG, val);
2857 barrier();
2858 mmiowb();
2859}
2860
2861/*
2862 * should be run under rtnl lock
2863 */
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00002864bool bnx2x_reset_is_done(struct bnx2x *bp)
Vladislav Zolotarov72fd0712010-04-19 01:13:12 +00002865{
2866 u32 val = REG_RD(bp, BNX2X_MISC_GEN_REG);
2867 DP(NETIF_MSG_HW, "GEN_REG_VAL=0x%08x\n", val);
2868 return (val & RESET_DONE_FLAG_MASK) ? false : true;
2869}
2870
/*
 * should be run under rtnl lock
 */
/* Increment the 16-bit load counter (wraps within LOAD_COUNTER_MASK),
 * leaving the reset-flag bits untouched.
 */
inline void bnx2x_inc_load_cnt(struct bnx2x *bp)
{
	u32 val1, val = REG_RD(bp, BNX2X_MISC_GEN_REG);

	DP(NETIF_MSG_HW, "Old GEN_REG_VAL=0x%08x\n", val);

	val1 = ((val & LOAD_COUNTER_MASK) + 1) & LOAD_COUNTER_MASK;
	REG_WR(bp, BNX2X_MISC_GEN_REG, (val & RESET_DONE_FLAG_MASK) | val1);
	barrier();
	mmiowb();
}
2885
/*
 * should be run under rtnl lock
 */
/* Decrement the 16-bit load counter (wraps within LOAD_COUNTER_MASK)
 * and return the new counter value; reset-flag bits are preserved.
 */
u32 bnx2x_dec_load_cnt(struct bnx2x *bp)
{
	u32 val1, val = REG_RD(bp, BNX2X_MISC_GEN_REG);

	DP(NETIF_MSG_HW, "Old GEN_REG_VAL=0x%08x\n", val);

	val1 = ((val & LOAD_COUNTER_MASK) - 1) & LOAD_COUNTER_MASK;
	REG_WR(bp, BNX2X_MISC_GEN_REG, (val & RESET_DONE_FLAG_MASK) | val1);
	barrier();
	mmiowb();

	return val1;
}
2902
/*
 * should be run under rtnl lock
 */
/* Read the current 16-bit load counter value */
static inline u32 bnx2x_get_load_cnt(struct bnx2x *bp)
{
	return REG_RD(bp, BNX2X_MISC_GEN_REG) & LOAD_COUNTER_MASK;
}
2910
/* Zero the load counter, preserving the reset-flag bits */
static inline void bnx2x_clear_load_cnt(struct bnx2x *bp)
{
	u32 val = REG_RD(bp, BNX2X_MISC_GEN_REG);
	REG_WR(bp, BNX2X_MISC_GEN_REG, val & (~LOAD_COUNTER_MASK));
}
2916
/* Append a block name to the current console line, prefixed with ", "
 * for every entry after the first (idx != 0).
 */
static inline void _print_next_block(int idx, const char *blk)
{
	pr_cont("%s%s", idx ? ", " : "", blk);
}
2923
2924static inline int bnx2x_print_blocks_with_parity0(u32 sig, int par_num)
2925{
2926 int i = 0;
2927 u32 cur_bit = 0;
2928 for (i = 0; sig; i++) {
2929 cur_bit = ((u32)0x1 << i);
2930 if (sig & cur_bit) {
2931 switch (cur_bit) {
2932 case AEU_INPUTS_ATTN_BITS_BRB_PARITY_ERROR:
2933 _print_next_block(par_num++, "BRB");
2934 break;
2935 case AEU_INPUTS_ATTN_BITS_PARSER_PARITY_ERROR:
2936 _print_next_block(par_num++, "PARSER");
2937 break;
2938 case AEU_INPUTS_ATTN_BITS_TSDM_PARITY_ERROR:
2939 _print_next_block(par_num++, "TSDM");
2940 break;
2941 case AEU_INPUTS_ATTN_BITS_SEARCHER_PARITY_ERROR:
2942 _print_next_block(par_num++, "SEARCHER");
2943 break;
2944 case AEU_INPUTS_ATTN_BITS_TSEMI_PARITY_ERROR:
2945 _print_next_block(par_num++, "TSEMI");
2946 break;
2947 }
2948
2949 /* Clear the bit */
2950 sig &= ~cur_bit;
2951 }
2952 }
2953
2954 return par_num;
2955}
2956
2957static inline int bnx2x_print_blocks_with_parity1(u32 sig, int par_num)
2958{
2959 int i = 0;
2960 u32 cur_bit = 0;
2961 for (i = 0; sig; i++) {
2962 cur_bit = ((u32)0x1 << i);
2963 if (sig & cur_bit) {
2964 switch (cur_bit) {
2965 case AEU_INPUTS_ATTN_BITS_PBCLIENT_PARITY_ERROR:
2966 _print_next_block(par_num++, "PBCLIENT");
2967 break;
2968 case AEU_INPUTS_ATTN_BITS_QM_PARITY_ERROR:
2969 _print_next_block(par_num++, "QM");
2970 break;
2971 case AEU_INPUTS_ATTN_BITS_XSDM_PARITY_ERROR:
2972 _print_next_block(par_num++, "XSDM");
2973 break;
2974 case AEU_INPUTS_ATTN_BITS_XSEMI_PARITY_ERROR:
2975 _print_next_block(par_num++, "XSEMI");
2976 break;
2977 case AEU_INPUTS_ATTN_BITS_DOORBELLQ_PARITY_ERROR:
2978 _print_next_block(par_num++, "DOORBELLQ");
2979 break;
2980 case AEU_INPUTS_ATTN_BITS_VAUX_PCI_CORE_PARITY_ERROR:
2981 _print_next_block(par_num++, "VAUX PCI CORE");
2982 break;
2983 case AEU_INPUTS_ATTN_BITS_DEBUG_PARITY_ERROR:
2984 _print_next_block(par_num++, "DEBUG");
2985 break;
2986 case AEU_INPUTS_ATTN_BITS_USDM_PARITY_ERROR:
2987 _print_next_block(par_num++, "USDM");
2988 break;
2989 case AEU_INPUTS_ATTN_BITS_USEMI_PARITY_ERROR:
2990 _print_next_block(par_num++, "USEMI");
2991 break;
2992 case AEU_INPUTS_ATTN_BITS_UPB_PARITY_ERROR:
2993 _print_next_block(par_num++, "UPB");
2994 break;
2995 case AEU_INPUTS_ATTN_BITS_CSDM_PARITY_ERROR:
2996 _print_next_block(par_num++, "CSDM");
2997 break;
2998 }
2999
3000 /* Clear the bit */
3001 sig &= ~cur_bit;
3002 }
3003 }
3004
3005 return par_num;
3006}
3007
3008static inline int bnx2x_print_blocks_with_parity2(u32 sig, int par_num)
3009{
3010 int i = 0;
3011 u32 cur_bit = 0;
3012 for (i = 0; sig; i++) {
3013 cur_bit = ((u32)0x1 << i);
3014 if (sig & cur_bit) {
3015 switch (cur_bit) {
3016 case AEU_INPUTS_ATTN_BITS_CSEMI_PARITY_ERROR:
3017 _print_next_block(par_num++, "CSEMI");
3018 break;
3019 case AEU_INPUTS_ATTN_BITS_PXP_PARITY_ERROR:
3020 _print_next_block(par_num++, "PXP");
3021 break;
3022 case AEU_IN_ATTN_BITS_PXPPCICLOCKCLIENT_PARITY_ERROR:
3023 _print_next_block(par_num++,
3024 "PXPPCICLOCKCLIENT");
3025 break;
3026 case AEU_INPUTS_ATTN_BITS_CFC_PARITY_ERROR:
3027 _print_next_block(par_num++, "CFC");
3028 break;
3029 case AEU_INPUTS_ATTN_BITS_CDU_PARITY_ERROR:
3030 _print_next_block(par_num++, "CDU");
3031 break;
3032 case AEU_INPUTS_ATTN_BITS_IGU_PARITY_ERROR:
3033 _print_next_block(par_num++, "IGU");
3034 break;
3035 case AEU_INPUTS_ATTN_BITS_MISC_PARITY_ERROR:
3036 _print_next_block(par_num++, "MISC");
3037 break;
3038 }
3039
3040 /* Clear the bit */
3041 sig &= ~cur_bit;
3042 }
3043 }
3044
3045 return par_num;
3046}
3047
3048static inline int bnx2x_print_blocks_with_parity3(u32 sig, int par_num)
3049{
3050 int i = 0;
3051 u32 cur_bit = 0;
3052 for (i = 0; sig; i++) {
3053 cur_bit = ((u32)0x1 << i);
3054 if (sig & cur_bit) {
3055 switch (cur_bit) {
3056 case AEU_INPUTS_ATTN_BITS_MCP_LATCHED_ROM_PARITY:
3057 _print_next_block(par_num++, "MCP ROM");
3058 break;
3059 case AEU_INPUTS_ATTN_BITS_MCP_LATCHED_UMP_RX_PARITY:
3060 _print_next_block(par_num++, "MCP UMP RX");
3061 break;
3062 case AEU_INPUTS_ATTN_BITS_MCP_LATCHED_UMP_TX_PARITY:
3063 _print_next_block(par_num++, "MCP UMP TX");
3064 break;
3065 case AEU_INPUTS_ATTN_BITS_MCP_LATCHED_SCPAD_PARITY:
3066 _print_next_block(par_num++, "MCP SCPAD");
3067 break;
3068 }
3069
3070 /* Clear the bit */
3071 sig &= ~cur_bit;
3072 }
3073 }
3074
3075 return par_num;
3076}
3077
3078static inline bool bnx2x_parity_attn(struct bnx2x *bp, u32 sig0, u32 sig1,
3079 u32 sig2, u32 sig3)
3080{
3081 if ((sig0 & HW_PRTY_ASSERT_SET_0) || (sig1 & HW_PRTY_ASSERT_SET_1) ||
3082 (sig2 & HW_PRTY_ASSERT_SET_2) || (sig3 & HW_PRTY_ASSERT_SET_3)) {
3083 int par_num = 0;
3084 DP(NETIF_MSG_HW, "Was parity error: HW block parity attention: "
3085 "[0]:0x%08x [1]:0x%08x "
3086 "[2]:0x%08x [3]:0x%08x\n",
3087 sig0 & HW_PRTY_ASSERT_SET_0,
3088 sig1 & HW_PRTY_ASSERT_SET_1,
3089 sig2 & HW_PRTY_ASSERT_SET_2,
3090 sig3 & HW_PRTY_ASSERT_SET_3);
3091 printk(KERN_ERR"%s: Parity errors detected in blocks: ",
3092 bp->dev->name);
3093 par_num = bnx2x_print_blocks_with_parity0(
3094 sig0 & HW_PRTY_ASSERT_SET_0, par_num);
3095 par_num = bnx2x_print_blocks_with_parity1(
3096 sig1 & HW_PRTY_ASSERT_SET_1, par_num);
3097 par_num = bnx2x_print_blocks_with_parity2(
3098 sig2 & HW_PRTY_ASSERT_SET_2, par_num);
3099 par_num = bnx2x_print_blocks_with_parity3(
3100 sig3 & HW_PRTY_ASSERT_SET_3, par_num);
3101 printk("\n");
3102 return true;
3103 } else
3104 return false;
3105}
3106
/* Read the four after-invert AEU attention registers for this port and
 * report whether any of them carries a parity-error indication.
 */
bool bnx2x_chk_parity_attn(struct bnx2x *bp)
{
	struct attn_route attn;
	int port = BP_PORT(bp);

	attn.sig[0] = REG_RD(bp,
		MISC_REG_AEU_AFTER_INVERT_1_FUNC_0 +
			     port*4);
	attn.sig[1] = REG_RD(bp,
		MISC_REG_AEU_AFTER_INVERT_2_FUNC_0 +
			     port*4);
	attn.sig[2] = REG_RD(bp,
		MISC_REG_AEU_AFTER_INVERT_3_FUNC_0 +
			     port*4);
	attn.sig[3] = REG_RD(bp,
		MISC_REG_AEU_AFTER_INVERT_4_FUNC_0 +
			     port*4);

	return bnx2x_parity_attn(bp, attn.sig[0], attn.sig[1], attn.sig[2],
					attn.sig[3]);
}
3128
Dmitry Kravkovf2e08992010-10-06 03:28:26 +00003129
/* Decode and log HW attentions from AEU signal group 4: PGLUE and ATC
 * interrupt status bits (read-and-clear registers) plus their fatal
 * parity indications. Logging only; no recovery is attempted here.
 */
static inline void bnx2x_attn_int_deasserted4(struct bnx2x *bp, u32 attn)
{
	u32 val;
	if (attn & AEU_INPUTS_ATTN_BITS_PGLUE_HW_INTERRUPT) {

		/* reading the _CLR register also clears the latched status */
		val = REG_RD(bp, PGLUE_B_REG_PGLUE_B_INT_STS_CLR);
		BNX2X_ERR("PGLUE hw attention 0x%x\n", val);
		if (val & PGLUE_B_PGLUE_B_INT_STS_REG_ADDRESS_ERROR)
			BNX2X_ERR("PGLUE_B_PGLUE_B_INT_STS_REG_"
				  "ADDRESS_ERROR\n");
		if (val & PGLUE_B_PGLUE_B_INT_STS_REG_INCORRECT_RCV_BEHAVIOR)
			BNX2X_ERR("PGLUE_B_PGLUE_B_INT_STS_REG_"
				  "INCORRECT_RCV_BEHAVIOR\n");
		if (val & PGLUE_B_PGLUE_B_INT_STS_REG_WAS_ERROR_ATTN)
			BNX2X_ERR("PGLUE_B_PGLUE_B_INT_STS_REG_"
				  "WAS_ERROR_ATTN\n");
		if (val & PGLUE_B_PGLUE_B_INT_STS_REG_VF_LENGTH_VIOLATION_ATTN)
			BNX2X_ERR("PGLUE_B_PGLUE_B_INT_STS_REG_"
				  "VF_LENGTH_VIOLATION_ATTN\n");
		if (val &
		    PGLUE_B_PGLUE_B_INT_STS_REG_VF_GRC_SPACE_VIOLATION_ATTN)
			BNX2X_ERR("PGLUE_B_PGLUE_B_INT_STS_REG_"
				  "VF_GRC_SPACE_VIOLATION_ATTN\n");
		if (val &
		    PGLUE_B_PGLUE_B_INT_STS_REG_VF_MSIX_BAR_VIOLATION_ATTN)
			BNX2X_ERR("PGLUE_B_PGLUE_B_INT_STS_REG_"
				  "VF_MSIX_BAR_VIOLATION_ATTN\n");
		if (val & PGLUE_B_PGLUE_B_INT_STS_REG_TCPL_ERROR_ATTN)
			BNX2X_ERR("PGLUE_B_PGLUE_B_INT_STS_REG_"
				  "TCPL_ERROR_ATTN\n");
		if (val & PGLUE_B_PGLUE_B_INT_STS_REG_TCPL_IN_TWO_RCBS_ATTN)
			BNX2X_ERR("PGLUE_B_PGLUE_B_INT_STS_REG_"
				  "TCPL_IN_TWO_RCBS_ATTN\n");
		if (val & PGLUE_B_PGLUE_B_INT_STS_REG_CSSNOOP_FIFO_OVERFLOW)
			BNX2X_ERR("PGLUE_B_PGLUE_B_INT_STS_REG_"
				  "CSSNOOP_FIFO_OVERFLOW\n");
	}
	if (attn & AEU_INPUTS_ATTN_BITS_ATC_HW_INTERRUPT) {
		val = REG_RD(bp, ATC_REG_ATC_INT_STS_CLR);
		BNX2X_ERR("ATC hw attention 0x%x\n", val);
		if (val & ATC_ATC_INT_STS_REG_ADDRESS_ERROR)
			BNX2X_ERR("ATC_ATC_INT_STS_REG_ADDRESS_ERROR\n");
		if (val & ATC_ATC_INT_STS_REG_ATC_TCPL_TO_NOT_PEND)
			BNX2X_ERR("ATC_ATC_INT_STS_REG"
				  "_ATC_TCPL_TO_NOT_PEND\n");
		if (val & ATC_ATC_INT_STS_REG_ATC_GPA_MULTIPLE_HITS)
			BNX2X_ERR("ATC_ATC_INT_STS_REG_"
				  "ATC_GPA_MULTIPLE_HITS\n");
		if (val & ATC_ATC_INT_STS_REG_ATC_RCPL_TO_EMPTY_CNT)
			BNX2X_ERR("ATC_ATC_INT_STS_REG_"
				  "ATC_RCPL_TO_EMPTY_CNT\n");
		if (val & ATC_ATC_INT_STS_REG_ATC_TCPL_ERROR)
			BNX2X_ERR("ATC_ATC_INT_STS_REG_ATC_TCPL_ERROR\n");
		if (val & ATC_ATC_INT_STS_REG_ATC_IREQ_LESS_THAN_STU)
			BNX2X_ERR("ATC_ATC_INT_STS_REG_"
				  "ATC_IREQ_LESS_THAN_STU\n");
	}

	if (attn & (AEU_INPUTS_ATTN_BITS_PGLUE_PARITY_ERROR |
		    AEU_INPUTS_ATTN_BITS_ATC_PARITY_ERROR)) {
		BNX2X_ERR("FATAL parity attention set4 0x%x\n",
		(u32)(attn & (AEU_INPUTS_ATTN_BITS_PGLUE_PARITY_ERROR |
		    AEU_INPUTS_ATTN_BITS_ATC_PARITY_ERROR)));
	}

}
3196
/* Handle newly deasserted attention bits: dispatch each deasserted
 * attention group to the per-signal-group handlers, acknowledge the
 * bits towards the HC/IGU and re-enable them in the AEU mask.
 * On a detected parity error, kick off the recovery flow instead.
 */
static void bnx2x_attn_int_deasserted(struct bnx2x *bp, u32 deasserted)
{
	struct attn_route attn, *group_mask;
	int port = BP_PORT(bp);
	int index;
	u32 reg_addr;
	u32 val;
	u32 aeu_mask;

	/* need to take HW lock because MCP or other port might also
	   try to handle this event */
	bnx2x_acquire_alr(bp);

	if (CHIP_PARITY_ENABLED(bp) && bnx2x_chk_parity_attn(bp)) {
		/* start the recovery state machine and schedule the reset */
		bp->recovery_state = BNX2X_RECOVERY_INIT;
		bnx2x_set_reset_in_progress(bp);
		schedule_delayed_work(&bp->reset_task, 0);
		/* Disable HW interrupts */
		bnx2x_int_disable(bp);
		bnx2x_release_alr(bp);
		/* In case of parity errors don't handle attentions so that
		 * other function would "see" parity errors.
		 */
		return;
	}

	/* snapshot the after-invert AEU signal registers for this port */
	attn.sig[0] = REG_RD(bp, MISC_REG_AEU_AFTER_INVERT_1_FUNC_0 + port*4);
	attn.sig[1] = REG_RD(bp, MISC_REG_AEU_AFTER_INVERT_2_FUNC_0 + port*4);
	attn.sig[2] = REG_RD(bp, MISC_REG_AEU_AFTER_INVERT_3_FUNC_0 + port*4);
	attn.sig[3] = REG_RD(bp, MISC_REG_AEU_AFTER_INVERT_4_FUNC_0 + port*4);
	if (CHIP_IS_E2(bp))
		attn.sig[4] =
		      REG_RD(bp, MISC_REG_AEU_AFTER_INVERT_5_FUNC_0 + port*4);
	else
		attn.sig[4] = 0;

	DP(NETIF_MSG_HW, "attn: %08x %08x %08x %08x %08x\n",
	   attn.sig[0], attn.sig[1], attn.sig[2], attn.sig[3], attn.sig[4]);

	/* for each deasserted group, mask the signals with that group's
	 * configuration and hand them to the per-group handlers
	 */
	for (index = 0; index < MAX_DYNAMIC_ATTN_GRPS; index++) {
		if (deasserted & (1 << index)) {
			group_mask = &bp->attn_group[index];

			DP(NETIF_MSG_HW, "group[%d]: %08x %08x "
					 "%08x %08x %08x\n",
			   index,
			   group_mask->sig[0], group_mask->sig[1],
			   group_mask->sig[2], group_mask->sig[3],
			   group_mask->sig[4]);

			bnx2x_attn_int_deasserted4(bp,
					attn.sig[4] & group_mask->sig[4]);
			bnx2x_attn_int_deasserted3(bp,
					attn.sig[3] & group_mask->sig[3]);
			bnx2x_attn_int_deasserted1(bp,
					attn.sig[1] & group_mask->sig[1]);
			bnx2x_attn_int_deasserted2(bp,
					attn.sig[2] & group_mask->sig[2]);
			bnx2x_attn_int_deasserted0(bp,
					attn.sig[0] & group_mask->sig[0]);
		}
	}

	bnx2x_release_alr(bp);

	/* ack the deasserted bits towards HC or IGU, depending on the
	 * interrupt block in use
	 */
	if (bp->common.int_block == INT_BLOCK_HC)
		reg_addr = (HC_REG_COMMAND_REG + port*32 +
			    COMMAND_REG_ATTN_BITS_CLR);
	else
		reg_addr = (BAR_IGU_INTMEM + IGU_CMD_ATTN_BIT_CLR_UPPER*8);

	val = ~deasserted;
	DP(NETIF_MSG_HW, "about to mask 0x%08x at %s addr 0x%x\n", val,
	   (bp->common.int_block == INT_BLOCK_HC) ? "HC" : "IGU", reg_addr);
	REG_WR(bp, reg_addr, val);

	if (~bp->attn_state & deasserted)
		BNX2X_ERR("IGU ERROR\n");

	reg_addr = port ? MISC_REG_AEU_MASK_ATTN_FUNC_1 :
			  MISC_REG_AEU_MASK_ATTN_FUNC_0;

	/* re-enable the deasserted attention lines in the AEU mask under
	 * the per-port HW lock
	 */
	bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_PORT0_ATT_MASK + port);
	aeu_mask = REG_RD(bp, reg_addr);

	DP(NETIF_MSG_HW, "aeu_mask %x  newly deasserted %x\n",
	   aeu_mask, deasserted);
	aeu_mask |= (deasserted & 0x3ff);
	DP(NETIF_MSG_HW, "new mask %x\n", aeu_mask);

	REG_WR(bp, reg_addr, aeu_mask);
	bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_PORT0_ATT_MASK + port);

	DP(NETIF_MSG_HW, "attn_state %x\n", bp->attn_state);
	bp->attn_state &= ~deasserted;
	DP(NETIF_MSG_HW, "new state %x\n", bp->attn_state);
}
3294
3295static void bnx2x_attn_int(struct bnx2x *bp)
3296{
3297 /* read local copy of bits */
Eilon Greenstein68d59482009-01-14 21:27:36 -08003298 u32 attn_bits = le32_to_cpu(bp->def_status_blk->atten_status_block.
3299 attn_bits);
3300 u32 attn_ack = le32_to_cpu(bp->def_status_blk->atten_status_block.
3301 attn_bits_ack);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02003302 u32 attn_state = bp->attn_state;
3303
3304 /* look for changed bits */
3305 u32 asserted = attn_bits & ~attn_ack & ~attn_state;
3306 u32 deasserted = ~attn_bits & attn_ack & attn_state;
3307
3308 DP(NETIF_MSG_HW,
3309 "attn_bits %x attn_ack %x asserted %x deasserted %x\n",
3310 attn_bits, attn_ack, asserted, deasserted);
3311
3312 if (~(attn_bits ^ attn_ack) & (attn_bits ^ attn_state))
Eilon Greenstein34f80b02008-06-23 20:33:01 -07003313 BNX2X_ERR("BAD attention state\n");
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02003314
3315 /* handle bits that were raised */
3316 if (asserted)
3317 bnx2x_attn_int_asserted(bp, asserted);
3318
3319 if (deasserted)
3320 bnx2x_attn_int_deasserted(bp, deasserted);
3321}
3322
/* Publish a new event-queue producer value to the storm memory for this
 * function; mmiowb() keeps producer updates ordered towards the device.
 */
static inline void bnx2x_update_eq_prod(struct bnx2x *bp, u16 prod)
{
	/* No memory barriers */
	storm_memset_eq_prod(bp, prod, BP_FUNC(bp));
	mmiowb(); /* keep prod updates ordered */
}
3329
#ifdef BCM_CNIC
/* Handle a CFC-delete completion for a CNIC-owned CID.
 * Returns 0 when the CID belongs to CNIC (completion consumed here),
 * 1 when it is not a CNIC CID and the caller should process it.
 */
static int bnx2x_cnic_handle_cfc_del(struct bnx2x *bp, u32 cid,
				      union event_ring_elem *elem)
{
	/* not a CNIC CID: no starting_cid yet, or below the CNIC range
	 * (the iSCSI L2 CID is an explicit exception)
	 */
	if (!bp->cnic_eth_dev.starting_cid ||
	    (cid < bp->cnic_eth_dev.starting_cid &&
	    cid != bp->cnic_eth_dev.iscsi_l2_cid))
		return 1;

	DP(BNX2X_MSG_SP, "got delete ramrod for CNIC CID %d\n", cid);

	if (unlikely(elem->message.data.cfc_del_event.error)) {
		BNX2X_ERR("got delete ramrod for CNIC CID %d with error!\n",
			  cid);
		bnx2x_panic_dump(bp);
	}
	bnx2x_cnic_cfc_comp(bp, cid);
	return 0;
}
#endif
3350
3351static void bnx2x_eq_int(struct bnx2x *bp)
3352{
3353 u16 hw_cons, sw_cons, sw_prod;
3354 union event_ring_elem *elem;
3355 u32 cid;
3356 u8 opcode;
3357 int spqe_cnt = 0;
3358
3359 hw_cons = le16_to_cpu(*bp->eq_cons_sb);
3360
3361 /* The hw_cos range is 1-255, 257 - the sw_cons range is 0-254, 256.
3362 * when we get the the next-page we nned to adjust so the loop
3363 * condition below will be met. The next element is the size of a
3364 * regular element and hence incrementing by 1
3365 */
3366 if ((hw_cons & EQ_DESC_MAX_PAGE) == EQ_DESC_MAX_PAGE)
3367 hw_cons++;
3368
Lucas De Marchi25985ed2011-03-30 22:57:33 -03003369 /* This function may never run in parallel with itself for a
Dmitry Kravkov523224a2010-10-06 03:23:26 +00003370 * specific bp, thus there is no need in "paired" read memory
3371 * barrier here.
3372 */
3373 sw_cons = bp->eq_cons;
3374 sw_prod = bp->eq_prod;
3375
Vladislav Zolotarov6e30dd42011-02-06 11:25:41 -08003376 DP(BNX2X_MSG_SP, "EQ: hw_cons %u sw_cons %u bp->cq_spq_left %u\n",
3377 hw_cons, sw_cons, atomic_read(&bp->eq_spq_left));
Dmitry Kravkov523224a2010-10-06 03:23:26 +00003378
3379 for (; sw_cons != hw_cons;
3380 sw_prod = NEXT_EQ_IDX(sw_prod), sw_cons = NEXT_EQ_IDX(sw_cons)) {
3381
3382
3383 elem = &bp->eq_ring[EQ_DESC(sw_cons)];
3384
3385 cid = SW_CID(elem->message.data.cfc_del_event.cid);
3386 opcode = elem->message.opcode;
3387
3388
3389 /* handle eq element */
3390 switch (opcode) {
3391 case EVENT_RING_OPCODE_STAT_QUERY:
3392 DP(NETIF_MSG_TIMER, "got statistics comp event\n");
3393 /* nothing to do with stats comp */
3394 continue;
3395
3396 case EVENT_RING_OPCODE_CFC_DEL:
3397 /* handle according to cid range */
3398 /*
3399 * we may want to verify here that the bp state is
3400 * HALTING
3401 */
3402 DP(NETIF_MSG_IFDOWN,
3403 "got delete ramrod for MULTI[%d]\n", cid);
3404#ifdef BCM_CNIC
3405 if (!bnx2x_cnic_handle_cfc_del(bp, cid, elem))
3406 goto next_spqe;
Vladislav Zolotarovec6ba942010-12-13 05:44:01 +00003407 if (cid == BNX2X_FCOE_ETH_CID)
3408 bnx2x_fcoe(bp, state) = BNX2X_FP_STATE_CLOSED;
3409 else
Dmitry Kravkov523224a2010-10-06 03:23:26 +00003410#endif
Vladislav Zolotarovec6ba942010-12-13 05:44:01 +00003411 bnx2x_fp(bp, cid, state) =
Dmitry Kravkov523224a2010-10-06 03:23:26 +00003412 BNX2X_FP_STATE_CLOSED;
3413
3414 goto next_spqe;
Vladislav Zolotarove4901dd2010-12-13 05:44:18 +00003415
3416 case EVENT_RING_OPCODE_STOP_TRAFFIC:
3417 DP(NETIF_MSG_IFUP, "got STOP TRAFFIC\n");
3418 bnx2x_dcbx_set_params(bp, BNX2X_DCBX_STATE_TX_PAUSED);
3419 goto next_spqe;
3420 case EVENT_RING_OPCODE_START_TRAFFIC:
3421 DP(NETIF_MSG_IFUP, "got START TRAFFIC\n");
3422 bnx2x_dcbx_set_params(bp, BNX2X_DCBX_STATE_TX_RELEASED);
3423 goto next_spqe;
Dmitry Kravkov523224a2010-10-06 03:23:26 +00003424 }
3425
3426 switch (opcode | bp->state) {
3427 case (EVENT_RING_OPCODE_FUNCTION_START |
3428 BNX2X_STATE_OPENING_WAIT4_PORT):
3429 DP(NETIF_MSG_IFUP, "got setup ramrod\n");
3430 bp->state = BNX2X_STATE_FUNC_STARTED;
3431 break;
3432
3433 case (EVENT_RING_OPCODE_FUNCTION_STOP |
3434 BNX2X_STATE_CLOSING_WAIT4_HALT):
3435 DP(NETIF_MSG_IFDOWN, "got halt ramrod\n");
3436 bp->state = BNX2X_STATE_CLOSING_WAIT4_UNLOAD;
3437 break;
3438
3439 case (EVENT_RING_OPCODE_SET_MAC | BNX2X_STATE_OPEN):
3440 case (EVENT_RING_OPCODE_SET_MAC | BNX2X_STATE_DIAG):
3441 DP(NETIF_MSG_IFUP, "got set mac ramrod\n");
Vladislav Zolotarov6e30dd42011-02-06 11:25:41 -08003442 if (elem->message.data.set_mac_event.echo)
3443 bp->set_mac_pending = 0;
Dmitry Kravkov523224a2010-10-06 03:23:26 +00003444 break;
3445
3446 case (EVENT_RING_OPCODE_SET_MAC |
3447 BNX2X_STATE_CLOSING_WAIT4_HALT):
3448 DP(NETIF_MSG_IFDOWN, "got (un)set mac ramrod\n");
Vladislav Zolotarov6e30dd42011-02-06 11:25:41 -08003449 if (elem->message.data.set_mac_event.echo)
3450 bp->set_mac_pending = 0;
Dmitry Kravkov523224a2010-10-06 03:23:26 +00003451 break;
3452 default:
3453 /* unknown event log error and continue */
3454 BNX2X_ERR("Unknown EQ event %d\n",
3455 elem->message.opcode);
3456 }
3457next_spqe:
3458 spqe_cnt++;
3459 } /* for */
3460
Dmitry Kravkov8fe23fb2010-10-06 03:27:41 +00003461 smp_mb__before_atomic_inc();
Vladislav Zolotarov6e30dd42011-02-06 11:25:41 -08003462 atomic_add(spqe_cnt, &bp->eq_spq_left);
Dmitry Kravkov523224a2010-10-06 03:23:26 +00003463
3464 bp->eq_cons = sw_cons;
3465 bp->eq_prod = sw_prod;
3466 /* Make sure that above mem writes were issued towards the memory */
3467 smp_wmb();
3468
3469 /* update producer */
3470 bnx2x_update_eq_prod(bp, bp->eq_prod);
3471}
3472
/* Slow-path worker: reads the default status block indices and handles
 * whatever is pending - HW attentions, the slow-path event queue (and
 * FCoE NAPI kick when CNIC is built in) - then re-arms the default SB.
 */
static void bnx2x_sp_task(struct work_struct *work)
{
	struct bnx2x *bp = container_of(work, struct bnx2x, sp_task.work);
	u16 status;

	status = bnx2x_update_dsb_idx(bp);
/*	if (status == 0)				     */
/*		BNX2X_ERR("spurious slowpath interrupt!\n"); */

	DP(NETIF_MSG_INTR, "got a slowpath interrupt (status 0x%x)\n", status);

	/* HW attentions */
	if (status & BNX2X_DEF_SB_ATT_IDX) {
		bnx2x_attn_int(bp);
		status &= ~BNX2X_DEF_SB_ATT_IDX;
	}

	/* SP events: STAT_QUERY and others */
	if (status & BNX2X_DEF_SB_IDX) {
#ifdef BCM_CNIC
		struct bnx2x_fastpath *fp = bnx2x_fcoe_fp(bp);

		/* the FCoE queue is serviced from the slow-path context */
		if ((!NO_FCOE(bp)) &&
			(bnx2x_has_rx_work(fp) || bnx2x_has_tx_work(fp)))
			napi_schedule(&bnx2x_fcoe(bp, napi));
#endif
		/* Handle EQ completions */
		bnx2x_eq_int(bp);

		bnx2x_ack_sb(bp, bp->igu_dsb_id, USTORM_ID,
			le16_to_cpu(bp->def_idx), IGU_INT_NOP, 1);

		status &= ~BNX2X_DEF_SB_IDX;
	}

	if (unlikely(status))
		DP(NETIF_MSG_INTR, "got an unknown interrupt! (status 0x%x)\n",
		   status);

	/* re-enable interrupts on the attention line */
	bnx2x_ack_sb(bp, bp->igu_dsb_id, ATTENTION_ID,
	     le16_to_cpu(bp->def_att_idx), IGU_INT_ENABLE, 1);
}
3515
/* MSI-X slow-path interrupt handler: disables further SP interrupts,
 * forwards the event to the CNIC handler if registered, and defers the
 * real work to the sp_task workqueue item.
 */
irqreturn_t bnx2x_msix_sp_int(int irq, void *dev_instance)
{
	struct net_device *dev = dev_instance;
	struct bnx2x *bp = netdev_priv(dev);

	/* mask SP interrupts until sp_task re-enables them */
	bnx2x_ack_sb(bp, bp->igu_dsb_id, USTORM_ID, 0,
		     IGU_INT_DISABLE, 0);

#ifdef BNX2X_STOP_ON_ERROR
	if (unlikely(bp->panic))
		return IRQ_HANDLED;
#endif

#ifdef BCM_CNIC
	{
		struct cnic_ops *c_ops;

		rcu_read_lock();
		c_ops = rcu_dereference(bp->cnic_ops);
		if (c_ops)
			c_ops->cnic_handler(bp->cnic_data, NULL);
		rcu_read_unlock();
	}
#endif
	queue_delayed_work(bnx2x_wq, &bp->sp_task, 0);

	return IRQ_HANDLED;
}
3544
3545/* end of slow path */
3546
/* Periodic driver timer: services the rings in poll mode, exchanges the
 * driver/MCP heartbeat pulse via shared memory, triggers a statistics
 * update while the device is open, and re-arms itself.
 */
static void bnx2x_timer(unsigned long data)
{
	struct bnx2x *bp = (struct bnx2x *) data;

	if (!netif_running(bp->dev))
		return;

	/* in poll mode the timer drives queue 0 instead of interrupts */
	if (poll) {
		struct bnx2x_fastpath *fp = &bp->fp[0];

		bnx2x_tx_int(fp);
		bnx2x_rx_int(fp, 1000);
	}

	if (!BP_NOMCP(bp)) {
		int mb_idx = BP_FW_MB_IDX(bp);
		u32 drv_pulse;
		u32 mcp_pulse;

		++bp->fw_drv_pulse_wr_seq;
		bp->fw_drv_pulse_wr_seq &= DRV_PULSE_SEQ_MASK;
		/* TBD - add SYSTEM_TIME */
		drv_pulse = bp->fw_drv_pulse_wr_seq;
		SHMEM_WR(bp, func_mb[mb_idx].drv_pulse_mb, drv_pulse);

		mcp_pulse = (SHMEM_RD(bp, func_mb[mb_idx].mcp_pulse_mb) &
			     MCP_PULSE_SEQ_MASK);
		/* The delta between driver pulse and mcp response
		 * should be 1 (before mcp response) or 0 (after mcp response)
		 */
		if ((drv_pulse != mcp_pulse) &&
		    (drv_pulse != ((mcp_pulse + 1) & MCP_PULSE_SEQ_MASK))) {
			/* someone lost a heartbeat... */
			BNX2X_ERR("drv_pulse (0x%x) != mcp_pulse (0x%x)\n",
				  drv_pulse, mcp_pulse);
		}
	}

	if (bp->state == BNX2X_STATE_OPEN)
		bnx2x_stats_handle(bp, STATS_EVENT_UPDATE);

	mod_timer(&bp->timer, jiffies + bp->current_interval);
}
3590
3591/* end of Statistics */
3592
3593/* nic init */
3594
3595/*
3596 * nic init service functions
3597 */
3598
Dmitry Kravkov523224a2010-10-06 03:23:26 +00003599static inline void bnx2x_fill(struct bnx2x *bp, u32 addr, int fill, u32 len)
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02003600{
Dmitry Kravkov523224a2010-10-06 03:23:26 +00003601 u32 i;
3602 if (!(len%4) && !(addr%4))
3603 for (i = 0; i < len; i += 4)
3604 REG_WR(bp, addr + i, fill);
3605 else
3606 for (i = 0; i < len; i++)
3607 REG_WR8(bp, addr + i, fill);
Eilon Greenstein34f80b02008-06-23 20:33:01 -07003608
Eilon Greenstein34f80b02008-06-23 20:33:01 -07003609}
3610
Dmitry Kravkov523224a2010-10-06 03:23:26 +00003611/* helper: writes FP SP data to FW - data_size in dwords */
3612static inline void bnx2x_wr_fp_sb_data(struct bnx2x *bp,
3613 int fw_sb_id,
3614 u32 *sb_data_p,
3615 u32 data_size)
Eilon Greenstein34f80b02008-06-23 20:33:01 -07003616{
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02003617 int index;
Dmitry Kravkov523224a2010-10-06 03:23:26 +00003618 for (index = 0; index < data_size; index++)
3619 REG_WR(bp, BAR_CSTRORM_INTMEM +
3620 CSTORM_STATUS_BLOCK_DATA_OFFSET(fw_sb_id) +
3621 sizeof(u32)*index,
3622 *(sb_data_p + index));
Eilon Greenstein34f80b02008-06-23 20:33:01 -07003623}
3624
/* Disable and zero a fast-path status block in storm memory: writes a
 * "function disabled" data descriptor (E2 or E1x layout as appropriate)
 * and clears the status/sync block areas.
 */
static inline void bnx2x_zero_fp_sb(struct bnx2x *bp, int fw_sb_id)
{
	u32 *sb_data_p;
	u32 data_size = 0;
	struct hc_status_block_data_e2 sb_data_e2;
	struct hc_status_block_data_e1x sb_data_e1x;

	/* disable the function first */
	if (CHIP_IS_E2(bp)) {
		memset(&sb_data_e2, 0, sizeof(struct hc_status_block_data_e2));
		sb_data_e2.common.p_func.pf_id = HC_FUNCTION_DISABLED;
		sb_data_e2.common.p_func.vf_id = HC_FUNCTION_DISABLED;
		sb_data_e2.common.p_func.vf_valid = false;
		sb_data_p = (u32 *)&sb_data_e2;
		data_size = sizeof(struct hc_status_block_data_e2)/sizeof(u32);
	} else {
		memset(&sb_data_e1x, 0,
		       sizeof(struct hc_status_block_data_e1x));
		sb_data_e1x.common.p_func.pf_id = HC_FUNCTION_DISABLED;
		sb_data_e1x.common.p_func.vf_id = HC_FUNCTION_DISABLED;
		sb_data_e1x.common.p_func.vf_valid = false;
		sb_data_p = (u32 *)&sb_data_e1x;
		data_size = sizeof(struct hc_status_block_data_e1x)/sizeof(u32);
	}
	bnx2x_wr_fp_sb_data(bp, fw_sb_id, sb_data_p, data_size);

	/* zero the status block and its sync area */
	bnx2x_fill(bp, BAR_CSTRORM_INTMEM +
			CSTORM_STATUS_BLOCK_OFFSET(fw_sb_id), 0,
			CSTORM_STATUS_BLOCK_SIZE);
	bnx2x_fill(bp, BAR_CSTRORM_INTMEM +
			CSTORM_SYNC_BLOCK_OFFSET(fw_sb_id), 0,
			CSTORM_SYNC_BLOCK_SIZE);
}
3658
3659/* helper: writes SP SB data to FW */
3660static inline void bnx2x_wr_sp_sb_data(struct bnx2x *bp,
3661 struct hc_sp_status_block_data *sp_sb_data)
Eilon Greenstein34f80b02008-06-23 20:33:01 -07003662{
3663 int func = BP_FUNC(bp);
Dmitry Kravkov523224a2010-10-06 03:23:26 +00003664 int i;
3665 for (i = 0; i < sizeof(struct hc_sp_status_block_data)/sizeof(u32); i++)
3666 REG_WR(bp, BAR_CSTRORM_INTMEM +
3667 CSTORM_SP_STATUS_BLOCK_DATA_OFFSET(func) +
3668 i*sizeof(u32),
3669 *((u32 *)sp_sb_data + i));
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02003670}
3671
/* Disable and zero the slow-path status block of this function: writes
 * a "function disabled" SP data descriptor and clears the SP status and
 * sync block areas in storm memory.
 */
static inline void bnx2x_zero_sp_sb(struct bnx2x *bp)
{
	int func = BP_FUNC(bp);
	struct hc_sp_status_block_data sp_sb_data;
	memset(&sp_sb_data, 0, sizeof(struct hc_sp_status_block_data));

	sp_sb_data.p_func.pf_id = HC_FUNCTION_DISABLED;
	sp_sb_data.p_func.vf_id = HC_FUNCTION_DISABLED;
	sp_sb_data.p_func.vf_valid = false;

	bnx2x_wr_sp_sb_data(bp, &sp_sb_data);

	bnx2x_fill(bp, BAR_CSTRORM_INTMEM +
			CSTORM_SP_STATUS_BLOCK_OFFSET(func), 0,
			CSTORM_SP_STATUS_BLOCK_SIZE);
	bnx2x_fill(bp, BAR_CSTRORM_INTMEM +
			CSTORM_SP_SYNC_BLOCK_OFFSET(func), 0,
			CSTORM_SP_SYNC_BLOCK_SIZE);

}
3692
3693
3694static inline
3695void bnx2x_setup_ndsb_state_machine(struct hc_status_block_sm *hc_sm,
3696 int igu_sb_id, int igu_seg_id)
3697{
3698 hc_sm->igu_sb_id = igu_sb_id;
3699 hc_sm->igu_seg_id = igu_seg_id;
3700 hc_sm->timer_value = 0xFF;
3701 hc_sm->time_to_expire = 0xFFFFFFFF;
3702}
3703
/* Initialize a fast-path status block: builds the E2 or E1x SB data
 * descriptor (host DMA address, owning PF/VF/vnic, state machines for
 * the RX and TX indices) and writes it to storm memory for fw_sb_id.
 */
static void bnx2x_init_sb(struct bnx2x *bp, dma_addr_t mapping, int vfid,
			  u8 vf_valid, int fw_sb_id, int igu_sb_id)
{
	int igu_seg_id;

	struct hc_status_block_data_e2 sb_data_e2;
	struct hc_status_block_data_e1x sb_data_e1x;
	struct hc_status_block_sm *hc_sm_p;
	int data_size;
	u32 *sb_data_p;

	/* segment depends on the interrupt mode (backward-compatible HC
	 * vs native IGU)
	 */
	if (CHIP_INT_MODE_IS_BC(bp))
		igu_seg_id = HC_SEG_ACCESS_NORM;
	else
		igu_seg_id = IGU_SEG_ACCESS_NORM;

	bnx2x_zero_fp_sb(bp, fw_sb_id);

	if (CHIP_IS_E2(bp)) {
		memset(&sb_data_e2, 0, sizeof(struct hc_status_block_data_e2));
		sb_data_e2.common.p_func.pf_id = BP_FUNC(bp);
		sb_data_e2.common.p_func.vf_id = vfid;
		sb_data_e2.common.p_func.vf_valid = vf_valid;
		sb_data_e2.common.p_func.vnic_id = BP_VN(bp);
		sb_data_e2.common.same_igu_sb_1b = true;
		sb_data_e2.common.host_sb_addr.hi = U64_HI(mapping);
		sb_data_e2.common.host_sb_addr.lo = U64_LO(mapping);
		hc_sm_p = sb_data_e2.common.state_machine;
		sb_data_p = (u32 *)&sb_data_e2;
		data_size = sizeof(struct hc_status_block_data_e2)/sizeof(u32);
	} else {
		memset(&sb_data_e1x, 0,
		       sizeof(struct hc_status_block_data_e1x));
		sb_data_e1x.common.p_func.pf_id = BP_FUNC(bp);
		sb_data_e1x.common.p_func.vf_id = 0xff;
		sb_data_e1x.common.p_func.vf_valid = false;
		sb_data_e1x.common.p_func.vnic_id = BP_VN(bp);
		sb_data_e1x.common.same_igu_sb_1b = true;
		sb_data_e1x.common.host_sb_addr.hi = U64_HI(mapping);
		sb_data_e1x.common.host_sb_addr.lo = U64_LO(mapping);
		hc_sm_p = sb_data_e1x.common.state_machine;
		sb_data_p = (u32 *)&sb_data_e1x;
		data_size = sizeof(struct hc_status_block_data_e1x)/sizeof(u32);
	}

	bnx2x_setup_ndsb_state_machine(&hc_sm_p[SM_RX_ID],
				       igu_sb_id, igu_seg_id);
	bnx2x_setup_ndsb_state_machine(&hc_sm_p[SM_TX_ID],
				       igu_sb_id, igu_seg_id);

	DP(NETIF_MSG_HW, "Init FW SB %d\n", fw_sb_id);

	/* write indices to HW */
	bnx2x_wr_fp_sb_data(bp, fw_sb_id, sb_data_p, data_size);
}
3759
Vladislav Zolotarov042181f2011-06-14 01:33:39 +00003760void bnx2x_update_coalesce_sb_index(struct bnx2x *bp, u16 fw_sb_id,
Dmitry Kravkov523224a2010-10-06 03:23:26 +00003761 u8 sb_index, u8 disable, u16 usec)
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02003762{
Eilon Greenstein34f80b02008-06-23 20:33:01 -07003763 int port = BP_PORT(bp);
Dmitry Kravkov523224a2010-10-06 03:23:26 +00003764 u8 ticks = usec / BNX2X_BTR;
3765
3766 storm_memset_hc_timeout(bp, port, fw_sb_id, sb_index, ticks);
3767
3768 disable = disable ? 1 : (usec ? 0 : 1);
3769 storm_memset_hc_disable(bp, port, fw_sb_id, sb_index, disable);
3770}
3771
3772static void bnx2x_update_coalesce_sb(struct bnx2x *bp, u16 fw_sb_id,
3773 u16 tx_usec, u16 rx_usec)
3774{
3775 bnx2x_update_coalesce_sb_index(bp, fw_sb_id, U_SB_ETH_RX_CQ_INDEX,
3776 false, rx_usec);
3777 bnx2x_update_coalesce_sb_index(bp, fw_sb_id, C_SB_ETH_TX_CQ_INDEX,
3778 false, tx_usec);
3779}
Dmitry Kravkovf2e08992010-10-06 03:28:26 +00003780
/* bnx2x_init_def_sb - initialize the default (slow-path) status block
 *
 * Sets up the attention status block and latches the attention-group
 * signal masks, points the HC/IGU attention message registers at the
 * attention SB, fills the slow-path SB data in storm memory and finally
 * acknowledges/enables the default SB interrupt.
 */
static void bnx2x_init_def_sb(struct bnx2x *bp)
{
	struct host_sp_status_block *def_sb = bp->def_status_blk;
	dma_addr_t mapping = bp->def_status_blk_mapping;
	int igu_sp_sb_index;
	int igu_seg_id;
	int port = BP_PORT(bp);
	int func = BP_FUNC(bp);
	int reg_offset;
	u64 section;
	int index;
	struct hc_sp_status_block_data sp_sb_data;
	memset(&sp_sb_data, 0, sizeof(struct hc_sp_status_block_data));

	/* backward-compatible interrupt mode uses the HC default segment */
	if (CHIP_INT_MODE_IS_BC(bp)) {
		igu_sp_sb_index = DEF_SB_IGU_ID;
		igu_seg_id = HC_SEG_ACCESS_DEF;
	} else {
		igu_sp_sb_index = bp->igu_dsb_id;
		igu_seg_id = IGU_SEG_ACCESS_DEF;
	}

	/* ATTN */
	section = ((u64)mapping) + offsetof(struct host_sp_status_block,
					    atten_status_block);
	def_sb->atten_status_block.status_block_id = igu_sp_sb_index;

	bp->attn_state = 0;

	/* cache the AEU enable bits for each dynamic attention group */
	reg_offset = (port ? MISC_REG_AEU_ENABLE1_FUNC_1_OUT_0 :
			     MISC_REG_AEU_ENABLE1_FUNC_0_OUT_0);
	for (index = 0; index < MAX_DYNAMIC_ATTN_GRPS; index++) {
		int sindex;
		/* take care of sig[0]..sig[4] */
		for (sindex = 0; sindex < 4; sindex++)
			bp->attn_group[index].sig[sindex] =
			   REG_RD(bp, reg_offset + sindex*0x4 + 0x10*index);

		if (CHIP_IS_E2(bp))
			/*
			 * enable5 is separate from the rest of the registers,
			 * and therefore the address skip is 4
			 * and not 16 between the different groups
			 */
			bp->attn_group[index].sig[4] = REG_RD(bp,
					reg_offset + 0x10 + 0x4*index);
		else
			bp->attn_group[index].sig[4] = 0;
	}

	/* tell the interrupt block where attention messages should land */
	if (bp->common.int_block == INT_BLOCK_HC) {
		reg_offset = (port ? HC_REG_ATTN_MSG1_ADDR_L :
				     HC_REG_ATTN_MSG0_ADDR_L);

		REG_WR(bp, reg_offset, U64_LO(section));
		REG_WR(bp, reg_offset + 4, U64_HI(section));
	} else if (CHIP_IS_E2(bp)) {
		REG_WR(bp, IGU_REG_ATTN_MSG_ADDR_L, U64_LO(section));
		REG_WR(bp, IGU_REG_ATTN_MSG_ADDR_H, U64_HI(section));
	}

	section = ((u64)mapping) + offsetof(struct host_sp_status_block,
					    sp_sb);

	bnx2x_zero_sp_sb(bp);

	/* describe the slow-path SB to the storms */
	sp_sb_data.host_sb_addr.lo = U64_LO(section);
	sp_sb_data.host_sb_addr.hi = U64_HI(section);
	sp_sb_data.igu_sb_id = igu_sp_sb_index;
	sp_sb_data.igu_seg_id = igu_seg_id;
	sp_sb_data.p_func.pf_id = func;
	sp_sb_data.p_func.vnic_id = BP_VN(bp);
	sp_sb_data.p_func.vf_id = 0xff; /* default SB belongs to the PF */

	bnx2x_wr_sp_sb_data(bp, &sp_sb_data);

	bp->stats_pending = 0;
	bp->set_mac_pending = 0;

	/* ack the default SB and enable its interrupt */
	bnx2x_ack_sb(bp, bp->igu_dsb_id, USTORM_ID, 0, IGU_INT_ENABLE, 0);
}
3862
/* Re-program the Rx/Tx coalescing timeouts of every ethernet queue's
 * status block from the current bp->rx_ticks / bp->tx_ticks settings.
 */
void bnx2x_update_coalesce(struct bnx2x *bp)
{
	int i;

	for_each_eth_queue(bp, i)
		bnx2x_update_coalesce_sb(bp, bp->fp[i].fw_sb_id,
					 bp->tx_ticks, bp->rx_ticks);
}
3871
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02003872static void bnx2x_init_sp_ring(struct bnx2x *bp)
3873{
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02003874 spin_lock_init(&bp->spq_lock);
Vladislav Zolotarov6e30dd42011-02-06 11:25:41 -08003875 atomic_set(&bp->cq_spq_left, MAX_SPQ_PENDING);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02003876
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02003877 bp->spq_prod_idx = 0;
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02003878 bp->dsb_sp_prod = BNX2X_SP_DSB_INDEX;
3879 bp->spq_prod_bd = bp->spq;
3880 bp->spq_last_bd = bp->spq_prod_bd + MAX_SP_DESC_CNT;
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02003881}
3882
Dmitry Kravkov523224a2010-10-06 03:23:26 +00003883static void bnx2x_init_eq_ring(struct bnx2x *bp)
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02003884{
3885 int i;
Dmitry Kravkov523224a2010-10-06 03:23:26 +00003886 for (i = 1; i <= NUM_EQ_PAGES; i++) {
3887 union event_ring_elem *elem =
3888 &bp->eq_ring[EQ_DESC_CNT_PAGE * i - 1];
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02003889
Dmitry Kravkov523224a2010-10-06 03:23:26 +00003890 elem->next_page.addr.hi =
3891 cpu_to_le32(U64_HI(bp->eq_mapping +
3892 BCM_PAGE_SIZE * (i % NUM_EQ_PAGES)));
3893 elem->next_page.addr.lo =
3894 cpu_to_le32(U64_LO(bp->eq_mapping +
3895 BCM_PAGE_SIZE*(i % NUM_EQ_PAGES)));
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02003896 }
Dmitry Kravkov523224a2010-10-06 03:23:26 +00003897 bp->eq_cons = 0;
3898 bp->eq_prod = NUM_EQ_DESC;
3899 bp->eq_cons_sb = BNX2X_EQ_INDEX;
Vladislav Zolotarov6e30dd42011-02-06 11:25:41 -08003900 /* we want a warning message before it gets rought... */
3901 atomic_set(&bp->eq_spq_left,
3902 min_t(int, MAX_SP_DESC_CNT - MAX_SPQ_PENDING, NUM_EQ_DESC) - 1);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02003903}
3904
Tom Herbertab532cf2011-02-16 10:27:02 +00003905static void bnx2x_init_ind_table(struct bnx2x *bp)
3906{
3907 int i;
3908
3909 for (i = 0; i < TSTORM_INDIRECTION_TABLE_SIZE; i++)
3910 bp->rx_indir_table[i] = i % BNX2X_NUM_ETH_QUEUES(bp);
3911
3912 bnx2x_push_indir_table(bp);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02003913}
3914
/* bnx2x_init_internal_common - chip-wide storm RAM initialization
 *
 * Performed only by the function that got a COMMON load code from the
 * MCP: publishes the multi-function mode to all four storms, relaxes
 * TSTORM classification for switch-independent MF mode, zeroes the
 * USTORM aggregation data and selects the IGU mode on E2.
 */
static void bnx2x_init_internal_common(struct bnx2x *bp)
{
	int i;

	if (!CHIP_IS_E1(bp)) {

		/* xstorm needs to know whether to add ovlan to packets or not,
		 * in switch-independent we'll write 0 to here... */
		REG_WR8(bp, BAR_XSTRORM_INTMEM + XSTORM_FUNCTION_MODE_OFFSET,
			bp->mf_mode);
		REG_WR8(bp, BAR_TSTRORM_INTMEM + TSTORM_FUNCTION_MODE_OFFSET,
			bp->mf_mode);
		REG_WR8(bp, BAR_CSTRORM_INTMEM + CSTORM_FUNCTION_MODE_OFFSET,
			bp->mf_mode);
		REG_WR8(bp, BAR_USTRORM_INTMEM + USTORM_FUNCTION_MODE_OFFSET,
			bp->mf_mode);
	}

	if (IS_MF_SI(bp))
		/*
		 * In switch independent mode, the TSTORM needs to accept
		 * packets that failed classification, since approximate match
		 * mac addresses aren't written to NIG LLH
		 */
		REG_WR8(bp, BAR_TSTRORM_INTMEM +
			    TSTORM_ACCEPT_CLASSIFY_FAILED_OFFSET, 2);

	/* Zero this manually as its initialization is
	   currently missing in the initTool */
	for (i = 0; i < (USTORM_AGG_DATA_SIZE >> 2); i++)
		REG_WR(bp, BAR_USTRORM_INTMEM +
		       USTORM_AGG_DATA_OFFSET + i * 4, 0);
	if (CHIP_IS_E2(bp)) {
		/* select between backward-compatible and normal IGU mode */
		REG_WR8(bp, BAR_CSTRORM_INTMEM + CSTORM_IGU_MODE_OFFSET,
			CHIP_INT_MODE_IS_BC(bp) ?
			HC_IGU_BC_MODE : HC_IGU_NBC_MODE);
	}
}
3953
/* Per-port internal memory initialization: currently only the DCB/PFC
 * related storm memory needs setting up at port scope.
 */
static void bnx2x_init_internal_port(struct bnx2x *bp)
{
	/* port */
	bnx2x_dcb_init_intmem_pfc(bp);
}
3959
/* bnx2x_init_internal - init internal (storm) memories per MCP load code
 *
 * The switch cases deliberately fall through: a COMMON load also does
 * the PORT and FUNCTION parts, and a PORT load also does the FUNCTION
 * part.
 */
static void bnx2x_init_internal(struct bnx2x *bp, u32 load_code)
{
	switch (load_code) {
	case FW_MSG_CODE_DRV_LOAD_COMMON:
	case FW_MSG_CODE_DRV_LOAD_COMMON_CHIP:
		bnx2x_init_internal_common(bp);
		/* no break */

	case FW_MSG_CODE_DRV_LOAD_PORT:
		bnx2x_init_internal_port(bp);
		/* no break */

	case FW_MSG_CODE_DRV_LOAD_FUNCTION:
		/* internal memory per function is
		   initialized inside bnx2x_pf_init */
		break;

	default:
		BNX2X_ERR("Unknown load_code (0x%x) from MCP\n", load_code);
		break;
	}
}
3982
/* bnx2x_init_fp_sb - initialize one fastpath's ids and status block
 * @bp:		driver handle
 * @fp_idx:	index of the fastpath within bp->fp[]
 *
 * Assigns the fastpath's CID, client/FW/IGU SB ids and qZone id, caches
 * the USTORM Rx producers shortcut offset, programs the SB in the chip
 * and refreshes the driver's copy of the SB indices.
 */
static void bnx2x_init_fp_sb(struct bnx2x *bp, int fp_idx)
{
	struct bnx2x_fastpath *fp = &bp->fp[fp_idx];

	fp->state = BNX2X_FP_STATE_CLOSED;

	fp->cid = fp_idx;
	fp->cl_id = BP_L_ID(bp) + fp_idx;
	fp->fw_sb_id = bp->base_fw_ndsb + fp->cl_id + CNIC_CONTEXT_USE;
	fp->igu_sb_id = bp->igu_base_sb + fp_idx + CNIC_CONTEXT_USE;
	/* qZone id equals to FW (per path) client id */
	fp->cl_qzone_id = fp->cl_id +
			   BP_PORT(bp)*(CHIP_IS_E2(bp) ? ETH_MAX_RX_CLIENTS_E2 :
				ETH_MAX_RX_CLIENTS_E1H);
	/* init shortcut */
	fp->ustorm_rx_prods_offset = CHIP_IS_E2(bp) ?
			    USTORM_RX_PRODS_E2_OFFSET(fp->cl_qzone_id) :
			    USTORM_RX_PRODS_E1X_OFFSET(BP_PORT(bp), fp->cl_id);
	/* Setup SB indices */
	fp->rx_cons_sb = BNX2X_RX_SB_INDEX;
	fp->tx_cons_sb = BNX2X_TX_SB_INDEX;

	DP(NETIF_MSG_IFUP, "queue[%d]: bnx2x_init_sb(%p,%p) "
			   "cl_id %d fw_sb %d igu_sb %d\n",
	   fp_idx, bp, fp->status_blk.e1x_sb, fp->cl_id, fp->fw_sb_id,
	   fp->igu_sb_id);
	bnx2x_init_sb(bp, fp->status_blk_mapping, BNX2X_VF_ID_INVALID, false,
		      fp->fw_sb_id, fp->igu_sb_id);

	bnx2x_update_fpsb_idx(fp);
}
4014
/* bnx2x_nic_init - initialize all driver/chip data structures for a load
 * @bp:		driver handle
 * @load_code:	load response code received from the MCP
 *
 * Brings up the fastpath SBs (plus FCoE/CNIC ones when compiled in),
 * the default SB, the Rx/Tx/slow-path/event rings, the internal storm
 * memories and statistics, then enables interrupts. The barriers before
 * bnx2x_int_enable() make sure all of the above is visible to the chip
 * before the first interrupt can fire.
 */
void bnx2x_nic_init(struct bnx2x *bp, u32 load_code)
{
	int i;

	for_each_eth_queue(bp, i)
		bnx2x_init_fp_sb(bp, i);
#ifdef BCM_CNIC
	if (!NO_FCOE(bp))
		bnx2x_init_fcoe_fp(bp);

	bnx2x_init_sb(bp, bp->cnic_sb_mapping,
		      BNX2X_VF_ID_INVALID, false,
		      CNIC_SB_ID(bp), CNIC_IGU_SB_ID(bp));

#endif

	/* Initialize MOD_ABS interrupts */
	bnx2x_init_mod_abs_int(bp, &bp->link_vars, bp->common.chip_id,
			       bp->common.shmem_base, bp->common.shmem2_base,
			       BP_PORT(bp));
	/* ensure status block indices were read */
	rmb();

	bnx2x_init_def_sb(bp);
	bnx2x_update_dsb_idx(bp);
	bnx2x_init_rx_rings(bp);
	bnx2x_init_tx_rings(bp);
	bnx2x_init_sp_ring(bp);
	bnx2x_init_eq_ring(bp);
	bnx2x_init_internal(bp, load_code);
	bnx2x_pf_init(bp);
	bnx2x_init_ind_table(bp);
	bnx2x_stats_init(bp);

	/* flush all before enabling interrupts */
	mb();
	mmiowb();

	bnx2x_int_enable(bp);

	/* Check for SPIO5 */
	bnx2x_attn_int_deasserted0(bp,
		REG_RD(bp, MISC_REG_AEU_AFTER_INVERT_1_FUNC_0 + BP_PORT(bp)*4) &
				   AEU_INPUTS_ATTN_BITS_SPIO5);
}
4060
4061/* end of nic init */
4062
4063/*
4064 * gzip service functions
4065 */
4066
4067static int bnx2x_gunzip_init(struct bnx2x *bp)
4068{
FUJITA Tomonori1a983142010-04-04 01:51:03 +00004069 bp->gunzip_buf = dma_alloc_coherent(&bp->pdev->dev, FW_BUF_SIZE,
4070 &bp->gunzip_mapping, GFP_KERNEL);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02004071 if (bp->gunzip_buf == NULL)
4072 goto gunzip_nomem1;
4073
4074 bp->strm = kmalloc(sizeof(*bp->strm), GFP_KERNEL);
4075 if (bp->strm == NULL)
4076 goto gunzip_nomem2;
4077
4078 bp->strm->workspace = kmalloc(zlib_inflate_workspacesize(),
4079 GFP_KERNEL);
4080 if (bp->strm->workspace == NULL)
4081 goto gunzip_nomem3;
4082
4083 return 0;
4084
4085gunzip_nomem3:
4086 kfree(bp->strm);
4087 bp->strm = NULL;
4088
4089gunzip_nomem2:
FUJITA Tomonori1a983142010-04-04 01:51:03 +00004090 dma_free_coherent(&bp->pdev->dev, FW_BUF_SIZE, bp->gunzip_buf,
4091 bp->gunzip_mapping);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02004092 bp->gunzip_buf = NULL;
4093
4094gunzip_nomem1:
Vladislav Zolotarovcdaa7cb2010-04-19 01:13:57 +00004095 netdev_err(bp->dev, "Cannot allocate firmware buffer for"
4096 " un-compression\n");
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02004097 return -ENOMEM;
4098}
4099
4100static void bnx2x_gunzip_end(struct bnx2x *bp)
4101{
Dmitry Kravkovb3b83c32011-05-04 23:50:33 +00004102 if (bp->strm) {
4103 kfree(bp->strm->workspace);
4104 kfree(bp->strm);
4105 bp->strm = NULL;
4106 }
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02004107
4108 if (bp->gunzip_buf) {
FUJITA Tomonori1a983142010-04-04 01:51:03 +00004109 dma_free_coherent(&bp->pdev->dev, FW_BUF_SIZE, bp->gunzip_buf,
4110 bp->gunzip_mapping);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02004111 bp->gunzip_buf = NULL;
4112 }
4113}
4114
Vladislav Zolotarov94a78b72009-04-27 03:27:43 -07004115static int bnx2x_gunzip(struct bnx2x *bp, const u8 *zbuf, int len)
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02004116{
4117 int n, rc;
4118
4119 /* check gzip header */
Vladislav Zolotarov94a78b72009-04-27 03:27:43 -07004120 if ((zbuf[0] != 0x1f) || (zbuf[1] != 0x8b) || (zbuf[2] != Z_DEFLATED)) {
4121 BNX2X_ERR("Bad gzip header\n");
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02004122 return -EINVAL;
Vladislav Zolotarov94a78b72009-04-27 03:27:43 -07004123 }
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02004124
4125 n = 10;
4126
Eilon Greenstein34f80b02008-06-23 20:33:01 -07004127#define FNAME 0x8
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02004128
4129 if (zbuf[3] & FNAME)
4130 while ((zbuf[n++] != 0) && (n < len));
4131
Vladislav Zolotarov94a78b72009-04-27 03:27:43 -07004132 bp->strm->next_in = (typeof(bp->strm->next_in))zbuf + n;
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02004133 bp->strm->avail_in = len - n;
4134 bp->strm->next_out = bp->gunzip_buf;
4135 bp->strm->avail_out = FW_BUF_SIZE;
4136
4137 rc = zlib_inflateInit2(bp->strm, -MAX_WBITS);
4138 if (rc != Z_OK)
4139 return rc;
4140
4141 rc = zlib_inflate(bp->strm, Z_FINISH);
4142 if ((rc != Z_OK) && (rc != Z_STREAM_END))
Joe Perches7995c642010-02-17 15:01:52 +00004143 netdev_err(bp->dev, "Firmware decompression error: %s\n",
4144 bp->strm->msg);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02004145
4146 bp->gunzip_outlen = (FW_BUF_SIZE - bp->strm->avail_out);
4147 if (bp->gunzip_outlen & 0x3)
Vladislav Zolotarovcdaa7cb2010-04-19 01:13:57 +00004148 netdev_err(bp->dev, "Firmware decompression error:"
4149 " gunzip_outlen (%d) not aligned\n",
4150 bp->gunzip_outlen);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02004151 bp->gunzip_outlen >>= 2;
4152
4153 zlib_inflateEnd(bp->strm);
4154
4155 if (rc == Z_STREAM_END)
4156 return 0;
4157
4158 return rc;
4159}
4160
4161/* nic load/unload */
4162
4163/*
Eilon Greenstein34f80b02008-06-23 20:33:01 -07004164 * General service functions
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02004165 */
4166
4167/* send a NIG loopback debug packet */
4168static void bnx2x_lb_pckt(struct bnx2x *bp)
4169{
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02004170 u32 wb_write[3];
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02004171
4172 /* Ethernet source and destination addresses */
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02004173 wb_write[0] = 0x55555555;
4174 wb_write[1] = 0x55555555;
Eilon Greenstein34f80b02008-06-23 20:33:01 -07004175 wb_write[2] = 0x20; /* SOP */
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02004176 REG_WR_DMAE(bp, NIG_REG_DEBUG_PACKET_LB, wb_write, 3);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02004177
4178 /* NON-IP protocol */
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02004179 wb_write[0] = 0x09000000;
4180 wb_write[1] = 0x55555555;
Eilon Greenstein34f80b02008-06-23 20:33:01 -07004181 wb_write[2] = 0x10; /* EOP, eop_bvalid = 0 */
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02004182 REG_WR_DMAE(bp, NIG_REG_DEBUG_PACKET_LB, wb_write, 3);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02004183}
4184
4185/* some of the internal memories
4186 * are not directly readable from the driver
4187 * to test them we send debug packets
4188 */
/* bnx2x_int_mem_test - self-test of internal memories via loopback packets
 *
 * Sends debug packets through the NIG loopback while the parser's
 * neighbor blocks are disabled, then verifies the NIG/PRS counters.
 * Returns 0 on success, or a distinct negative code identifying which
 * stage timed out/failed. Note: the negative values are test-stage ids,
 * not errnos.
 */
static int bnx2x_int_mem_test(struct bnx2x *bp)
{
	int factor;
	int count, i;
	u32 val = 0;

	/* emulation/FPGA platforms are much slower - stretch the timeouts */
	if (CHIP_REV_IS_FPGA(bp))
		factor = 120;
	else if (CHIP_REV_IS_EMUL(bp))
		factor = 200;
	else
		factor = 1;

	/* Disable inputs of parser neighbor blocks */
	REG_WR(bp, TSDM_REG_ENABLE_IN1, 0x0);
	REG_WR(bp, TCM_REG_PRS_IFEN, 0x0);
	REG_WR(bp, CFC_REG_DEBUG0, 0x1);
	REG_WR(bp, NIG_REG_PRS_REQ_IN_EN, 0x0);

	/* Write 0 to parser credits for CFC search request */
	REG_WR(bp, PRS_REG_CFC_SEARCH_INITIAL_CREDIT, 0x0);

	/* send Ethernet packet */
	bnx2x_lb_pckt(bp);

	/* TODO do i reset NIG statistic? */
	/* Wait until NIG register shows 1 packet of size 0x10 */
	count = 1000 * factor;
	while (count) {

		bnx2x_read_dmae(bp, NIG_REG_STAT2_BRB_OCTET, 2);
		val = *bnx2x_sp(bp, wb_data[0]);
		if (val == 0x10)
			break;

		msleep(10);
		count--;
	}
	if (val != 0x10) {
		BNX2X_ERR("NIG timeout val = 0x%x\n", val);
		return -1;
	}

	/* Wait until PRS register shows 1 packet */
	count = 1000 * factor;
	while (count) {
		val = REG_RD(bp, PRS_REG_NUM_OF_PACKETS);
		if (val == 1)
			break;

		msleep(10);
		count--;
	}
	if (val != 0x1) {
		BNX2X_ERR("PRS timeout val = 0x%x\n", val);
		return -2;
	}

	/* Reset and init BRB, PRS */
	REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_CLEAR, 0x03);
	msleep(50);
	REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_SET, 0x03);
	msleep(50);
	bnx2x_init_block(bp, BRB1_BLOCK, COMMON_STAGE);
	bnx2x_init_block(bp, PRS_BLOCK, COMMON_STAGE);

	DP(NETIF_MSG_HW, "part2\n");

	/* Disable inputs of parser neighbor blocks */
	REG_WR(bp, TSDM_REG_ENABLE_IN1, 0x0);
	REG_WR(bp, TCM_REG_PRS_IFEN, 0x0);
	REG_WR(bp, CFC_REG_DEBUG0, 0x1);
	REG_WR(bp, NIG_REG_PRS_REQ_IN_EN, 0x0);

	/* Write 0 to parser credits for CFC search request */
	REG_WR(bp, PRS_REG_CFC_SEARCH_INITIAL_CREDIT, 0x0);

	/* send 10 Ethernet packets */
	for (i = 0; i < 10; i++)
		bnx2x_lb_pckt(bp);

	/* Wait until NIG register shows 10 + 1
	   packets of size 11*0x10 = 0xb0 */
	count = 1000 * factor;
	while (count) {

		bnx2x_read_dmae(bp, NIG_REG_STAT2_BRB_OCTET, 2);
		val = *bnx2x_sp(bp, wb_data[0]);
		if (val == 0xb0)
			break;

		msleep(10);
		count--;
	}
	if (val != 0xb0) {
		BNX2X_ERR("NIG timeout val = 0x%x\n", val);
		return -3;
	}

	/* Wait until PRS register shows 2 packets */
	val = REG_RD(bp, PRS_REG_NUM_OF_PACKETS);
	if (val != 2)
		BNX2X_ERR("PRS timeout val = 0x%x\n", val);

	/* Write 1 to parser credits for CFC search request */
	REG_WR(bp, PRS_REG_CFC_SEARCH_INITIAL_CREDIT, 0x1);

	/* Wait until PRS register shows 3 packets */
	msleep(10 * factor);
	/* Wait until NIG register shows 1 packet of size 0x10 */
	val = REG_RD(bp, PRS_REG_NUM_OF_PACKETS);
	if (val != 3)
		BNX2X_ERR("PRS timeout val = 0x%x\n", val);

	/* clear NIG EOP FIFO */
	for (i = 0; i < 11; i++)
		REG_RD(bp, NIG_REG_INGRESS_EOP_LB_FIFO);
	val = REG_RD(bp, NIG_REG_INGRESS_EOP_LB_EMPTY);
	if (val != 1) {
		BNX2X_ERR("clear of NIG failed\n");
		return -4;
	}

	/* Reset and init BRB, PRS, NIG */
	REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_CLEAR, 0x03);
	msleep(50);
	REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_SET, 0x03);
	msleep(50);
	bnx2x_init_block(bp, BRB1_BLOCK, COMMON_STAGE);
	bnx2x_init_block(bp, PRS_BLOCK, COMMON_STAGE);
#ifndef BCM_CNIC
	/* set NIC mode */
	REG_WR(bp, PRS_REG_NIC_MODE, 1);
#endif

	/* Enable inputs of parser neighbor blocks */
	REG_WR(bp, TSDM_REG_ENABLE_IN1, 0x7fffffff);
	REG_WR(bp, TCM_REG_PRS_IFEN, 0x1);
	REG_WR(bp, CFC_REG_DEBUG0, 0x0);
	REG_WR(bp, NIG_REG_PRS_REQ_IN_EN, 0x1);

	DP(NETIF_MSG_HW, "done\n");

	return 0; /* OK */
}
4334
/* bnx2x_enable_blocks_attention - unmask the per-block interrupt sources
 *
 * Writes the interrupt mask register of each HW block; 0 unmasks
 * everything, non-zero values keep known-benign bits masked (see the
 * inline comments). The commented-out writes document masks that are
 * deliberately left at their reset values.
 */
static void bnx2x_enable_blocks_attention(struct bnx2x *bp)
{
	REG_WR(bp, PXP_REG_PXP_INT_MASK_0, 0);
	if (CHIP_IS_E2(bp))
		REG_WR(bp, PXP_REG_PXP_INT_MASK_1, 0x40);
	else
		REG_WR(bp, PXP_REG_PXP_INT_MASK_1, 0);
	REG_WR(bp, DORQ_REG_DORQ_INT_MASK, 0);
	REG_WR(bp, CFC_REG_CFC_INT_MASK, 0);
	/*
	 * mask read length error interrupts in brb for parser
	 * (parsing unit and 'checksum and crc' unit)
	 * these errors are legal (PU reads fixed length and CAC can cause
	 * read length error on truncated packets)
	 */
	REG_WR(bp, BRB1_REG_BRB1_INT_MASK, 0xFC00);
	REG_WR(bp, QM_REG_QM_INT_MASK, 0);
	REG_WR(bp, TM_REG_TM_INT_MASK, 0);
	REG_WR(bp, XSDM_REG_XSDM_INT_MASK_0, 0);
	REG_WR(bp, XSDM_REG_XSDM_INT_MASK_1, 0);
	REG_WR(bp, XCM_REG_XCM_INT_MASK, 0);
/*	REG_WR(bp, XSEM_REG_XSEM_INT_MASK_0, 0); */
/*	REG_WR(bp, XSEM_REG_XSEM_INT_MASK_1, 0); */
	REG_WR(bp, USDM_REG_USDM_INT_MASK_0, 0);
	REG_WR(bp, USDM_REG_USDM_INT_MASK_1, 0);
	REG_WR(bp, UCM_REG_UCM_INT_MASK, 0);
/*	REG_WR(bp, USEM_REG_USEM_INT_MASK_0, 0); */
/*	REG_WR(bp, USEM_REG_USEM_INT_MASK_1, 0); */
	REG_WR(bp, GRCBASE_UPB + PB_REG_PB_INT_MASK, 0);
	REG_WR(bp, CSDM_REG_CSDM_INT_MASK_0, 0);
	REG_WR(bp, CSDM_REG_CSDM_INT_MASK_1, 0);
	REG_WR(bp, CCM_REG_CCM_INT_MASK, 0);
/*	REG_WR(bp, CSEM_REG_CSEM_INT_MASK_0, 0); */
/*	REG_WR(bp, CSEM_REG_CSEM_INT_MASK_1, 0); */

	if (CHIP_REV_IS_FPGA(bp))
		REG_WR(bp, PXP2_REG_PXP2_INT_MASK_0, 0x580000);
	else if (CHIP_IS_E2(bp))
		REG_WR(bp, PXP2_REG_PXP2_INT_MASK_0,
			   (PXP2_PXP2_INT_MASK_0_REG_PGL_CPL_OF
				| PXP2_PXP2_INT_MASK_0_REG_PGL_CPL_AFT
				| PXP2_PXP2_INT_MASK_0_REG_PGL_PCIE_ATTN
				| PXP2_PXP2_INT_MASK_0_REG_PGL_READ_BLOCKED
				| PXP2_PXP2_INT_MASK_0_REG_PGL_WRITE_BLOCKED));
	else
		REG_WR(bp, PXP2_REG_PXP2_INT_MASK_0, 0x480000);
	REG_WR(bp, TSDM_REG_TSDM_INT_MASK_0, 0);
	REG_WR(bp, TSDM_REG_TSDM_INT_MASK_1, 0);
	REG_WR(bp, TCM_REG_TCM_INT_MASK, 0);
/*	REG_WR(bp, TSEM_REG_TSEM_INT_MASK_0, 0); */
/*	REG_WR(bp, TSEM_REG_TSEM_INT_MASK_1, 0); */
	REG_WR(bp, CDU_REG_CDU_INT_MASK, 0);
	REG_WR(bp, DMAE_REG_DMAE_INT_MASK, 0);
/*	REG_WR(bp, MISC_REG_MISC_INT_MASK, 0); */
	REG_WR(bp, PBF_REG_PBF_INT_MASK, 0x18);		/* bit 3,4 masked */
}
4391
/* bnx2x_reset_common - put the shared (non per-port) HW blocks in reset
 *
 * Clears the reset-register bits of the common blocks; the masks leave
 * untouched the bits that must survive a common reset.
 */
static void bnx2x_reset_common(struct bnx2x *bp)
{
	/* reset_common */
	REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_CLEAR,
	       0xd3ffff7f);
	REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_2_CLEAR, 0x1403);
}
4399
Eilon Greenstein573f2032009-08-12 08:24:14 +00004400static void bnx2x_init_pxp(struct bnx2x *bp)
4401{
4402 u16 devctl;
4403 int r_order, w_order;
4404
4405 pci_read_config_word(bp->pdev,
4406 bp->pcie_cap + PCI_EXP_DEVCTL, &devctl);
4407 DP(NETIF_MSG_HW, "read 0x%x from devctl\n", devctl);
4408 w_order = ((devctl & PCI_EXP_DEVCTL_PAYLOAD) >> 5);
4409 if (bp->mrrs == -1)
4410 r_order = ((devctl & PCI_EXP_DEVCTL_READRQ) >> 12);
4411 else {
4412 DP(NETIF_MSG_HW, "force read order to %d\n", bp->mrrs);
4413 r_order = bp->mrrs;
4414 }
4415
4416 bnx2x_init_pxp_arb(bp, r_order, w_order);
4417}
Eilon Greensteinfd4ef402009-07-21 05:47:27 +00004418
/* bnx2x_setup_fan_failure_detection - arm the SPIO5 fan-failure interrupt
 *
 * Decides - from the shared HW config and, when so configured, from the
 * PHY types of both ports - whether this board needs fan failure
 * detection. If it does, SPIO5 is configured as an active-low input and
 * its event is enabled towards the IGU. No-op when the MCP is absent.
 */
static void bnx2x_setup_fan_failure_detection(struct bnx2x *bp)
{
	int is_required;
	u32 val;
	int port;

	if (BP_NOMCP(bp))
		return;

	is_required = 0;
	val = SHMEM_RD(bp, dev_info.shared_hw_config.config2) &
	      SHARED_HW_CFG_FAN_FAILURE_MASK;

	if (val == SHARED_HW_CFG_FAN_FAILURE_ENABLED)
		is_required = 1;

	/*
	 * The fan failure mechanism is usually related to the PHY type since
	 * the power consumption of the board is affected by the PHY. Currently,
	 * fan is required for most designs with SFX7101, BCM8727 and BCM8481.
	 */
	else if (val == SHARED_HW_CFG_FAN_FAILURE_PHY_TYPE)
		for (port = PORT_0; port < PORT_MAX; port++) {
			is_required |=
				bnx2x_fan_failure_det_req(
					bp,
					bp->common.shmem_base,
					bp->common.shmem2_base,
					port);
		}

	DP(NETIF_MSG_HW, "fan detection setting: %d\n", is_required);

	if (is_required == 0)
		return;

	/* Fan failure is indicated by SPIO 5 */
	bnx2x_set_spio(bp, MISC_REGISTERS_SPIO_5,
		       MISC_REGISTERS_SPIO_INPUT_HI_Z);

	/* set to active low mode */
	val = REG_RD(bp, MISC_REG_SPIO_INT);
	val |= ((1 << MISC_REGISTERS_SPIO_5) <<
					MISC_REGISTERS_SPIO_INT_OLD_SET_POS);
	REG_WR(bp, MISC_REG_SPIO_INT, val);

	/* enable interrupt to signal the IGU */
	val = REG_RD(bp, MISC_REG_SPIO_EVENT_EN);
	val |= (1 << MISC_REGISTERS_SPIO_5);
	REG_WR(bp, MISC_REG_SPIO_EVENT_EN, val);
}
4470
Dmitry Kravkovf2e08992010-10-06 03:28:26 +00004471static void bnx2x_pretend_func(struct bnx2x *bp, u8 pretend_func_num)
4472{
4473 u32 offset = 0;
4474
4475 if (CHIP_IS_E1(bp))
4476 return;
4477 if (CHIP_IS_E1H(bp) && (pretend_func_num >= E1H_FUNC_MAX))
4478 return;
4479
4480 switch (BP_ABS_FUNC(bp)) {
4481 case 0:
4482 offset = PXP2_REG_PGL_PRETEND_FUNC_F0;
4483 break;
4484 case 1:
4485 offset = PXP2_REG_PGL_PRETEND_FUNC_F1;
4486 break;
4487 case 2:
4488 offset = PXP2_REG_PGL_PRETEND_FUNC_F2;
4489 break;
4490 case 3:
4491 offset = PXP2_REG_PGL_PRETEND_FUNC_F3;
4492 break;
4493 case 4:
4494 offset = PXP2_REG_PGL_PRETEND_FUNC_F4;
4495 break;
4496 case 5:
4497 offset = PXP2_REG_PGL_PRETEND_FUNC_F5;
4498 break;
4499 case 6:
4500 offset = PXP2_REG_PGL_PRETEND_FUNC_F6;
4501 break;
4502 case 7:
4503 offset = PXP2_REG_PGL_PRETEND_FUNC_F7;
4504 break;
4505 default:
4506 return;
4507 }
4508
4509 REG_WR(bp, offset, pretend_func_num);
4510 REG_RD(bp, offset);
4511 DP(NETIF_MSG_HW, "Pretending to func %d\n", pretend_func_num);
4512}
4513
4514static void bnx2x_pf_disable(struct bnx2x *bp)
4515{
4516 u32 val = REG_RD(bp, IGU_REG_PF_CONFIGURATION);
4517 val &= ~IGU_PF_CONF_FUNC_EN;
4518
4519 REG_WR(bp, IGU_REG_PF_CONFIGURATION, val);
4520 REG_WR(bp, PGLUE_B_REG_INTERNAL_PFID_ENABLE_MASTER, 0);
4521 REG_WR(bp, CFC_REG_WEAK_ENABLE_PF, 0);
4522}
4523
/* One-time "common" hardware initialization, performed by the first driver
 * instance loaded on a chip path: resets the common blocks and brings up
 * PXP/PXP2, QM, parser, storm memories, CDU/CFC, IGU/NIG etc. in the order
 * the hardware requires.
 *
 * @load_code: MCP load response; compared against
 *             FW_MSG_CODE_DRV_LOAD_COMMON_CHIP to decide whether the
 *             external PHY common init must run on this invocation.
 *
 * Returns 0 on success, or -EBUSY when a hardware block fails to report
 * init-done within the polled time.
 */
static int bnx2x_init_hw_common(struct bnx2x *bp, u32 load_code)
{
	u32 val, i;

	DP(BNX2X_MSG_MCP, "starting common init func %d\n", BP_ABS_FUNC(bp));

	/* Put the common blocks into reset, then release them */
	bnx2x_reset_common(bp);
	REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_SET, 0xffffffff);
	REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_2_SET, 0xfffc);

	bnx2x_init_block(bp, MISC_BLOCK, COMMON_STAGE);
	if (!CHIP_IS_E1(bp))
		REG_WR(bp, MISC_REG_E1HMF_MODE, IS_MF(bp));

	if (CHIP_IS_E2(bp)) {
		u8 fid;

		/*
		 * 4-port mode or 2-port mode we need to turn off
		 * master-enable for everyone, after that, turn it back on
		 * for self. so, we disregard multi-function or not, and
		 * always disable for all functions on the given path, this
		 * means 0,2,4,6 for path 0 and 1,3,5,7 for path 1
		 */
		for (fid = BP_PATH(bp); fid < E2_FUNC_MAX*2; fid += 2) {
			if (fid == BP_ABS_FUNC(bp)) {
				/* re-enable master for ourselves only */
				REG_WR(bp,
				    PGLUE_B_REG_INTERNAL_PFID_ENABLE_MASTER,
				    1);
				continue;
			}

			/* pretend to be the other function to reach its
			 * per-function registers */
			bnx2x_pretend_func(bp, fid);
			/* clear pf enable */
			bnx2x_pf_disable(bp);
			bnx2x_pretend_func(bp, BP_ABS_FUNC(bp));
		}
	}

	bnx2x_init_block(bp, PXP_BLOCK, COMMON_STAGE);
	if (CHIP_IS_E1(bp)) {
		/* enable HW interrupt from PXP on USDM overflow
		   bit 16 on INT_MASK_0 */
		REG_WR(bp, PXP_REG_PXP_INT_MASK_0, 0);
	}

	bnx2x_init_block(bp, PXP2_BLOCK, COMMON_STAGE);
	bnx2x_init_pxp(bp);

#ifdef __BIG_ENDIAN
	/* byte-swap the read/write queues on big-endian hosts */
	REG_WR(bp, PXP2_REG_RQ_QM_ENDIAN_M, 1);
	REG_WR(bp, PXP2_REG_RQ_TM_ENDIAN_M, 1);
	REG_WR(bp, PXP2_REG_RQ_SRC_ENDIAN_M, 1);
	REG_WR(bp, PXP2_REG_RQ_CDU_ENDIAN_M, 1);
	REG_WR(bp, PXP2_REG_RQ_DBG_ENDIAN_M, 1);
	/* make sure this value is 0 */
	REG_WR(bp, PXP2_REG_RQ_HC_ENDIAN_M, 0);

/*	REG_WR(bp, PXP2_REG_RD_PBF_SWAP_MODE, 1); */
	REG_WR(bp, PXP2_REG_RD_QM_SWAP_MODE, 1);
	REG_WR(bp, PXP2_REG_RD_TM_SWAP_MODE, 1);
	REG_WR(bp, PXP2_REG_RD_SRC_SWAP_MODE, 1);
	REG_WR(bp, PXP2_REG_RD_CDURD_SWAP_MODE, 1);
#endif

	bnx2x_ilt_init_page_size(bp, INITOP_SET);

	if (CHIP_REV_IS_FPGA(bp) && CHIP_IS_E1H(bp))
		REG_WR(bp, PXP2_REG_PGL_TAGS_LIMIT, 0x1);

	/* let the HW do its magic ... */
	msleep(100);
	/* finish PXP init: both done-flags must read back as 1 */
	val = REG_RD(bp, PXP2_REG_RQ_CFG_DONE);
	if (val != 1) {
		BNX2X_ERR("PXP2 CFG failed\n");
		return -EBUSY;
	}
	val = REG_RD(bp, PXP2_REG_RD_INIT_DONE);
	if (val != 1) {
		BNX2X_ERR("PXP2 RD_INIT failed\n");
		return -EBUSY;
	}

	/* Timers bug workaround E2 only. We need to set the entire ILT to
	 * have entries with value "0" and valid bit on.
	 * This needs to be done by the first PF that is loaded in a path
	 * (i.e. common phase)
	 */
	if (CHIP_IS_E2(bp)) {
		struct ilt_client_info ilt_cli;
		struct bnx2x_ilt ilt;
		memset(&ilt_cli, 0, sizeof(struct ilt_client_info));
		memset(&ilt, 0, sizeof(struct bnx2x_ilt));

		/* initialize dummy TM client covering the whole ILT */
		ilt_cli.start = 0;
		ilt_cli.end = ILT_NUM_PAGE_ENTRIES - 1;
		ilt_cli.client_num = ILT_CLIENT_TM;

		/* Step 1: set zeroes to all ilt page entries with valid bit on
		 * Step 2: set the timers first/last ilt entry to point
		 * to the entire range to prevent ILT range error for 3rd/4th
		 * vnic (this code assumes existence of the vnic)
		 *
		 * both steps performed by call to bnx2x_ilt_client_init_op()
		 * with dummy TM client
		 *
		 * we must use pretend since PXP2_REG_RQ_##blk##_FIRST_ILT
		 * and his brother are split registers
		 */
		bnx2x_pretend_func(bp, (BP_PATH(bp) + 6));
		bnx2x_ilt_client_init_op_ilt(bp, &ilt, &ilt_cli, INITOP_CLEAR);
		bnx2x_pretend_func(bp, BP_ABS_FUNC(bp));

		REG_WR(bp, PXP2_REG_RQ_DRAM_ALIGN, BNX2X_PXP_DRAM_ALIGN);
		REG_WR(bp, PXP2_REG_RQ_DRAM_ALIGN_RD, BNX2X_PXP_DRAM_ALIGN);
		REG_WR(bp, PXP2_REG_RQ_DRAM_ALIGN_SEL, 1);
	}


	REG_WR(bp, PXP2_REG_RQ_DISABLE_INPUTS, 0);
	REG_WR(bp, PXP2_REG_RD_DISABLE_INPUTS, 0);

	if (CHIP_IS_E2(bp)) {
		/* emulation/FPGA platforms need far more polling iterations */
		int factor = CHIP_REV_IS_EMUL(bp) ? 1000 :
				(CHIP_REV_IS_FPGA(bp) ? 400 : 0);
		bnx2x_init_block(bp, PGLUE_B_BLOCK, COMMON_STAGE);

		bnx2x_init_block(bp, ATC_BLOCK, COMMON_STAGE);

		/* let the HW do its magic ... */
		do {
			msleep(200);
			val = REG_RD(bp, ATC_REG_ATC_INIT_DONE);
		} while (factor-- && (val != 1));

		if (val != 1) {
			BNX2X_ERR("ATC_INIT failed\n");
			return -EBUSY;
		}
	}

	bnx2x_init_block(bp, DMAE_BLOCK, COMMON_STAGE);

	/* clean the DMAE memory */
	bp->dmae_ready = 1;
	bnx2x_init_fill(bp, TSEM_REG_PRAM, 0, 8);

	bnx2x_init_block(bp, TCM_BLOCK, COMMON_STAGE);
	bnx2x_init_block(bp, UCM_BLOCK, COMMON_STAGE);
	bnx2x_init_block(bp, CCM_BLOCK, COMMON_STAGE);
	bnx2x_init_block(bp, XCM_BLOCK, COMMON_STAGE);

	bnx2x_read_dmae(bp, XSEM_REG_PASSIVE_BUFFER, 3);
	bnx2x_read_dmae(bp, CSEM_REG_PASSIVE_BUFFER, 3);
	bnx2x_read_dmae(bp, TSEM_REG_PASSIVE_BUFFER, 3);
	bnx2x_read_dmae(bp, USEM_REG_PASSIVE_BUFFER, 3);

	bnx2x_init_block(bp, QM_BLOCK, COMMON_STAGE);

	if (CHIP_MODE_IS_4_PORT(bp))
		bnx2x_init_block(bp, QM_4PORT_BLOCK, COMMON_STAGE);

	/* QM queues pointers table */
	bnx2x_qm_init_ptr_table(bp, bp->qm_cid_count, INITOP_SET);

	/* soft reset pulse */
	REG_WR(bp, QM_REG_SOFT_RESET, 1);
	REG_WR(bp, QM_REG_SOFT_RESET, 0);

#ifdef BCM_CNIC
	bnx2x_init_block(bp, TIMERS_BLOCK, COMMON_STAGE);
#endif

	bnx2x_init_block(bp, DQ_BLOCK, COMMON_STAGE);
	REG_WR(bp, DORQ_REG_DPM_CID_OFST, BNX2X_DB_SHIFT);

	if (!CHIP_REV_IS_SLOW(bp)) {
		/* enable hw interrupt from doorbell Q */
		REG_WR(bp, DORQ_REG_DORQ_INT_MASK, 0);
	}

	bnx2x_init_block(bp, BRB1_BLOCK, COMMON_STAGE);
	if (CHIP_MODE_IS_4_PORT(bp)) {
		REG_WR(bp, BRB1_REG_FULL_LB_XOFF_THRESHOLD, 248);
		REG_WR(bp, BRB1_REG_FULL_LB_XON_THRESHOLD, 328);
	}

	bnx2x_init_block(bp, PRS_BLOCK, COMMON_STAGE);
	REG_WR(bp, PRS_REG_A_PRSU_20, 0xf);
#ifndef BCM_CNIC
	/* set NIC mode */
	REG_WR(bp, PRS_REG_NIC_MODE, 1);
#endif
	if (!CHIP_IS_E1(bp))
		REG_WR(bp, PRS_REG_E1HOV_MODE, IS_MF_SD(bp));

	if (CHIP_IS_E2(bp)) {
		/* Bit-map indicating which L2 hdrs may appear after the
		   basic Ethernet header */
		int has_ovlan = IS_MF_SD(bp);
		REG_WR(bp, PRS_REG_HDRS_AFTER_BASIC, (has_ovlan ? 7 : 6));
		REG_WR(bp, PRS_REG_MUST_HAVE_HDRS, (has_ovlan ? 1 : 0));
	}

	bnx2x_init_block(bp, TSDM_BLOCK, COMMON_STAGE);
	bnx2x_init_block(bp, CSDM_BLOCK, COMMON_STAGE);
	bnx2x_init_block(bp, USDM_BLOCK, COMMON_STAGE);
	bnx2x_init_block(bp, XSDM_BLOCK, COMMON_STAGE);

	/* zero the storms' internal fast memory */
	bnx2x_init_fill(bp, TSEM_REG_FAST_MEMORY, 0, STORM_INTMEM_SIZE(bp));
	bnx2x_init_fill(bp, USEM_REG_FAST_MEMORY, 0, STORM_INTMEM_SIZE(bp));
	bnx2x_init_fill(bp, CSEM_REG_FAST_MEMORY, 0, STORM_INTMEM_SIZE(bp));
	bnx2x_init_fill(bp, XSEM_REG_FAST_MEMORY, 0, STORM_INTMEM_SIZE(bp));

	bnx2x_init_block(bp, TSEM_BLOCK, COMMON_STAGE);
	bnx2x_init_block(bp, USEM_BLOCK, COMMON_STAGE);
	bnx2x_init_block(bp, CSEM_BLOCK, COMMON_STAGE);
	bnx2x_init_block(bp, XSEM_BLOCK, COMMON_STAGE);

	if (CHIP_MODE_IS_4_PORT(bp))
		bnx2x_init_block(bp, XSEM_4PORT_BLOCK, COMMON_STAGE);

	/* sync semi rtc */
	REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_CLEAR,
	       0x80000000);
	REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_SET,
	       0x80000000);

	bnx2x_init_block(bp, UPB_BLOCK, COMMON_STAGE);
	bnx2x_init_block(bp, XPB_BLOCK, COMMON_STAGE);
	bnx2x_init_block(bp, PBF_BLOCK, COMMON_STAGE);

	if (CHIP_IS_E2(bp)) {
		int has_ovlan = IS_MF_SD(bp);
		REG_WR(bp, PBF_REG_HDRS_AFTER_BASIC, (has_ovlan ? 7 : 6));
		REG_WR(bp, PBF_REG_MUST_HAVE_HDRS, (has_ovlan ? 1 : 0));
	}

	/* load random RSS keys while the searcher is in soft reset */
	REG_WR(bp, SRC_REG_SOFT_RST, 1);
	for (i = SRC_REG_KEYRSS0_0; i <= SRC_REG_KEYRSS1_9; i += 4)
		REG_WR(bp, i, random32());

	bnx2x_init_block(bp, SRCH_BLOCK, COMMON_STAGE);
#ifdef BCM_CNIC
	REG_WR(bp, SRC_REG_KEYSEARCH_0, 0x63285672);
	REG_WR(bp, SRC_REG_KEYSEARCH_1, 0x24b8f2cc);
	REG_WR(bp, SRC_REG_KEYSEARCH_2, 0x223aef9b);
	REG_WR(bp, SRC_REG_KEYSEARCH_3, 0x26001e3a);
	REG_WR(bp, SRC_REG_KEYSEARCH_4, 0x7ae91116);
	REG_WR(bp, SRC_REG_KEYSEARCH_5, 0x5ce5230b);
	REG_WR(bp, SRC_REG_KEYSEARCH_6, 0x298d8adf);
	REG_WR(bp, SRC_REG_KEYSEARCH_7, 0x6eb0ff09);
	REG_WR(bp, SRC_REG_KEYSEARCH_8, 0x1830f82f);
	REG_WR(bp, SRC_REG_KEYSEARCH_9, 0x01e46be7);
#endif
	REG_WR(bp, SRC_REG_SOFT_RST, 0);

	if (sizeof(union cdu_context) != 1024)
		/* we currently assume that a context is 1024 bytes */
		dev_alert(&bp->pdev->dev, "please adjust the size "
					  "of cdu_context(%ld)\n",
			  (long)sizeof(union cdu_context));

	bnx2x_init_block(bp, CDU_BLOCK, COMMON_STAGE);
	val = (4 << 24) + (0 << 12) + 1024;
	REG_WR(bp, CDU_REG_CDU_GLOBAL_PARAMS, val);

	bnx2x_init_block(bp, CFC_BLOCK, COMMON_STAGE);
	REG_WR(bp, CFC_REG_INIT_REG, 0x7FF);
	/* enable context validation interrupt from CFC */
	REG_WR(bp, CFC_REG_CFC_INT_MASK, 0);

	/* set the thresholds to prevent CFC/CDU race */
	REG_WR(bp, CFC_REG_DEBUG0, 0x20020000);

	bnx2x_init_block(bp, HC_BLOCK, COMMON_STAGE);

	if (CHIP_IS_E2(bp) && BP_NOMCP(bp))
		REG_WR(bp, IGU_REG_RESET_MEMORIES, 0x36);

	bnx2x_init_block(bp, IGU_BLOCK, COMMON_STAGE);
	bnx2x_init_block(bp, MISC_AEU_BLOCK, COMMON_STAGE);

	bnx2x_init_block(bp, PXPCS_BLOCK, COMMON_STAGE);
	/* Reset PCIE errors for debug */
	REG_WR(bp, 0x2814, 0xffffffff);
	REG_WR(bp, 0x3820, 0xffffffff);

	if (CHIP_IS_E2(bp)) {
		/* mask "unsupported request" PCIe error reporting */
		REG_WR(bp, PCICFG_OFFSET + PXPCS_TL_CONTROL_5,
			   (PXPCS_TL_CONTROL_5_ERR_UNSPPORT1 |
				PXPCS_TL_CONTROL_5_ERR_UNSPPORT));
		REG_WR(bp, PCICFG_OFFSET + PXPCS_TL_FUNC345_STAT,
			   (PXPCS_TL_FUNC345_STAT_ERR_UNSPPORT4 |
				PXPCS_TL_FUNC345_STAT_ERR_UNSPPORT3 |
				PXPCS_TL_FUNC345_STAT_ERR_UNSPPORT2));
		REG_WR(bp, PCICFG_OFFSET + PXPCS_TL_FUNC678_STAT,
			   (PXPCS_TL_FUNC678_STAT_ERR_UNSPPORT7 |
				PXPCS_TL_FUNC678_STAT_ERR_UNSPPORT6 |
				PXPCS_TL_FUNC678_STAT_ERR_UNSPPORT5));
	}

	bnx2x_init_block(bp, EMAC0_BLOCK, COMMON_STAGE);
	bnx2x_init_block(bp, EMAC1_BLOCK, COMMON_STAGE);
	bnx2x_init_block(bp, DBU_BLOCK, COMMON_STAGE);
	bnx2x_init_block(bp, DBG_BLOCK, COMMON_STAGE);

	bnx2x_init_block(bp, NIG_BLOCK, COMMON_STAGE);
	if (!CHIP_IS_E1(bp)) {
		REG_WR(bp, NIG_REG_LLH_MF_MODE, IS_MF(bp));
		REG_WR(bp, NIG_REG_LLH_E1HOV_MODE, IS_MF_SD(bp));
	}
	if (CHIP_IS_E2(bp)) {
		/* Bit-map indicating which L2 hdrs may appear after the
		   basic Ethernet header */
		REG_WR(bp, NIG_REG_P0_HDRS_AFTER_BASIC, (IS_MF_SD(bp) ? 7 : 6));
	}

	if (CHIP_REV_IS_SLOW(bp))
		msleep(200);

	/* finish CFC init */
	val = reg_poll(bp, CFC_REG_LL_INIT_DONE, 1, 100, 10);
	if (val != 1) {
		BNX2X_ERR("CFC LL_INIT failed\n");
		return -EBUSY;
	}
	val = reg_poll(bp, CFC_REG_AC_INIT_DONE, 1, 100, 10);
	if (val != 1) {
		BNX2X_ERR("CFC AC_INIT failed\n");
		return -EBUSY;
	}
	val = reg_poll(bp, CFC_REG_CAM_INIT_DONE, 1, 100, 10);
	if (val != 1) {
		BNX2X_ERR("CFC CAM_INIT failed\n");
		return -EBUSY;
	}
	REG_WR(bp, CFC_REG_DEBUG0, 0);

	if (CHIP_IS_E1(bp)) {
		/* read NIG statistic
		   to see if this is our first up since powerup */
		bnx2x_read_dmae(bp, NIG_REG_STAT2_BRB_OCTET, 2);
		val = *bnx2x_sp(bp, wb_data[0]);

		/* do internal memory self test */
		if ((val == 0) && bnx2x_int_mem_test(bp)) {
			BNX2X_ERR("internal mem self test failed\n");
			return -EBUSY;
		}
	}

	bnx2x_setup_fan_failure_detection(bp);

	/* clear PXP2 attentions */
	REG_RD(bp, PXP2_REG_PXP2_INT_STS_CLR_0);

	bnx2x_enable_blocks_attention(bp);
	if (CHIP_PARITY_ENABLED(bp))
		bnx2x_enable_blocks_parity(bp);

	if (!BP_NOMCP(bp)) {
		/* In E2 2-PORT mode, same ext phy is used for the two paths */
		if ((load_code == FW_MSG_CODE_DRV_LOAD_COMMON_CHIP) ||
		    CHIP_IS_E1x(bp)) {
			u32 shmem_base[2], shmem2_base[2];
			shmem_base[0] = bp->common.shmem_base;
			shmem2_base[0] = bp->common.shmem2_base;
			if (CHIP_IS_E2(bp)) {
				/* second entry: shmem of the other path */
				shmem_base[1] =
					SHMEM2_RD(bp, other_shmem_base_addr);
				shmem2_base[1] =
					SHMEM2_RD(bp, other_shmem2_base_addr);
			}
			bnx2x_acquire_phy_lock(bp);
			bnx2x_common_init_phy(bp, shmem_base, shmem2_base,
					      bp->common.chip_id);
			bnx2x_release_phy_lock(bp);
		}
	} else
		BNX2X_ERR("Bootcode is missing - can not initialize link\n");

	return 0;
}
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02004910
/* Per-port hardware initialization: runs the per-port init stage for every
 * hardware block, programs BRB pause thresholds (sized from MTU and
 * port/MF configuration), PBF credits, the AEU attention masks, and NIG
 * multi-function classification for the port owned by this driver instance.
 *
 * Always returns 0; kept int for symmetry with the common/func init stages.
 */
static int bnx2x_init_hw_port(struct bnx2x *bp)
{
	int port = BP_PORT(bp);
	int init_stage = port ? PORT1_STAGE : PORT0_STAGE;
	u32 low, high;
	u32 val;

	DP(BNX2X_MSG_MCP, "starting port init port %d\n", port);

	/* mask this port's NIG interrupts during init */
	REG_WR(bp, NIG_REG_MASK_INTERRUPT_PORT0 + port*4, 0);

	bnx2x_init_block(bp, PXP_BLOCK, init_stage);
	bnx2x_init_block(bp, PXP2_BLOCK, init_stage);

	/* Timers bug workaround: disables the pf_master bit in pglue at
	 * common phase, we need to enable it here before any dmae access are
	 * attempted. Therefore we manually added the enable-master to the
	 * port phase (it also happens in the function phase)
	 */
	if (CHIP_IS_E2(bp))
		REG_WR(bp, PGLUE_B_REG_INTERNAL_PFID_ENABLE_MASTER, 1);

	bnx2x_init_block(bp, TCM_BLOCK, init_stage);
	bnx2x_init_block(bp, UCM_BLOCK, init_stage);
	bnx2x_init_block(bp, CCM_BLOCK, init_stage);
	bnx2x_init_block(bp, XCM_BLOCK, init_stage);

	/* QM cid (connection) count */
	bnx2x_qm_init_cid_count(bp, bp->qm_cid_count, INITOP_SET);

#ifdef BCM_CNIC
	bnx2x_init_block(bp, TIMERS_BLOCK, init_stage);
	REG_WR(bp, TM_REG_LIN0_SCAN_TIME + port*4, 20);
	REG_WR(bp, TM_REG_LIN0_MAX_ACTIVE_CID + port*4, 31);
#endif

	bnx2x_init_block(bp, DQ_BLOCK, init_stage);

	if (CHIP_MODE_IS_4_PORT(bp))
		bnx2x_init_block(bp, QM_4PORT_BLOCK, init_stage);

	if (CHIP_IS_E1(bp) || CHIP_IS_E1H(bp)) {
		bnx2x_init_block(bp, BRB1_BLOCK, init_stage);
		if (CHIP_REV_IS_SLOW(bp) && CHIP_IS_E1(bp)) {
			/* no pause for emulation and FPGA */
			low = 0;
			high = 513;
		} else {
			/* pause low-watermark depends on MF mode, MTU and
			 * whether the board has a single port */
			if (IS_MF(bp))
				low = ((bp->flags & ONE_PORT_FLAG) ? 160 : 246);
			else if (bp->dev->mtu > 4096) {
				if (bp->flags & ONE_PORT_FLAG)
					low = 160;
				else {
					val = bp->dev->mtu;
					/* (24*1024 + val*4)/256 */
					low = 96 + (val/64) +
					      ((val % 64) ? 1 : 0);
				}
			} else
				low = ((bp->flags & ONE_PORT_FLAG) ? 80 : 160);
			high = low + 56;	/* 14*1024/256 */
		}
		REG_WR(bp, BRB1_REG_PAUSE_LOW_THRESHOLD_0 + port*4, low);
		REG_WR(bp, BRB1_REG_PAUSE_HIGH_THRESHOLD_0 + port*4, high);
	}

	if (CHIP_MODE_IS_4_PORT(bp)) {
		REG_WR(bp, BRB1_REG_PAUSE_0_XOFF_THRESHOLD_0 + port*8, 248);
		REG_WR(bp, BRB1_REG_PAUSE_0_XON_THRESHOLD_0 + port*8, 328);
		REG_WR(bp, (BP_PORT(bp) ? BRB1_REG_MAC_GUARANTIED_1 :
				BRB1_REG_MAC_GUARANTIED_0), 40);
	}

	bnx2x_init_block(bp, PRS_BLOCK, init_stage);

	bnx2x_init_block(bp, TSDM_BLOCK, init_stage);
	bnx2x_init_block(bp, CSDM_BLOCK, init_stage);
	bnx2x_init_block(bp, USDM_BLOCK, init_stage);
	bnx2x_init_block(bp, XSDM_BLOCK, init_stage);

	bnx2x_init_block(bp, TSEM_BLOCK, init_stage);
	bnx2x_init_block(bp, USEM_BLOCK, init_stage);
	bnx2x_init_block(bp, CSEM_BLOCK, init_stage);
	bnx2x_init_block(bp, XSEM_BLOCK, init_stage);
	if (CHIP_MODE_IS_4_PORT(bp))
		bnx2x_init_block(bp, XSEM_4PORT_BLOCK, init_stage);

	bnx2x_init_block(bp, UPB_BLOCK, init_stage);
	bnx2x_init_block(bp, XPB_BLOCK, init_stage);

	bnx2x_init_block(bp, PBF_BLOCK, init_stage);

	if (!CHIP_IS_E2(bp)) {
		/* configure PBF to work without PAUSE mtu 9000 */
		REG_WR(bp, PBF_REG_P0_PAUSE_ENABLE + port*4, 0);

		/* update threshold */
		REG_WR(bp, PBF_REG_P0_ARB_THRSH + port*4, (9040/16));
		/* update init credit */
		REG_WR(bp, PBF_REG_P0_INIT_CRD + port*4, (9040/16) + 553 - 22);

		/* probe changes */
		REG_WR(bp, PBF_REG_INIT_P0 + port*4, 1);
		udelay(50);
		REG_WR(bp, PBF_REG_INIT_P0 + port*4, 0);
	}

#ifdef BCM_CNIC
	bnx2x_init_block(bp, SRCH_BLOCK, init_stage);
#endif
	bnx2x_init_block(bp, CDU_BLOCK, init_stage);
	bnx2x_init_block(bp, CFC_BLOCK, init_stage);

	if (CHIP_IS_E1(bp)) {
		REG_WR(bp, HC_REG_LEADING_EDGE_0 + port*8, 0);
		REG_WR(bp, HC_REG_TRAILING_EDGE_0 + port*8, 0);
	}
	bnx2x_init_block(bp, HC_BLOCK, init_stage);

	bnx2x_init_block(bp, IGU_BLOCK, init_stage);

	bnx2x_init_block(bp, MISC_AEU_BLOCK, init_stage);
	/* init aeu_mask_attn_func_0/1:
	 *  - SF mode: bits 3-7 are masked. only bits 0-2 are in use
	 *  - MF mode: bit 3 is masked. bits 0-2 are in use as in SF
	 *             bits 4-7 are used for "per vn group attention" */
	val = IS_MF(bp) ? 0xF7 : 0x7;
	/* Enable DCBX attention for all but E1 */
	val |= CHIP_IS_E1(bp) ? 0 : 0x10;
	REG_WR(bp, MISC_REG_AEU_MASK_ATTN_FUNC_0 + port*4, val);

	bnx2x_init_block(bp, PXPCS_BLOCK, init_stage);
	bnx2x_init_block(bp, EMAC0_BLOCK, init_stage);
	bnx2x_init_block(bp, EMAC1_BLOCK, init_stage);
	bnx2x_init_block(bp, DBU_BLOCK, init_stage);
	bnx2x_init_block(bp, DBG_BLOCK, init_stage);

	bnx2x_init_block(bp, NIG_BLOCK, init_stage);

	REG_WR(bp, NIG_REG_XGXS_SERDES0_MODE_SEL + port*4, 1);

	if (!CHIP_IS_E1(bp)) {
		/* 0x2 disable mf_ov, 0x1 enable */
		REG_WR(bp, NIG_REG_LLH0_BRB1_DRV_MASK_MF + port*4,
		       (IS_MF_SD(bp) ? 0x1 : 0x2));

		if (CHIP_IS_E2(bp)) {
			/* classification type per MF mode (0 = default) */
			val = 0;
			switch (bp->mf_mode) {
			case MULTI_FUNCTION_SD:
				val = 1;
				break;
			case MULTI_FUNCTION_SI:
				val = 2;
				break;
			}

			REG_WR(bp, (BP_PORT(bp) ? NIG_REG_LLH1_CLS_TYPE :
						  NIG_REG_LLH0_CLS_TYPE), val);
		}
		{
			REG_WR(bp, NIG_REG_LLFC_ENABLE_0 + port*4, 0);
			REG_WR(bp, NIG_REG_LLFC_OUT_EN_0 + port*4, 0);
			REG_WR(bp, NIG_REG_PAUSE_ENABLE_0 + port*4, 1);
		}
	}

	bnx2x_init_block(bp, MCP_BLOCK, init_stage);
	bnx2x_init_block(bp, DMAE_BLOCK, init_stage);
	if (bnx2x_fan_failure_det_req(bp, bp->common.shmem_base,
				      bp->common.shmem2_base, port)) {
		/* unmask the SPIO5 (fan failure) attention for this port */
		u32 reg_addr = (port ? MISC_REG_AEU_ENABLE1_FUNC_1_OUT_0 :
				       MISC_REG_AEU_ENABLE1_FUNC_0_OUT_0);
		val = REG_RD(bp, reg_addr);
		val |= AEU_INPUTS_ATTN_BITS_SPIO5;
		REG_WR(bp, reg_addr, val);
	}
	bnx2x__link_reset(bp);

	return 0;
}
5093
Eilon Greenstein34f80b02008-06-23 20:33:01 -07005094static void bnx2x_ilt_wr(struct bnx2x *bp, u32 index, dma_addr_t addr)
5095{
5096 int reg;
5097
Dmitry Kravkovf2e08992010-10-06 03:28:26 +00005098 if (CHIP_IS_E1(bp))
Eilon Greenstein34f80b02008-06-23 20:33:01 -07005099 reg = PXP2_REG_RQ_ONCHIP_AT + index*8;
Dmitry Kravkovf2e08992010-10-06 03:28:26 +00005100 else
5101 reg = PXP2_REG_RQ_ONCHIP_AT_B0 + index*8;
Eilon Greenstein34f80b02008-06-23 20:33:01 -07005102
5103 bnx2x_wb_wr(bp, reg, ONCHIP_ADDR1(addr), ONCHIP_ADDR2(addr));
5104}
5105
/* Clear an IGU status block on behalf of the PF (thin wrapper around
 * bnx2x_igu_clear_sb_gen with the PF flag set).
 */
static inline void bnx2x_igu_clear_sb(struct bnx2x *bp, u8 idu_sb_id)
{
	bnx2x_igu_clear_sb_gen(bp, idu_sb_id, true /*PF*/);
}
5110
5111static inline void bnx2x_clear_func_ilt(struct bnx2x *bp, u32 func)
5112{
5113 u32 i, base = FUNC_ILT_BASE(func);
5114 for (i = base; i < base + ILT_PER_FUNC; i++)
5115 bnx2x_ilt_wr(bp, i, 0);
5116}
5117
/* Per-function hardware initialization: programs the ILT with this
 * function's CDU context pages, initializes every HW block at the
 * per-function stage, sets up HC or IGU interrupt delivery, clears
 * PCIE error latches and probes the PHY.
 *
 * The order of the register writes and block inits below is mandated
 * by the hardware init sequence - do not reorder.
 *
 * Returns 0 (kept int for symmetry with the common/port init stages).
 */
static int bnx2x_init_hw_func(struct bnx2x *bp)
{
	int port = BP_PORT(bp);
	int func = BP_FUNC(bp);
	struct bnx2x_ilt *ilt = BP_ILT(bp);
	u16 cdu_ilt_start;
	u32 addr, val;
	u32 main_mem_base, main_mem_size, main_mem_prty_clr;
	int i, main_mem_width;

	DP(BNX2X_MSG_MCP, "starting func init func %d\n", func);

	/* set MSI reconfigure capability */
	if (bp->common.int_block == INT_BLOCK_HC) {
		addr = (port ? HC_REG_CONFIG_1 : HC_REG_CONFIG_0);
		val = REG_RD(bp, addr);
		val |= HC_CONFIG_0_REG_MSI_ATTN_EN_0;
		REG_WR(bp, addr, val);
	}

	/* Point this function's CDU ILT lines at the pre-allocated
	 * context memory, one ILT line per CDU page.
	 */
	ilt = BP_ILT(bp);
	cdu_ilt_start = ilt->clients[ILT_CLIENT_CDU].start;

	for (i = 0; i < L2_ILT_LINES(bp); i++) {
		ilt->lines[cdu_ilt_start + i].page =
			bp->context.vcxt + (ILT_PAGE_CIDS * i);
		ilt->lines[cdu_ilt_start + i].page_mapping =
			bp->context.cxt_mapping + (CDU_ILT_PAGE_SZ * i);
		/* cdu ilt pages are allocated manually so there's no need to
		set the size */
	}
	bnx2x_ilt_init_op(bp, INITOP_SET);

#ifdef BCM_CNIC
	bnx2x_src_init_t2(bp, bp->t2, bp->t2_mapping, SRC_CONN_NUM);

	/* T1 hash bits value determines the T1 number of entries */
	REG_WR(bp, SRC_REG_NUMBER_HASH_BITS0 + port*4, SRC_HASH_BITS);
#endif

#ifndef BCM_CNIC
	/* set NIC mode */
	REG_WR(bp, PRS_REG_NIC_MODE, 1);
#endif  /* BCM_CNIC */

	if (CHIP_IS_E2(bp)) {
		u32 pf_conf = IGU_PF_CONF_FUNC_EN;

		/* Turn on a single ISR mode in IGU if driver is going to use
		 * INT#x or MSI
		 */
		if (!(bp->flags & USING_MSIX_FLAG))
			pf_conf |= IGU_PF_CONF_SINGLE_ISR_EN;
		/*
		 * Timers workaround bug: function init part.
		 * Need to wait 20msec after initializing ILT,
		 * needed to make sure there are no requests in
		 * one of the PXP internal queues with "old" ILT addresses
		 */
		msleep(20);
		/*
		 * Master enable - Due to WB DMAE writes performed before this
		 * register is re-initialized as part of the regular function
		 * init
		 */
		REG_WR(bp, PGLUE_B_REG_INTERNAL_PFID_ENABLE_MASTER, 1);
		/* Enable the function in IGU */
		REG_WR(bp, IGU_REG_PF_CONFIGURATION, pf_conf);
	}

	/* DMAE may be used from this point on (master was just enabled) */
	bp->dmae_ready = 1;

	bnx2x_init_block(bp, PGLUE_B_BLOCK, FUNC0_STAGE + func);

	if (CHIP_IS_E2(bp))
		REG_WR(bp, PGLUE_B_REG_WAS_ERROR_PF_7_0_CLR, func);

	/* Per-function init of the CM/SEM blocks */
	bnx2x_init_block(bp, MISC_BLOCK, FUNC0_STAGE + func);
	bnx2x_init_block(bp, TCM_BLOCK, FUNC0_STAGE + func);
	bnx2x_init_block(bp, UCM_BLOCK, FUNC0_STAGE + func);
	bnx2x_init_block(bp, CCM_BLOCK, FUNC0_STAGE + func);
	bnx2x_init_block(bp, XCM_BLOCK, FUNC0_STAGE + func);
	bnx2x_init_block(bp, TSEM_BLOCK, FUNC0_STAGE + func);
	bnx2x_init_block(bp, USEM_BLOCK, FUNC0_STAGE + func);
	bnx2x_init_block(bp, CSEM_BLOCK, FUNC0_STAGE + func);
	bnx2x_init_block(bp, XSEM_BLOCK, FUNC0_STAGE + func);

	if (CHIP_IS_E2(bp)) {
		/* Tell the X/C storms which path (die) this PF is on */
		REG_WR(bp, BAR_XSTRORM_INTMEM + XSTORM_PATH_ID_OFFSET,
							BP_PATH(bp));
		REG_WR(bp, BAR_CSTRORM_INTMEM + CSTORM_PATH_ID_OFFSET,
							BP_PATH(bp));
	}

	if (CHIP_MODE_IS_4_PORT(bp))
		bnx2x_init_block(bp, XSEM_4PORT_BLOCK, FUNC0_STAGE + func);

	if (CHIP_IS_E2(bp))
		REG_WR(bp, QM_REG_PF_EN, 1);

	bnx2x_init_block(bp, QM_BLOCK, FUNC0_STAGE + func);

	if (CHIP_MODE_IS_4_PORT(bp))
		bnx2x_init_block(bp, QM_4PORT_BLOCK, FUNC0_STAGE + func);

	bnx2x_init_block(bp, TIMERS_BLOCK, FUNC0_STAGE + func);
	bnx2x_init_block(bp, DQ_BLOCK, FUNC0_STAGE + func);
	bnx2x_init_block(bp, BRB1_BLOCK, FUNC0_STAGE + func);
	bnx2x_init_block(bp, PRS_BLOCK, FUNC0_STAGE + func);
	bnx2x_init_block(bp, TSDM_BLOCK, FUNC0_STAGE + func);
	bnx2x_init_block(bp, CSDM_BLOCK, FUNC0_STAGE + func);
	bnx2x_init_block(bp, USDM_BLOCK, FUNC0_STAGE + func);
	bnx2x_init_block(bp, XSDM_BLOCK, FUNC0_STAGE + func);
	bnx2x_init_block(bp, UPB_BLOCK, FUNC0_STAGE + func);
	bnx2x_init_block(bp, XPB_BLOCK, FUNC0_STAGE + func);
	bnx2x_init_block(bp, PBF_BLOCK, FUNC0_STAGE + func);
	if (CHIP_IS_E2(bp))
		REG_WR(bp, PBF_REG_DISABLE_PF, 0);

	bnx2x_init_block(bp, CDU_BLOCK, FUNC0_STAGE + func);

	bnx2x_init_block(bp, CFC_BLOCK, FUNC0_STAGE + func);

	if (CHIP_IS_E2(bp))
		REG_WR(bp, CFC_REG_WEAK_ENABLE_PF, 1);

	/* In multi-function mode enable this function in the NIG LLH and
	 * program its outer VLAN tag.
	 */
	if (IS_MF(bp)) {
		REG_WR(bp, NIG_REG_LLH0_FUNC_EN + port*8, 1);
		REG_WR(bp, NIG_REG_LLH0_FUNC_VLAN_ID + port*8, bp->mf_ov);
	}

	bnx2x_init_block(bp, MISC_AEU_BLOCK, FUNC0_STAGE + func);

	/* HC init per function */
	if (bp->common.int_block == INT_BLOCK_HC) {
		if (CHIP_IS_E1H(bp)) {
			REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_12 + func*4, 0);

			REG_WR(bp, HC_REG_LEADING_EDGE_0 + port*8, 0);
			REG_WR(bp, HC_REG_TRAILING_EDGE_0 + port*8, 0);
		}
		bnx2x_init_block(bp, HC_BLOCK, FUNC0_STAGE + func);

	} else {
		/* IGU-based interrupt delivery (no HC) */
		int num_segs, sb_idx, prod_offset;

		REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_12 + func*4, 0);

		if (CHIP_IS_E2(bp)) {
			REG_WR(bp, IGU_REG_LEADING_EDGE_LATCH, 0);
			REG_WR(bp, IGU_REG_TRAILING_EDGE_LATCH, 0);
		}

		bnx2x_init_block(bp, IGU_BLOCK, FUNC0_STAGE + func);

		if (CHIP_IS_E2(bp)) {
			int dsb_idx = 0;
			/**
			 * Producer memory:
			 * E2 mode: address 0-135 match to the mapping memory;
			 * 136 - PF0 default prod; 137 - PF1 default prod;
			 * 138 - PF2 default prod; 139 - PF3 default prod;
			 * 140 - PF0 attn prod;    141 - PF1 attn prod;
			 * 142 - PF2 attn prod;    143 - PF3 attn prod;
			 * 144-147 reserved.
			 *
			 * E1.5 mode - In backward compatible mode;
			 * for non default SB; each even line in the memory
			 * holds the U producer and each odd line hold
			 * the C producer. The first 128 producers are for
			 * NDSB (PF0 - 0-31; PF1 - 32-63 and so on). The last 20
			 * producers are for the DSB for each PF.
			 * Each PF has five segments: (the order inside each
			 * segment is PF0; PF1; PF2; PF3) - 128-131 U prods;
			 * 132-135 C prods; 136-139 X prods; 140-143 T prods;
			 * 144-147 attn prods;
			 */
			/* non-default-status-blocks */
			num_segs = CHIP_INT_MODE_IS_BC(bp) ?
				IGU_BC_NDSB_NUM_SEGS : IGU_NORM_NDSB_NUM_SEGS;
			for (sb_idx = 0; sb_idx < bp->igu_sb_cnt; sb_idx++) {
				prod_offset = (bp->igu_base_sb + sb_idx) *
					num_segs;

				/* zero all producer segments of this SB */
				for (i = 0; i < num_segs; i++) {
					addr = IGU_REG_PROD_CONS_MEMORY +
							(prod_offset + i) * 4;
					REG_WR(bp, addr, 0);
				}
				/* send consumer update with value 0 */
				bnx2x_ack_sb(bp, bp->igu_base_sb + sb_idx,
					     USTORM_ID, 0, IGU_INT_NOP, 1);
				bnx2x_igu_clear_sb(bp,
						   bp->igu_base_sb + sb_idx);
			}

			/* default-status-blocks */
			num_segs = CHIP_INT_MODE_IS_BC(bp) ?
				IGU_BC_DSB_NUM_SEGS : IGU_NORM_DSB_NUM_SEGS;

			if (CHIP_MODE_IS_4_PORT(bp))
				dsb_idx = BP_FUNC(bp);
			else
				dsb_idx = BP_E1HVN(bp);

			prod_offset = (CHIP_INT_MODE_IS_BC(bp) ?
				       IGU_BC_BASE_DSB_PROD + dsb_idx :
				       IGU_NORM_BASE_DSB_PROD + dsb_idx);

			for (i = 0; i < (num_segs * E1HVN_MAX);
			     i += E1HVN_MAX) {
				addr = IGU_REG_PROD_CONS_MEMORY +
							(prod_offset + i)*4;
				REG_WR(bp, addr, 0);
			}
			/* send consumer update with 0 */
			if (CHIP_INT_MODE_IS_BC(bp)) {
				bnx2x_ack_sb(bp, bp->igu_dsb_id,
					     USTORM_ID, 0, IGU_INT_NOP, 1);
				bnx2x_ack_sb(bp, bp->igu_dsb_id,
					     CSTORM_ID, 0, IGU_INT_NOP, 1);
				bnx2x_ack_sb(bp, bp->igu_dsb_id,
					     XSTORM_ID, 0, IGU_INT_NOP, 1);
				bnx2x_ack_sb(bp, bp->igu_dsb_id,
					     TSTORM_ID, 0, IGU_INT_NOP, 1);
				bnx2x_ack_sb(bp, bp->igu_dsb_id,
					     ATTENTION_ID, 0, IGU_INT_NOP, 1);
			} else {
				bnx2x_ack_sb(bp, bp->igu_dsb_id,
					     USTORM_ID, 0, IGU_INT_NOP, 1);
				bnx2x_ack_sb(bp, bp->igu_dsb_id,
					     ATTENTION_ID, 0, IGU_INT_NOP, 1);
			}
			bnx2x_igu_clear_sb(bp, bp->igu_dsb_id);

			/* !!! these should become driver const once
			   rf-tool supports split-68 const */
			REG_WR(bp, IGU_REG_SB_INT_BEFORE_MASK_LSB, 0);
			REG_WR(bp, IGU_REG_SB_INT_BEFORE_MASK_MSB, 0);
			REG_WR(bp, IGU_REG_SB_MASK_LSB, 0);
			REG_WR(bp, IGU_REG_SB_MASK_MSB, 0);
			REG_WR(bp, IGU_REG_PBA_STATUS_LSB, 0);
			REG_WR(bp, IGU_REG_PBA_STATUS_MSB, 0);
		}
	}

	/* Reset PCIE errors for debug */
	REG_WR(bp, 0x2114, 0xffffffff);
	REG_WR(bp, 0x2120, 0xffffffff);

	bnx2x_init_block(bp, EMAC0_BLOCK, FUNC0_STAGE + func);
	bnx2x_init_block(bp, EMAC1_BLOCK, FUNC0_STAGE + func);
	bnx2x_init_block(bp, DBU_BLOCK, FUNC0_STAGE + func);
	bnx2x_init_block(bp, DBG_BLOCK, FUNC0_STAGE + func);
	bnx2x_init_block(bp, MCP_BLOCK, FUNC0_STAGE + func);
	bnx2x_init_block(bp, DMAE_BLOCK, FUNC0_STAGE + func);

	if (CHIP_IS_E1x(bp)) {
		main_mem_size = HC_REG_MAIN_MEMORY_SIZE / 2; /*dwords*/
		main_mem_base = HC_REG_MAIN_MEMORY +
				BP_PORT(bp) * (main_mem_size * 4);
		main_mem_prty_clr = HC_REG_HC_PRTY_STS_CLR;
		main_mem_width = 8;

		val = REG_RD(bp, main_mem_prty_clr);
		if (val)
			DP(BNX2X_MSG_MCP, "Hmmm... Parity errors in HC "
					  "block during "
					  "function init (0x%x)!\n", val);

		/* Clear "false" parity errors in MSI-X table */
		for (i = main_mem_base;
		     i < main_mem_base + main_mem_size * 4;
		     i += main_mem_width) {
			bnx2x_read_dmae(bp, i, main_mem_width / 4);
			bnx2x_write_dmae(bp, bnx2x_sp_mapping(bp, wb_data),
					 i, main_mem_width / 4);
		}
		/* Clear HC parity attention */
		REG_RD(bp, main_mem_prty_clr);
	}

	bnx2x_phy_probe(&bp->link_params);

	return 0;
}
5404
/* Top-level HW init dispatcher, driven by the load code returned by
 * the MCP firmware: a COMMON load runs common + port + function init,
 * a PORT load runs port + function init, a FUNCTION load runs only
 * function init. The switch cases below fall through deliberately to
 * implement that nesting.
 *
 * Returns 0 on success or the error code of the failing init stage.
 */
int bnx2x_init_hw(struct bnx2x *bp, u32 load_code)
{
	int rc = 0;

	DP(BNX2X_MSG_MCP, "function %d load_code %x\n",
	   BP_ABS_FUNC(bp), load_code);

	/* DMAE is not usable until the function stage re-enables it */
	bp->dmae_ready = 0;
	spin_lock_init(&bp->dmae_lock);

	switch (load_code) {
	case FW_MSG_CODE_DRV_LOAD_COMMON:
	case FW_MSG_CODE_DRV_LOAD_COMMON_CHIP:
		rc = bnx2x_init_hw_common(bp, load_code);
		if (rc)
			goto init_hw_err;
		/* no break */

	case FW_MSG_CODE_DRV_LOAD_PORT:
		rc = bnx2x_init_hw_port(bp);
		if (rc)
			goto init_hw_err;
		/* no break */

	case FW_MSG_CODE_DRV_LOAD_FUNCTION:
		rc = bnx2x_init_hw_func(bp);
		if (rc)
			goto init_hw_err;
		break;

	default:
		BNX2X_ERR("Unknown load_code (0x%x) from MCP\n", load_code);
		break;
	}

	/* Latch the current driver-pulse sequence from shmem so the
	 * periodic pulse handshake with the MCP starts in sync.
	 */
	if (!BP_NOMCP(bp)) {
		int mb_idx = BP_FW_MB_IDX(bp);

		bp->fw_drv_pulse_wr_seq =
		   (SHMEM_RD(bp, func_mb[mb_idx].drv_pulse_mb) &
		    DRV_PULSE_SEQ_MASK);
		DP(BNX2X_MSG_MCP, "drv_pulse 0x%x\n", bp->fw_drv_pulse_wr_seq);
	}

init_hw_err:
	/* release the gunzip buffer allocated by bnx2x_gunzip_init() */
	bnx2x_gunzip_end(bp);

	return rc;
}
5454
/* Release all host memory allocated by bnx2x_alloc_mem(): fastpath
 * rings, status blocks, slowpath buffer, CDU context, ILT lines,
 * CNIC/searcher memory, SPQ, event queue and the RSS indirection
 * table. Safe to call from the alloc error path - the BNX2X_*FREE
 * macros tolerate NULL pointers.
 */
void bnx2x_free_mem(struct bnx2x *bp)
{
	bnx2x_gunzip_end(bp);

	/* fastpath */
	bnx2x_free_fp_mem(bp);
	/* end of fastpath */

	BNX2X_PCI_FREE(bp->def_status_blk, bp->def_status_blk_mapping,
		       sizeof(struct host_sp_status_block));

	BNX2X_PCI_FREE(bp->slowpath, bp->slowpath_mapping,
		       sizeof(struct bnx2x_slowpath));

	BNX2X_PCI_FREE(bp->context.vcxt, bp->context.cxt_mapping,
		       bp->context.size);

	bnx2x_ilt_mem_op(bp, ILT_MEMOP_FREE);

	BNX2X_FREE(bp->ilt->lines);

#ifdef BCM_CNIC
	/* CNIC status block layout differs between E2 and E1x */
	if (CHIP_IS_E2(bp))
		BNX2X_PCI_FREE(bp->cnic_sb.e2_sb, bp->cnic_sb_mapping,
			       sizeof(struct host_hc_status_block_e2));
	else
		BNX2X_PCI_FREE(bp->cnic_sb.e1x_sb, bp->cnic_sb_mapping,
			       sizeof(struct host_hc_status_block_e1x));

	BNX2X_PCI_FREE(bp->t2, bp->t2_mapping, SRC_T2_SZ);
#endif

	BNX2X_PCI_FREE(bp->spq, bp->spq_mapping, BCM_PAGE_SIZE);

	BNX2X_PCI_FREE(bp->eq_ring, bp->eq_mapping,
		       BCM_PAGE_SIZE * NUM_EQ_PAGES);

	BNX2X_FREE(bp->rx_indir_table);
}
5494
Dmitry Kravkovf2e08992010-10-06 03:28:26 +00005495
/* Allocate all host memory the driver needs before bringing the
 * device up. NOTE(review): the BNX2X_ALLOC/BNX2X_PCI_ALLOC macros are
 * expected to branch to the local alloc_mem_err label on failure -
 * confirm against their definitions; that is why most calls below
 * carry no explicit error check.
 *
 * Returns 0 on success, -ENOMEM on any allocation failure (everything
 * already allocated is released via bnx2x_free_mem()).
 */
int bnx2x_alloc_mem(struct bnx2x *bp)
{
	if (bnx2x_gunzip_init(bp))
		return -ENOMEM;

#ifdef BCM_CNIC
	/* CNIC status block layout differs between E2 and E1x */
	if (CHIP_IS_E2(bp))
		BNX2X_PCI_ALLOC(bp->cnic_sb.e2_sb, &bp->cnic_sb_mapping,
				sizeof(struct host_hc_status_block_e2));
	else
		BNX2X_PCI_ALLOC(bp->cnic_sb.e1x_sb, &bp->cnic_sb_mapping,
				sizeof(struct host_hc_status_block_e1x));

	/* allocate searcher T2 table */
	BNX2X_PCI_ALLOC(bp->t2, &bp->t2_mapping, SRC_T2_SZ);
#endif


	BNX2X_PCI_ALLOC(bp->def_status_blk, &bp->def_status_blk_mapping,
			sizeof(struct host_sp_status_block));

	BNX2X_PCI_ALLOC(bp->slowpath, &bp->slowpath_mapping,
			sizeof(struct bnx2x_slowpath));

	/* one CDU context per L2 connection */
	bp->context.size = sizeof(union cdu_context) * bp->l2_cid_count;

	BNX2X_PCI_ALLOC(bp->context.vcxt, &bp->context.cxt_mapping,
			bp->context.size);

	BNX2X_ALLOC(bp->ilt->lines, sizeof(struct ilt_line) * ILT_MAX_LINES);

	if (bnx2x_ilt_mem_op(bp, ILT_MEMOP_ALLOC))
		goto alloc_mem_err;

	/* Slow path ring */
	BNX2X_PCI_ALLOC(bp->spq, &bp->spq_mapping, BCM_PAGE_SIZE);

	/* EQ */
	BNX2X_PCI_ALLOC(bp->eq_ring, &bp->eq_mapping,
			BCM_PAGE_SIZE * NUM_EQ_PAGES);

	BNX2X_ALLOC(bp->rx_indir_table, sizeof(bp->rx_indir_table[0]) *
		    TSTORM_INDIRECTION_TABLE_SIZE);

	/* fastpath */
	/* need to be done at the end, since it's self adjusting to amount
	 * of memory available for RSS queues
	 */
	if (bnx2x_alloc_fp_mem(bp))
		goto alloc_mem_err;
	return 0;

alloc_mem_err:
	bnx2x_free_mem(bp);
	return -ENOMEM;
}
5552
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02005553/*
5554 * Init service functions
5555 */
Dmitry Kravkov523224a2010-10-06 03:23:26 +00005556int bnx2x_func_start(struct bnx2x *bp)
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02005557{
Dmitry Kravkov523224a2010-10-06 03:23:26 +00005558 bnx2x_sp_post(bp, RAMROD_CMD_ID_COMMON_FUNCTION_START, 0, 0, 0, 1);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02005559
Dmitry Kravkov523224a2010-10-06 03:23:26 +00005560 /* Wait for completion */
5561 return bnx2x_wait_ramrod(bp, BNX2X_STATE_FUNC_STARTED, 0, &(bp->state),
5562 WAIT_RAMROD_COMMON);
5563}
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02005564
stephen hemminger8d962862010-10-21 07:50:56 +00005565static int bnx2x_func_stop(struct bnx2x *bp)
Dmitry Kravkov523224a2010-10-06 03:23:26 +00005566{
5567 bnx2x_sp_post(bp, RAMROD_CMD_ID_COMMON_FUNCTION_STOP, 0, 0, 0, 1);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02005568
Dmitry Kravkov523224a2010-10-06 03:23:26 +00005569 /* Wait for completion */
5570 return bnx2x_wait_ramrod(bp, BNX2X_STATE_CLOSING_WAIT4_UNLOAD,
5571 0, &(bp->state), WAIT_RAMROD_COMMON);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02005572}
5573
Vladislav Zolotarov042181f2011-06-14 01:33:39 +00005574int bnx2x_wait_ramrod(struct bnx2x *bp, int state, int idx,
stephen hemminger8d962862010-10-21 07:50:56 +00005575 int *state_p, int flags)
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02005576{
5577 /* can take a while if any port is running */
Eilon Greenstein8b3a0f02009-02-12 08:37:23 +00005578 int cnt = 5000;
Dmitry Kravkov523224a2010-10-06 03:23:26 +00005579 u8 poll = flags & WAIT_RAMROD_POLL;
5580 u8 common = flags & WAIT_RAMROD_COMMON;
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02005581
Eliezer Tamirc14423f2008-02-28 11:49:42 -08005582 DP(NETIF_MSG_IFUP, "%s for state to become %x on IDX [%d]\n",
5583 poll ? "polling" : "waiting", state, idx);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02005584
5585 might_sleep();
Eilon Greenstein34f80b02008-06-23 20:33:01 -07005586 while (cnt--) {
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02005587 if (poll) {
Dmitry Kravkov523224a2010-10-06 03:23:26 +00005588 if (common)
5589 bnx2x_eq_int(bp);
5590 else {
5591 bnx2x_rx_int(bp->fp, 10);
5592 /* if index is different from 0
5593 * the reply for some commands will
5594 * be on the non default queue
5595 */
5596 if (idx)
5597 bnx2x_rx_int(&bp->fp[idx], 10);
5598 }
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02005599 }
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02005600
Yitchak Gertner3101c2b2008-08-13 15:52:28 -07005601 mb(); /* state is changed by bnx2x_sp_event() */
Eilon Greenstein8b3a0f02009-02-12 08:37:23 +00005602 if (*state_p == state) {
5603#ifdef BNX2X_STOP_ON_ERROR
5604 DP(NETIF_MSG_IFUP, "exit (cnt %d)\n", 5000 - cnt);
5605#endif
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02005606 return 0;
Eilon Greenstein8b3a0f02009-02-12 08:37:23 +00005607 }
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02005608
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02005609 msleep(1);
Eilon Greensteine3553b22009-08-12 08:23:31 +00005610
5611 if (bp->panic)
5612 return -EIO;
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02005613 }
5614
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02005615 /* timeout! */
Eliezer Tamir49d66772008-02-28 11:53:13 -08005616 BNX2X_ERR("timeout %s for state %x on IDX [%d]\n",
5617 poll ? "polling" : "waiting", state, idx);
Eilon Greenstein34f80b02008-06-23 20:33:01 -07005618#ifdef BNX2X_STOP_ON_ERROR
5619 bnx2x_panic();
5620#endif
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02005621
Eliezer Tamir49d66772008-02-28 11:53:13 -08005622 return -EBUSY;
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02005623}
5624
stephen hemminger8d962862010-10-21 07:50:56 +00005625static u8 bnx2x_e1h_cam_offset(struct bnx2x *bp, u8 rel_offset)
Michael Chane665bfd2009-10-10 13:46:54 +00005626{
Dmitry Kravkovf2e08992010-10-06 03:28:26 +00005627 if (CHIP_IS_E1H(bp))
5628 return E1H_FUNC_MAX * rel_offset + BP_FUNC(bp);
5629 else if (CHIP_MODE_IS_4_PORT(bp))
Vladislav Zolotarov6e30dd42011-02-06 11:25:41 -08005630 return E2_FUNC_MAX * rel_offset + BP_FUNC(bp);
Dmitry Kravkovf2e08992010-10-06 03:28:26 +00005631 else
Vladislav Zolotarov6e30dd42011-02-06 11:25:41 -08005632 return E2_FUNC_MAX * rel_offset + BP_VN(bp);
Michael Chane665bfd2009-10-10 13:46:54 +00005633}
5634
/*
 * NIG LLH CAM line allocations: currently only the iSCSI and ETH MACs
 * are relevant, and the implementation is tuned for a single ETH MAC.
 * LLH_CAM_MAX_PF_LINE is the per-PF LLH memory size.
 */
enum {
	LLH_CAM_ISCSI_ETH_LINE = 0,
	LLH_CAM_ETH_LINE,
	LLH_CAM_MAX_PF_LINE = NIG_REG_LLH1_FUNC_MEM_SIZE
};
5645
5646static void bnx2x_set_mac_in_nig(struct bnx2x *bp,
5647 int set,
5648 unsigned char *dev_addr,
5649 int index)
5650{
5651 u32 wb_data[2];
5652 u32 mem_offset, ena_offset, mem_index;
5653 /**
5654 * indexes mapping:
5655 * 0..7 - goes to MEM
5656 * 8..15 - goes to MEM2
5657 */
5658
5659 if (!IS_MF_SI(bp) || index > LLH_CAM_MAX_PF_LINE)
5660 return;
5661
5662 /* calculate memory start offset according to the mapping
5663 * and index in the memory */
5664 if (index < NIG_LLH_FUNC_MEM_MAX_OFFSET) {
5665 mem_offset = BP_PORT(bp) ? NIG_REG_LLH1_FUNC_MEM :
5666 NIG_REG_LLH0_FUNC_MEM;
5667 ena_offset = BP_PORT(bp) ? NIG_REG_LLH1_FUNC_MEM_ENABLE :
5668 NIG_REG_LLH0_FUNC_MEM_ENABLE;
5669 mem_index = index;
5670 } else {
5671 mem_offset = BP_PORT(bp) ? NIG_REG_P1_LLH_FUNC_MEM2 :
5672 NIG_REG_P0_LLH_FUNC_MEM2;
5673 ena_offset = BP_PORT(bp) ? NIG_REG_P1_LLH_FUNC_MEM2_ENABLE :
5674 NIG_REG_P0_LLH_FUNC_MEM2_ENABLE;
5675 mem_index = index - NIG_LLH_FUNC_MEM_MAX_OFFSET;
5676 }
5677
5678 if (set) {
5679 /* LLH_FUNC_MEM is a u64 WB register */
5680 mem_offset += 8*mem_index;
5681
5682 wb_data[0] = ((dev_addr[2] << 24) | (dev_addr[3] << 16) |
5683 (dev_addr[4] << 8) | dev_addr[5]);
5684 wb_data[1] = ((dev_addr[0] << 8) | dev_addr[1]);
5685
5686 REG_WR_DMAE(bp, mem_offset, wb_data, 2);
5687 }
5688
5689 /* enable/disable the entry */
5690 REG_WR(bp, ena_offset + 4*mem_index, set);
5691
5692}
5693
Dmitry Kravkov523224a2010-10-06 03:23:26 +00005694void bnx2x_set_eth_mac(struct bnx2x *bp, int set)
Michael Chane665bfd2009-10-10 13:46:54 +00005695{
Dmitry Kravkov523224a2010-10-06 03:23:26 +00005696 u8 cam_offset = (CHIP_IS_E1(bp) ? (BP_PORT(bp) ? 32 : 0) :
5697 bnx2x_e1h_cam_offset(bp, CAM_ETH_LINE));
5698
5699 /* networking MAC */
5700 bnx2x_set_mac_addr_gen(bp, set, bp->dev->dev_addr,
5701 (1 << bp->fp->cl_id), cam_offset , 0);
5702
Dmitry Kravkov0793f83f2010-12-01 12:39:28 -08005703 bnx2x_set_mac_in_nig(bp, set, bp->dev->dev_addr, LLH_CAM_ETH_LINE);
5704
Dmitry Kravkov523224a2010-10-06 03:23:26 +00005705 if (CHIP_IS_E1(bp)) {
5706 /* broadcast MAC */
Joe Perches215faf92010-12-21 02:16:10 -08005707 static const u8 bcast[ETH_ALEN] = {
5708 0xff, 0xff, 0xff, 0xff, 0xff, 0xff
5709 };
Dmitry Kravkov523224a2010-10-06 03:23:26 +00005710 bnx2x_set_mac_addr_gen(bp, set, bcast, 0, cam_offset + 1, 1);
5711 }
5712}
Vladislav Zolotarov6e30dd42011-02-06 11:25:41 -08005713
Michael Chan993ac7b2009-10-10 13:46:56 +00005714#ifdef BCM_CNIC
5715/**
Dmitry Kravkove8920672011-05-04 23:52:40 +00005716 * bnx2x_set_iscsi_eth_mac_addr - set iSCSI MAC(s).
Michael Chan993ac7b2009-10-10 13:46:56 +00005717 *
Dmitry Kravkove8920672011-05-04 23:52:40 +00005718 * @bp: driver handle
5719 * @set: set or clear the CAM entry
Michael Chan993ac7b2009-10-10 13:46:56 +00005720 *
Dmitry Kravkove8920672011-05-04 23:52:40 +00005721 * This function will wait until the ramdord completion returns.
5722 * Return 0 if success, -ENODEV if ramrod doesn't return.
Michael Chan993ac7b2009-10-10 13:46:56 +00005723 */
stephen hemminger8d962862010-10-21 07:50:56 +00005724static int bnx2x_set_iscsi_eth_mac_addr(struct bnx2x *bp, int set)
Michael Chan993ac7b2009-10-10 13:46:56 +00005725{
Dmitry Kravkov523224a2010-10-06 03:23:26 +00005726 u8 cam_offset = (CHIP_IS_E1(bp) ? ((BP_PORT(bp) ? 32 : 0) + 2) :
5727 bnx2x_e1h_cam_offset(bp, CAM_ISCSI_ETH_LINE));
Vladislav Zolotarovec6ba942010-12-13 05:44:01 +00005728 u32 iscsi_l2_cl_id = BNX2X_ISCSI_ETH_CL_ID +
5729 BP_E1HVN(bp) * NONE_ETH_CONTEXT_USE;
Dmitry Kravkov523224a2010-10-06 03:23:26 +00005730 u32 cl_bit_vec = (1 << iscsi_l2_cl_id);
Vladislav Zolotarov2ba45142011-01-31 14:39:17 +00005731 u8 *iscsi_mac = bp->cnic_eth_dev.iscsi_mac;
Michael Chan993ac7b2009-10-10 13:46:56 +00005732
5733 /* Send a SET_MAC ramrod */
Vladislav Zolotarov2ba45142011-01-31 14:39:17 +00005734 bnx2x_set_mac_addr_gen(bp, set, iscsi_mac, cl_bit_vec,
Dmitry Kravkov523224a2010-10-06 03:23:26 +00005735 cam_offset, 0);
Dmitry Kravkov0793f83f2010-12-01 12:39:28 -08005736
Vladislav Zolotarov2ba45142011-01-31 14:39:17 +00005737 bnx2x_set_mac_in_nig(bp, set, iscsi_mac, LLH_CAM_ISCSI_ETH_LINE);
Vladislav Zolotarovec6ba942010-12-13 05:44:01 +00005738
5739 return 0;
5740}
5741
5742/**
Dmitry Kravkove8920672011-05-04 23:52:40 +00005743 * bnx2x_set_fip_eth_mac_addr - set FCoE L2 MAC(s)
Vladislav Zolotarovec6ba942010-12-13 05:44:01 +00005744 *
Dmitry Kravkove8920672011-05-04 23:52:40 +00005745 * @bp: driver handle
5746 * @set: set or clear the CAM entry
Vladislav Zolotarovec6ba942010-12-13 05:44:01 +00005747 *
Dmitry Kravkove8920672011-05-04 23:52:40 +00005748 * This function will wait until the ramrod completion returns.
5749 * Returns 0 if success, -ENODEV if ramrod doesn't return.
Vladislav Zolotarovec6ba942010-12-13 05:44:01 +00005750 */
5751int bnx2x_set_fip_eth_mac_addr(struct bnx2x *bp, int set)
5752{
5753 u32 cl_bit_vec = (1 << bnx2x_fcoe(bp, cl_id));
5754 /**
5755 * CAM allocation for E1H
5756 * eth unicasts: by func number
5757 * iscsi: by func number
5758 * fip unicast: by func number
5759 * fip multicast: by func number
5760 */
5761 bnx2x_set_mac_addr_gen(bp, set, bp->fip_mac,
5762 cl_bit_vec, bnx2x_e1h_cam_offset(bp, CAM_FIP_ETH_LINE), 0);
5763
5764 return 0;
5765}
5766
5767int bnx2x_set_all_enode_macs(struct bnx2x *bp, int set)
5768{
5769 u32 cl_bit_vec = (1 << bnx2x_fcoe(bp, cl_id));
5770
5771 /**
5772 * CAM allocation for E1H
5773 * eth unicasts: by func number
5774 * iscsi: by func number
5775 * fip unicast: by func number
5776 * fip multicast: by func number
5777 */
5778 bnx2x_set_mac_addr_gen(bp, set, ALL_ENODE_MACS, cl_bit_vec,
5779 bnx2x_e1h_cam_offset(bp, CAM_FIP_MCAST_LINE), 0);
5780
Michael Chan993ac7b2009-10-10 13:46:56 +00005781 return 0;
5782}
5783#endif
5784
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02005785
/**
 * bnx2x_set_int_mode - configure interrupt mode
 *
 * @bp:		driver handle
 *
 * Selects the interrupt mode from the module parameter 'int_mode' and
 * sizes bp->num_queues accordingly. In case of MSI-X it will also try
 * to enable MSI-X, falling back to MSI or legacy INTx with a single
 * queue when that fails.
 */
static void __devinit bnx2x_set_int_mode(struct bnx2x *bp)
{

	switch (int_mode) {
	case INT_MODE_MSI:
		bnx2x_enable_msi(bp);
		/* falling through... */
	case INT_MODE_INTx:
		/* MSI/INTx: a single ETH queue (plus non-ETH contexts) */
		bp->num_queues = 1 + NONE_ETH_CONTEXT_USE;
		DP(NETIF_MSG_IFUP, "set number of queues to 1\n");
		break;
	default:
		/* Set number of queues according to bp->multi_mode value */
		bnx2x_set_num_queues(bp);

		DP(NETIF_MSG_IFUP, "set number of queues to %d\n",
		   bp->num_queues);

		/* if we can't use MSI-X we only need one fp,
		 * so try to enable MSI-X with the requested number of fp's
		 * and fallback to MSI or legacy INTx with one fp
		 */
		if (bnx2x_enable_msix(bp)) {
			/* failed to enable MSI-X */
			if (bp->multi_mode)
				DP(NETIF_MSG_IFUP,
				   "Multi requested but failed to "
				   "enable MSI-X (%d), "
				   "set number of queues to %d\n",
				   bp->num_queues,
				   1 + NONE_ETH_CONTEXT_USE);
			bp->num_queues = 1 + NONE_ETH_CONTEXT_USE;

			/* Try to enable MSI */
			if (!(bp->flags & DISABLE_MSI_FLAG))
				bnx2x_enable_msi(bp);
		}
		break;
	}
}
5833
/* Number of ILT lines needed for the L2 CIDs of this function.
 * Must be called prior to any HW initializations.
 */
static inline u16 bnx2x_cid_ilt_lines(struct bnx2x *bp)
{
	return L2_ILT_LINES(bp);
}
5839
/*
 * bnx2x_ilt_set_info - lay out the per-function ILT line ranges
 * @bp:		driver handle
 *
 * Assigns consecutive ILT line ranges, starting at this function's base
 * line, to the CDU, QM, SRC and TM ILT clients.  SRC and TM are only
 * populated when CNIC support is compiled in; otherwise they are marked
 * to skip both init and memory allocation.  The order of the clients
 * below determines their placement and must not be changed.
 */
void bnx2x_ilt_set_info(struct bnx2x *bp)
{
	struct ilt_client_info *ilt_client;
	struct bnx2x_ilt *ilt = BP_ILT(bp);
	u16 line = 0;

	ilt->start_line = FUNC_ILT_BASE(BP_FUNC(bp));
	DP(BNX2X_MSG_SP, "ilt starts at line %d\n", ilt->start_line);

	/* CDU */
	ilt_client = &ilt->clients[ILT_CLIENT_CDU];
	ilt_client->client_num = ILT_CLIENT_CDU;
	ilt_client->page_size = CDU_ILT_PAGE_SZ;
	ilt_client->flags = ILT_CLIENT_SKIP_MEM;
	ilt_client->start = line;
	line += L2_ILT_LINES(bp);
#ifdef BCM_CNIC
	line += CNIC_ILT_LINES;
#endif
	ilt_client->end = line - 1;

	DP(BNX2X_MSG_SP, "ilt client[CDU]: start %d, end %d, psz 0x%x, "
					 "flags 0x%x, hw psz %d\n",
	   ilt_client->start,
	   ilt_client->end,
	   ilt_client->page_size,
	   ilt_client->flags,
	   ilog2(ilt_client->page_size >> 12));

	/* QM */
	if (QM_INIT(bp->qm_cid_count)) {
		ilt_client = &ilt->clients[ILT_CLIENT_QM];
		ilt_client->client_num = ILT_CLIENT_QM;
		ilt_client->page_size = QM_ILT_PAGE_SZ;
		ilt_client->flags = 0;
		ilt_client->start = line;

		/* 4 bytes for each cid */
		line += DIV_ROUND_UP(bp->qm_cid_count * QM_QUEUES_PER_FUNC * 4,
							 QM_ILT_PAGE_SZ);

		ilt_client->end = line - 1;

		DP(BNX2X_MSG_SP, "ilt client[QM]: start %d, end %d, psz 0x%x, "
						 "flags 0x%x, hw psz %d\n",
		   ilt_client->start,
		   ilt_client->end,
		   ilt_client->page_size,
		   ilt_client->flags,
		   ilog2(ilt_client->page_size >> 12));

	}
	/* SRC */
	ilt_client = &ilt->clients[ILT_CLIENT_SRC];
#ifdef BCM_CNIC
	ilt_client->client_num = ILT_CLIENT_SRC;
	ilt_client->page_size = SRC_ILT_PAGE_SZ;
	ilt_client->flags = 0;
	ilt_client->start = line;
	line += SRC_ILT_LINES;
	ilt_client->end = line - 1;

	DP(BNX2X_MSG_SP, "ilt client[SRC]: start %d, end %d, psz 0x%x, "
					 "flags 0x%x, hw psz %d\n",
	   ilt_client->start,
	   ilt_client->end,
	   ilt_client->page_size,
	   ilt_client->flags,
	   ilog2(ilt_client->page_size >> 12));

#else
	/* No CNIC: SRC client is unused */
	ilt_client->flags = (ILT_CLIENT_SKIP_INIT | ILT_CLIENT_SKIP_MEM);
#endif

	/* TM */
	ilt_client = &ilt->clients[ILT_CLIENT_TM];
#ifdef BCM_CNIC
	ilt_client->client_num = ILT_CLIENT_TM;
	ilt_client->page_size = TM_ILT_PAGE_SZ;
	ilt_client->flags = 0;
	ilt_client->start = line;
	line += TM_ILT_LINES;
	ilt_client->end = line - 1;

	DP(BNX2X_MSG_SP, "ilt client[TM]: start %d, end %d, psz 0x%x, "
					 "flags 0x%x, hw psz %d\n",
	   ilt_client->start,
	   ilt_client->end,
	   ilt_client->page_size,
	   ilt_client->flags,
	   ilog2(ilt_client->page_size >> 12));

#else
	/* No CNIC: TM client is unused */
	ilt_client->flags = (ILT_CLIENT_SKIP_INIT | ILT_CLIENT_SKIP_MEM);
#endif
}
Dmitry Kravkovf85582f2010-10-06 03:34:21 +00005936
/*
 * bnx2x_setup_client - bring up one L2 client (fastpath queue) in FW
 * @bp:		driver handle
 * @fp:		fastpath whose client is being set up
 * @is_leading:	non-zero if this client owns the RSS leading role
 *
 * Acks/enables the queue's IGU status block (skipped for the FCoE L2
 * queue), fills in the client-init ramrod parameters including the Rx
 * pause/queue and Tx queue configuration, and sends the setup to FW.
 * Returns the result of bnx2x_setup_fw_client().
 */
int bnx2x_setup_client(struct bnx2x *bp, struct bnx2x_fastpath *fp,
		       int is_leading)
{
	struct bnx2x_client_init_params params = { {0} };
	int rc;

	/* reset IGU state skip FCoE L2 queue */
	if (!IS_FCOE_FP(fp))
		bnx2x_ack_sb(bp, fp->igu_sb_id, USTORM_ID, 0,
			     IGU_INT_ENABLE, 0);

	params.ramrod_params.pstate = &fp->state;
	params.ramrod_params.state = BNX2X_FP_STATE_OPEN;
	params.ramrod_params.index = fp->index;
	params.ramrod_params.cid = fp->cid;

#ifdef BCM_CNIC
	if (IS_FCOE_FP(fp))
		params.ramrod_params.flags |= CLIENT_IS_FCOE;

#endif

	if (is_leading)
		params.ramrod_params.flags |= CLIENT_IS_LEADING_RSS;

	/* Prepare Rx (pause + queue) and Tx queue parameters */
	bnx2x_pf_rx_cl_prep(bp, fp, &params.pause, &params.rxq_params);

	bnx2x_pf_tx_cl_prep(bp, fp, &params.txq_params);

	rc = bnx2x_setup_fw_client(bp, &params, 1,
				   bnx2x_sp(bp, client_init_data),
				   bnx2x_sp_mapping(bp, client_init_data));
	return rc;
}
5971
/*
 * bnx2x_stop_fw_client - tear down one L2 client in FW
 * @bp:	driver handle
 * @p:	ramrod parameters (cid, cl_id, index, state pointer, poll mode)
 *
 * Executes the mandatory teardown sequence: HALT ramrod, TERMINATE
 * ramrod, then the common CFC_DEL ramrod, waiting for each completion
 * before issuing the next.  Returns 0 on success or a timeout error
 * from bnx2x_wait_ramrod().
 */
static int bnx2x_stop_fw_client(struct bnx2x *bp,
				struct bnx2x_client_ramrod_params *p)
{
	int rc;

	int poll_flag = p->poll ? WAIT_RAMROD_POLL : 0;

	/* halt the connection */
	*p->pstate = BNX2X_FP_STATE_HALTING;
	bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_HALT, p->cid, 0,
						  p->cl_id, 0);

	/* Wait for completion */
	rc = bnx2x_wait_ramrod(bp, BNX2X_FP_STATE_HALTED, p->index,
			       p->pstate, poll_flag);
	if (rc) /* timeout */
		return rc;

	/* terminate the connection */
	*p->pstate = BNX2X_FP_STATE_TERMINATING;
	bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_TERMINATE, p->cid, 0,
						       p->cl_id, 0);
	/* Wait for completion */
	rc = bnx2x_wait_ramrod(bp, BNX2X_FP_STATE_TERMINATED, p->index,
			       p->pstate, poll_flag);
	if (rc) /* timeout */
		return rc;


	/* delete cfc entry */
	bnx2x_sp_post(bp, RAMROD_CMD_ID_COMMON_CFC_DEL, p->cid, 0, 0, 1);

	/* Wait for completion */
	rc = bnx2x_wait_ramrod(bp, BNX2X_FP_STATE_CLOSED, p->index,
			       p->pstate, WAIT_RAMROD_COMMON);
	return rc;
}
6008
Dmitry Kravkov523224a2010-10-06 03:23:26 +00006009static int bnx2x_stop_client(struct bnx2x *bp, int index)
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02006010{
Dmitry Kravkov523224a2010-10-06 03:23:26 +00006011 struct bnx2x_client_ramrod_params client_stop = {0};
6012 struct bnx2x_fastpath *fp = &bp->fp[index];
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02006013
Dmitry Kravkov523224a2010-10-06 03:23:26 +00006014 client_stop.index = index;
6015 client_stop.cid = fp->cid;
6016 client_stop.cl_id = fp->cl_id;
6017 client_stop.pstate = &(fp->state);
6018 client_stop.poll = 0;
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02006019
Dmitry Kravkov523224a2010-10-06 03:23:26 +00006020 return bnx2x_stop_fw_client(bp, &client_stop);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02006021}
6022
Dmitry Kravkov523224a2010-10-06 03:23:26 +00006023
/*
 * bnx2x_reset_func - reset the per-function state of the chip
 * @bp:		driver handle
 *
 * Disables the function in all four storm FW processors, marks every
 * fastpath and the slowpath status block as function-disabled, clears
 * the SPQ data, masks the IGU/HC edges, stops the CNIC timer scan (if
 * compiled in), clears the function's ILT and applies the E2 vnic-3
 * timers workaround.  Assumes bnx2x_reset_port() already ran.
 */
static void bnx2x_reset_func(struct bnx2x *bp)
{
	int port = BP_PORT(bp);
	int func = BP_FUNC(bp);
	int i;
	/* Offset of the pf_id byte inside a status block differs between
	 * E2 and E1x status block layouts.
	 */
	int pfunc_offset_fp = offsetof(struct hc_sb_data, p_func) +
			(CHIP_IS_E2(bp) ?
			 offsetof(struct hc_status_block_data_e2, common) :
			 offsetof(struct hc_status_block_data_e1x, common));
	int pfunc_offset_sp = offsetof(struct hc_sp_status_block_data, p_func);
	int pfid_offset = offsetof(struct pci_entity, pf_id);

	/* Disable the function in the FW */
	REG_WR8(bp, BAR_XSTRORM_INTMEM + XSTORM_FUNC_EN_OFFSET(func), 0);
	REG_WR8(bp, BAR_CSTRORM_INTMEM + CSTORM_FUNC_EN_OFFSET(func), 0);
	REG_WR8(bp, BAR_TSTRORM_INTMEM + TSTORM_FUNC_EN_OFFSET(func), 0);
	REG_WR8(bp, BAR_USTRORM_INTMEM + USTORM_FUNC_EN_OFFSET(func), 0);

	/* FP SBs */
	for_each_eth_queue(bp, i) {
		struct bnx2x_fastpath *fp = &bp->fp[i];
		REG_WR8(bp,
			BAR_CSTRORM_INTMEM +
			CSTORM_STATUS_BLOCK_DATA_OFFSET(fp->fw_sb_id)
			+ pfunc_offset_fp + pfid_offset,
			HC_FUNCTION_DISABLED);
	}

	/* SP SB */
	REG_WR8(bp,
		BAR_CSTRORM_INTMEM +
		CSTORM_SP_STATUS_BLOCK_DATA_OFFSET(func) +
		pfunc_offset_sp + pfid_offset,
		HC_FUNCTION_DISABLED);


	/* Zero the SPQ data of this function */
	for (i = 0; i < XSTORM_SPQ_DATA_SIZE / 4; i++)
		REG_WR(bp, BAR_XSTRORM_INTMEM + XSTORM_SPQ_DATA_OFFSET(func),
		       0);

	/* Configure IGU */
	if (bp->common.int_block == INT_BLOCK_HC) {
		REG_WR(bp, HC_REG_LEADING_EDGE_0 + port*8, 0);
		REG_WR(bp, HC_REG_TRAILING_EDGE_0 + port*8, 0);
	} else {
		REG_WR(bp, IGU_REG_LEADING_EDGE_LATCH, 0);
		REG_WR(bp, IGU_REG_TRAILING_EDGE_LATCH, 0);
	}

#ifdef BCM_CNIC
	/* Disable Timer scan */
	REG_WR(bp, TM_REG_EN_LINEAR0_TIMER + port*4, 0);
	/*
	 * Wait for at least 10ms and up to 2 second for the timers scan to
	 * complete
	 */
	for (i = 0; i < 200; i++) {
		msleep(10);
		if (!REG_RD(bp, TM_REG_LIN0_SCAN_ON + port*4))
			break;
	}
#endif
	/* Clear ILT */
	bnx2x_clear_func_ilt(bp, func);

	/* Timers workaround bug for E2: if this is vnic-3,
	 * we need to set the entire ilt range for this timers.
	 */
	if (CHIP_IS_E2(bp) && BP_VN(bp) == 3) {
		struct ilt_client_info ilt_cli;
		/* use dummy TM client */
		memset(&ilt_cli, 0, sizeof(struct ilt_client_info));
		ilt_cli.start = 0;
		ilt_cli.end = ILT_NUM_PAGE_ENTRIES - 1;
		ilt_cli.client_num = ILT_CLIENT_TM;

		bnx2x_ilt_boundry_init_op(bp, &ilt_cli, 0, INITOP_CLEAR);
	}

	/* this assumes that reset_port() called before reset_func()*/
	if (CHIP_IS_E2(bp))
		bnx2x_pf_disable(bp);

	/* DMAE is no longer usable until the next load */
	bp->dmae_ready = 0;
}
6109
/*
 * bnx2x_reset_port - reset the per-port state of the chip
 * @bp:		driver handle
 *
 * Masks port interrupts, blocks BRB ingress for non-MCP traffic, masks
 * the AEU attentions for this port and then verifies (after a 100ms
 * settle time) that the BRB has drained, logging if blocks remain.
 */
static void bnx2x_reset_port(struct bnx2x *bp)
{
	int port = BP_PORT(bp);
	u32 val;

	REG_WR(bp, NIG_REG_MASK_INTERRUPT_PORT0 + port*4, 0);

	/* Do not rcv packets to BRB */
	REG_WR(bp, NIG_REG_LLH0_BRB1_DRV_MASK + port*4, 0x0);
	/* Do not direct rcv packets that are not for MCP to the BRB */
	REG_WR(bp, (port ? NIG_REG_LLH1_BRB1_NOT_MCP :
			   NIG_REG_LLH0_BRB1_NOT_MCP), 0x0);

	/* Configure AEU */
	REG_WR(bp, MISC_REG_AEU_MASK_ATTN_FUNC_0 + port*4, 0);

	msleep(100);
	/* Check for BRB port occupancy */
	val = REG_RD(bp, BRB1_REG_PORT_NUM_OCC_BLOCKS_0 + port*4);
	if (val)
		DP(NETIF_MSG_IFDOWN,
		   "BRB1 is not empty  %d blocks are occupied\n", val);

	/* TODO: Close Doorbell port? */
}
6135
/*
 * bnx2x_reset_chip - reset port/function/common blocks per MCP response
 * @bp:		driver handle
 * @reset_code:	FW_MSG_CODE_DRV_UNLOAD_* response from the MCP, selecting
 *		how much of the chip this driver instance must reset
 *
 * UNLOAD_COMMON resets port + function + common, UNLOAD_PORT resets
 * port + function, UNLOAD_FUNCTION resets only the function.  An
 * unknown code is logged and ignored.
 */
static void bnx2x_reset_chip(struct bnx2x *bp, u32 reset_code)
{
	DP(BNX2X_MSG_MCP, "function %d  reset_code %x\n",
	   BP_ABS_FUNC(bp), reset_code);

	switch (reset_code) {
	case FW_MSG_CODE_DRV_UNLOAD_COMMON:
		bnx2x_reset_port(bp);
		bnx2x_reset_func(bp);
		bnx2x_reset_common(bp);
		break;

	case FW_MSG_CODE_DRV_UNLOAD_PORT:
		bnx2x_reset_port(bp);
		bnx2x_reset_func(bp);
		break;

	case FW_MSG_CODE_DRV_UNLOAD_FUNCTION:
		bnx2x_reset_func(bp);
		break;

	default:
		BNX2X_ERR("Unknown reset_code (0x%x) from MCP\n", reset_code);
		break;
	}
}
6162
#ifdef BCM_CNIC
/* Remove the previously configured FCoE/FIP MAC CAM entries, if any,
 * and clear the FCOE_MACS_SET flag.
 */
static inline void bnx2x_del_fcoe_eth_macs(struct bnx2x *bp)
{
	if (!(bp->flags & FCOE_MACS_SET))
		return;

	/* The FIP unicast entry exists only outside of MF-SD mode */
	if (!IS_MF_SD(bp))
		bnx2x_set_fip_eth_mac_addr(bp, 0);

	bnx2x_set_all_enode_macs(bp, 0);

	bp->flags &= ~FCOE_MACS_SET;
}
#endif
6176
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00006177void bnx2x_chip_cleanup(struct bnx2x *bp, int unload_mode)
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02006178{
Vladislav Zolotarovda5a6622008-08-13 15:50:00 -07006179 int port = BP_PORT(bp);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02006180 u32 reset_code = 0;
Vladislav Zolotarovda5a6622008-08-13 15:50:00 -07006181 int i, cnt, rc;
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02006182
Eilon Greenstein555f6c72009-02-12 08:36:11 +00006183 /* Wait until tx fastpath tasks complete */
Vladislav Zolotarovec6ba942010-12-13 05:44:01 +00006184 for_each_tx_queue(bp, i) {
Eliezer Tamir228241e2008-02-28 11:56:57 -08006185 struct bnx2x_fastpath *fp = &bp->fp[i];
6186
Eilon Greenstein34f80b02008-06-23 20:33:01 -07006187 cnt = 1000;
Vladislav Zolotarove8b5fc52009-01-26 12:36:42 -08006188 while (bnx2x_has_tx_work_unload(fp)) {
Vladislav Zolotarovda5a6622008-08-13 15:50:00 -07006189
Eilon Greenstein34f80b02008-06-23 20:33:01 -07006190 if (!cnt) {
6191 BNX2X_ERR("timeout waiting for queue[%d]\n",
6192 i);
6193#ifdef BNX2X_STOP_ON_ERROR
6194 bnx2x_panic();
6195 return -EBUSY;
6196#else
6197 break;
6198#endif
6199 }
6200 cnt--;
Vladislav Zolotarovda5a6622008-08-13 15:50:00 -07006201 msleep(1);
Eilon Greenstein34f80b02008-06-23 20:33:01 -07006202 }
Eliezer Tamir228241e2008-02-28 11:56:57 -08006203 }
Vladislav Zolotarovda5a6622008-08-13 15:50:00 -07006204 /* Give HW time to discard old tx messages */
6205 msleep(1);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02006206
Vladislav Zolotarov6e30dd42011-02-06 11:25:41 -08006207 bnx2x_set_eth_mac(bp, 0);
Yitchak Gertner65abd742008-08-25 15:26:24 -07006208
Vladislav Zolotarov6e30dd42011-02-06 11:25:41 -08006209 bnx2x_invalidate_uc_list(bp);
6210
6211 if (CHIP_IS_E1(bp))
6212 bnx2x_invalidate_e1_mc_list(bp);
6213 else {
6214 bnx2x_invalidate_e1h_mc_list(bp);
Yitchak Gertner65abd742008-08-25 15:26:24 -07006215 REG_WR(bp, NIG_REG_LLH0_FUNC_EN + port*8, 0);
Yitchak Gertner65abd742008-08-25 15:26:24 -07006216 }
Dmitry Kravkov523224a2010-10-06 03:23:26 +00006217
Michael Chan993ac7b2009-10-10 13:46:56 +00006218#ifdef BCM_CNIC
Vladislav Zolotarovec6ba942010-12-13 05:44:01 +00006219 bnx2x_del_fcoe_eth_macs(bp);
Michael Chan993ac7b2009-10-10 13:46:56 +00006220#endif
Yitchak Gertner65abd742008-08-25 15:26:24 -07006221
Vladislav Zolotarovda5a6622008-08-13 15:50:00 -07006222 if (unload_mode == UNLOAD_NORMAL)
6223 reset_code = DRV_MSG_CODE_UNLOAD_REQ_WOL_DIS;
Eliezer Tamir228241e2008-02-28 11:56:57 -08006224
Eilon Greenstein7d0446c2009-07-29 00:20:10 +00006225 else if (bp->flags & NO_WOL_FLAG)
Vladislav Zolotarovda5a6622008-08-13 15:50:00 -07006226 reset_code = DRV_MSG_CODE_UNLOAD_REQ_WOL_MCP;
Vladislav Zolotarovda5a6622008-08-13 15:50:00 -07006227
Eilon Greenstein7d0446c2009-07-29 00:20:10 +00006228 else if (bp->wol) {
Vladislav Zolotarovda5a6622008-08-13 15:50:00 -07006229 u32 emac_base = port ? GRCBASE_EMAC1 : GRCBASE_EMAC0;
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02006230 u8 *mac_addr = bp->dev->dev_addr;
Eilon Greenstein34f80b02008-06-23 20:33:01 -07006231 u32 val;
Eilon Greenstein34f80b02008-06-23 20:33:01 -07006232 /* The mac address is written to entries 1-4 to
6233 preserve entry 0 which is used by the PMF */
Vladislav Zolotarovda5a6622008-08-13 15:50:00 -07006234 u8 entry = (BP_E1HVN(bp) + 1)*8;
6235
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02006236 val = (mac_addr[0] << 8) | mac_addr[1];
Eilon Greenstein3196a882008-08-13 15:58:49 -07006237 EMAC_WR(bp, EMAC_REG_EMAC_MAC_MATCH + entry, val);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02006238
6239 val = (mac_addr[2] << 24) | (mac_addr[3] << 16) |
6240 (mac_addr[4] << 8) | mac_addr[5];
Eilon Greenstein3196a882008-08-13 15:58:49 -07006241 EMAC_WR(bp, EMAC_REG_EMAC_MAC_MATCH + entry + 4, val);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02006242
6243 reset_code = DRV_MSG_CODE_UNLOAD_REQ_WOL_EN;
Eliezer Tamir228241e2008-02-28 11:56:57 -08006244
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02006245 } else
6246 reset_code = DRV_MSG_CODE_UNLOAD_REQ_WOL_DIS;
6247
Eilon Greenstein34f80b02008-06-23 20:33:01 -07006248 /* Close multi and leading connections
6249 Completions for ramrods are collected in a synchronous way */
Dmitry Kravkov523224a2010-10-06 03:23:26 +00006250 for_each_queue(bp, i)
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02006251
Dmitry Kravkov523224a2010-10-06 03:23:26 +00006252 if (bnx2x_stop_client(bp, i))
Vladislav Zolotarovda5a6622008-08-13 15:50:00 -07006253#ifdef BNX2X_STOP_ON_ERROR
Dmitry Kravkov523224a2010-10-06 03:23:26 +00006254 return;
6255#else
6256 goto unload_error;
6257#endif
6258
6259 rc = bnx2x_func_stop(bp);
6260 if (rc) {
6261 BNX2X_ERR("Function stop failed!\n");
6262#ifdef BNX2X_STOP_ON_ERROR
6263 return;
Vladislav Zolotarovda5a6622008-08-13 15:50:00 -07006264#else
6265 goto unload_error;
Eilon Greenstein34f80b02008-06-23 20:33:01 -07006266#endif
Eliezer Tamir228241e2008-02-28 11:56:57 -08006267 }
Dmitry Kravkov523224a2010-10-06 03:23:26 +00006268#ifndef BNX2X_STOP_ON_ERROR
Eliezer Tamir228241e2008-02-28 11:56:57 -08006269unload_error:
Dmitry Kravkov523224a2010-10-06 03:23:26 +00006270#endif
Eilon Greenstein34f80b02008-06-23 20:33:01 -07006271 if (!BP_NOMCP(bp))
Yaniv Rosnera22f0782010-09-07 11:41:20 +00006272 reset_code = bnx2x_fw_command(bp, reset_code, 0);
Eilon Greenstein34f80b02008-06-23 20:33:01 -07006273 else {
Dmitry Kravkovf2e08992010-10-06 03:28:26 +00006274 DP(NETIF_MSG_IFDOWN, "NO MCP - load counts[%d] "
6275 "%d, %d, %d\n", BP_PATH(bp),
6276 load_count[BP_PATH(bp)][0],
6277 load_count[BP_PATH(bp)][1],
6278 load_count[BP_PATH(bp)][2]);
6279 load_count[BP_PATH(bp)][0]--;
6280 load_count[BP_PATH(bp)][1 + port]--;
6281 DP(NETIF_MSG_IFDOWN, "NO MCP - new load counts[%d] "
6282 "%d, %d, %d\n", BP_PATH(bp),
6283 load_count[BP_PATH(bp)][0], load_count[BP_PATH(bp)][1],
6284 load_count[BP_PATH(bp)][2]);
6285 if (load_count[BP_PATH(bp)][0] == 0)
Eilon Greenstein34f80b02008-06-23 20:33:01 -07006286 reset_code = FW_MSG_CODE_DRV_UNLOAD_COMMON;
Dmitry Kravkovf2e08992010-10-06 03:28:26 +00006287 else if (load_count[BP_PATH(bp)][1 + port] == 0)
Eilon Greenstein34f80b02008-06-23 20:33:01 -07006288 reset_code = FW_MSG_CODE_DRV_UNLOAD_PORT;
6289 else
6290 reset_code = FW_MSG_CODE_DRV_UNLOAD_FUNCTION;
6291 }
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02006292
Eilon Greenstein34f80b02008-06-23 20:33:01 -07006293 if ((reset_code == FW_MSG_CODE_DRV_UNLOAD_COMMON) ||
6294 (reset_code == FW_MSG_CODE_DRV_UNLOAD_PORT))
6295 bnx2x__link_reset(bp);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02006296
Dmitry Kravkov523224a2010-10-06 03:23:26 +00006297 /* Disable HW interrupts, NAPI */
6298 bnx2x_netif_stop(bp, 1);
6299
6300 /* Release IRQs */
Dmitry Kravkovd6214d72010-10-06 03:32:10 +00006301 bnx2x_free_irq(bp);
Dmitry Kravkov523224a2010-10-06 03:23:26 +00006302
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02006303 /* Reset the chip */
Eliezer Tamir228241e2008-02-28 11:56:57 -08006304 bnx2x_reset_chip(bp, reset_code);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02006305
6306 /* Report UNLOAD_DONE to MCP */
Eilon Greenstein34f80b02008-06-23 20:33:01 -07006307 if (!BP_NOMCP(bp))
Yaniv Rosnera22f0782010-09-07 11:41:20 +00006308 bnx2x_fw_command(bp, DRV_MSG_CODE_UNLOAD_DONE, 0);
Eilon Greenstein356e2382009-02-12 08:38:32 +00006309
Vladislav Zolotarov72fd0712010-04-19 01:13:12 +00006310}
6311
/*
 * bnx2x_disable_close_the_gate - disable the "close the gates" HW feature
 * @bp:		driver handle
 *
 * On E1 clears the relevant bits in the per-port AEU attention mask; on
 * E1H clears the PXP/NIG close masks in the general AEU mask.  Other
 * chip revisions are not touched here.
 */
void bnx2x_disable_close_the_gate(struct bnx2x *bp)
{
	u32 val;

	DP(NETIF_MSG_HW, "Disabling \"close the gates\"\n");

	if (CHIP_IS_E1(bp)) {
		int port = BP_PORT(bp);
		u32 addr = port ? MISC_REG_AEU_MASK_ATTN_FUNC_1 :
			MISC_REG_AEU_MASK_ATTN_FUNC_0;

		val = REG_RD(bp, addr);
		val &= ~(0x300);
		REG_WR(bp, addr, val);
	} else if (CHIP_IS_E1H(bp)) {
		val = REG_RD(bp, MISC_REG_AEU_GENERAL_MASK);
		val &= ~(MISC_AEU_GENERAL_MASK_REG_AEU_PXP_CLOSE_MASK |
			 MISC_AEU_GENERAL_MASK_REG_AEU_NIG_CLOSE_MASK);
		REG_WR(bp, MISC_REG_AEU_GENERAL_MASK, val);
	}
}
6333
/*
 * bnx2x_set_234_gates - close or open gates #2, #3 and #4
 * @bp:		driver handle
 * @close:	true to close the gates, false to open them
 *
 * Gates #2 and #4 (PXP doorbell/internal-write discard) exist only on
 * non-E1 chips.  Note that gate #3 (HC config) has inverted polarity:
 * the bit is SET to open and CLEARED to close.
 */
static void bnx2x_set_234_gates(struct bnx2x *bp, bool close)
{
	u32 val, addr;

	/* Gates #2 and #4a are closed/opened for "not E1" only */
	if (!CHIP_IS_E1(bp)) {
		/* #4 */
		val = REG_RD(bp, PXP_REG_HST_DISCARD_DOORBELLS);
		REG_WR(bp, PXP_REG_HST_DISCARD_DOORBELLS,
		       close ? (val | 0x1) : (val & (~(u32)1)));
		/* #2 */
		val = REG_RD(bp, PXP_REG_HST_DISCARD_INTERNAL_WRITES);
		REG_WR(bp, PXP_REG_HST_DISCARD_INTERNAL_WRITES,
		       close ? (val | 0x1) : (val & (~(u32)1)));
	}

	/* #3 (inverted: bit set == gate open) */
	addr = BP_PORT(bp) ? HC_REG_CONFIG_1 : HC_REG_CONFIG_0;
	val = REG_RD(bp, addr);
	REG_WR(bp, addr, (!close) ? (val | 0x1) : (val & (~(u32)1)));

	DP(NETIF_MSG_HW, "%s gates #2, #3 and #4\n",
	   close ? "closing" : "opening");
	mmiowb();
}
6360
6361#define SHARED_MF_CLP_MAGIC 0x80000000 /* `magic' bit */
6362
6363static void bnx2x_clp_reset_prep(struct bnx2x *bp, u32 *magic_val)
6364{
6365 /* Do some magic... */
6366 u32 val = MF_CFG_RD(bp, shared_mf_config.clp_mb);
6367 *magic_val = val & SHARED_MF_CLP_MAGIC;
6368 MF_CFG_WR(bp, shared_mf_config.clp_mb, val | SHARED_MF_CLP_MAGIC);
6369}
6370
Dmitry Kravkove8920672011-05-04 23:52:40 +00006371/**
6372 * bnx2x_clp_reset_done - restore the value of the `magic' bit.
Vladislav Zolotarov72fd0712010-04-19 01:13:12 +00006373 *
Dmitry Kravkove8920672011-05-04 23:52:40 +00006374 * @bp: driver handle
6375 * @magic_val: old value of the `magic' bit.
Vladislav Zolotarov72fd0712010-04-19 01:13:12 +00006376 */
6377static void bnx2x_clp_reset_done(struct bnx2x *bp, u32 magic_val)
6378{
6379 /* Restore the `magic' bit value... */
Vladislav Zolotarov72fd0712010-04-19 01:13:12 +00006380 u32 val = MF_CFG_RD(bp, shared_mf_config.clp_mb);
6381 MF_CFG_WR(bp, shared_mf_config.clp_mb,
6382 (val & (~SHARED_MF_CLP_MAGIC)) | magic_val);
6383}
6384
/**
 * bnx2x_reset_mcp_prep - prepare for MCP reset.
 *
 * @bp:		driver handle
 * @magic_val:	old value of 'magic' bit.
 *
 * Takes care of CLP configurations: saves the `magic' bit (non-E1 only)
 * so the MF config survives the reset, then clears the shared-memory
 * validity map so the MCP re-validates after coming back up.
 */
static void bnx2x_reset_mcp_prep(struct bnx2x *bp, u32 *magic_val)
{
	u32 shmem;
	u32 validity_offset;

	DP(NETIF_MSG_HW, "Starting\n");

	/* Set `magic' bit in order to save MF config */
	if (!CHIP_IS_E1(bp))
		bnx2x_clp_reset_prep(bp, magic_val);

	/* Get shmem offset */
	shmem = REG_RD(bp, MISC_REG_SHARED_MEM_ADDR);
	validity_offset = offsetof(struct shmem_region, validity_map[0]);

	/* Clear validity map flags */
	if (shmem > 0)
		REG_WR(bp, shmem + validity_offset, 0);
}
6412
6413#define MCP_TIMEOUT 5000 /* 5 seconds (in ms) */
6414#define MCP_ONE_TIMEOUT 100 /* 100 ms */
6415
Dmitry Kravkove8920672011-05-04 23:52:40 +00006416/**
6417 * bnx2x_mcp_wait_one - wait for MCP_ONE_TIMEOUT
Vladislav Zolotarov72fd0712010-04-19 01:13:12 +00006418 *
Dmitry Kravkove8920672011-05-04 23:52:40 +00006419 * @bp: driver handle
Vladislav Zolotarov72fd0712010-04-19 01:13:12 +00006420 */
6421static inline void bnx2x_mcp_wait_one(struct bnx2x *bp)
6422{
6423 /* special handling for emulation and FPGA,
6424 wait 10 times longer */
6425 if (CHIP_REV_IS_SLOW(bp))
6426 msleep(MCP_ONE_TIMEOUT*10);
6427 else
6428 msleep(MCP_ONE_TIMEOUT);
6429}
6430
/*
 * initializes bp->common.shmem_base and waits for validity signature to
 * appear.  Polls every MCP_ONE_TIMEOUT up to MCP_TIMEOUT total; returns
 * 0 once the MB validity bit is seen, -ENODEV on timeout.
 */
static int bnx2x_init_shmem(struct bnx2x *bp)
{
	int cnt = 0;
	u32 val = 0;

	do {
		bp->common.shmem_base = REG_RD(bp, MISC_REG_SHARED_MEM_ADDR);
		if (bp->common.shmem_base) {
			val = SHMEM_RD(bp, validity_map[BP_PORT(bp)]);
			if (val & SHR_MEM_VALIDITY_MB)
				return 0;
		}

		bnx2x_mcp_wait_one(bp);

	} while (cnt++ < (MCP_TIMEOUT / MCP_ONE_TIMEOUT));

	BNX2X_ERR("BAD MCP validity signature\n");

	return -ENODEV;
}
6455
6456static int bnx2x_reset_mcp_comp(struct bnx2x *bp, u32 magic_val)
6457{
6458 int rc = bnx2x_init_shmem(bp);
6459
Vladislav Zolotarov72fd0712010-04-19 01:13:12 +00006460 /* Restore the `magic' bit value */
6461 if (!CHIP_IS_E1(bp))
6462 bnx2x_clp_reset_done(bp, magic_val);
6463
6464 return rc;
6465}
6466
6467static void bnx2x_pxp_prep(struct bnx2x *bp)
6468{
6469 if (!CHIP_IS_E1(bp)) {
6470 REG_WR(bp, PXP2_REG_RD_START_INIT, 0);
6471 REG_WR(bp, PXP2_REG_RQ_RBC_DONE, 0);
6472 REG_WR(bp, PXP2_REG_RQ_CFG_DONE, 0);
6473 mmiowb();
6474 }
6475}
6476
/*
 * Reset the whole chip except for:
 *      - PCIE core
 *      - PCI Glue, PSWHST, PXP/PXP2 RF (all controlled by
 *              one reset bit)
 *      - IGU
 *      - MISC (including AEU)
 *      - GRC
 *      - RBCN, RBCP
 *
 * Implemented by asserting (CLEAR) and then de-asserting (SET) the two
 * MISC reset registers, with the blocks above masked out of the reset.
 */
static void bnx2x_process_kill_chip_reset(struct bnx2x *bp)
{
	u32 not_reset_mask1, reset_mask1, not_reset_mask2, reset_mask2;

	/* Blocks in reset register 1 that must survive the reset */
	not_reset_mask1 =
		MISC_REGISTERS_RESET_REG_1_RST_HC |
		MISC_REGISTERS_RESET_REG_1_RST_PXPV |
		MISC_REGISTERS_RESET_REG_1_RST_PXP;

	/* Blocks in reset register 2 that must survive the reset */
	not_reset_mask2 =
		MISC_REGISTERS_RESET_REG_2_RST_MDIO |
		MISC_REGISTERS_RESET_REG_2_RST_EMAC0_HARD_CORE |
		MISC_REGISTERS_RESET_REG_2_RST_EMAC1_HARD_CORE |
		MISC_REGISTERS_RESET_REG_2_RST_MISC_CORE |
		MISC_REGISTERS_RESET_REG_2_RST_RBCN |
		MISC_REGISTERS_RESET_REG_2_RST_GRC  |
		MISC_REGISTERS_RESET_REG_2_RST_MCP_N_RESET_REG_HARD_CORE |
		MISC_REGISTERS_RESET_REG_2_RST_MCP_N_HARD_CORE_RST_B;

	reset_mask1 = 0xffffffff;

	/* Reset register 2 is narrower on E1 */
	if (CHIP_IS_E1(bp))
		reset_mask2 = 0xffff;
	else
		reset_mask2 = 0x1ffff;

	/* Assert reset on everything except the protected blocks */
	REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_CLEAR,
	       reset_mask1 & (~not_reset_mask1));
	REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_2_CLEAR,
	       reset_mask2 & (~not_reset_mask2));

	barrier();
	mmiowb();

	/* De-assert the reset */
	REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_SET, reset_mask1);
	REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_2_SET, reset_mask2);
	mmiowb();
}
6525
/*
 * bnx2x_process_kill - full "process kill" chip recovery sequence
 * @bp:		driver handle
 *
 * Waits (up to ~1s) for the PXP2 Tetris buffer and both ports to go
 * idle, closes gates #2-#4, prepares the MCP and PXP for reset, resets
 * the chip via bnx2x_process_kill_chip_reset(), then brings the MCP and
 * PXP back and reopens the gates.  Returns 0 on success or -EAGAIN if
 * the chip failed to drain or the MCP did not come back.
 */
static int bnx2x_process_kill(struct bnx2x *bp)
{
	int cnt = 1000;
	u32 val = 0;
	u32 sr_cnt, blk_cnt, port_is_idle_0, port_is_idle_1, pgl_exp_rom2;


	/* Empty the Tetris buffer, wait for 1s */
	do {
		sr_cnt = REG_RD(bp, PXP2_REG_RD_SR_CNT);
		blk_cnt = REG_RD(bp, PXP2_REG_RD_BLK_CNT);
		port_is_idle_0 = REG_RD(bp, PXP2_REG_RD_PORT_IS_IDLE_0);
		port_is_idle_1 = REG_RD(bp, PXP2_REG_RD_PORT_IS_IDLE_1);
		pgl_exp_rom2 = REG_RD(bp, PXP2_REG_PGL_EXP_ROM2);
		/* 0x7e free SR entries and 0xa0 free block entries mean
		 * the buffers are completely empty.
		 */
		if ((sr_cnt == 0x7e) && (blk_cnt == 0xa0) &&
		    ((port_is_idle_0 & 0x1) == 0x1) &&
		    ((port_is_idle_1 & 0x1) == 0x1) &&
		    (pgl_exp_rom2 == 0xffffffff))
			break;
		msleep(1);
	} while (cnt-- > 0);

	if (cnt <= 0) {
		DP(NETIF_MSG_HW, "Tetris buffer didn't get empty or there"
			  " are still"
			  " outstanding read requests after 1s!\n");
		DP(NETIF_MSG_HW, "sr_cnt=0x%08x, blk_cnt=0x%08x,"
			  " port_is_idle_0=0x%08x,"
			  " port_is_idle_1=0x%08x, pgl_exp_rom2=0x%08x\n",
			  sr_cnt, blk_cnt, port_is_idle_0, port_is_idle_1,
			  pgl_exp_rom2);
		return -EAGAIN;
	}

	barrier();

	/* Close gates #2, #3 and #4 */
	bnx2x_set_234_gates(bp, true);

	/* TBD: Indicate that "process kill" is in progress to MCP */

	/* Clear "unprepared" bit */
	REG_WR(bp, MISC_REG_UNPREPARED, 0);
	barrier();

	/* Make sure all is written to the chip before the reset */
	mmiowb();

	/* Wait for 1ms to empty GLUE and PCI-E core queues,
	 * PSWHST, GRC and PSWRD Tetris buffer.
	 */
	msleep(1);

	/* Prepare to chip reset: */
	/* MCP */
	bnx2x_reset_mcp_prep(bp, &val);

	/* PXP */
	bnx2x_pxp_prep(bp);
	barrier();

	/* reset the chip */
	bnx2x_process_kill_chip_reset(bp);
	barrier();

	/* Recover after reset: */
	/* MCP */
	if (bnx2x_reset_mcp_comp(bp, val))
		return -EAGAIN;

	/* PXP */
	bnx2x_pxp_prep(bp);

	/* Open the gates #2, #3 and #4 */
	bnx2x_set_234_gates(bp, false);

	/* TBD: IGU/AEU preparation bring back the AEU/IGU to a
	 * reset state, re-enable attentions. */

	return 0;
}
6607
/*
 * bnx2x_leader_reset - recovery-leader attempt to revive the chip
 * @bp:		driver handle
 *
 * Runs the "process kill" sequence; on success clears the global
 * reset-in-progress flag and marks recovery done.  In all cases the
 * leadership and its HW lock are released before returning.  Returns 0
 * on success or -EAGAIN if process kill failed.
 */
static int bnx2x_leader_reset(struct bnx2x *bp)
{
	int rc = 0;
	/* Try to recover after the failure */
	if (bnx2x_process_kill(bp)) {
		printk(KERN_ERR "%s: Something bad had happen! Aii!\n",
		       bp->dev->name);
		rc = -EAGAIN;
		goto exit_leader_reset;
	}

	/* Clear "reset is in progress" bit and update the driver state */
	bnx2x_set_reset_done(bp);
	bp->recovery_state = BNX2X_RECOVERY_DONE;

exit_leader_reset:
	/* Give up leadership and its HW lock in every exit path */
	bp->is_leader = 0;
	bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_RESERVED_08);
	smp_wmb();
	return rc;
}
6629
/* Assumption: runs under rtnl lock. This together with the fact
 * that it's called only from bnx2x_reset_task() ensure that it
 * will never be called when netif_running(bp->dev) is false.
 *
 * Parity-recovery state machine.  One function per ASIC becomes the
 * "leader" (by winning the RESERVED_08 HW lock) and performs the actual
 * chip reset once every other function has unloaded; non-leaders simply
 * unload, wait for the leader to finish, and then reload.  The function
 * either returns (possibly after rescheduling bp->reset_task to poll
 * again in HZ/10) or loops to process a new value of bp->recovery_state.
 */
static void bnx2x_parity_recover(struct bnx2x *bp)
{
	DP(NETIF_MSG_HW, "Handling parity\n");
	while (1) {
		switch (bp->recovery_state) {
		case BNX2X_RECOVERY_INIT:
			DP(NETIF_MSG_HW, "State is BNX2X_RECOVERY_INIT\n");
			/* Try to get a LEADER_LOCK HW lock */
			if (bnx2x_trylock_hw_lock(bp,
				HW_LOCK_RESOURCE_RESERVED_08))
				bp->is_leader = 1;

			/* Stop the driver */
			/* If interface has been removed - break */
			if (bnx2x_nic_unload(bp, UNLOAD_RECOVERY))
				return;

			bp->recovery_state = BNX2X_RECOVERY_WAIT;
			/* Ensure "is_leader" and "recovery_state"
			 * update values are seen on other CPUs
			 */
			smp_wmb();
			break;

		case BNX2X_RECOVERY_WAIT:
			DP(NETIF_MSG_HW, "State is BNX2X_RECOVERY_WAIT\n");
			if (bp->is_leader) {
				u32 load_counter = bnx2x_get_load_cnt(bp);
				if (load_counter) {
					/* Wait until all other functions get
					 * down.
					 */
					schedule_delayed_work(&bp->reset_task,
								HZ/10);
					return;
				} else {
					/* If all other functions got down -
					 * try to bring the chip back to
					 * normal. In any case it's an exit
					 * point for a leader.
					 */
					if (bnx2x_leader_reset(bp) ||
					    bnx2x_nic_load(bp, LOAD_NORMAL)) {
						printk(KERN_ERR"%s: Recovery "
						"has failed. Power cycle is "
						"needed.\n", bp->dev->name);
						/* Disconnect this device */
						netif_device_detach(bp->dev);
						/* Block ifup for all function
						 * of this ASIC until
						 * "process kill" or power
						 * cycle.
						 */
						bnx2x_set_reset_in_progress(bp);
						/* Shut down the power */
						bnx2x_set_power_state(bp,
								PCI_D3hot);
						return;
					}

					return;
				}
			} else { /* non-leader */
				if (!bnx2x_reset_is_done(bp)) {
					/* Try to get a LEADER_LOCK HW lock as
					 * long as a former leader may have
					 * been unloaded by the user or
					 * released a leadership by another
					 * reason.
					 */
					if (bnx2x_trylock_hw_lock(bp,
					    HW_LOCK_RESOURCE_RESERVED_08)) {
						/* I'm a leader now! Restart a
						 * switch case.
						 */
						bp->is_leader = 1;
						break;
					}

					/* Leader still busy - poll again
					 * later.
					 */
					schedule_delayed_work(&bp->reset_task,
								HZ/10);
					return;

				} else { /* A leader has completed
					  * the "process kill". It's an exit
					  * point for a non-leader.
					  */
					bnx2x_nic_load(bp, LOAD_NORMAL);
					bp->recovery_state =
						BNX2X_RECOVERY_DONE;
					smp_wmb();
					return;
				}
			}
		default:
			return;
		}
	}
}
6733
6734/* bnx2x_nic_unload() flushes the bnx2x_wq, thus reset task is
6735 * scheduled on a general queue in order to prevent a dead lock.
6736 */
Eilon Greenstein34f80b02008-06-23 20:33:01 -07006737static void bnx2x_reset_task(struct work_struct *work)
6738{
Vladislav Zolotarov72fd0712010-04-19 01:13:12 +00006739 struct bnx2x *bp = container_of(work, struct bnx2x, reset_task.work);
Eilon Greenstein34f80b02008-06-23 20:33:01 -07006740
6741#ifdef BNX2X_STOP_ON_ERROR
6742 BNX2X_ERR("reset task called but STOP_ON_ERROR defined"
6743 " so reset not done to allow debug dump,\n"
Vladislav Zolotarov72fd0712010-04-19 01:13:12 +00006744 KERN_ERR " you will need to reboot when done\n");
Eilon Greenstein34f80b02008-06-23 20:33:01 -07006745 return;
6746#endif
6747
6748 rtnl_lock();
6749
6750 if (!netif_running(bp->dev))
6751 goto reset_task_exit;
6752
Vladislav Zolotarov72fd0712010-04-19 01:13:12 +00006753 if (unlikely(bp->recovery_state != BNX2X_RECOVERY_DONE))
6754 bnx2x_parity_recover(bp);
6755 else {
6756 bnx2x_nic_unload(bp, UNLOAD_NORMAL);
6757 bnx2x_nic_load(bp, LOAD_NORMAL);
6758 }
Eilon Greenstein34f80b02008-06-23 20:33:01 -07006759
6760reset_task_exit:
6761 rtnl_unlock();
6762}
6763
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02006764/* end of nic load/unload */
6765
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02006766/*
6767 * Init service functions
6768 */
6769
stephen hemminger8d962862010-10-21 07:50:56 +00006770static u32 bnx2x_get_pretend_reg(struct bnx2x *bp)
Eilon Greensteinf1ef27e2009-02-12 08:36:23 +00006771{
Dmitry Kravkovf2e08992010-10-06 03:28:26 +00006772 u32 base = PXP2_REG_PGL_PRETEND_FUNC_F0;
6773 u32 stride = PXP2_REG_PGL_PRETEND_FUNC_F1 - base;
6774 return base + (BP_ABS_FUNC(bp)) * stride;
Eilon Greensteinf1ef27e2009-02-12 08:36:23 +00006775}
6776
/* Disable interrupts on an E1H+ chip on behalf of a (possibly still
 * active) UNDI driver.  The HW "pretend" mechanism is used to issue the
 * disable as function 0, then the original function is restored.  The
 * mmiowb()/REG_RD() pairs make sure each pretend write has reached the
 * chip before the next access depends on it.
 */
static void bnx2x_undi_int_disable_e1h(struct bnx2x *bp)
{
	u32 reg = bnx2x_get_pretend_reg(bp);

	/* Flush all outstanding writes */
	mmiowb();

	/* Pretend to be function 0 */
	REG_WR(bp, reg, 0);
	REG_RD(bp, reg);	/* Flush the GRC transaction (in the chip) */

	/* From now we are in the "like-E1" mode */
	bnx2x_int_disable(bp);

	/* Flush all outstanding writes */
	mmiowb();

	/* Restore the original function */
	REG_WR(bp, reg, BP_ABS_FUNC(bp));
	REG_RD(bp, reg);
}
6798
/* Disable chip interrupts during UNDI takeover.
 *
 * E1 can be disabled directly; later chips need the function-0
 * "pretend" sequence performed by the _e1h variant.
 */
static inline void bnx2x_undi_int_disable(struct bnx2x *bp)
{
	if (!CHIP_IS_E1(bp))
		bnx2x_undi_int_disable_e1h(bp);
	else
		bnx2x_int_disable(bp);
}
6806
/* Detect a leftover UNDI (pre-boot) driver and unload it cleanly.
 *
 * If MISC_REG_UNPREPARED indicates a loaded driver and the doorbell CID
 * offset carries the UNDI signature (0x7), an unload request is sent to
 * the MCP for both ports (temporarily impersonating PF 0 and PF 1),
 * input traffic is blocked, the chip is reset while preserving the NIG
 * port-swap strap configuration, and finally our own pf_num/fw_seq are
 * restored.  The whole sequence is serialized by the UNDI HW lock.
 */
static void __devinit bnx2x_undi_unload(struct bnx2x *bp)
{
	u32 val;

	/* Check if there is any driver already loaded */
	val = REG_RD(bp, MISC_REG_UNPREPARED);
	if (val == 0x1) {
		/* Check if it is the UNDI driver
		 * UNDI driver initializes CID offset for normal bell to 0x7
		 */
		bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_UNDI);
		val = REG_RD(bp, DORQ_REG_NORM_CID_OFST);
		if (val == 0x7) {
			u32 reset_code = DRV_MSG_CODE_UNLOAD_REQ_WOL_DIS;
			/* save our pf_num */
			int orig_pf_num = bp->pf_num;
			u32 swap_en;
			u32 swap_val;

			/* clear the UNDI indication */
			REG_WR(bp, DORQ_REG_NORM_CID_OFST, 0);

			BNX2X_DEV_INFO("UNDI is active! reset device\n");

			/* try unload UNDI on port 0 */
			bp->pf_num = 0;
			bp->fw_seq =
			      (SHMEM_RD(bp, func_mb[bp->pf_num].drv_mb_header) &
				DRV_MSG_SEQ_NUMBER_MASK);
			reset_code = bnx2x_fw_command(bp, reset_code, 0);

			/* if UNDI is loaded on the other port */
			if (reset_code != FW_MSG_CODE_DRV_UNLOAD_COMMON) {

				/* send "DONE" for previous unload */
				bnx2x_fw_command(bp,
					     DRV_MSG_CODE_UNLOAD_DONE, 0);

				/* unload UNDI on port 1 */
				bp->pf_num = 1;
				bp->fw_seq =
			      (SHMEM_RD(bp, func_mb[bp->pf_num].drv_mb_header) &
					DRV_MSG_SEQ_NUMBER_MASK);
				reset_code = DRV_MSG_CODE_UNLOAD_REQ_WOL_DIS;

				bnx2x_fw_command(bp, reset_code, 0);
			}

			/* now it's safe to release the lock */
			bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_UNDI);

			bnx2x_undi_int_disable(bp);

			/* close input traffic and wait for it */
			/* Do not rcv packets to BRB */
			REG_WR(bp,
			    (BP_PORT(bp) ? NIG_REG_LLH1_BRB1_DRV_MASK :
			      NIG_REG_LLH0_BRB1_DRV_MASK), 0x0);
			/* Do not direct rcv packets that are not for MCP to
			 * the BRB */
			REG_WR(bp,
			    (BP_PORT(bp) ? NIG_REG_LLH1_BRB1_NOT_MCP :
			      NIG_REG_LLH0_BRB1_NOT_MCP), 0x0);
			/* clear AEU */
			REG_WR(bp,
			    (BP_PORT(bp) ? MISC_REG_AEU_MASK_ATTN_FUNC_1 :
			      MISC_REG_AEU_MASK_ATTN_FUNC_0), 0);
			msleep(10);

			/* save NIG port swap info */
			swap_val = REG_RD(bp, NIG_REG_PORT_SWAP);
			swap_en = REG_RD(bp, NIG_REG_STRAP_OVERRIDE);
			/* reset device */
			REG_WR(bp,
			       GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_CLEAR,
			       0xd3ffffff);
			REG_WR(bp,
			       GRCBASE_MISC + MISC_REGISTERS_RESET_REG_2_CLEAR,
			       0x1403);
			/* take the NIG out of reset and restore swap values */
			REG_WR(bp,
			       GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_SET,
			       MISC_REGISTERS_RESET_REG_1_RST_NIG);
			REG_WR(bp, NIG_REG_PORT_SWAP, swap_val);
			REG_WR(bp, NIG_REG_STRAP_OVERRIDE, swap_en);

			/* send unload done to the MCP */
			bnx2x_fw_command(bp, DRV_MSG_CODE_UNLOAD_DONE, 0);

			/* restore our func and fw_seq */
			bp->pf_num = orig_pf_num;
			bp->fw_seq =
			      (SHMEM_RD(bp, func_mb[bp->pf_num].drv_mb_header) &
				DRV_MSG_SEQ_NUMBER_MASK);
		} else
			bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_UNDI);
	}
}
6905
/* Probe chip-wide (port-independent) HW information at probe time:
 * chip id/revision, port mode and PF id layout, base FW status block
 * id, flash size, shmem bases, bootcode version and feature flags, WoL
 * capability and the board part number.  Results are stored in
 * bp->common, bp->link_params and bp->flags.
 */
static void __devinit bnx2x_get_common_hwinfo(struct bnx2x *bp)
{
	u32 val, val2, val3, val4, id;
	u16 pmc;

	/* Get the chip revision id and number. */
	/* chip num:16-31, rev:12-15, metal:4-11, bond_id:0-3 */
	val = REG_RD(bp, MISC_REG_CHIP_NUM);
	id = ((val & 0xffff) << 16);
	val = REG_RD(bp, MISC_REG_CHIP_REV);
	id |= ((val & 0xf) << 12);
	val = REG_RD(bp, MISC_REG_CHIP_METAL);
	id |= ((val & 0xff) << 4);
	val = REG_RD(bp, MISC_REG_BOND_ID);
	id |= (val & 0xf);
	bp->common.chip_id = id;

	/* Set doorbell size */
	bp->db_size = (1 << BNX2X_DB_SHIFT);

	if (CHIP_IS_E2(bp)) {
		/* Port mode: the override register wins over the strap
		 * value when its enable bit (bit 0) is set.
		 */
		val = REG_RD(bp, MISC_REG_PORT4MODE_EN_OVWR);
		if ((val & 1) == 0)
			val = REG_RD(bp, MISC_REG_PORT4MODE_EN);
		else
			val = (val >> 1) & 1;
		BNX2X_DEV_INFO("chip is in %s\n", val ? "4_PORT_MODE" :
						       "2_PORT_MODE");
		bp->common.chip_port_mode = val ? CHIP_4_PORT_MODE :
						 CHIP_2_PORT_MODE;

		if (CHIP_MODE_IS_4_PORT(bp))
			bp->pfid = (bp->pf_num >> 1);	/* 0..3 */
		else
			bp->pfid = (bp->pf_num & 0x6);	/* 0, 2, 4, 6 */
	} else {
		bp->common.chip_port_mode = CHIP_PORT_MODE_NONE; /* N/A */
		bp->pfid = bp->pf_num;			/* 0..7 */
	}

	/*
	 * set base FW non-default (fast path) status block id, this value is
	 * used to initialize the fw_sb_id saved on the fp/queue structure to
	 * determine the id used by the FW.
	 */
	if (CHIP_IS_E1x(bp))
		bp->base_fw_ndsb = BP_PORT(bp) * FP_SB_MAX_E1x;
	else /* E2 */
		bp->base_fw_ndsb = BP_PORT(bp) * FP_SB_MAX_E2;

	bp->link_params.chip_id = bp->common.chip_id;
	BNX2X_DEV_INFO("chip ID is 0x%x\n", id);

	/* Detect single-port devices from the chip id / strap bits */
	val = (REG_RD(bp, 0x2874) & 0x55);
	if ((bp->common.chip_id & 0x1) ||
	    (CHIP_IS_E1(bp) && val) || (CHIP_IS_E1H(bp) && (val == 0x55))) {
		bp->flags |= ONE_PORT_FLAG;
		BNX2X_DEV_INFO("single port device\n");
	}

	val = REG_RD(bp, MCP_REG_MCPR_NVM_CFG4);
	bp->common.flash_size = (NVRAM_1MB_SIZE <<
				 (val & MCPR_NVM_CFG4_FLASH_SIZE));
	BNX2X_DEV_INFO("flash_size 0x%x (%d)\n",
		       bp->common.flash_size, bp->common.flash_size);

	/* Locate the shmem base (also fills bp->common.shmem_base) */
	bnx2x_init_shmem(bp);

	bp->common.shmem2_base = REG_RD(bp, (BP_PATH(bp) ?
					MISC_REG_GENERIC_CR_1 :
					MISC_REG_GENERIC_CR_0));

	bp->link_params.shmem_base = bp->common.shmem_base;
	bp->link_params.shmem2_base = bp->common.shmem2_base;
	BNX2X_DEV_INFO("shmem offset 0x%x shmem2 offset 0x%x\n",
		       bp->common.shmem_base, bp->common.shmem2_base);

	/* Without shmem there is no MCP - skip the MCP-dependent setup */
	if (!bp->common.shmem_base) {
		BNX2X_DEV_INFO("MCP not active\n");
		bp->flags |= NO_MCP_FLAG;
		return;
	}

	bp->common.hw_config = SHMEM_RD(bp, dev_info.shared_hw_config.config);
	BNX2X_DEV_INFO("hw_config 0x%08x\n", bp->common.hw_config);

	bp->link_params.hw_led_mode = ((bp->common.hw_config &
					SHARED_HW_CFG_LED_MODE_MASK) >>
				       SHARED_HW_CFG_LED_MODE_SHIFT);

	bp->link_params.feature_config_flags = 0;
	val = SHMEM_RD(bp, dev_info.shared_feature_config.config);
	if (val & SHARED_FEAT_CFG_OVERRIDE_PREEMPHASIS_CFG_ENABLED)
		bp->link_params.feature_config_flags |=
				FEATURE_CONFIG_OVERRIDE_PREEMPHASIS_ENABLED;
	else
		bp->link_params.feature_config_flags &=
				~FEATURE_CONFIG_OVERRIDE_PREEMPHASIS_ENABLED;

	val = SHMEM_RD(bp, dev_info.bc_rev) >> 8;
	bp->common.bc_ver = val;
	BNX2X_DEV_INFO("bc_ver %X\n", val);
	if (val < BNX2X_BC_VER) {
		/* for now only warn
		 * later we might need to enforce this */
		BNX2X_ERR("This driver needs bc_ver %X but found %X, "
			  "please upgrade BC\n", BNX2X_BC_VER, val);
	}
	/* Feature flags gated on the bootcode version */
	bp->link_params.feature_config_flags |=
				(val >= REQ_BC_VER_4_VRFY_FIRST_PHY_OPT_MDL) ?
				FEATURE_CONFIG_BC_SUPPORTS_OPT_MDL_VRFY : 0;

	bp->link_params.feature_config_flags |=
		(val >= REQ_BC_VER_4_VRFY_SPECIFIC_PHY_OPT_MDL) ?
		FEATURE_CONFIG_BC_SUPPORTS_DUAL_PHY_OPT_MDL_VRFY : 0;

	/* WoL capability follows the PME-from-D3cold bit in PCI PM caps */
	pci_read_config_word(bp->pdev, bp->pm_cap + PCI_PM_PMC, &pmc);
	bp->flags |= (pmc & PCI_PM_CAP_PME_D3cold) ? 0 : NO_WOL_FLAG;

	BNX2X_DEV_INFO("%sWoL capable\n",
		       (bp->flags & NO_WOL_FLAG) ? "not " : "");

	val = SHMEM_RD(bp, dev_info.shared_hw_config.part_num);
	val2 = SHMEM_RD(bp, dev_info.shared_hw_config.part_num[4]);
	val3 = SHMEM_RD(bp, dev_info.shared_hw_config.part_num[8]);
	val4 = SHMEM_RD(bp, dev_info.shared_hw_config.part_num[12]);

	dev_info(&bp->pdev->dev, "part number %X-%X-%X-%X\n",
		 val, val2, val3, val4);
}
7036
/* Field extractors for an IGU CAM mapping-memory entry */
#define IGU_FID(val)	GET_FIELD((val), IGU_REG_MAPPING_MEMORY_FID)
#define IGU_VEC(val)	GET_FIELD((val), IGU_REG_MAPPING_MEMORY_VECTOR)

/* Determine this PF's IGU status-block layout: the default SB id
 * (igu_dsb_id), the first non-default SB (igu_base_sb) and how many
 * non-default SBs it owns (igu_sb_cnt).  In backward-compatible mode
 * the layout is fixed per function; otherwise the IGU CAM is scanned
 * for entries belonging to this PF.
 */
static void __devinit bnx2x_get_igu_cam_info(struct bnx2x *bp)
{
	int pfid = BP_FUNC(bp);
	int vn = BP_E1HVN(bp);
	int igu_sb_id;
	u32 val;
	u8 fid;

	bp->igu_base_sb = 0xff;
	bp->igu_sb_cnt = 0;
	if (CHIP_INT_MODE_IS_BC(bp)) {
		/* Fixed layout - no CAM scan needed */
		bp->igu_sb_cnt = min_t(u8, FP_SB_MAX_E1x,
				       NUM_IGU_SB_REQUIRED(bp->l2_cid_count));

		bp->igu_base_sb = (CHIP_MODE_IS_4_PORT(bp) ? pfid : vn) *
			FP_SB_MAX_E1x;

		bp->igu_dsb_id = E1HVN_MAX * FP_SB_MAX_E1x +
			(CHIP_MODE_IS_4_PORT(bp) ? pfid : vn);

		return;
	}

	/* IGU in normal mode - read CAM */
	for (igu_sb_id = 0; igu_sb_id < IGU_REG_MAPPING_MEMORY_SIZE;
	     igu_sb_id++) {
		val = REG_RD(bp, IGU_REG_MAPPING_MEMORY + igu_sb_id * 4);
		if (!(val & IGU_REG_MAPPING_MEMORY_VALID))
			continue;
		fid = IGU_FID(val);
		/* Only PF entries that match our PF id are of interest */
		if ((fid & IGU_FID_ENCODE_IS_PF)) {
			if ((fid & IGU_FID_PF_NUM_MASK) != pfid)
				continue;
			if (IGU_VEC(val) == 0)
				/* default status block */
				bp->igu_dsb_id = igu_sb_id;
			else {
				if (bp->igu_base_sb == 0xff)
					bp->igu_base_sb = igu_sb_id;
				bp->igu_sb_cnt++;
			}
		}
	}
	/* Do not claim more SBs than the L2 CIDs actually need */
	bp->igu_sb_cnt = min_t(u8, bp->igu_sb_cnt,
			       NUM_IGU_SB_REQUIRED(bp->l2_cid_count));
	if (bp->igu_sb_cnt == 0)
		BNX2X_ERR("CAM configuration error\n");
}
7088
/* Build bp->port.supported[0/1] - the link capabilities per PHY
 * configuration - from the PHYs detected in bp->link_params, read the
 * PHY address from the NIG according to switch_cfg, and then mask the
 * supported modes with the NVRAM speed_cap_mask for each configuration.
 */
static void __devinit bnx2x_link_settings_supported(struct bnx2x *bp,
						    u32 switch_cfg)
{
	int cfg_size = 0, idx, port = BP_PORT(bp);

	/* Aggregation of supported attributes of all external phys */
	bp->port.supported[0] = 0;
	bp->port.supported[1] = 0;
	switch (bp->link_params.num_phys) {
	case 1:
		bp->port.supported[0] = bp->link_params.phy[INT_PHY].supported;
		cfg_size = 1;
		break;
	case 2:
		bp->port.supported[0] = bp->link_params.phy[EXT_PHY1].supported;
		cfg_size = 1;
		break;
	case 3:
		/* Dual external PHYs: configuration order follows the
		 * PHY-swap setting.
		 */
		if (bp->link_params.multi_phy_config &
		    PORT_HW_CFG_PHY_SWAPPED_ENABLED) {
			bp->port.supported[1] =
				bp->link_params.phy[EXT_PHY1].supported;
			bp->port.supported[0] =
				bp->link_params.phy[EXT_PHY2].supported;
		} else {
			bp->port.supported[0] =
				bp->link_params.phy[EXT_PHY1].supported;
			bp->port.supported[1] =
				bp->link_params.phy[EXT_PHY2].supported;
		}
		cfg_size = 2;
		break;
	}

	if (!(bp->port.supported[0] || bp->port.supported[1])) {
		BNX2X_ERR("NVRAM config error. BAD phy config."
			  "PHY1 config 0x%x, PHY2 config 0x%x\n",
			   SHMEM_RD(bp,
			   dev_info.port_hw_config[port].external_phy_config),
			   SHMEM_RD(bp,
			   dev_info.port_hw_config[port].external_phy_config2));
		return;
	}

	switch (switch_cfg) {
	case SWITCH_CFG_1G:
		bp->port.phy_addr = REG_RD(bp, NIG_REG_SERDES0_CTRL_PHY_ADDR +
					   port*0x10);
		BNX2X_DEV_INFO("phy_addr 0x%x\n", bp->port.phy_addr);
		break;

	case SWITCH_CFG_10G:
		bp->port.phy_addr = REG_RD(bp, NIG_REG_XGXS0_CTRL_PHY_ADDR +
					   port*0x18);
		BNX2X_DEV_INFO("phy_addr 0x%x\n", bp->port.phy_addr);
		break;

	default:
		BNX2X_ERR("BAD switch_cfg link_config 0x%x\n",
			  bp->port.link_config[0]);
		return;
	}
	/* mask what we support according to speed_cap_mask per configuration */
	for (idx = 0; idx < cfg_size; idx++) {
		if (!(bp->link_params.speed_cap_mask[idx] &
				PORT_HW_CFG_SPEED_CAPABILITY_D0_10M_HALF))
			bp->port.supported[idx] &= ~SUPPORTED_10baseT_Half;

		if (!(bp->link_params.speed_cap_mask[idx] &
				PORT_HW_CFG_SPEED_CAPABILITY_D0_10M_FULL))
			bp->port.supported[idx] &= ~SUPPORTED_10baseT_Full;

		if (!(bp->link_params.speed_cap_mask[idx] &
				PORT_HW_CFG_SPEED_CAPABILITY_D0_100M_HALF))
			bp->port.supported[idx] &= ~SUPPORTED_100baseT_Half;

		if (!(bp->link_params.speed_cap_mask[idx] &
				PORT_HW_CFG_SPEED_CAPABILITY_D0_100M_FULL))
			bp->port.supported[idx] &= ~SUPPORTED_100baseT_Full;

		if (!(bp->link_params.speed_cap_mask[idx] &
					PORT_HW_CFG_SPEED_CAPABILITY_D0_1G))
			bp->port.supported[idx] &= ~(SUPPORTED_1000baseT_Half |
						     SUPPORTED_1000baseT_Full);

		if (!(bp->link_params.speed_cap_mask[idx] &
					PORT_HW_CFG_SPEED_CAPABILITY_D0_2_5G))
			bp->port.supported[idx] &= ~SUPPORTED_2500baseX_Full;

		if (!(bp->link_params.speed_cap_mask[idx] &
					PORT_HW_CFG_SPEED_CAPABILITY_D0_10G))
			bp->port.supported[idx] &= ~SUPPORTED_10000baseT_Full;

	}

	BNX2X_DEV_INFO("supported 0x%x 0x%x\n", bp->port.supported[0],
		       bp->port.supported[1]);
}
7187
/* Translate the NVRAM requested link configuration of each PHY
 * configuration into link_params (req_line_speed, req_duplex,
 * req_flow_ctrl) and bp->port.advertising.  Requests that are not
 * backed by the supported[] mask built earlier are treated as NVRAM
 * configuration errors and abort the parsing.
 */
static void __devinit bnx2x_link_settings_requested(struct bnx2x *bp)
{
	u32 link_config, idx, cfg_size = 0;
	bp->port.advertising[0] = 0;
	bp->port.advertising[1] = 0;
	switch (bp->link_params.num_phys) {
	case 1:
	case 2:
		cfg_size = 1;
		break;
	case 3:
		cfg_size = 2;
		break;
	}
	for (idx = 0; idx < cfg_size; idx++) {
		bp->link_params.req_duplex[idx] = DUPLEX_FULL;
		link_config = bp->port.link_config[idx];
		switch (link_config & PORT_FEATURE_LINK_SPEED_MASK) {
		case PORT_FEATURE_LINK_SPEED_AUTO:
			if (bp->port.supported[idx] & SUPPORTED_Autoneg) {
				bp->link_params.req_line_speed[idx] =
					SPEED_AUTO_NEG;
				bp->port.advertising[idx] |=
					bp->port.supported[idx];
			} else {
				/* force 10G, no AN */
				bp->link_params.req_line_speed[idx] =
					SPEED_10000;
				bp->port.advertising[idx] |=
					(ADVERTISED_10000baseT_Full |
					 ADVERTISED_FIBRE);
				continue;
			}
			break;

		case PORT_FEATURE_LINK_SPEED_10M_FULL:
			if (bp->port.supported[idx] & SUPPORTED_10baseT_Full) {
				bp->link_params.req_line_speed[idx] =
					SPEED_10;
				bp->port.advertising[idx] |=
					(ADVERTISED_10baseT_Full |
					 ADVERTISED_TP);
			} else {
				BNX2X_ERROR("NVRAM config error. "
					    "Invalid link_config 0x%x"
					    " speed_cap_mask 0x%x\n",
					    link_config,
				    bp->link_params.speed_cap_mask[idx]);
				return;
			}
			break;

		case PORT_FEATURE_LINK_SPEED_10M_HALF:
			if (bp->port.supported[idx] & SUPPORTED_10baseT_Half) {
				bp->link_params.req_line_speed[idx] =
					SPEED_10;
				bp->link_params.req_duplex[idx] =
					DUPLEX_HALF;
				bp->port.advertising[idx] |=
					(ADVERTISED_10baseT_Half |
					 ADVERTISED_TP);
			} else {
				BNX2X_ERROR("NVRAM config error. "
					    "Invalid link_config 0x%x"
					    " speed_cap_mask 0x%x\n",
					    link_config,
					    bp->link_params.speed_cap_mask[idx]);
				return;
			}
			break;

		case PORT_FEATURE_LINK_SPEED_100M_FULL:
			if (bp->port.supported[idx] &
			    SUPPORTED_100baseT_Full) {
				bp->link_params.req_line_speed[idx] =
					SPEED_100;
				bp->port.advertising[idx] |=
					(ADVERTISED_100baseT_Full |
					 ADVERTISED_TP);
			} else {
				BNX2X_ERROR("NVRAM config error. "
					    "Invalid link_config 0x%x"
					    " speed_cap_mask 0x%x\n",
					    link_config,
					    bp->link_params.speed_cap_mask[idx]);
				return;
			}
			break;

		case PORT_FEATURE_LINK_SPEED_100M_HALF:
			if (bp->port.supported[idx] &
			    SUPPORTED_100baseT_Half) {
				bp->link_params.req_line_speed[idx] =
					SPEED_100;
				bp->link_params.req_duplex[idx] =
					DUPLEX_HALF;
				bp->port.advertising[idx] |=
					(ADVERTISED_100baseT_Half |
					 ADVERTISED_TP);
			} else {
				BNX2X_ERROR("NVRAM config error. "
					    "Invalid link_config 0x%x"
					    " speed_cap_mask 0x%x\n",
					    link_config,
					    bp->link_params.speed_cap_mask[idx]);
				return;
			}
			break;

		case PORT_FEATURE_LINK_SPEED_1G:
			if (bp->port.supported[idx] &
			    SUPPORTED_1000baseT_Full) {
				bp->link_params.req_line_speed[idx] =
					SPEED_1000;
				bp->port.advertising[idx] |=
					(ADVERTISED_1000baseT_Full |
					 ADVERTISED_TP);
			} else {
				BNX2X_ERROR("NVRAM config error. "
					    "Invalid link_config 0x%x"
					    " speed_cap_mask 0x%x\n",
					    link_config,
					    bp->link_params.speed_cap_mask[idx]);
				return;
			}
			break;

		case PORT_FEATURE_LINK_SPEED_2_5G:
			if (bp->port.supported[idx] &
			    SUPPORTED_2500baseX_Full) {
				bp->link_params.req_line_speed[idx] =
					SPEED_2500;
				bp->port.advertising[idx] |=
					(ADVERTISED_2500baseX_Full |
					 ADVERTISED_TP);
			} else {
				BNX2X_ERROR("NVRAM config error. "
					    "Invalid link_config 0x%x"
					    " speed_cap_mask 0x%x\n",
					    link_config,
					    bp->link_params.speed_cap_mask[idx]);
				return;
			}
			break;

		case PORT_FEATURE_LINK_SPEED_10G_CX4:
		case PORT_FEATURE_LINK_SPEED_10G_KX4:
		case PORT_FEATURE_LINK_SPEED_10G_KR:
			if (bp->port.supported[idx] &
			    SUPPORTED_10000baseT_Full) {
				bp->link_params.req_line_speed[idx] =
					SPEED_10000;
				bp->port.advertising[idx] |=
					(ADVERTISED_10000baseT_Full |
					 ADVERTISED_FIBRE);
			} else {
				BNX2X_ERROR("NVRAM config error. "
					    "Invalid link_config 0x%x"
					    " speed_cap_mask 0x%x\n",
					    link_config,
					    bp->link_params.speed_cap_mask[idx]);
				return;
			}
			break;

		default:
			/* Unknown speed request - fall back to autoneg
			 * with everything the HW supports.
			 */
			BNX2X_ERROR("NVRAM config error. "
				    "BAD link speed link_config 0x%x\n",
				    link_config);
			bp->link_params.req_line_speed[idx] =
						SPEED_AUTO_NEG;
			bp->port.advertising[idx] =
						bp->port.supported[idx];
			break;
		}

		bp->link_params.req_flow_ctrl[idx] = (link_config &
					 PORT_FEATURE_FLOW_CONTROL_MASK);
		/* Flow-control autoneg is meaningless without link
		 * autoneg support - force it off in that case.
		 */
		if ((bp->link_params.req_flow_ctrl[idx] ==
		     BNX2X_FLOW_CTRL_AUTO) &&
		    !(bp->port.supported[idx] & SUPPORTED_Autoneg)) {
			bp->link_params.req_flow_ctrl[idx] =
				BNX2X_FLOW_CTRL_NONE;
		}

		BNX2X_DEV_INFO("req_line_speed %d req_duplex %d req_flow_ctrl"
			       " 0x%x advertising 0x%x\n",
			       bp->link_params.req_line_speed[idx],
			       bp->link_params.req_duplex[idx],
			       bp->link_params.req_flow_ctrl[idx],
			       bp->port.advertising[idx]);
	}
}
7381
Michael Chane665bfd2009-10-10 13:46:54 +00007382static void __devinit bnx2x_set_mac_buf(u8 *mac_buf, u32 mac_lo, u16 mac_hi)
7383{
7384 mac_hi = cpu_to_be16(mac_hi);
7385 mac_lo = cpu_to_be32(mac_lo);
7386 memcpy(mac_buf, &mac_hi, sizeof(mac_hi));
7387 memcpy(mac_buf + sizeof(mac_hi), &mac_lo, sizeof(mac_lo));
7388}
7389
/* Read per-port link configuration from the shared-memory (NVRAM-backed)
 * region and initialize bp->link_params / bp->port accordingly: lane
 * config, speed capability masks, link configs, multi-PHY config, WoL
 * default, switch config, and the MDIO PHY address used by the mdio ops.
 */
static void __devinit bnx2x_get_port_hwinfo(struct bnx2x *bp)
{
	int port = BP_PORT(bp);
	u32 config;
	u32 ext_phy_type, ext_phy_config;

	bp->link_params.bp = bp;
	bp->link_params.port = port;

	bp->link_params.lane_config =
		SHMEM_RD(bp, dev_info.port_hw_config[port].lane_config);

	/* Speed capability masks and link configs come in pairs:
	 * index 0 for the first PHY, index 1 for the second PHY
	 * (dual-PHY configurations).
	 */
	bp->link_params.speed_cap_mask[0] =
		SHMEM_RD(bp,
			 dev_info.port_hw_config[port].speed_capability_mask);
	bp->link_params.speed_cap_mask[1] =
		SHMEM_RD(bp,
			 dev_info.port_hw_config[port].speed_capability_mask2);
	bp->port.link_config[0] =
		SHMEM_RD(bp, dev_info.port_feature_config[port].link_config);

	bp->port.link_config[1] =
		SHMEM_RD(bp, dev_info.port_feature_config[port].link_config2);

	bp->link_params.multi_phy_config =
		SHMEM_RD(bp, dev_info.port_hw_config[port].multi_phy_config);
	/* If the device is capable of WoL, set the default state according
	 * to the HW
	 */
	config = SHMEM_RD(bp, dev_info.port_feature_config[port].config);
	bp->wol = (!(bp->flags & NO_WOL_FLAG) &&
		   (config & PORT_FEATURE_WOL_ENABLED));

	BNX2X_DEV_INFO("lane_config 0x%08x "
		       "speed_cap_mask0 0x%08x link_config0 0x%08x\n",
		       bp->link_params.lane_config,
		       bp->link_params.speed_cap_mask[0],
		       bp->port.link_config[0]);

	bp->link_params.switch_cfg = (bp->port.link_config[0] &
				      PORT_FEATURE_CONNECTED_SWITCH_MASK);
	/* Probe PHYs before querying supported/requested link settings:
	 * the two calls below consume what bnx2x_phy_probe() discovered.
	 */
	bnx2x_phy_probe(&bp->link_params);
	bnx2x_link_settings_supported(bp, bp->link_params.switch_cfg);

	bnx2x_link_settings_requested(bp);

	/*
	 * If connected directly, work with the internal PHY, otherwise, work
	 * with the external PHY
	 */
	ext_phy_config =
		SHMEM_RD(bp,
			 dev_info.port_hw_config[port].external_phy_config);
	ext_phy_type = XGXS_EXT_PHY_TYPE(ext_phy_config);
	if (ext_phy_type == PORT_HW_CFG_XGXS_EXT_PHY_TYPE_DIRECT)
		bp->mdio.prtad = bp->port.phy_addr;

	else if ((ext_phy_type != PORT_HW_CFG_XGXS_EXT_PHY_TYPE_FAILURE) &&
		 (ext_phy_type != PORT_HW_CFG_XGXS_EXT_PHY_TYPE_NOT_CONN))
		bp->mdio.prtad =
			XGXS_EXT_PHY_ADDR(ext_phy_config);
	/* NOTE(review): on FAILURE/NOT_CONN, mdio.prtad is left as-is
	 * (presumably its zero/default from probe) — confirm callers cope.
	 */

	/*
	 * Check if hw lock is required to access MDC/MDIO bus to the PHY(s)
	 * In MF mode, it is set to cover self test cases
	 */
	if (IS_MF(bp))
		bp->port.need_hw_lock = 1;
	else
		bp->port.need_hw_lock = bnx2x_hw_lock_required(bp,
					bp->common.shmem_base,
					bp->common.shmem2_base);
}
Eilon Greenstein01cd4522009-08-12 08:23:08 +00007463
Vladislav Zolotarov2ba45142011-01-31 14:39:17 +00007464#ifdef BCM_CNIC
/* Read the licensed maximum iSCSI/FCoE connection counts from shared
 * memory and disable the corresponding feature (via bp->flags) when a
 * count is zero. The values in shmem are XOR-obfuscated with
 * FW_ENCODE_32BIT_PATTERN.
 */
static void __devinit bnx2x_get_cnic_info(struct bnx2x *bp)
{
	u32 max_iscsi_conn = FW_ENCODE_32BIT_PATTERN ^ SHMEM_RD(bp,
				drv_lic_key[BP_PORT(bp)].max_iscsi_conn);
	u32 max_fcoe_conn = FW_ENCODE_32BIT_PATTERN ^ SHMEM_RD(bp,
				drv_lic_key[BP_PORT(bp)].max_fcoe_conn);

	/* Get the number of maximum allowed iSCSI and FCoE connections */
	bp->cnic_eth_dev.max_iscsi_conn =
		(max_iscsi_conn & BNX2X_MAX_ISCSI_INIT_CONN_MASK) >>
		BNX2X_MAX_ISCSI_INIT_CONN_SHIFT;

	bp->cnic_eth_dev.max_fcoe_conn =
		(max_fcoe_conn & BNX2X_MAX_FCOE_INIT_CONN_MASK) >>
		BNX2X_MAX_FCOE_INIT_CONN_SHIFT;

	BNX2X_DEV_INFO("max_iscsi_conn 0x%x max_fcoe_conn 0x%x\n",
		       bp->cnic_eth_dev.max_iscsi_conn,
		       bp->cnic_eth_dev.max_fcoe_conn);

	/* If maximum allowed number of connections is zero -
	 * disable the feature.
	 */
	if (!bp->cnic_eth_dev.max_iscsi_conn)
		bp->flags |= NO_ISCSI_OOO_FLAG | NO_ISCSI_FLAG;

	if (!bp->cnic_eth_dev.max_fcoe_conn)
		bp->flags |= NO_FCOE_FLAG;
}
7494#endif
7495
Dmitry Kravkov0793f83f2010-12-01 12:39:28 -08007496static void __devinit bnx2x_get_mac_hwinfo(struct bnx2x *bp)
7497{
7498 u32 val, val2;
7499 int func = BP_ABS_FUNC(bp);
7500 int port = BP_PORT(bp);
Vladislav Zolotarov2ba45142011-01-31 14:39:17 +00007501#ifdef BCM_CNIC
7502 u8 *iscsi_mac = bp->cnic_eth_dev.iscsi_mac;
7503 u8 *fip_mac = bp->fip_mac;
7504#endif
Dmitry Kravkov0793f83f2010-12-01 12:39:28 -08007505
7506 if (BP_NOMCP(bp)) {
7507 BNX2X_ERROR("warning: random MAC workaround active\n");
7508 random_ether_addr(bp->dev->dev_addr);
7509 } else if (IS_MF(bp)) {
7510 val2 = MF_CFG_RD(bp, func_mf_config[func].mac_upper);
7511 val = MF_CFG_RD(bp, func_mf_config[func].mac_lower);
7512 if ((val2 != FUNC_MF_CFG_UPPERMAC_DEFAULT) &&
7513 (val != FUNC_MF_CFG_LOWERMAC_DEFAULT))
7514 bnx2x_set_mac_buf(bp->dev->dev_addr, val, val2);
7515
7516#ifdef BCM_CNIC
Vladislav Zolotarov2ba45142011-01-31 14:39:17 +00007517 /* iSCSI and FCoE NPAR MACs: if there is no either iSCSI or
7518 * FCoE MAC then the appropriate feature should be disabled.
7519 */
Dmitry Kravkov0793f83f2010-12-01 12:39:28 -08007520 if (IS_MF_SI(bp)) {
7521 u32 cfg = MF_CFG_RD(bp, func_ext_config[func].func_cfg);
7522 if (cfg & MACP_FUNC_CFG_FLAGS_ISCSI_OFFLOAD) {
7523 val2 = MF_CFG_RD(bp, func_ext_config[func].
7524 iscsi_mac_addr_upper);
7525 val = MF_CFG_RD(bp, func_ext_config[func].
7526 iscsi_mac_addr_lower);
Vladislav Zolotarov2ba45142011-01-31 14:39:17 +00007527 BNX2X_DEV_INFO("Read iSCSI MAC: "
7528 "0x%x:0x%04x\n", val2, val);
7529 bnx2x_set_mac_buf(iscsi_mac, val, val2);
Vladislav Zolotarov2ba45142011-01-31 14:39:17 +00007530 } else
7531 bp->flags |= NO_ISCSI_OOO_FLAG | NO_ISCSI_FLAG;
7532
7533 if (cfg & MACP_FUNC_CFG_FLAGS_FCOE_OFFLOAD) {
7534 val2 = MF_CFG_RD(bp, func_ext_config[func].
7535 fcoe_mac_addr_upper);
7536 val = MF_CFG_RD(bp, func_ext_config[func].
7537 fcoe_mac_addr_lower);
7538 BNX2X_DEV_INFO("Read FCoE MAC to "
7539 "0x%x:0x%04x\n", val2, val);
7540 bnx2x_set_mac_buf(fip_mac, val, val2);
7541
Vladislav Zolotarov2ba45142011-01-31 14:39:17 +00007542 } else
7543 bp->flags |= NO_FCOE_FLAG;
Dmitry Kravkov0793f83f2010-12-01 12:39:28 -08007544 }
7545#endif
7546 } else {
7547 /* in SF read MACs from port configuration */
7548 val2 = SHMEM_RD(bp, dev_info.port_hw_config[port].mac_upper);
7549 val = SHMEM_RD(bp, dev_info.port_hw_config[port].mac_lower);
7550 bnx2x_set_mac_buf(bp->dev->dev_addr, val, val2);
7551
7552#ifdef BCM_CNIC
7553 val2 = SHMEM_RD(bp, dev_info.port_hw_config[port].
7554 iscsi_mac_upper);
7555 val = SHMEM_RD(bp, dev_info.port_hw_config[port].
7556 iscsi_mac_lower);
Vladislav Zolotarov2ba45142011-01-31 14:39:17 +00007557 bnx2x_set_mac_buf(iscsi_mac, val, val2);
Dmitry Kravkov0793f83f2010-12-01 12:39:28 -08007558#endif
7559 }
7560
Yaniv Rosnerc18487e2008-06-23 20:27:52 -07007561 memcpy(bp->link_params.mac_addr, bp->dev->dev_addr, ETH_ALEN);
7562 memcpy(bp->dev->perm_addr, bp->dev->dev_addr, ETH_ALEN);
Michael Chan37b091b2009-10-10 13:46:55 +00007563
Vladislav Zolotarovec6ba942010-12-13 05:44:01 +00007564#ifdef BCM_CNIC
Vladislav Zolotarov2ba45142011-01-31 14:39:17 +00007565 /* Set the FCoE MAC in modes other then MF_SI */
Vladislav Zolotarovec6ba942010-12-13 05:44:01 +00007566 if (!CHIP_IS_E1x(bp)) {
7567 if (IS_MF_SD(bp))
Vladislav Zolotarov2ba45142011-01-31 14:39:17 +00007568 memcpy(fip_mac, bp->dev->dev_addr, ETH_ALEN);
7569 else if (!IS_MF(bp))
7570 memcpy(fip_mac, iscsi_mac, ETH_ALEN);
Vladislav Zolotarovec6ba942010-12-13 05:44:01 +00007571 }
Dmitry Kravkov426b9242011-05-04 23:49:53 +00007572
7573 /* Disable iSCSI if MAC configuration is
7574 * invalid.
7575 */
7576 if (!is_valid_ether_addr(iscsi_mac)) {
7577 bp->flags |= NO_ISCSI_FLAG;
7578 memset(iscsi_mac, 0, ETH_ALEN);
7579 }
7580
7581 /* Disable FCoE if MAC configuration is
7582 * invalid.
7583 */
7584 if (!is_valid_ether_addr(fip_mac)) {
7585 bp->flags |= NO_FCOE_FLAG;
7586 memset(bp->fip_mac, 0, ETH_ALEN);
7587 }
Vladislav Zolotarovec6ba942010-12-13 05:44:01 +00007588#endif
Eilon Greenstein34f80b02008-06-23 20:33:01 -07007589}
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02007590
/* Top-level NVRAM/shmem hardware-info gathering: common info, interrupt
 * block (HC for E1x, IGU for newer chips), multi-function (MF) mode
 * decoding, status-block count adjustments, per-port info, and MAC
 * addresses. Returns 0 on success or -EPERM on invalid MF configuration.
 */
static int __devinit bnx2x_get_hwinfo(struct bnx2x *bp)
{
	int /*abs*/func = BP_ABS_FUNC(bp);
	int vn;
	u32 val = 0;
	int rc = 0;

	bnx2x_get_common_hwinfo(bp);

	if (CHIP_IS_E1x(bp)) {
		/* E1/E1H use the HC interrupt block */
		bp->common.int_block = INT_BLOCK_HC;

		bp->igu_dsb_id = DEF_SB_IGU_ID;
		bp->igu_base_sb = 0;
		bp->igu_sb_cnt = min_t(u8, FP_SB_MAX_E1x,
				       NUM_IGU_SB_REQUIRED(bp->l2_cid_count));
	} else {
		/* Newer chips use the IGU; detect backward-compatible mode */
		bp->common.int_block = INT_BLOCK_IGU;
		val = REG_RD(bp, IGU_REG_BLOCK_CONFIGURATION);
		if (val & IGU_BLOCK_CONFIGURATION_REG_BACKWARD_COMP_EN) {
			DP(NETIF_MSG_PROBE, "IGU Backward Compatible Mode\n");
			bp->common.int_block |= INT_BLOCK_MODE_BW_COMP;
		} else
			DP(NETIF_MSG_PROBE, "IGU Normal Mode\n");

		bnx2x_get_igu_cam_info(bp);

	}
	DP(NETIF_MSG_PROBE, "igu_dsb_id %d igu_base_sb %d igu_sb_cnt %d\n",
	   bp->igu_dsb_id, bp->igu_base_sb, bp->igu_sb_cnt);

	/*
	 * Initialize MF configuration
	 */

	bp->mf_ov = 0;
	bp->mf_mode = 0;
	vn = BP_E1HVN(bp);

	if (!CHIP_IS_E1(bp) && !BP_NOMCP(bp)) {
		DP(NETIF_MSG_PROBE,
			"shmem2base 0x%x, size %d, mfcfg offset %d\n",
			bp->common.shmem2_base, SHMEM2_RD(bp, size),
			(u32)offsetof(struct shmem2_region, mf_cfg_addr));
		/* Locate the MF config: prefer the shmem2-provided address,
		 * otherwise fall back to the legacy location past func_mb.
		 */
		if (SHMEM2_HAS(bp, mf_cfg_addr))
			bp->common.mf_cfg_base = SHMEM2_RD(bp, mf_cfg_addr);
		else
			bp->common.mf_cfg_base = bp->common.shmem_base +
				offsetof(struct shmem_region, func_mb) +
				E1H_FUNC_MAX * sizeof(struct drv_func_mb);
		/*
		 * get mf configuration:
		 * 1. existence of MF configuration
		 * 2. MAC address must be legal (check only upper bytes)
		 *    for Switch-Independent mode;
		 *    OVLAN must be legal for Switch-Dependent mode
		 * 3. SF_MODE configures specific MF mode
		 */
		if (bp->common.mf_cfg_base != SHMEM_MF_CFG_ADDR_NONE) {
			/* get mf configuration */
			val = SHMEM_RD(bp,
				       dev_info.shared_feature_config.config);
			val &= SHARED_FEAT_CFG_FORCE_SF_MODE_MASK;

			switch (val) {
			case SHARED_FEAT_CFG_FORCE_SF_MODE_SWITCH_INDEPT:
				val = MF_CFG_RD(bp, func_mf_config[func].
						mac_upper);
				/* check for legal mac (upper bytes)*/
				if (val != 0xffff) {
					bp->mf_mode = MULTI_FUNCTION_SI;
					bp->mf_config[vn] = MF_CFG_RD(bp,
						func_mf_config[func].config);
				} else
					DP(NETIF_MSG_PROBE, "illegal MAC "
						"address for SI\n");
				break;
			case SHARED_FEAT_CFG_FORCE_SF_MODE_MF_ALLOWED:
				/* get OV configuration */
				val = MF_CFG_RD(bp,
					func_mf_config[FUNC_0].e1hov_tag);
				val &= FUNC_MF_CFG_E1HOV_TAG_MASK;

				if (val != FUNC_MF_CFG_E1HOV_TAG_DEFAULT) {
					bp->mf_mode = MULTI_FUNCTION_SD;
					bp->mf_config[vn] = MF_CFG_RD(bp,
						func_mf_config[func].config);
				} else
					DP(NETIF_MSG_PROBE, "illegal OV for "
						"SD\n");
				break;
			default:
				/* Unknown configuration: reset mf_config */
				bp->mf_config[vn] = 0;
				DP(NETIF_MSG_PROBE, "Unknown MF mode 0x%x\n",
				   val);
			}
		}

		BNX2X_DEV_INFO("%s function mode\n",
			       IS_MF(bp) ? "multi" : "single");

		/* Validate the decoded MF mode; invalid setups abort probe */
		switch (bp->mf_mode) {
		case MULTI_FUNCTION_SD:
			val = MF_CFG_RD(bp, func_mf_config[func].e1hov_tag) &
			      FUNC_MF_CFG_E1HOV_TAG_MASK;
			if (val != FUNC_MF_CFG_E1HOV_TAG_DEFAULT) {
				bp->mf_ov = val;
				BNX2X_DEV_INFO("MF OV for func %d is %d"
					       " (0x%04x)\n", func,
					       bp->mf_ov, bp->mf_ov);
			} else {
				BNX2X_ERR("No valid MF OV for func %d,"
					  " aborting\n", func);
				rc = -EPERM;
			}
			break;
		case MULTI_FUNCTION_SI:
			BNX2X_DEV_INFO("func %d is in MF "
				       "switch-independent mode\n", func);
			break;
		default:
			/* single-function: only VN 0 is legal */
			if (vn) {
				BNX2X_ERR("VN %d in single function mode,"
					  " aborting\n", vn);
				rc = -EPERM;
			}
			break;
		}

	}

	/* adjust igu_sb_cnt to MF for E1x */
	if (CHIP_IS_E1x(bp) && IS_MF(bp))
		bp->igu_sb_cnt /= E1HVN_MAX;

	/*
	 * adjust E2 sb count: to be removed when FW will support
	 * more then 16 L2 clients
	 */
#define MAX_L2_CLIENTS 16
	if (CHIP_IS_E2(bp))
		bp->igu_sb_cnt = min_t(u8, bp->igu_sb_cnt,
				       MAX_L2_CLIENTS / (IS_MF(bp) ? 4 : 1));

	if (!BP_NOMCP(bp)) {
		bnx2x_get_port_hwinfo(bp);

		/* Snapshot the driver/MCP mailbox sequence number */
		bp->fw_seq =
			(SHMEM_RD(bp, func_mb[BP_FW_MB_IDX(bp)].drv_mb_header) &
			 DRV_MSG_SEQ_NUMBER_MASK);
		BNX2X_DEV_INFO("fw_seq 0x%08x\n", bp->fw_seq);
	}

	/* Get MAC addresses */
	bnx2x_get_mac_hwinfo(bp);

#ifdef BCM_CNIC
	bnx2x_get_cnic_info(bp);
#endif

	return rc;
}
7754
/* Parse the PCI Vital Product Data (VPD) for a vendor-specific firmware
 * version string and store it in bp->fw_ver. On any parse failure
 * bp->fw_ver is simply left zeroed (best effort, no error reported).
 */
static void __devinit bnx2x_read_fwinfo(struct bnx2x *bp)
{
	int cnt, i, block_end, rodi;
	char vpd_data[BNX2X_VPD_LEN+1];
	char str_id_reg[VENDOR_ID_LEN+1];
	char str_id_cap[VENDOR_ID_LEN+1];
	u8 len;

	cnt = pci_read_vpd(bp->pdev, 0, BNX2X_VPD_LEN, vpd_data);
	memset(bp->fw_ver, 0, sizeof(bp->fw_ver));

	/* Require a full VPD read; partial data is not parsed */
	if (cnt < BNX2X_VPD_LEN)
		goto out_not_found;

	/* Locate the Read-Only data resource */
	i = pci_vpd_find_tag(vpd_data, 0, BNX2X_VPD_LEN,
			     PCI_VPD_LRDT_RO_DATA);
	if (i < 0)
		goto out_not_found;


	block_end = i + PCI_VPD_LRDT_TAG_SIZE +
		    pci_vpd_lrdt_size(&vpd_data[i]);

	i += PCI_VPD_LRDT_TAG_SIZE;

	if (block_end > BNX2X_VPD_LEN)
		goto out_not_found;

	/* Find the manufacturer ID keyword within the RO block */
	rodi = pci_vpd_find_info_keyword(vpd_data, i, block_end,
				   PCI_VPD_RO_KEYWORD_MFR_ID);
	if (rodi < 0)
		goto out_not_found;

	len = pci_vpd_info_field_size(&vpd_data[rodi]);

	if (len != VENDOR_ID_LEN)
		goto out_not_found;

	rodi += PCI_VPD_INFO_FLD_HDR_SIZE;

	/* vendor specific info */
	snprintf(str_id_reg, VENDOR_ID_LEN + 1, "%04x", PCI_VENDOR_ID_DELL);
	snprintf(str_id_cap, VENDOR_ID_LEN + 1, "%04X", PCI_VENDOR_ID_DELL);
	/* Compare against both lower- and upper-case hex renderings */
	if (!strncmp(str_id_reg, &vpd_data[rodi], VENDOR_ID_LEN) ||
	    !strncmp(str_id_cap, &vpd_data[rodi], VENDOR_ID_LEN)) {

		rodi = pci_vpd_find_info_keyword(vpd_data, i, block_end,
						PCI_VPD_RO_KEYWORD_VENDOR0);
		if (rodi >= 0) {
			len = pci_vpd_info_field_size(&vpd_data[rodi]);

			rodi += PCI_VPD_INFO_FLD_HDR_SIZE;

			/* Bounds-check before copying into bp->fw_ver */
			if (len < 32 && (len + rodi) <= BNX2X_VPD_LEN) {
				memcpy(bp->fw_ver, &vpd_data[rodi], len);
				bp->fw_ver[len] = ' ';
			}
		}
		return;
	}
out_not_found:
	return;
}
7818
/* One-time driver-private state initialization at probe time: locks,
 * work items, HW info, module-parameter-derived settings (TPA, dropless
 * flow control, MRRS), coalescing ticks, the periodic timer, and DCBX.
 * Returns 0 on success or a negative errno from HW-info/alloc failures.
 */
static int __devinit bnx2x_init_bp(struct bnx2x *bp)
{
	int func;
	int timer_interval;
	int rc;

	mutex_init(&bp->port.phy_mutex);
	mutex_init(&bp->fw_mb_mutex);
	spin_lock_init(&bp->stats_lock);
#ifdef BCM_CNIC
	mutex_init(&bp->cnic_mutex);
#endif

	INIT_DELAYED_WORK(&bp->sp_task, bnx2x_sp_task);
	INIT_DELAYED_WORK(&bp->reset_task, bnx2x_reset_task);

	rc = bnx2x_get_hwinfo(bp);

	if (!rc)
		rc = bnx2x_alloc_mem_bp(bp);

	/* best effort; fw_ver stays zeroed if VPD parsing fails */
	bnx2x_read_fwinfo(bp);

	func = BP_FUNC(bp);

	/* need to reset chip if undi was active */
	if (!BP_NOMCP(bp))
		bnx2x_undi_unload(bp);

	if (CHIP_REV_IS_FPGA(bp))
		dev_err(&bp->pdev->dev, "FPGA detected\n");

	if (BP_NOMCP(bp) && (func == 0))
		dev_err(&bp->pdev->dev, "MCP disabled, "
					"must load devices in order!\n");

	bp->multi_mode = multi_mode;

	/* Set TPA flags */
	if (disable_tpa) {
		bp->flags &= ~TPA_ENABLE_FLAG;
		bp->dev->features &= ~NETIF_F_LRO;
	} else {
		bp->flags |= TPA_ENABLE_FLAG;
		bp->dev->features |= NETIF_F_LRO;
	}
	bp->disable_tpa = disable_tpa;

	/* E1 does not support dropless flow control */
	if (CHIP_IS_E1(bp))
		bp->dropless_fc = 0;
	else
		bp->dropless_fc = dropless_fc;

	bp->mrrs = mrrs;

	bp->tx_ring_size = MAX_TX_AVAIL;

	/* make sure that the numbers are in the right granularity */
	bp->tx_ticks = (50 / BNX2X_BTR) * BNX2X_BTR;
	bp->rx_ticks = (25 / BNX2X_BTR) * BNX2X_BTR;

	/* poll module parameter (if set) overrides the timer interval */
	timer_interval = (CHIP_REV_IS_SLOW(bp) ? 5*HZ : HZ);
	bp->current_interval = (poll ? poll : timer_interval);

	init_timer(&bp->timer);
	bp->timer.expires = jiffies + bp->current_interval;
	bp->timer.data = (unsigned long) bp;
	bp->timer.function = bnx2x_timer;

	bnx2x_dcbx_set_state(bp, true, BNX2X_DCBX_ENABLED_ON_NEG_ON);
	bnx2x_dcbx_init_params(bp);

	return rc;
}
7893
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02007894
Dmitry Kravkovde0c62d2010-07-27 12:35:24 +00007895/****************************************************************************
7896* General service functions
7897****************************************************************************/
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02007898
/* ndo_open handler: power the device up, run the error-recovery flow if
 * a previous reset has not completed, then load the NIC.
 * called with rtnl_lock
 */
static int bnx2x_open(struct net_device *dev)
{
	struct bnx2x *bp = netdev_priv(dev);

	netif_carrier_off(dev);

	bnx2x_set_power_state(bp, PCI_D0);

	if (!bnx2x_reset_is_done(bp)) {
		/* do { } while (0) used only as a break-out scope */
		do {
			/* Reset MCP mail box sequence if there is on going
			 * recovery
			 */
			bp->fw_seq = 0;

			/* If this is the first function to load while reset
			 * is still not cleared, attempt the "process kill"
			 * recovery as leader. The attention state is not
			 * checked here because it may have already been
			 * cleared by a "common" reset.
			 */
			if ((bnx2x_get_load_cnt(bp) == 0) &&
				bnx2x_trylock_hw_lock(bp,
				HW_LOCK_RESOURCE_RESERVED_08) &&
				(!bnx2x_leader_reset(bp))) {
				DP(NETIF_MSG_HW, "Recovered in open\n");
				break;
			}

			/* Recovery not possible from here: power back down
			 * and ask the user to retry later.
			 */
			bnx2x_set_power_state(bp, PCI_D3hot);

			printk(KERN_ERR"%s: Recovery flow hasn't been properly"
			" completed yet. Try again later. If u still see this"
			" message after a few retries then power cycle is"
			" required.\n", bp->dev->name);

			return -EAGAIN;
		} while (0);
	}

	bp->recovery_state = BNX2X_RECOVERY_DONE;

	return bnx2x_nic_load(bp, LOAD_OPEN);
}
7944
Yitchak Gertnerbb2a0f72008-06-23 20:33:36 -07007945/* called with rtnl_lock */
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02007946static int bnx2x_close(struct net_device *dev)
7947{
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02007948 struct bnx2x *bp = netdev_priv(dev);
7949
7950 /* Unload the driver, release IRQs */
Yitchak Gertnerbb2a0f72008-06-23 20:33:36 -07007951 bnx2x_nic_unload(bp, UNLOAD_CLOSE);
Vladislav Zolotarovd3dbfee2010-04-19 01:14:49 +00007952 bnx2x_set_power_state(bp, PCI_D3hot);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02007953
7954 return 0;
7955}
7956
Vladislav Zolotarov6e30dd42011-02-06 11:25:41 -08007957#define E1_MAX_UC_LIST 29
7958#define E1H_MAX_UC_LIST 30
7959#define E2_MAX_UC_LIST 14
7960static inline u8 bnx2x_max_uc_list(struct bnx2x *bp)
7961{
7962 if (CHIP_IS_E1(bp))
7963 return E1_MAX_UC_LIST;
7964 else if (CHIP_IS_E1H(bp))
7965 return E1H_MAX_UC_LIST;
7966 else
7967 return E2_MAX_UC_LIST;
7968}
7969
7970
7971static inline u8 bnx2x_uc_list_cam_offset(struct bnx2x *bp)
7972{
7973 if (CHIP_IS_E1(bp))
7974 /* CAM Entries for Port0:
7975 * 0 - prim ETH MAC
7976 * 1 - BCAST MAC
7977 * 2 - iSCSI L2 ring ETH MAC
7978 * 3-31 - UC MACs
7979 *
7980 * Port1 entries are allocated the same way starting from
7981 * entry 32.
7982 */
7983 return 3 + 32 * BP_PORT(bp);
7984 else if (CHIP_IS_E1H(bp)) {
7985 /* CAM Entries:
7986 * 0-7 - prim ETH MAC for each function
7987 * 8-15 - iSCSI L2 ring ETH MAC for each function
7988 * 16 till 255 UC MAC lists for each function
7989 *
7990 * Remark: There is no FCoE support for E1H, thus FCoE related
7991 * MACs are not considered.
7992 */
7993 return E1H_FUNC_MAX * (CAM_ISCSI_ETH_LINE + 1) +
7994 bnx2x_max_uc_list(bp) * BP_FUNC(bp);
7995 } else {
7996 /* CAM Entries (there is a separate CAM per engine):
7997 * 0-4 - prim ETH MAC for each function
7998 * 4-7 - iSCSI L2 ring ETH MAC for each function
7999 * 8-11 - FIP ucast L2 MAC for each function
8000 * 12-15 - ALL_ENODE_MACS mcast MAC for each function
8001 * 16 till 71 UC MAC lists for each function
8002 */
8003 u8 func_idx =
8004 (CHIP_MODE_IS_4_PORT(bp) ? BP_FUNC(bp) : BP_VN(bp));
8005
8006 return E2_FUNC_MAX * (CAM_MAX_PF_LINE + 1) +
8007 bnx2x_max_uc_list(bp) * func_idx;
8008 }
8009}
8010
/* set uc list, do not wait as wait implies sleep and
 * set_rx_mode can be invoked from non-sleepable context.
 *
 * Instead we use the same ramrod data buffer each time we need
 * to configure a list of addresses, and use the fact that the
 * list of MACs is changed in an incremental way and that the
 * function is called under the netif_addr_lock. A temporary
 * inconsistent CAM configuration (possible in case of very fast
 * sequence of add/del/add on the host side) will shortly be
 * restored by the handler of the last ramrod.
 *
 * Returns 0 on success, -EINVAL if the device has more unicast
 * addresses than CAM entries available to this function.
 */
static int bnx2x_set_uc_list(struct bnx2x *bp)
{
	int i = 0, old;
	struct net_device *dev = bp->dev;
	u8 offset = bnx2x_uc_list_cam_offset(bp);
	struct netdev_hw_addr *ha;
	struct mac_configuration_cmd *config_cmd = bnx2x_sp(bp, uc_mac_config);
	dma_addr_t config_cmd_map = bnx2x_sp_mapping(bp, uc_mac_config);

	if (netdev_uc_count(dev) > bnx2x_max_uc_list(bp))
		return -EINVAL;

	netdev_for_each_uc_addr(ha, dev) {
		/* copy mac: each 16-bit half byte-swapped for the HW table */
		config_cmd->config_table[i].msb_mac_addr =
			swab16(*(u16 *)&bnx2x_uc_addr(ha)[0]);
		config_cmd->config_table[i].middle_mac_addr =
			swab16(*(u16 *)&bnx2x_uc_addr(ha)[2]);
		config_cmd->config_table[i].lsb_mac_addr =
			swab16(*(u16 *)&bnx2x_uc_addr(ha)[4]);

		config_cmd->config_table[i].vlan_id = 0;
		config_cmd->config_table[i].pf_id = BP_FUNC(bp);
		config_cmd->config_table[i].clients_bit_vector =
			cpu_to_le32(1 << BP_L_ID(bp));

		SET_FLAG(config_cmd->config_table[i].flags,
			MAC_CONFIGURATION_ENTRY_ACTION_TYPE,
			T_ETH_MAC_COMMAND_SET);

		DP(NETIF_MSG_IFUP,
		   "setting UCAST[%d] (%04x:%04x:%04x)\n", i,
		   config_cmd->config_table[i].msb_mac_addr,
		   config_cmd->config_table[i].middle_mac_addr,
		   config_cmd->config_table[i].lsb_mac_addr);

		i++;

		/* Set uc MAC in NIG */
		bnx2x_set_mac_in_nig(bp, 1, bnx2x_uc_addr(ha),
				     LLH_CAM_ETH_LINE + i);
	}
	/* Invalidate entries left over from a previous, longer list;
	 * stop at the first already-invalid entry.
	 */
	old = config_cmd->hdr.length;
	if (old > i) {
		for (; i < old; i++) {
			if (CAM_IS_INVALID(config_cmd->
					   config_table[i])) {
				/* already invalidated */
				break;
			}
			/* invalidate */
			SET_FLAG(config_cmd->config_table[i].flags,
				MAC_CONFIGURATION_ENTRY_ACTION_TYPE,
				T_ETH_MAC_COMMAND_INVALIDATE);
		}
	}

	/* ensure table entries are written before the header below */
	wmb();

	config_cmd->hdr.length = i;
	config_cmd->hdr.offset = offset;
	config_cmd->hdr.client_id = 0xff;
	/* Mark that this ramrod doesn't use bp->set_mac_pending for
	 * synchronization.
	 */
	config_cmd->hdr.echo = 0;

	mb();

	return bnx2x_sp_post(bp, RAMROD_CMD_ID_COMMON_SET_MAC, 0,
		   U64_HI(config_cmd_map), U64_LO(config_cmd_map), 1);

}
8095
/* Invalidate every unicast CAM entry owned by this function (and the
 * matching NIG LLH lines), then post a SET_MAC ramrod and wait for its
 * completion. Sleeps; must be called from a sleepable context.
 */
void bnx2x_invalidate_uc_list(struct bnx2x *bp)
{
	int i;
	struct mac_configuration_cmd *config_cmd = bnx2x_sp(bp, uc_mac_config);
	dma_addr_t config_cmd_map = bnx2x_sp_mapping(bp, uc_mac_config);
	int ramrod_flags = WAIT_RAMROD_COMMON;
	u8 offset = bnx2x_uc_list_cam_offset(bp);
	u8 max_list_size = bnx2x_max_uc_list(bp);

	for (i = 0; i < max_list_size; i++) {
		SET_FLAG(config_cmd->config_table[i].flags,
			MAC_CONFIGURATION_ENTRY_ACTION_TYPE,
			T_ETH_MAC_COMMAND_INVALIDATE);
		bnx2x_set_mac_in_nig(bp, 0, NULL, LLH_CAM_ETH_LINE + 1 + i);
	}

	/* ensure table entries are written before the header below */
	wmb();

	config_cmd->hdr.length = max_list_size;
	config_cmd->hdr.offset = offset;
	config_cmd->hdr.client_id = 0xff;
	/* We'll wait for a completion this time... */
	config_cmd->hdr.echo = 1;

	bp->set_mac_pending = 1;

	mb();

	bnx2x_sp_post(bp, RAMROD_CMD_ID_COMMON_SET_MAC, 0,
		      U64_HI(config_cmd_map), U64_LO(config_cmd_map), 1);

	/* Wait for a completion */
	bnx2x_wait_ramrod(bp, 0, 0, &bp->set_mac_pending,
			  ramrod_flags);

}
8132
/* Program the multicast filters: E1 uses its own CAM scheme,
 * E1H and newer chips use a different mechanism.
 */
static inline int bnx2x_set_mc_list(struct bnx2x *bp)
{
	/* some multicasts */
	return CHIP_IS_E1(bp) ? bnx2x_set_e1_mc_list(bp) :
				bnx2x_set_e1h_mc_list(bp);
}
8142
Eilon Greensteinf5372252009-02-12 08:38:30 +00008143/* called with netif_tx_lock from dev_mcast.c */
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00008144void bnx2x_set_rx_mode(struct net_device *dev)
Eilon Greenstein34f80b02008-06-23 20:33:01 -07008145{
8146 struct bnx2x *bp = netdev_priv(dev);
8147 u32 rx_mode = BNX2X_RX_MODE_NORMAL;
Eilon Greenstein34f80b02008-06-23 20:33:01 -07008148
8149 if (bp->state != BNX2X_STATE_OPEN) {
8150 DP(NETIF_MSG_IFUP, "state is %x, returning\n", bp->state);
8151 return;
8152 }
8153
8154 DP(NETIF_MSG_IFUP, "dev->flags = %x\n", dev->flags);
8155
8156 if (dev->flags & IFF_PROMISC)
8157 rx_mode = BNX2X_RX_MODE_PROMISC;
Vladislav Zolotarov6e30dd42011-02-06 11:25:41 -08008158 else if (dev->flags & IFF_ALLMULTI)
Eilon Greenstein34f80b02008-06-23 20:33:01 -07008159 rx_mode = BNX2X_RX_MODE_ALLMULTI;
Vladislav Zolotarov6e30dd42011-02-06 11:25:41 -08008160 else {
8161 /* some multicasts */
8162 if (bnx2x_set_mc_list(bp))
8163 rx_mode = BNX2X_RX_MODE_ALLMULTI;
Eilon Greenstein34f80b02008-06-23 20:33:01 -07008164
Vladislav Zolotarov6e30dd42011-02-06 11:25:41 -08008165 /* some unicasts */
8166 if (bnx2x_set_uc_list(bp))
8167 rx_mode = BNX2X_RX_MODE_PROMISC;
Eilon Greenstein34f80b02008-06-23 20:33:01 -07008168 }
8169
8170 bp->rx_mode = rx_mode;
8171 bnx2x_set_storm_rx_mode(bp);
8172}
8173
Yaniv Rosnerc18487e2008-06-23 20:27:52 -07008174/* called with rtnl_lock */
Eilon Greenstein01cd4522009-08-12 08:23:08 +00008175static int bnx2x_mdio_read(struct net_device *netdev, int prtad,
8176 int devad, u16 addr)
8177{
8178 struct bnx2x *bp = netdev_priv(netdev);
8179 u16 value;
8180 int rc;
Eilon Greenstein01cd4522009-08-12 08:23:08 +00008181
8182 DP(NETIF_MSG_LINK, "mdio_read: prtad 0x%x, devad 0x%x, addr 0x%x\n",
8183 prtad, devad, addr);
8184
Eilon Greenstein01cd4522009-08-12 08:23:08 +00008185 /* The HW expects different devad if CL22 is used */
8186 devad = (devad == MDIO_DEVAD_NONE) ? DEFAULT_PHY_DEV_ADDR : devad;
8187
8188 bnx2x_acquire_phy_lock(bp);
Yaniv Rosnere10bc842010-09-07 11:40:50 +00008189 rc = bnx2x_phy_read(&bp->link_params, prtad, devad, addr, &value);
Eilon Greenstein01cd4522009-08-12 08:23:08 +00008190 bnx2x_release_phy_lock(bp);
8191 DP(NETIF_MSG_LINK, "mdio_read_val 0x%x rc = 0x%x\n", value, rc);
8192
8193 if (!rc)
8194 rc = value;
8195 return rc;
8196}
8197
8198/* called with rtnl_lock */
8199static int bnx2x_mdio_write(struct net_device *netdev, int prtad, int devad,
8200 u16 addr, u16 value)
8201{
8202 struct bnx2x *bp = netdev_priv(netdev);
Eilon Greenstein01cd4522009-08-12 08:23:08 +00008203 int rc;
8204
8205 DP(NETIF_MSG_LINK, "mdio_write: prtad 0x%x, devad 0x%x, addr 0x%x,"
8206 " value 0x%x\n", prtad, devad, addr, value);
8207
Eilon Greenstein01cd4522009-08-12 08:23:08 +00008208 /* The HW expects different devad if CL22 is used */
8209 devad = (devad == MDIO_DEVAD_NONE) ? DEFAULT_PHY_DEV_ADDR : devad;
8210
8211 bnx2x_acquire_phy_lock(bp);
Yaniv Rosnere10bc842010-09-07 11:40:50 +00008212 rc = bnx2x_phy_write(&bp->link_params, prtad, devad, addr, value);
Eilon Greenstein01cd4522009-08-12 08:23:08 +00008213 bnx2x_release_phy_lock(bp);
8214 return rc;
8215}
8216
8217/* called with rtnl_lock */
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02008218static int bnx2x_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
8219{
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02008220 struct bnx2x *bp = netdev_priv(dev);
Eilon Greenstein01cd4522009-08-12 08:23:08 +00008221 struct mii_ioctl_data *mdio = if_mii(ifr);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02008222
Eilon Greenstein01cd4522009-08-12 08:23:08 +00008223 DP(NETIF_MSG_LINK, "ioctl: phy id 0x%x, reg 0x%x, val_in 0x%x\n",
8224 mdio->phy_id, mdio->reg_num, mdio->val_in);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02008225
Eilon Greenstein01cd4522009-08-12 08:23:08 +00008226 if (!netif_running(dev))
8227 return -EAGAIN;
Yaniv Rosnerc18487e2008-06-23 20:27:52 -07008228
Eilon Greenstein01cd4522009-08-12 08:23:08 +00008229 return mdio_mii_ioctl(&bp->mdio, mdio, cmd);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02008230}
8231
#ifdef CONFIG_NET_POLL_CONTROLLER
/* netpoll callback: run the interrupt handler with the IRQ line masked */
static void poll_bnx2x(struct net_device *dev)
{
	struct bnx2x *bp = netdev_priv(dev);
	int irq = bp->pdev->irq;

	disable_irq(irq);
	bnx2x_interrupt(irq, dev);
	enable_irq(irq);
}
#endif
8242
/* net_device callbacks wired into every bnx2x netdev at probe time */
static const struct net_device_ops bnx2x_netdev_ops = {
	.ndo_open = bnx2x_open,
	.ndo_stop = bnx2x_close,
	.ndo_start_xmit = bnx2x_start_xmit,
	.ndo_select_queue = bnx2x_select_queue,
	.ndo_set_rx_mode = bnx2x_set_rx_mode,
	.ndo_set_mac_address = bnx2x_change_mac_addr,
	.ndo_validate_addr = eth_validate_addr,
	.ndo_do_ioctl = bnx2x_ioctl,
	.ndo_change_mtu = bnx2x_change_mtu,
	.ndo_fix_features = bnx2x_fix_features,
	.ndo_set_features = bnx2x_set_features,
	.ndo_tx_timeout = bnx2x_tx_timeout,
#ifdef CONFIG_NET_POLL_CONTROLLER
	.ndo_poll_controller = poll_bnx2x,
#endif
};
8260
/*
 * One-time PCI/netdev initialization for a freshly allocated net_device:
 * enable the PCI device, validate and map BAR0 (registers) and BAR2
 * (doorbells), configure DMA masks, reset a few PXP2 indirect-address
 * registers, and hook up the netdev/ethtool/mdio operations and feature
 * flags.  On failure everything acquired so far is unwound via the
 * err_out_* goto chain.  Returns 0 or a negative errno.
 */
static int __devinit bnx2x_init_dev(struct pci_dev *pdev,
				    struct net_device *dev)
{
	struct bnx2x *bp;
	int rc;

	SET_NETDEV_DEV(dev, &pdev->dev);
	bp = netdev_priv(dev);

	bp->dev = dev;
	bp->pdev = pdev;
	bp->flags = 0;
	/* PCI function number selects the per-PF resources */
	bp->pf_num = PCI_FUNC(pdev->devfn);

	rc = pci_enable_device(pdev);
	if (rc) {
		dev_err(&bp->pdev->dev,
			"Cannot enable PCI device, aborting\n");
		goto err_out;
	}

	/* BAR0 (registers) and BAR2 (doorbells) must both be memory BARs */
	if (!(pci_resource_flags(pdev, 0) & IORESOURCE_MEM)) {
		dev_err(&bp->pdev->dev,
			"Cannot find PCI device base address, aborting\n");
		rc = -ENODEV;
		goto err_out_disable;
	}

	if (!(pci_resource_flags(pdev, 2) & IORESOURCE_MEM)) {
		dev_err(&bp->pdev->dev, "Cannot find second PCI device"
			" base address, aborting\n");
		rc = -ENODEV;
		goto err_out_disable;
	}

	/* only the first function to come up claims the regions and
	 * enables bus mastering */
	if (atomic_read(&pdev->enable_cnt) == 1) {
		rc = pci_request_regions(pdev, DRV_MODULE_NAME);
		if (rc) {
			dev_err(&bp->pdev->dev,
				"Cannot obtain PCI resources, aborting\n");
			goto err_out_disable;
		}

		pci_set_master(pdev);
		pci_save_state(pdev);
	}

	bp->pm_cap = pci_find_capability(pdev, PCI_CAP_ID_PM);
	if (bp->pm_cap == 0) {
		dev_err(&bp->pdev->dev,
			"Cannot find power management capability, aborting\n");
		rc = -EIO;
		goto err_out_release;
	}

	bp->pcie_cap = pci_find_capability(pdev, PCI_CAP_ID_EXP);
	if (bp->pcie_cap == 0) {
		dev_err(&bp->pdev->dev,
			"Cannot find PCI Express capability, aborting\n");
		rc = -EIO;
		goto err_out_release;
	}

	/* prefer 64-bit DMA; fall back to 32-bit, fail if neither works */
	if (dma_set_mask(&pdev->dev, DMA_BIT_MASK(64)) == 0) {
		bp->flags |= USING_DAC_FLAG;
		if (dma_set_coherent_mask(&pdev->dev, DMA_BIT_MASK(64)) != 0) {
			dev_err(&bp->pdev->dev, "dma_set_coherent_mask"
				" failed, aborting\n");
			rc = -EIO;
			goto err_out_release;
		}

	} else if (dma_set_mask(&pdev->dev, DMA_BIT_MASK(32)) != 0) {
		dev_err(&bp->pdev->dev,
			"System does not support DMA, aborting\n");
		rc = -EIO;
		goto err_out_release;
	}

	dev->mem_start = pci_resource_start(pdev, 0);
	dev->base_addr = dev->mem_start;
	dev->mem_end = pci_resource_end(pdev, 0);

	dev->irq = pdev->irq;

	bp->regview = pci_ioremap_bar(pdev, 0);
	if (!bp->regview) {
		dev_err(&bp->pdev->dev,
			"Cannot map register space, aborting\n");
		rc = -ENOMEM;
		goto err_out_release;
	}

	/* map no more of BAR2 than the doorbell area actually needs */
	bp->doorbells = ioremap_nocache(pci_resource_start(pdev, 2),
					min_t(u64, BNX2X_DB_SIZE(bp),
					      pci_resource_len(pdev, 2)));
	if (!bp->doorbells) {
		dev_err(&bp->pdev->dev,
			"Cannot map doorbell space, aborting\n");
		rc = -ENOMEM;
		goto err_out_unmap;
	}

	bnx2x_set_power_state(bp, PCI_D0);

	/* clean indirect addresses */
	pci_write_config_dword(bp->pdev, PCICFG_GRC_ADDRESS,
			       PCICFG_VENDOR_ID_OFFSET);
	REG_WR(bp, PXP2_REG_PGL_ADDR_88_F0 + BP_PORT(bp)*16, 0);
	REG_WR(bp, PXP2_REG_PGL_ADDR_8C_F0 + BP_PORT(bp)*16, 0);
	REG_WR(bp, PXP2_REG_PGL_ADDR_90_F0 + BP_PORT(bp)*16, 0);
	REG_WR(bp, PXP2_REG_PGL_ADDR_94_F0 + BP_PORT(bp)*16, 0);

	/* Reset the load counter */
	bnx2x_clear_load_cnt(bp);

	dev->watchdog_timeo = TX_TIMEOUT;

	dev->netdev_ops = &bnx2x_netdev_ops;
	bnx2x_set_ethtool_ops(dev);

	dev->hw_features = NETIF_F_SG | NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM |
		NETIF_F_TSO | NETIF_F_TSO_ECN | NETIF_F_TSO6 |
		NETIF_F_RXCSUM | NETIF_F_LRO | NETIF_F_HW_VLAN_TX;

	dev->vlan_features = NETIF_F_SG | NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM |
		NETIF_F_TSO | NETIF_F_TSO_ECN | NETIF_F_TSO6 | NETIF_F_HIGHDMA;

	dev->features |= dev->hw_features | NETIF_F_HW_VLAN_RX;
	if (bp->flags & USING_DAC_FLAG)
		dev->features |= NETIF_F_HIGHDMA;

	/* Add Loopback capability to the device */
	dev->hw_features |= NETIF_F_LOOPBACK;

#ifdef BCM_DCBNL
	dev->dcbnl_ops = &bnx2x_dcbnl_ops;
#endif

	/* get_port_hwinfo() will set prtad and mmds properly */
	bp->mdio.prtad = MDIO_PRTAD_NONE;
	bp->mdio.mmds = 0;
	bp->mdio.mode_support = MDIO_SUPPORTS_C45 | MDIO_EMULATE_C22;
	bp->mdio.dev = dev;
	bp->mdio.mdio_read = bnx2x_mdio_read;
	bp->mdio.mdio_write = bnx2x_mdio_write;

	return 0;

err_out_unmap:
	if (bp->regview) {
		iounmap(bp->regview);
		bp->regview = NULL;
	}
	if (bp->doorbells) {
		iounmap(bp->doorbells);
		bp->doorbells = NULL;
	}

err_out_release:
	if (atomic_read(&pdev->enable_cnt) == 1)
		pci_release_regions(pdev);

err_out_disable:
	pci_disable_device(pdev);
	pci_set_drvdata(pdev, NULL);

err_out:
	return rc;
}
8431
Eilon Greenstein37f9ce62009-08-12 08:23:34 +00008432static void __devinit bnx2x_get_pcie_width_speed(struct bnx2x *bp,
8433 int *width, int *speed)
Eliezer Tamir25047952008-02-28 11:50:16 -08008434{
8435 u32 val = REG_RD(bp, PCICFG_OFFSET + PCICFG_LINK_CONTROL);
8436
Eilon Greenstein37f9ce62009-08-12 08:23:34 +00008437 *width = (val & PCICFG_LINK_WIDTH) >> PCICFG_LINK_WIDTH_SHIFT;
8438
8439 /* return value of 1=2.5GHz 2=5GHz */
8440 *speed = (val & PCICFG_LINK_SPEED) >> PCICFG_LINK_SPEED_SHIFT;
Eliezer Tamir25047952008-02-28 11:50:16 -08008441}
8442
/*
 * Sanity-check the firmware blob already loaded into bp->firmware:
 * every section and every init-ops offset must lie within the blob,
 * and the embedded FW version must match the one this driver was built
 * against.  Returns 0 on success, -EINVAL on any inconsistency.
 */
static int bnx2x_check_firmware(struct bnx2x *bp)
{
	const struct firmware *firmware = bp->firmware;
	struct bnx2x_fw_file_hdr *fw_hdr;
	struct bnx2x_fw_file_section *sections;
	u32 offset, len, num_ops;
	u16 *ops_offsets;
	int i;
	const u8 *fw_ver;

	/* the header itself must fit before we dereference it */
	if (firmware->size < sizeof(struct bnx2x_fw_file_hdr))
		return -EINVAL;

	fw_hdr = (struct bnx2x_fw_file_hdr *)firmware->data;
	/* the header is laid out as an array of {offset, len} sections */
	sections = (struct bnx2x_fw_file_section *)fw_hdr;

	/* Make sure none of the offsets and sizes make us read beyond
	 * the end of the firmware data */
	for (i = 0; i < sizeof(*fw_hdr) / sizeof(*sections); i++) {
		offset = be32_to_cpu(sections[i].offset);
		len = be32_to_cpu(sections[i].len);
		if (offset + len > firmware->size) {
			dev_err(&bp->pdev->dev,
				"Section %d length is out of bounds\n", i);
			return -EINVAL;
		}
	}

	/* Likewise for the init_ops offsets */
	offset = be32_to_cpu(fw_hdr->init_ops_offsets.offset);
	ops_offsets = (u16 *)(firmware->data + offset);
	num_ops = be32_to_cpu(fw_hdr->init_ops.len) / sizeof(struct raw_op);

	/* NOTE(review): '>' (not '>=') accepts an offset equal to num_ops;
	 * presumably such offsets serve as exclusive end markers into the
	 * ops array — confirm against the init-block consumer. */
	for (i = 0; i < be32_to_cpu(fw_hdr->init_ops_offsets.len) / 2; i++) {
		if (be16_to_cpu(ops_offsets[i]) > num_ops) {
			dev_err(&bp->pdev->dev,
				"Section offset %d is out of bounds\n", i);
			return -EINVAL;
		}
	}

	/* Check FW version */
	offset = be32_to_cpu(fw_hdr->fw_version.offset);
	fw_ver = firmware->data + offset;
	if ((fw_ver[0] != BCM_5710_FW_MAJOR_VERSION) ||
	    (fw_ver[1] != BCM_5710_FW_MINOR_VERSION) ||
	    (fw_ver[2] != BCM_5710_FW_REVISION_VERSION) ||
	    (fw_ver[3] != BCM_5710_FW_ENGINEERING_VERSION)) {
		dev_err(&bp->pdev->dev,
			"Bad FW version:%d.%d.%d.%d. Should be %d.%d.%d.%d\n",
			fw_ver[0], fw_ver[1], fw_ver[2],
			fw_ver[3], BCM_5710_FW_MAJOR_VERSION,
			BCM_5710_FW_MINOR_VERSION,
			BCM_5710_FW_REVISION_VERSION,
			BCM_5710_FW_ENGINEERING_VERSION);
		return -EINVAL;
	}

	return 0;
}
8503
Eilon Greensteinab6ad5a2009-08-12 08:24:29 +00008504static inline void be32_to_cpu_n(const u8 *_source, u8 *_target, u32 n)
Vladislav Zolotarov94a78b72009-04-27 03:27:43 -07008505{
Eilon Greensteinab6ad5a2009-08-12 08:24:29 +00008506 const __be32 *source = (const __be32 *)_source;
8507 u32 *target = (u32 *)_target;
Vladislav Zolotarov94a78b72009-04-27 03:27:43 -07008508 u32 i;
Vladislav Zolotarov94a78b72009-04-27 03:27:43 -07008509
8510 for (i = 0; i < n/4; i++)
8511 target[i] = be32_to_cpu(source[i]);
8512}
8513
8514/*
8515 Ops array is stored in the following format:
8516 {op(8bit), offset(24bit, big endian), data(32bit, big endian)}
8517 */
Eilon Greensteinab6ad5a2009-08-12 08:24:29 +00008518static inline void bnx2x_prep_ops(const u8 *_source, u8 *_target, u32 n)
Vladislav Zolotarov94a78b72009-04-27 03:27:43 -07008519{
Eilon Greensteinab6ad5a2009-08-12 08:24:29 +00008520 const __be32 *source = (const __be32 *)_source;
8521 struct raw_op *target = (struct raw_op *)_target;
Vladislav Zolotarov94a78b72009-04-27 03:27:43 -07008522 u32 i, j, tmp;
Vladislav Zolotarov94a78b72009-04-27 03:27:43 -07008523
Eilon Greensteinab6ad5a2009-08-12 08:24:29 +00008524 for (i = 0, j = 0; i < n/8; i++, j += 2) {
Vladislav Zolotarov94a78b72009-04-27 03:27:43 -07008525 tmp = be32_to_cpu(source[j]);
8526 target[i].op = (tmp >> 24) & 0xff;
Vladislav Zolotarovcdaa7cb2010-04-19 01:13:57 +00008527 target[i].offset = tmp & 0xffffff;
8528 target[i].raw_data = be32_to_cpu(source[j + 1]);
Vladislav Zolotarov94a78b72009-04-27 03:27:43 -07008529 }
8530}
Eilon Greensteinab6ad5a2009-08-12 08:24:29 +00008531
Dmitry Kravkov523224a2010-10-06 03:23:26 +00008532/**
8533 * IRO array is stored in the following format:
8534 * {base(24bit), m1(16bit), m2(16bit), m3(16bit), size(16bit) }
8535 */
8536static inline void bnx2x_prep_iro(const u8 *_source, u8 *_target, u32 n)
8537{
8538 const __be32 *source = (const __be32 *)_source;
8539 struct iro *target = (struct iro *)_target;
8540 u32 i, j, tmp;
8541
8542 for (i = 0, j = 0; i < n/sizeof(struct iro); i++) {
8543 target[i].base = be32_to_cpu(source[j]);
8544 j++;
8545 tmp = be32_to_cpu(source[j]);
8546 target[i].m1 = (tmp >> 16) & 0xffff;
8547 target[i].m2 = tmp & 0xffff;
8548 j++;
8549 tmp = be32_to_cpu(source[j]);
8550 target[i].m3 = (tmp >> 16) & 0xffff;
8551 target[i].size = tmp & 0xffff;
8552 j++;
8553 }
8554}
8555
Eilon Greensteinab6ad5a2009-08-12 08:24:29 +00008556static inline void be16_to_cpu_n(const u8 *_source, u8 *_target, u32 n)
Vladislav Zolotarov94a78b72009-04-27 03:27:43 -07008557{
Eilon Greensteinab6ad5a2009-08-12 08:24:29 +00008558 const __be16 *source = (const __be16 *)_source;
8559 u16 *target = (u16 *)_target;
Vladislav Zolotarov94a78b72009-04-27 03:27:43 -07008560 u32 i;
Vladislav Zolotarov94a78b72009-04-27 03:27:43 -07008561
8562 for (i = 0; i < n/2; i++)
8563 target[i] = be16_to_cpu(source[i]);
8564}
8565
/*
 * Allocate bp->arr for the firmware section named 'arr', then decode that
 * section out of bp->firmware->data into it with 'func'.  On allocation
 * failure jumps to 'lbl' in the caller.  Expects 'fw_hdr' and 'bp' to be
 * in scope at the expansion site (used only by bnx2x_init_firmware()).
 */
#define BNX2X_ALLOC_AND_SET(arr, lbl, func) \
do { \
	u32 len = be32_to_cpu(fw_hdr->arr.len); \
	bp->arr = kmalloc(len, GFP_KERNEL); \
	if (!bp->arr) { \
		pr_err("Failed to allocate %d bytes for "#arr"\n", len); \
		goto lbl; \
	} \
	func(bp->firmware->data + be32_to_cpu(fw_hdr->arr.offset), \
	     (u8 *)bp->arr, len); \
} while (0)
Vladislav Zolotarov94a78b72009-04-27 03:27:43 -07008577
/*
 * Load and prepare the chip firmware: pick the image matching the chip
 * revision, request it from userspace, validate it, then decode the init
 * data / ops / offsets / IRO tables into freshly allocated buffers and
 * point the per-STORM init pointers into the raw blob.  On any failure
 * the goto chain frees whatever was allocated so far (label order is
 * tied to the BNX2X_ALLOC_AND_SET calls below).  Returns 0 or -errno.
 */
int bnx2x_init_firmware(struct bnx2x *bp)
{
	const char *fw_file_name;
	struct bnx2x_fw_file_hdr *fw_hdr;
	int rc;

	/* select the image for this chip generation */
	if (CHIP_IS_E1(bp))
		fw_file_name = FW_FILE_NAME_E1;
	else if (CHIP_IS_E1H(bp))
		fw_file_name = FW_FILE_NAME_E1H;
	else if (CHIP_IS_E2(bp))
		fw_file_name = FW_FILE_NAME_E2;
	else {
		BNX2X_ERR("Unsupported chip revision\n");
		return -EINVAL;
	}

	BNX2X_DEV_INFO("Loading %s\n", fw_file_name);

	rc = request_firmware(&bp->firmware, fw_file_name, &bp->pdev->dev);
	if (rc) {
		BNX2X_ERR("Can't load firmware file %s\n", fw_file_name);
		goto request_firmware_exit;
	}

	rc = bnx2x_check_firmware(bp);
	if (rc) {
		BNX2X_ERR("Corrupt firmware file %s\n", fw_file_name);
		goto request_firmware_exit;
	}

	fw_hdr = (struct bnx2x_fw_file_hdr *)bp->firmware->data;

	/* Initialize the pointers to the init arrays */
	/* Blob */
	BNX2X_ALLOC_AND_SET(init_data, request_firmware_exit, be32_to_cpu_n);

	/* Opcodes */
	BNX2X_ALLOC_AND_SET(init_ops, init_ops_alloc_err, bnx2x_prep_ops);

	/* Offsets */
	BNX2X_ALLOC_AND_SET(init_ops_offsets, init_offsets_alloc_err,
			    be16_to_cpu_n);

	/* STORMs firmware: these point directly into the request_firmware
	 * blob, so they need no allocation and no separate cleanup */
	INIT_TSEM_INT_TABLE_DATA(bp) = bp->firmware->data +
			be32_to_cpu(fw_hdr->tsem_int_table_data.offset);
	INIT_TSEM_PRAM_DATA(bp)      = bp->firmware->data +
			be32_to_cpu(fw_hdr->tsem_pram_data.offset);
	INIT_USEM_INT_TABLE_DATA(bp) = bp->firmware->data +
			be32_to_cpu(fw_hdr->usem_int_table_data.offset);
	INIT_USEM_PRAM_DATA(bp)      = bp->firmware->data +
			be32_to_cpu(fw_hdr->usem_pram_data.offset);
	INIT_XSEM_INT_TABLE_DATA(bp) = bp->firmware->data +
			be32_to_cpu(fw_hdr->xsem_int_table_data.offset);
	INIT_XSEM_PRAM_DATA(bp)      = bp->firmware->data +
			be32_to_cpu(fw_hdr->xsem_pram_data.offset);
	INIT_CSEM_INT_TABLE_DATA(bp) = bp->firmware->data +
			be32_to_cpu(fw_hdr->csem_int_table_data.offset);
	INIT_CSEM_PRAM_DATA(bp)      = bp->firmware->data +
			be32_to_cpu(fw_hdr->csem_pram_data.offset);
	/* IRO */
	BNX2X_ALLOC_AND_SET(iro_arr, iro_alloc_err, bnx2x_prep_iro);

	return 0;

iro_alloc_err:
	kfree(bp->init_ops_offsets);
init_offsets_alloc_err:
	kfree(bp->init_ops);
init_ops_alloc_err:
	kfree(bp->init_data);
request_firmware_exit:
	release_firmware(bp->firmware);

	return rc;
}
8655
Dmitry Kravkov523224a2010-10-06 03:23:26 +00008656static inline int bnx2x_set_qm_cid_count(struct bnx2x *bp, int l2_cid_count)
8657{
8658 int cid_count = L2_FP_COUNT(l2_cid_count);
Vladislav Zolotarov94a78b72009-04-27 03:27:43 -07008659
Dmitry Kravkov523224a2010-10-06 03:23:26 +00008660#ifdef BCM_CNIC
8661 cid_count += CNIC_CID_MAX;
8662#endif
8663 return roundup(cid_count, QM_CID_ROUND);
8664}
Dmitry Kravkovf85582f2010-10-06 03:34:21 +00008665
/*
 * PCI probe entry point: size the fastpath context count for the board
 * type, allocate the multiqueue netdev, run the PCI/netdev and driver
 * private initialization, pick an interrupt mode, register the netdev
 * and (with CNIC) the FCoE storage MAC.  Returns 0 or a negative errno;
 * the init_one_exit path unwinds everything bnx2x_init_dev() acquired.
 */
static int __devinit bnx2x_init_one(struct pci_dev *pdev,
				    const struct pci_device_id *ent)
{
	struct net_device *dev = NULL;
	struct bnx2x *bp;
	int pcie_width, pcie_speed;
	int rc, cid_count;

	/* per-board fastpath status-block count */
	switch (ent->driver_data) {
	case BCM57710:
	case BCM57711:
	case BCM57711E:
		cid_count = FP_SB_MAX_E1x;
		break;

	case BCM57712:
	case BCM57712E:
		cid_count = FP_SB_MAX_E2;
		break;

	default:
		pr_err("Unknown board_type (%ld), aborting\n",
		       ent->driver_data);
		return -ENODEV;
	}

	cid_count += NONE_ETH_CONTEXT_USE + CNIC_CONTEXT_USE;

	/* dev zeroed in init_etherdev */
	dev = alloc_etherdev_mq(sizeof(*bp), cid_count);
	if (!dev) {
		dev_err(&pdev->dev, "Cannot allocate net device\n");
		return -ENOMEM;
	}

	bp = netdev_priv(dev);
	bp->msg_enable = debug;

	pci_set_drvdata(pdev, dev);

	bp->l2_cid_count = cid_count;

	rc = bnx2x_init_dev(pdev, dev);
	if (rc < 0) {
		/* bnx2x_init_dev() cleaned up after itself */
		free_netdev(dev);
		return rc;
	}

	rc = bnx2x_init_bp(bp);
	if (rc)
		goto init_one_exit;

	/* calc qm_cid_count */
	bp->qm_cid_count = bnx2x_set_qm_cid_count(bp, cid_count);

#ifdef BCM_CNIC
	/* disable FCOE L2 queue for E1x*/
	if (CHIP_IS_E1x(bp))
		bp->flags |= NO_FCOE_FLAG;

#endif

	/* Configure interrupt mode: try to enable MSI-X/MSI if
	 * needed, set bp->num_queues appropriately.
	 */
	bnx2x_set_int_mode(bp);

	/* Add all NAPI objects */
	bnx2x_add_all_napi(bp);

	rc = register_netdev(dev);
	if (rc) {
		dev_err(&pdev->dev, "Cannot register net device\n");
		goto init_one_exit;
	}

#ifdef BCM_CNIC
	if (!NO_FCOE(bp)) {
		/* Add storage MAC address */
		rtnl_lock();
		dev_addr_add(bp->dev, bp->fip_mac, NETDEV_HW_ADDR_T_SAN);
		rtnl_unlock();
	}
#endif

	bnx2x_get_pcie_width_speed(bp, &pcie_width, &pcie_speed);

	netdev_info(dev, "%s (%c%d) PCI-E x%d %s found at mem %lx,"
		    " IRQ %d, ", board_info[ent->driver_data].name,
		    (CHIP_REV(bp) >> 12) + 'A', (CHIP_METAL(bp) >> 4),
		    pcie_width,
		    ((!CHIP_IS_E2(bp) && pcie_speed == 2) ||
		     (CHIP_IS_E2(bp) && pcie_speed == 1)) ?
		    "5GHz (Gen2)" : "2.5GHz",
		    dev->base_addr, bp->pdev->irq);
	pr_cont("node addr %pM\n", dev->dev_addr);

	return 0;

init_one_exit:
	if (bp->regview)
		iounmap(bp->regview);

	if (bp->doorbells)
		iounmap(bp->doorbells);

	free_netdev(dev);

	if (atomic_read(&pdev->enable_cnt) == 1)
		pci_release_regions(pdev);

	pci_disable_device(pdev);
	pci_set_drvdata(pdev, NULL);

	return rc;
}
8782
/*
 * PCI remove entry point: mirror image of bnx2x_init_one().  Drops the
 * FCoE storage MAC and DCB app TLVs, unregisters the netdev, tears down
 * NAPI/IRQ resources, and releases the PCI mappings and regions.
 */
static void __devexit bnx2x_remove_one(struct pci_dev *pdev)
{
	struct net_device *dev = pci_get_drvdata(pdev);
	struct bnx2x *bp;

	if (!dev) {
		dev_err(&pdev->dev, "BAD net device from bnx2x_init_one\n");
		return;
	}
	bp = netdev_priv(dev);

#ifdef BCM_CNIC
	/* Delete storage MAC address */
	if (!NO_FCOE(bp)) {
		rtnl_lock();
		dev_addr_del(bp->dev, bp->fip_mac, NETDEV_HW_ADDR_T_SAN);
		rtnl_unlock();
	}
#endif

#ifdef BCM_DCBNL
	/* Delete app tlvs from dcbnl */
	bnx2x_dcbnl_update_applist(bp, true);
#endif

	unregister_netdev(dev);

	/* Delete all NAPI objects */
	bnx2x_del_all_napi(bp);

	/* Power on: we can't let PCI layer write to us while we are in D3 */
	bnx2x_set_power_state(bp, PCI_D0);

	/* Disable MSI/MSI-X */
	bnx2x_disable_msi(bp);

	/* Power off */
	bnx2x_set_power_state(bp, PCI_D3hot);

	/* Make sure RESET task is not scheduled before continuing */
	cancel_delayed_work_sync(&bp->reset_task);

	if (bp->regview)
		iounmap(bp->regview);

	if (bp->doorbells)
		iounmap(bp->doorbells);

	bnx2x_free_mem_bp(bp);

	free_netdev(dev);

	if (atomic_read(&pdev->enable_cnt) == 1)
		pci_release_regions(pdev);

	pci_disable_device(pdev);
	pci_set_drvdata(pdev, NULL);
}
8841
/*
 * Minimal unload path used from the PCI error-handling (EEH) callbacks:
 * stop the datapath and statistics, release IRQs and free RX/TX
 * resources without talking to hardware that may already be gone.
 * Leaves the device in BNX2X_STATE_CLOSED.  Always returns 0.
 */
static int bnx2x_eeh_nic_unload(struct bnx2x *bp)
{
	int i;

	bp->state = BNX2X_STATE_ERROR;

	bp->rx_mode = BNX2X_RX_MODE_NONE;

	bnx2x_netif_stop(bp, 0);
	netif_carrier_off(bp->dev);

	del_timer_sync(&bp->timer);
	bp->stats_state = STATS_STATE_DISABLED;
	DP(BNX2X_MSG_STATS, "stats_state - DISABLED\n");

	/* Release IRQs */
	bnx2x_free_irq(bp);

	/* Free SKBs, SGEs, TPA pool and driver internals */
	bnx2x_free_skbs(bp);

	for_each_rx_queue(bp, i)
		bnx2x_free_rx_sge_range(bp, bp->fp + i, NUM_RX_SGE);

	bnx2x_free_mem(bp);

	bp->state = BNX2X_STATE_CLOSED;

	return 0;
}
8872
/* Re-sync management-firmware (MCP) state after a PCI bus reset so a
 * subsequent bnx2x_nic_load() can use the driver/firmware mailbox again.
 * If the shared-memory base read from the chip looks invalid, the MCP is
 * assumed inactive and NO_MCP_FLAG is set; otherwise the firmware sequence
 * number is re-read from the per-function mailbox header.
 */
static void bnx2x_eeh_recover(struct bnx2x *bp)
{
	u32 val;

	mutex_init(&bp->port.phy_mutex);

	bp->common.shmem_base = REG_RD(bp, MISC_REG_SHARED_MEM_ADDR);
	bp->link_params.shmem_base = bp->common.shmem_base;
	BNX2X_DEV_INFO("shmem offset is 0x%x\n", bp->common.shmem_base);

	/* A live MCP reports a shmem base inside [0xA0000, 0xC0000);
	 * anything else (including 0) means the MCP is not running */
	if (!bp->common.shmem_base ||
	    (bp->common.shmem_base < 0xA0000) ||
	    (bp->common.shmem_base >= 0xC0000)) {
		BNX2X_DEV_INFO("MCP not active\n");
		bp->flags |= NO_MCP_FLAG;
		return;
	}

	/* Both DEV_INFO and MB validity bits must be set; a bad signature
	 * is logged but recovery continues anyway */
	val = SHMEM_RD(bp, validity_map[BP_PORT(bp)]);
	if ((val & (SHR_MEM_VALIDITY_DEV_INFO | SHR_MEM_VALIDITY_MB))
		!= (SHR_MEM_VALIDITY_DEV_INFO | SHR_MEM_VALIDITY_MB))
		BNX2X_ERR("BAD MCP validity signature\n");

	if (!BP_NOMCP(bp)) {
		bp->fw_seq =
		    (SHMEM_RD(bp, func_mb[BP_FW_MB_IDX(bp)].drv_mb_header) &
		     DRV_MSG_SEQ_NUMBER_MASK);
		BNX2X_DEV_INFO("fw_seq 0x%08x\n", bp->fw_seq);
	}
}
8903
/**
 * bnx2x_io_error_detected - called when PCI error is detected
 * @pdev: Pointer to PCI device
 * @state: The current pci connection state
 *
 * This function is called after a PCI bus error affecting
 * this device has been detected.
 *
 * Returns PCI_ERS_RESULT_DISCONNECT on a permanent failure, otherwise
 * PCI_ERS_RESULT_NEED_RESET to ask the PCI core for a slot reset.
 */
static pci_ers_result_t bnx2x_io_error_detected(struct pci_dev *pdev,
						pci_channel_state_t state)
{
	struct net_device *dev = pci_get_drvdata(pdev);
	struct bnx2x *bp = netdev_priv(dev);

	rtnl_lock();

	/* Detach first so the stack stops using the device while we
	 * tear it down */
	netif_device_detach(dev);

	/* A permanent failure cannot be recovered from - tell the PCI
	 * core to disconnect the device */
	if (state == pci_channel_io_perm_failure) {
		rtnl_unlock();
		return PCI_ERS_RESULT_DISCONNECT;
	}

	if (netif_running(dev))
		bnx2x_eeh_nic_unload(bp);

	pci_disable_device(pdev);

	rtnl_unlock();

	/* Request a slot reset */
	return PCI_ERS_RESULT_NEED_RESET;
}
8937
/**
 * bnx2x_io_slot_reset - called after the PCI bus has been reset
 * @pdev: Pointer to PCI device
 *
 * Restart the card from scratch, as if from a cold-boot.
 *
 * Returns PCI_ERS_RESULT_DISCONNECT if the device cannot be re-enabled,
 * otherwise PCI_ERS_RESULT_RECOVERED.
 */
static pci_ers_result_t bnx2x_io_slot_reset(struct pci_dev *pdev)
{
	struct net_device *dev = pci_get_drvdata(pdev);
	struct bnx2x *bp = netdev_priv(dev);

	rtnl_lock();

	if (pci_enable_device(pdev)) {
		dev_err(&pdev->dev,
			"Cannot re-enable PCI device after reset\n");
		rtnl_unlock();
		return PCI_ERS_RESULT_DISCONNECT;
	}

	/* Restore bus mastering and the saved PCI config space */
	pci_set_master(pdev);
	pci_restore_state(pdev);

	/* If the interface was up, bring the chip to D0 so that
	 * bnx2x_io_resume() can reload it */
	if (netif_running(dev))
		bnx2x_set_power_state(bp, PCI_D0);

	rtnl_unlock();

	return PCI_ERS_RESULT_RECOVERED;
}
8968
/**
 * bnx2x_io_resume - called when traffic can start flowing again
 * @pdev: Pointer to PCI device
 *
 * This callback is called when the error recovery driver tells us that
 * its OK to resume normal operation.
 */
static void bnx2x_io_resume(struct pci_dev *pdev)
{
	struct net_device *dev = pci_get_drvdata(pdev);
	struct bnx2x *bp = netdev_priv(dev);

	/* Don't reload while a parity-error recovery is still in flight;
	 * that flow is responsible for bringing the device back */
	if (bp->recovery_state != BNX2X_RECOVERY_DONE) {
		printk(KERN_ERR "Handling parity error recovery. "
		       "Try again later\n");
		return;
	}

	rtnl_lock();

	/* Re-sync shared-memory/MCP state before reloading the NIC */
	bnx2x_eeh_recover(bp);

	if (netif_running(dev))
		bnx2x_nic_load(bp, LOAD_NORMAL);

	netif_device_attach(dev);

	rtnl_unlock();
}
8998
/* PCI error-recovery (AER/EEH) callbacks registered via the pci_driver
 * below; invoked by the PCI core on bus errors affecting this device */
static struct pci_error_handlers bnx2x_err_handler = {
	.error_detected = bnx2x_io_error_detected,
	.slot_reset     = bnx2x_io_slot_reset,
	.resume         = bnx2x_io_resume,
};
9004
/* PCI driver descriptor: probe/remove, power management and error
 * recovery entry points for all devices matched by bnx2x_pci_tbl */
static struct pci_driver bnx2x_pci_driver = {
	.name        = DRV_MODULE_NAME,
	.id_table    = bnx2x_pci_tbl,
	.probe       = bnx2x_init_one,
	.remove      = __devexit_p(bnx2x_remove_one),
	.suspend     = bnx2x_suspend,
	.resume      = bnx2x_resume,
	.err_handler = &bnx2x_err_handler,
};
9014
9015static int __init bnx2x_init(void)
9016{
Stanislaw Gruszkadd21ca62009-05-05 23:22:01 +00009017 int ret;
9018
Joe Perches7995c642010-02-17 15:01:52 +00009019 pr_info("%s", version);
Eilon Greenstein938cf542009-08-12 08:23:37 +00009020
Eilon Greenstein1cf167f2009-01-14 21:22:18 -08009021 bnx2x_wq = create_singlethread_workqueue("bnx2x");
9022 if (bnx2x_wq == NULL) {
Joe Perches7995c642010-02-17 15:01:52 +00009023 pr_err("Cannot create workqueue\n");
Eilon Greenstein1cf167f2009-01-14 21:22:18 -08009024 return -ENOMEM;
9025 }
9026
Stanislaw Gruszkadd21ca62009-05-05 23:22:01 +00009027 ret = pci_register_driver(&bnx2x_pci_driver);
9028 if (ret) {
Joe Perches7995c642010-02-17 15:01:52 +00009029 pr_err("Cannot register driver\n");
Stanislaw Gruszkadd21ca62009-05-05 23:22:01 +00009030 destroy_workqueue(bnx2x_wq);
9031 }
9032 return ret;
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02009033}
9034
/* Module exit: unregister the PCI driver first (running each device's
 * remove path, which cancels its delayed work), then destroy the
 * now-idle workqueue */
static void __exit bnx2x_cleanup(void)
{
	pci_unregister_driver(&bnx2x_pci_driver);

	destroy_workqueue(bnx2x_wq);
}
9041
/* Module load/unload entry points */
module_init(bnx2x_init);
module_exit(bnx2x_cleanup);
9044
Michael Chan993ac7b2009-10-10 13:46:56 +00009045#ifdef BCM_CNIC
9046
/* count denotes the number of new completions we have seen */
/* Post as many queued CNIC work-queue entries (KWQEs) to the slow-path
 * queue as current credits allow.  First retires 'count' completed SPEs
 * from cnic_spq_pending, then drains the cnic_kwq ring, charging each
 * entry against the credit pool that matches its connection type:
 * L2 (ETH) against cq_spq_left, COMMON against eq_spq_left, and
 * iSCSI/FCoE against cnic_spq_pending vs. max_kwqe_pending.
 * All ring and counter manipulation happens under bp->spq_lock.
 */
static void bnx2x_cnic_sp_post(struct bnx2x *bp, int count)
{
	struct eth_spe *spe;

#ifdef BNX2X_STOP_ON_ERROR
	if (unlikely(bp->panic))
		return;
#endif

	spin_lock_bh(&bp->spq_lock);
	/* Retiring more SPEs than are outstanding is a driver bug */
	BUG_ON(bp->cnic_spq_pending < count);
	bp->cnic_spq_pending -= count;


	for (; bp->cnic_kwq_pending; bp->cnic_kwq_pending--) {
		u16 type = (le16_to_cpu(bp->cnic_kwq_cons->hdr.type)
				& SPE_HDR_CONN_TYPE) >>
				SPE_HDR_CONN_TYPE_SHIFT;

		/* Set validation for iSCSI L2 client before sending SETUP
		 * ramrod
		 */
		if (type == ETH_CONNECTION_TYPE) {
			u8 cmd = (le32_to_cpu(bp->cnic_kwq_cons->
					      hdr.conn_and_cmd_data) >>
				  SPE_HDR_CMD_ID_SHIFT) & 0xff;

			if (cmd == RAMROD_CMD_ID_ETH_CLIENT_SETUP)
				bnx2x_set_ctx_validation(&bp->context.
					vcxt[BNX2X_ISCSI_ETH_CID].eth,
					HW_CID(bp, BNX2X_ISCSI_ETH_CID));
		}

		/* There may be not more than 8 L2 and not more than 8 L5 SPEs
		 * We also check that the number of outstanding
		 * COMMON ramrods is not more than the EQ and SPQ can
		 * accommodate.
		 */
		if (type == ETH_CONNECTION_TYPE) {
			if (!atomic_read(&bp->cq_spq_left))
				break;
			else
				atomic_dec(&bp->cq_spq_left);
		} else if (type == NONE_CONNECTION_TYPE) {
			if (!atomic_read(&bp->eq_spq_left))
				break;
			else
				atomic_dec(&bp->eq_spq_left);
		} else if ((type == ISCSI_CONNECTION_TYPE) ||
			   (type == FCOE_CONNECTION_TYPE)) {
			if (bp->cnic_spq_pending >=
			    bp->cnic_eth_dev.max_kwqe_pending)
				break;
			else
				bp->cnic_spq_pending++;
		} else {
			BNX2X_ERR("Unknown SPE type: %d\n", type);
			bnx2x_panic();
			break;
		}

		/* Copy the KWQE into the next slow-path queue slot */
		spe = bnx2x_sp_get_next(bp);
		*spe = *bp->cnic_kwq_cons;

		DP(NETIF_MSG_TIMER, "pending on SPQ %d, on KWQ %d count %d\n",
		   bp->cnic_spq_pending, bp->cnic_kwq_pending, count);

		/* Advance the KWQ consumer, wrapping at the ring end */
		if (bp->cnic_kwq_cons == bp->cnic_kwq_last)
			bp->cnic_kwq_cons = bp->cnic_kwq;
		else
			bp->cnic_kwq_cons++;
	}
	/* Publish the new SPQ producer to the hardware */
	bnx2x_sp_prod_update(bp);
	spin_unlock_bh(&bp->spq_lock);
}
9123
/* CNIC callback for submitting 16-byte KWQEs.  Copies up to 'count'
 * entries into the driver's cnic_kwq staging ring under bp->spq_lock
 * (stopping early if the ring fills to MAX_SP_DESC_CNT), then kicks
 * bnx2x_cnic_sp_post() to push them to the slow-path queue if credits
 * permit.  Returns the number of KWQEs actually accepted.
 */
static int bnx2x_cnic_sp_queue(struct net_device *dev,
			       struct kwqe_16 *kwqes[], u32 count)
{
	struct bnx2x *bp = netdev_priv(dev);
	int i;

#ifdef BNX2X_STOP_ON_ERROR
	if (unlikely(bp->panic))
		return -EIO;
#endif

	spin_lock_bh(&bp->spq_lock);

	for (i = 0; i < count; i++) {
		/* KWQEs are laid out as slow-path elements */
		struct eth_spe *spe = (struct eth_spe *)kwqes[i];

		/* Ring full - accept what we have so far */
		if (bp->cnic_kwq_pending == MAX_SP_DESC_CNT)
			break;

		*bp->cnic_kwq_prod = *spe;

		bp->cnic_kwq_pending++;

		DP(NETIF_MSG_TIMER, "L5 SPQE %x %x %x:%x pos %d\n",
		   spe->hdr.conn_and_cmd_data, spe->hdr.type,
		   spe->data.update_data_addr.hi,
		   spe->data.update_data_addr.lo,
		   bp->cnic_kwq_pending);

		/* Advance the producer, wrapping at the ring end */
		if (bp->cnic_kwq_prod == bp->cnic_kwq_last)
			bp->cnic_kwq_prod = bp->cnic_kwq;
		else
			bp->cnic_kwq_prod++;
	}

	spin_unlock_bh(&bp->spq_lock);

	if (bp->cnic_spq_pending < bp->cnic_eth_dev.max_kwqe_pending)
		bnx2x_cnic_sp_post(bp, 0);

	return i;
}
9166
9167static int bnx2x_cnic_ctl_send(struct bnx2x *bp, struct cnic_ctl_info *ctl)
9168{
9169 struct cnic_ops *c_ops;
9170 int rc = 0;
9171
9172 mutex_lock(&bp->cnic_mutex);
Eric Dumazet13707f92011-01-26 19:28:23 +00009173 c_ops = rcu_dereference_protected(bp->cnic_ops,
9174 lockdep_is_held(&bp->cnic_mutex));
Michael Chan993ac7b2009-10-10 13:46:56 +00009175 if (c_ops)
9176 rc = c_ops->cnic_ctl(bp->cnic_data, ctl);
9177 mutex_unlock(&bp->cnic_mutex);
9178
9179 return rc;
9180}
9181
9182static int bnx2x_cnic_ctl_send_bh(struct bnx2x *bp, struct cnic_ctl_info *ctl)
9183{
9184 struct cnic_ops *c_ops;
9185 int rc = 0;
9186
9187 rcu_read_lock();
9188 c_ops = rcu_dereference(bp->cnic_ops);
9189 if (c_ops)
9190 rc = c_ops->cnic_ctl(bp->cnic_data, ctl);
9191 rcu_read_unlock();
9192
9193 return rc;
9194}
9195
9196/*
9197 * for commands that have no data
9198 */
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00009199int bnx2x_cnic_notify(struct bnx2x *bp, int cmd)
Michael Chan993ac7b2009-10-10 13:46:56 +00009200{
9201 struct cnic_ctl_info ctl = {0};
9202
9203 ctl.cmd = cmd;
9204
9205 return bnx2x_cnic_ctl_send(bp, &ctl);
9206}
9207
9208static void bnx2x_cnic_cfc_comp(struct bnx2x *bp, int cid)
9209{
9210 struct cnic_ctl_info ctl;
9211
9212 /* first we tell CNIC and only then we count this as a completion */
9213 ctl.cmd = CNIC_CTL_COMPLETION_CMD;
9214 ctl.data.comp.cid = cid;
9215
9216 bnx2x_cnic_ctl_send_bh(bp, &ctl);
Dmitry Kravkovc2bff632010-10-06 03:33:18 +00009217 bnx2x_cnic_sp_post(bp, 0);
Michael Chan993ac7b2009-10-10 13:46:56 +00009218}
9219
/* CNIC -> bnx2x control entry point.  Dispatches on ctl->cmd:
 * context-table writes, SPQ/EQ credit returns, starting/stopping the
 * iSCSI L2 ring and iSCSI-stopped DCBX notification.  Returns 0 on
 * success or -EINVAL for an unknown command.
 */
static int bnx2x_drv_ctl(struct net_device *dev, struct drv_ctl_info *ctl)
{
	struct bnx2x *bp = netdev_priv(dev);
	int rc = 0;

	switch (ctl->cmd) {
	case DRV_CTL_CTXTBL_WR_CMD: {
		/* Write one ILT (context table) entry on behalf of CNIC */
		u32 index = ctl->data.io.offset;
		dma_addr_t addr = ctl->data.io.dma_addr;

		bnx2x_ilt_wr(bp, index, addr);
		break;
	}

	case DRV_CTL_RET_L5_SPQ_CREDIT_CMD: {
		/* CNIC returns L5 slow-path credits; retire them and try
		 * to post more pending KWQEs */
		int count = ctl->data.credit.credit_count;

		bnx2x_cnic_sp_post(bp, count);
		break;
	}

	/* rtnl_lock is held.  */
	case DRV_CTL_START_L2_CMD: {
		u32 cli = ctl->data.ring.client_id;

		/* Clear FCoE FIP and ALL ENODE MACs addresses first */
		bnx2x_del_fcoe_eth_macs(bp);

		/* Set iSCSI MAC address */
		bnx2x_set_iscsi_eth_mac_addr(bp, 1);

		/* Make sure the MAC setup is visible before enabling the
		 * RX filters below */
		mmiowb();
		barrier();

		/* Start accepting on iSCSI L2 ring. Accept all multicasts
		 * because it's the only way for UIO Client to accept
		 * multicasts (in non-promiscuous mode only one Client per
		 * function will receive multicast packets (leading in our
		 * case).
		 */
		bnx2x_rxq_set_mac_filters(bp, cli,
			BNX2X_ACCEPT_UNICAST |
			BNX2X_ACCEPT_BROADCAST |
			BNX2X_ACCEPT_ALL_MULTICAST);
		storm_memset_mac_filters(bp, &bp->mac_filters, BP_FUNC(bp));

		break;
	}

	/* rtnl_lock is held.  */
	case DRV_CTL_STOP_L2_CMD: {
		u32 cli = ctl->data.ring.client_id;

		/* Stop accepting on iSCSI L2 ring */
		bnx2x_rxq_set_mac_filters(bp, cli, BNX2X_ACCEPT_NONE);
		storm_memset_mac_filters(bp, &bp->mac_filters, BP_FUNC(bp));

		/* Filters are off before the MAC is removed */
		mmiowb();
		barrier();

		/* Unset iSCSI L2 MAC */
		bnx2x_set_iscsi_eth_mac_addr(bp, 0);
		break;
	}
	case DRV_CTL_RET_L2_SPQ_CREDIT_CMD: {
		/* CNIC returns L2 slow-path completion-queue credits */
		int count = ctl->data.credit.credit_count;

		smp_mb__before_atomic_inc();
		atomic_add(count, &bp->cq_spq_left);
		smp_mb__after_atomic_inc();
		break;
	}

	case DRV_CTL_ISCSI_STOPPED_CMD: {
		bnx2x_dcbx_set_params(bp, BNX2X_DCBX_STATE_ISCSI_STOPPED);
		break;
	}

	default:
		BNX2X_ERR("unknown command %x\n", ctl->cmd);
		rc = -EINVAL;
	}

	return rc;
}
9305
/* Fill in the IRQ/status-block description that CNIC uses: slot 0 is the
 * dedicated CNIC status block (MSI-X vector 1 when MSI-X is in use, and
 * the chip-specific E2 vs E1x status-block layout), slot 1 is the default
 * status block.
 */
void bnx2x_setup_cnic_irq_info(struct bnx2x *bp)
{
	struct cnic_eth_dev *cp = &bp->cnic_eth_dev;

	if (bp->flags & USING_MSIX_FLAG) {
		cp->drv_state |= CNIC_DRV_STATE_USING_MSIX;
		cp->irq_arr[0].irq_flags |= CNIC_IRQ_FL_MSIX;
		/* CNIC owns MSI-X vector 1 */
		cp->irq_arr[0].vector = bp->msix_table[1].vector;
	} else {
		cp->drv_state &= ~CNIC_DRV_STATE_USING_MSIX;
		cp->irq_arr[0].irq_flags &= ~CNIC_IRQ_FL_MSIX;
	}
	/* Status-block layout differs between E2 and E1x chips */
	if (CHIP_IS_E2(bp))
		cp->irq_arr[0].status_blk = (void *)bp->cnic_sb.e2_sb;
	else
		cp->irq_arr[0].status_blk = (void *)bp->cnic_sb.e1x_sb;

	cp->irq_arr[0].status_blk_num = CNIC_SB_ID(bp);
	cp->irq_arr[0].status_blk_num2 = CNIC_IGU_SB_ID(bp);
	cp->irq_arr[1].status_blk = bp->def_status_blk;
	cp->irq_arr[1].status_blk_num = DEF_SB_ID;
	cp->irq_arr[1].status_blk_num2 = DEF_SB_IGU_ID;

	cp->num_irq = 2;
}
9331
/* CNIC registration callback.  Allocates the one-page KWQE staging ring,
 * resets its producer/consumer state, records the CNIC private data and
 * IRQ info, and only then publishes the ops pointer with
 * rcu_assign_pointer() so readers never see a half-initialized state.
 * Returns 0, -EINVAL for a NULL ops, or -ENOMEM on allocation failure.
 */
static int bnx2x_register_cnic(struct net_device *dev, struct cnic_ops *ops,
			       void *data)
{
	struct bnx2x *bp = netdev_priv(dev);
	struct cnic_eth_dev *cp = &bp->cnic_eth_dev;

	if (ops == NULL)
		return -EINVAL;

	bp->cnic_kwq = kzalloc(PAGE_SIZE, GFP_KERNEL);
	if (!bp->cnic_kwq)
		return -ENOMEM;

	/* Empty ring: producer == consumer; 'last' marks the wrap point */
	bp->cnic_kwq_cons = bp->cnic_kwq;
	bp->cnic_kwq_prod = bp->cnic_kwq;
	bp->cnic_kwq_last = bp->cnic_kwq + MAX_SP_DESC_CNT;

	bp->cnic_spq_pending = 0;
	bp->cnic_kwq_pending = 0;

	bp->cnic_data = data;

	cp->num_irq = 0;
	cp->drv_state = CNIC_DRV_STATE_REGD;
	cp->iro_arr = bp->iro_arr;

	bnx2x_setup_cnic_irq_info(bp);

	/* Publish last - readers use rcu_dereference() */
	rcu_assign_pointer(bp->cnic_ops, ops);

	return 0;
}
9364
/* CNIC unregistration callback.  Clears the ops pointer under cnic_mutex,
 * then waits a full RCU grace period so any bnx2x_cnic_ctl_send_bh()
 * readers still inside rcu_read_lock() finish before the KWQE ring is
 * freed.  Always returns 0.
 */
static int bnx2x_unregister_cnic(struct net_device *dev)
{
	struct bnx2x *bp = netdev_priv(dev);
	struct cnic_eth_dev *cp = &bp->cnic_eth_dev;

	mutex_lock(&bp->cnic_mutex);
	cp->drv_state = 0;
	rcu_assign_pointer(bp->cnic_ops, NULL);
	mutex_unlock(&bp->cnic_mutex);
	/* Wait for in-flight RCU readers before freeing the ring */
	synchronize_rcu();
	kfree(bp->cnic_kwq);
	bp->cnic_kwq = NULL;

	return 0;
}
9380
/* Exported probe hook called by the CNIC module.  Describes this device
 * to CNIC: register/doorbell mappings, context-table geometry, the KWQE
 * submission/control/(un)register callbacks, and iSCSI/FCoE client IDs.
 * Returns the filled cnic_eth_dev, or NULL when neither iSCSI nor FCoE
 * is available so CNIC skips this device entirely.
 */
struct cnic_eth_dev *bnx2x_cnic_probe(struct net_device *dev)
{
	struct bnx2x *bp = netdev_priv(dev);
	struct cnic_eth_dev *cp = &bp->cnic_eth_dev;

	/* If both iSCSI and FCoE are disabled - return NULL in
	 * order to indicate CNIC that it should not try to work
	 * with this device.
	 */
	if (NO_ISCSI(bp) && NO_FCOE(bp))
		return NULL;

	cp->drv_owner = THIS_MODULE;
	cp->chip_id = CHIP_ID(bp);
	cp->pdev = bp->pdev;
	cp->io_base = bp->regview;
	cp->io_base2 = bp->doorbells;
	cp->max_kwqe_pending = 8;
	cp->ctx_blk_size = CDU_ILT_PAGE_SZ;
	/* CNIC's context lines start right after the L2 CID lines */
	cp->ctx_tbl_offset = FUNC_ILT_BASE(BP_FUNC(bp)) +
			     bnx2x_cid_ilt_lines(bp);
	cp->ctx_tbl_len = CNIC_ILT_LINES;
	cp->starting_cid = bnx2x_cid_ilt_lines(bp) * ILT_PAGE_CIDS;
	cp->drv_submit_kwqes_16 = bnx2x_cnic_sp_queue;
	cp->drv_ctl = bnx2x_drv_ctl;
	cp->drv_register_cnic = bnx2x_register_cnic;
	cp->drv_unregister_cnic = bnx2x_unregister_cnic;
	cp->fcoe_init_cid = BNX2X_FCOE_ETH_CID;
	cp->iscsi_l2_client_id = BNX2X_ISCSI_ETH_CL_ID +
		BP_E1HVN(bp) * NONE_ETH_CONTEXT_USE;
	cp->iscsi_l2_cid = BNX2X_ISCSI_ETH_CID;

	/* Tell CNIC which protocols this function cannot run */
	if (NO_ISCSI_OOO(bp))
		cp->drv_state |= CNIC_DRV_STATE_NO_ISCSI_OOO;

	if (NO_ISCSI(bp))
		cp->drv_state |= CNIC_DRV_STATE_NO_ISCSI;

	if (NO_FCOE(bp))
		cp->drv_state |= CNIC_DRV_STATE_NO_FCOE;

	DP(BNX2X_MSG_SP, "page_size %d, tbl_offset %d, tbl_lines %d, "
			 "starting cid %d\n",
	   cp->ctx_blk_size,
	   cp->ctx_tbl_offset,
	   cp->ctx_tbl_len,
	   cp->starting_cid);
	return cp;
}
EXPORT_SYMBOL(bnx2x_cnic_probe);
9431
9432#endif /* BCM_CNIC */
Vladislav Zolotarov94a78b72009-04-27 03:27:43 -07009433