blob: d7cab0dc57f86b92481f23febf175bdf86a13689 [file] [log] [blame]
Eilon Greenstein34f80b02008-06-23 20:33:01 -07001/* bnx2x_main.c: Broadcom Everest network driver.
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02002 *
Dmitry Kravkov5de92402011-05-04 23:51:13 +00003 * Copyright (c) 2007-2011 Broadcom Corporation
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02004 *
5 * This program is free software; you can redistribute it and/or modify
6 * it under the terms of the GNU General Public License as published by
7 * the Free Software Foundation.
8 *
Eilon Greenstein24e3fce2008-06-12 14:30:28 -07009 * Maintained by: Eilon Greenstein <eilong@broadcom.com>
10 * Written by: Eliezer Tamir
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +020011 * Based on code from Michael Chan's bnx2 driver
12 * UDP CSUM errata workaround by Arik Gendelman
Eilon Greensteinca003922009-08-12 22:53:28 -070013 * Slowpath and fastpath rework by Vladislav Zolotarov
Eliezer Tamirc14423f2008-02-28 11:49:42 -080014 * Statistics and Link management by Yitchak Gertner
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +020015 *
16 */
17
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +020018#include <linux/module.h>
19#include <linux/moduleparam.h>
20#include <linux/kernel.h>
21#include <linux/device.h> /* for dev_info() */
22#include <linux/timer.h>
23#include <linux/errno.h>
24#include <linux/ioport.h>
25#include <linux/slab.h>
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +020026#include <linux/interrupt.h>
27#include <linux/pci.h>
28#include <linux/init.h>
29#include <linux/netdevice.h>
30#include <linux/etherdevice.h>
31#include <linux/skbuff.h>
32#include <linux/dma-mapping.h>
33#include <linux/bitops.h>
34#include <linux/irq.h>
35#include <linux/delay.h>
36#include <asm/byteorder.h>
37#include <linux/time.h>
38#include <linux/ethtool.h>
39#include <linux/mii.h>
Eilon Greenstein0c6671b2009-01-14 21:26:51 -080040#include <linux/if_vlan.h>
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +020041#include <net/ip.h>
42#include <net/tcp.h>
43#include <net/checksum.h>
Eilon Greenstein34f80b02008-06-23 20:33:01 -070044#include <net/ip6_checksum.h>
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +020045#include <linux/workqueue.h>
46#include <linux/crc32.h>
Eilon Greenstein34f80b02008-06-23 20:33:01 -070047#include <linux/crc32c.h>
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +020048#include <linux/prefetch.h>
49#include <linux/zlib.h>
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +020050#include <linux/io.h>
Ben Hutchings45229b42009-11-07 11:53:39 +000051#include <linux/stringify.h>
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +020052
Dmitry Kravkovb0efbb92010-07-27 12:33:43 +000053#define BNX2X_MAIN
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +020054#include "bnx2x.h"
55#include "bnx2x_init.h"
Vladislav Zolotarov94a78b72009-04-27 03:27:43 -070056#include "bnx2x_init_ops.h"
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +000057#include "bnx2x_cmn.h"
Vladislav Zolotarove4901dd2010-12-13 05:44:18 +000058#include "bnx2x_dcb.h"
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +020059
Vladislav Zolotarov94a78b72009-04-27 03:27:43 -070060#include <linux/firmware.h>
61#include "bnx2x_fw_file_hdr.h"
62/* FW files */
Ben Hutchings45229b42009-11-07 11:53:39 +000063#define FW_FILE_VERSION \
64 __stringify(BCM_5710_FW_MAJOR_VERSION) "." \
65 __stringify(BCM_5710_FW_MINOR_VERSION) "." \
66 __stringify(BCM_5710_FW_REVISION_VERSION) "." \
67 __stringify(BCM_5710_FW_ENGINEERING_VERSION)
Dmitry Kravkov560131f2010-10-06 03:18:47 +000068#define FW_FILE_NAME_E1 "bnx2x/bnx2x-e1-" FW_FILE_VERSION ".fw"
69#define FW_FILE_NAME_E1H "bnx2x/bnx2x-e1h-" FW_FILE_VERSION ".fw"
Dmitry Kravkovf2e08992010-10-06 03:28:26 +000070#define FW_FILE_NAME_E2 "bnx2x/bnx2x-e2-" FW_FILE_VERSION ".fw"
Vladislav Zolotarov94a78b72009-04-27 03:27:43 -070071
Eilon Greenstein34f80b02008-06-23 20:33:01 -070072/* Time in jiffies before concluding the transmitter is hung */
73#define TX_TIMEOUT (5*HZ)
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +020074
Andrew Morton53a10562008-02-09 23:16:41 -080075static char version[] __devinitdata =
Eilon Greenstein34f80b02008-06-23 20:33:01 -070076 "Broadcom NetXtreme II 5771x 10Gigabit Ethernet Driver "
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +020077 DRV_MODULE_NAME " " DRV_MODULE_VERSION " (" DRV_MODULE_RELDATE ")\n";
78
Eilon Greenstein24e3fce2008-06-12 14:30:28 -070079MODULE_AUTHOR("Eliezer Tamir");
Dmitry Kravkovf2e08992010-10-06 03:28:26 +000080MODULE_DESCRIPTION("Broadcom NetXtreme II "
81 "BCM57710/57711/57711E/57712/57712E Driver");
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +020082MODULE_LICENSE("GPL");
83MODULE_VERSION(DRV_MODULE_VERSION);
Ben Hutchings45229b42009-11-07 11:53:39 +000084MODULE_FIRMWARE(FW_FILE_NAME_E1);
85MODULE_FIRMWARE(FW_FILE_NAME_E1H);
Dmitry Kravkovf2e08992010-10-06 03:28:26 +000086MODULE_FIRMWARE(FW_FILE_NAME_E2);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +020087
Eilon Greenstein555f6c72009-02-12 08:36:11 +000088static int multi_mode = 1;
89module_param(multi_mode, int, 0);
Eilon Greensteinca003922009-08-12 22:53:28 -070090MODULE_PARM_DESC(multi_mode, " Multi queue mode "
91 "(0 Disable; 1 Enable (default))");
92
Dmitry Kravkovd6214d72010-10-06 03:32:10 +000093int num_queues;
Vladislav Zolotarov54b9dda2009-11-16 06:05:58 +000094module_param(num_queues, int, 0);
95MODULE_PARM_DESC(num_queues, " Number of queues for multi_mode=1"
96 " (default is as a number of CPUs)");
Eilon Greenstein555f6c72009-02-12 08:36:11 +000097
Eilon Greenstein19680c42008-08-13 15:47:33 -070098static int disable_tpa;
Eilon Greenstein19680c42008-08-13 15:47:33 -070099module_param(disable_tpa, int, 0);
Eilon Greenstein9898f862009-02-12 08:38:27 +0000100MODULE_PARM_DESC(disable_tpa, " Disable the TPA (LRO) feature");
Eilon Greenstein8badd272009-02-12 08:36:15 +0000101
Dmitry Kravkov9ee3d372011-06-14 01:33:34 +0000102#define INT_MODE_INTx 1
103#define INT_MODE_MSI 2
Eilon Greenstein8badd272009-02-12 08:36:15 +0000104static int int_mode;
105module_param(int_mode, int, 0);
Vladislav Zolotarovcdaa7cb2010-04-19 01:13:57 +0000106MODULE_PARM_DESC(int_mode, " Force interrupt mode other then MSI-X "
107 "(1 INT#x; 2 MSI)");
Eilon Greenstein8badd272009-02-12 08:36:15 +0000108
Eilon Greensteina18f5122009-08-12 08:23:26 +0000109static int dropless_fc;
110module_param(dropless_fc, int, 0);
111MODULE_PARM_DESC(dropless_fc, " Pause on exhausted host ring");
112
Eilon Greenstein9898f862009-02-12 08:38:27 +0000113static int poll;
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +0200114module_param(poll, int, 0);
Eilon Greenstein9898f862009-02-12 08:38:27 +0000115MODULE_PARM_DESC(poll, " Use polling (for debug)");
Eilon Greenstein8d5726c2009-02-12 08:37:19 +0000116
117static int mrrs = -1;
118module_param(mrrs, int, 0);
119MODULE_PARM_DESC(mrrs, " Force Max Read Req Size (0..3) (for debug)");
120
Eilon Greenstein9898f862009-02-12 08:38:27 +0000121static int debug;
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +0200122module_param(debug, int, 0);
Eilon Greenstein9898f862009-02-12 08:38:27 +0000123MODULE_PARM_DESC(debug, " Default debug msglevel");
124
Eilon Greenstein1cf167f2009-01-14 21:22:18 -0800125static struct workqueue_struct *bnx2x_wq;
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +0200126
Vladislav Zolotarovec6ba942010-12-13 05:44:01 +0000127#ifdef BCM_CNIC
128static u8 ALL_ENODE_MACS[] = {0x01, 0x10, 0x18, 0x01, 0x00, 0x01};
129#endif
130
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +0200131enum bnx2x_board_type {
132 BCM57710 = 0,
Eilon Greenstein34f80b02008-06-23 20:33:01 -0700133 BCM57711 = 1,
134 BCM57711E = 2,
Dmitry Kravkovf2e08992010-10-06 03:28:26 +0000135 BCM57712 = 3,
136 BCM57712E = 4
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +0200137};
138
Eilon Greenstein34f80b02008-06-23 20:33:01 -0700139/* indexed by board_type, above */
Andrew Morton53a10562008-02-09 23:16:41 -0800140static struct {
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +0200141 char *name;
142} board_info[] __devinitdata = {
Eilon Greenstein34f80b02008-06-23 20:33:01 -0700143 { "Broadcom NetXtreme II BCM57710 XGb" },
144 { "Broadcom NetXtreme II BCM57711 XGb" },
Dmitry Kravkovf2e08992010-10-06 03:28:26 +0000145 { "Broadcom NetXtreme II BCM57711E XGb" },
146 { "Broadcom NetXtreme II BCM57712 XGb" },
147 { "Broadcom NetXtreme II BCM57712E XGb" }
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +0200148};
149
Alexey Dobriyana3aa1882010-01-07 11:58:11 +0000150static DEFINE_PCI_DEVICE_TABLE(bnx2x_pci_tbl) = {
Eilon Greensteine4ed7112009-08-12 08:24:10 +0000151 { PCI_VDEVICE(BROADCOM, PCI_DEVICE_ID_NX2_57710), BCM57710 },
152 { PCI_VDEVICE(BROADCOM, PCI_DEVICE_ID_NX2_57711), BCM57711 },
153 { PCI_VDEVICE(BROADCOM, PCI_DEVICE_ID_NX2_57711E), BCM57711E },
Dmitry Kravkovf2e08992010-10-06 03:28:26 +0000154 { PCI_VDEVICE(BROADCOM, PCI_DEVICE_ID_NX2_57712), BCM57712 },
155 { PCI_VDEVICE(BROADCOM, PCI_DEVICE_ID_NX2_57712E), BCM57712E },
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +0200156 { 0 }
157};
158
159MODULE_DEVICE_TABLE(pci, bnx2x_pci_tbl);
160
161/****************************************************************************
162* General service functions
163****************************************************************************/
164
Dmitry Kravkov523224a2010-10-06 03:23:26 +0000165static inline void __storm_memset_dma_mapping(struct bnx2x *bp,
166 u32 addr, dma_addr_t mapping)
167{
168 REG_WR(bp, addr, U64_LO(mapping));
169 REG_WR(bp, addr + 4, U64_HI(mapping));
170}
171
172static inline void __storm_memset_fill(struct bnx2x *bp,
173 u32 addr, size_t size, u32 val)
174{
175 int i;
176 for (i = 0; i < size/4; i++)
177 REG_WR(bp, addr + (i * 4), val);
178}
179
180static inline void storm_memset_ustats_zero(struct bnx2x *bp,
181 u8 port, u16 stat_id)
182{
183 size_t size = sizeof(struct ustorm_per_client_stats);
184
185 u32 addr = BAR_USTRORM_INTMEM +
186 USTORM_PER_COUNTER_ID_STATS_OFFSET(port, stat_id);
187
188 __storm_memset_fill(bp, addr, size, 0);
189}
190
191static inline void storm_memset_tstats_zero(struct bnx2x *bp,
192 u8 port, u16 stat_id)
193{
194 size_t size = sizeof(struct tstorm_per_client_stats);
195
196 u32 addr = BAR_TSTRORM_INTMEM +
197 TSTORM_PER_COUNTER_ID_STATS_OFFSET(port, stat_id);
198
199 __storm_memset_fill(bp, addr, size, 0);
200}
201
202static inline void storm_memset_xstats_zero(struct bnx2x *bp,
203 u8 port, u16 stat_id)
204{
205 size_t size = sizeof(struct xstorm_per_client_stats);
206
207 u32 addr = BAR_XSTRORM_INTMEM +
208 XSTORM_PER_COUNTER_ID_STATS_OFFSET(port, stat_id);
209
210 __storm_memset_fill(bp, addr, size, 0);
211}
212
213
214static inline void storm_memset_spq_addr(struct bnx2x *bp,
215 dma_addr_t mapping, u16 abs_fid)
216{
217 u32 addr = XSEM_REG_FAST_MEMORY +
218 XSTORM_SPQ_PAGE_BASE_OFFSET(abs_fid);
219
220 __storm_memset_dma_mapping(bp, addr, mapping);
221}
222
223static inline void storm_memset_ov(struct bnx2x *bp, u16 ov, u16 abs_fid)
224{
225 REG_WR16(bp, BAR_XSTRORM_INTMEM + XSTORM_E1HOV_OFFSET(abs_fid), ov);
226}
227
228static inline void storm_memset_func_cfg(struct bnx2x *bp,
229 struct tstorm_eth_function_common_config *tcfg,
230 u16 abs_fid)
231{
232 size_t size = sizeof(struct tstorm_eth_function_common_config);
233
234 u32 addr = BAR_TSTRORM_INTMEM +
235 TSTORM_FUNCTION_COMMON_CONFIG_OFFSET(abs_fid);
236
237 __storm_memset_struct(bp, addr, size, (u32 *)tcfg);
238}
239
240static inline void storm_memset_xstats_flags(struct bnx2x *bp,
241 struct stats_indication_flags *flags,
242 u16 abs_fid)
243{
244 size_t size = sizeof(struct stats_indication_flags);
245
246 u32 addr = BAR_XSTRORM_INTMEM + XSTORM_STATS_FLAGS_OFFSET(abs_fid);
247
248 __storm_memset_struct(bp, addr, size, (u32 *)flags);
249}
250
251static inline void storm_memset_tstats_flags(struct bnx2x *bp,
252 struct stats_indication_flags *flags,
253 u16 abs_fid)
254{
255 size_t size = sizeof(struct stats_indication_flags);
256
257 u32 addr = BAR_TSTRORM_INTMEM + TSTORM_STATS_FLAGS_OFFSET(abs_fid);
258
259 __storm_memset_struct(bp, addr, size, (u32 *)flags);
260}
261
262static inline void storm_memset_ustats_flags(struct bnx2x *bp,
263 struct stats_indication_flags *flags,
264 u16 abs_fid)
265{
266 size_t size = sizeof(struct stats_indication_flags);
267
268 u32 addr = BAR_USTRORM_INTMEM + USTORM_STATS_FLAGS_OFFSET(abs_fid);
269
270 __storm_memset_struct(bp, addr, size, (u32 *)flags);
271}
272
273static inline void storm_memset_cstats_flags(struct bnx2x *bp,
274 struct stats_indication_flags *flags,
275 u16 abs_fid)
276{
277 size_t size = sizeof(struct stats_indication_flags);
278
279 u32 addr = BAR_CSTRORM_INTMEM + CSTORM_STATS_FLAGS_OFFSET(abs_fid);
280
281 __storm_memset_struct(bp, addr, size, (u32 *)flags);
282}
283
284static inline void storm_memset_xstats_addr(struct bnx2x *bp,
285 dma_addr_t mapping, u16 abs_fid)
286{
287 u32 addr = BAR_XSTRORM_INTMEM +
288 XSTORM_ETH_STATS_QUERY_ADDR_OFFSET(abs_fid);
289
290 __storm_memset_dma_mapping(bp, addr, mapping);
291}
292
293static inline void storm_memset_tstats_addr(struct bnx2x *bp,
294 dma_addr_t mapping, u16 abs_fid)
295{
296 u32 addr = BAR_TSTRORM_INTMEM +
297 TSTORM_ETH_STATS_QUERY_ADDR_OFFSET(abs_fid);
298
299 __storm_memset_dma_mapping(bp, addr, mapping);
300}
301
302static inline void storm_memset_ustats_addr(struct bnx2x *bp,
303 dma_addr_t mapping, u16 abs_fid)
304{
305 u32 addr = BAR_USTRORM_INTMEM +
306 USTORM_ETH_STATS_QUERY_ADDR_OFFSET(abs_fid);
307
308 __storm_memset_dma_mapping(bp, addr, mapping);
309}
310
311static inline void storm_memset_cstats_addr(struct bnx2x *bp,
312 dma_addr_t mapping, u16 abs_fid)
313{
314 u32 addr = BAR_CSTRORM_INTMEM +
315 CSTORM_ETH_STATS_QUERY_ADDR_OFFSET(abs_fid);
316
317 __storm_memset_dma_mapping(bp, addr, mapping);
318}
319
320static inline void storm_memset_vf_to_pf(struct bnx2x *bp, u16 abs_fid,
321 u16 pf_id)
322{
323 REG_WR8(bp, BAR_XSTRORM_INTMEM + XSTORM_VF_TO_PF_OFFSET(abs_fid),
324 pf_id);
325 REG_WR8(bp, BAR_CSTRORM_INTMEM + CSTORM_VF_TO_PF_OFFSET(abs_fid),
326 pf_id);
327 REG_WR8(bp, BAR_TSTRORM_INTMEM + TSTORM_VF_TO_PF_OFFSET(abs_fid),
328 pf_id);
329 REG_WR8(bp, BAR_USTRORM_INTMEM + USTORM_VF_TO_PF_OFFSET(abs_fid),
330 pf_id);
331}
332
333static inline void storm_memset_func_en(struct bnx2x *bp, u16 abs_fid,
334 u8 enable)
335{
336 REG_WR8(bp, BAR_XSTRORM_INTMEM + XSTORM_FUNC_EN_OFFSET(abs_fid),
337 enable);
338 REG_WR8(bp, BAR_CSTRORM_INTMEM + CSTORM_FUNC_EN_OFFSET(abs_fid),
339 enable);
340 REG_WR8(bp, BAR_TSTRORM_INTMEM + TSTORM_FUNC_EN_OFFSET(abs_fid),
341 enable);
342 REG_WR8(bp, BAR_USTRORM_INTMEM + USTORM_FUNC_EN_OFFSET(abs_fid),
343 enable);
344}
345
346static inline void storm_memset_eq_data(struct bnx2x *bp,
347 struct event_ring_data *eq_data,
348 u16 pfid)
349{
350 size_t size = sizeof(struct event_ring_data);
351
352 u32 addr = BAR_CSTRORM_INTMEM + CSTORM_EVENT_RING_DATA_OFFSET(pfid);
353
354 __storm_memset_struct(bp, addr, size, (u32 *)eq_data);
355}
356
357static inline void storm_memset_eq_prod(struct bnx2x *bp, u16 eq_prod,
358 u16 pfid)
359{
360 u32 addr = BAR_CSTRORM_INTMEM + CSTORM_EVENT_RING_PROD_OFFSET(pfid);
361 REG_WR16(bp, addr, eq_prod);
362}
363
364static inline void storm_memset_hc_timeout(struct bnx2x *bp, u8 port,
365 u16 fw_sb_id, u8 sb_index,
366 u8 ticks)
367{
368
Dmitry Kravkovf2e08992010-10-06 03:28:26 +0000369 int index_offset = CHIP_IS_E2(bp) ?
370 offsetof(struct hc_status_block_data_e2, index_data) :
Dmitry Kravkov523224a2010-10-06 03:23:26 +0000371 offsetof(struct hc_status_block_data_e1x, index_data);
372 u32 addr = BAR_CSTRORM_INTMEM +
373 CSTORM_STATUS_BLOCK_DATA_OFFSET(fw_sb_id) +
374 index_offset +
375 sizeof(struct hc_index_data)*sb_index +
376 offsetof(struct hc_index_data, timeout);
377 REG_WR8(bp, addr, ticks);
378 DP(NETIF_MSG_HW, "port %x fw_sb_id %d sb_index %d ticks %d\n",
379 port, fw_sb_id, sb_index, ticks);
380}
381static inline void storm_memset_hc_disable(struct bnx2x *bp, u8 port,
382 u16 fw_sb_id, u8 sb_index,
383 u8 disable)
384{
385 u32 enable_flag = disable ? 0 : (1 << HC_INDEX_DATA_HC_ENABLED_SHIFT);
Dmitry Kravkovf2e08992010-10-06 03:28:26 +0000386 int index_offset = CHIP_IS_E2(bp) ?
387 offsetof(struct hc_status_block_data_e2, index_data) :
Dmitry Kravkov523224a2010-10-06 03:23:26 +0000388 offsetof(struct hc_status_block_data_e1x, index_data);
389 u32 addr = BAR_CSTRORM_INTMEM +
390 CSTORM_STATUS_BLOCK_DATA_OFFSET(fw_sb_id) +
391 index_offset +
392 sizeof(struct hc_index_data)*sb_index +
393 offsetof(struct hc_index_data, flags);
394 u16 flags = REG_RD16(bp, addr);
395 /* clear and set */
396 flags &= ~HC_INDEX_DATA_HC_ENABLED;
397 flags |= enable_flag;
398 REG_WR16(bp, addr, flags);
399 DP(NETIF_MSG_HW, "port %x fw_sb_id %d sb_index %d disable %d\n",
400 port, fw_sb_id, sb_index, disable);
401}
402
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +0200403/* used only at init
404 * locking is done by mcp
405 */
stephen hemminger8d962862010-10-21 07:50:56 +0000406static void bnx2x_reg_wr_ind(struct bnx2x *bp, u32 addr, u32 val)
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +0200407{
408 pci_write_config_dword(bp->pdev, PCICFG_GRC_ADDRESS, addr);
409 pci_write_config_dword(bp->pdev, PCICFG_GRC_DATA, val);
410 pci_write_config_dword(bp->pdev, PCICFG_GRC_ADDRESS,
411 PCICFG_VENDOR_ID_OFFSET);
412}
413
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +0200414static u32 bnx2x_reg_rd_ind(struct bnx2x *bp, u32 addr)
415{
416 u32 val;
417
418 pci_write_config_dword(bp->pdev, PCICFG_GRC_ADDRESS, addr);
419 pci_read_config_dword(bp->pdev, PCICFG_GRC_DATA, &val);
420 pci_write_config_dword(bp->pdev, PCICFG_GRC_ADDRESS,
421 PCICFG_VENDOR_ID_OFFSET);
422
423 return val;
424}
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +0200425
Dmitry Kravkovf2e08992010-10-06 03:28:26 +0000426#define DMAE_DP_SRC_GRC "grc src_addr [%08x]"
427#define DMAE_DP_SRC_PCI "pci src_addr [%x:%08x]"
428#define DMAE_DP_DST_GRC "grc dst_addr [%08x]"
429#define DMAE_DP_DST_PCI "pci dst_addr [%x:%08x]"
430#define DMAE_DP_DST_NONE "dst_addr [none]"
431
stephen hemminger8d962862010-10-21 07:50:56 +0000432static void bnx2x_dp_dmae(struct bnx2x *bp, struct dmae_command *dmae,
433 int msglvl)
Dmitry Kravkovf2e08992010-10-06 03:28:26 +0000434{
435 u32 src_type = dmae->opcode & DMAE_COMMAND_SRC;
436
437 switch (dmae->opcode & DMAE_COMMAND_DST) {
438 case DMAE_CMD_DST_PCI:
439 if (src_type == DMAE_CMD_SRC_PCI)
440 DP(msglvl, "DMAE: opcode 0x%08x\n"
441 "src [%x:%08x], len [%d*4], dst [%x:%08x]\n"
442 "comp_addr [%x:%08x], comp_val 0x%08x\n",
443 dmae->opcode, dmae->src_addr_hi, dmae->src_addr_lo,
444 dmae->len, dmae->dst_addr_hi, dmae->dst_addr_lo,
445 dmae->comp_addr_hi, dmae->comp_addr_lo,
446 dmae->comp_val);
447 else
448 DP(msglvl, "DMAE: opcode 0x%08x\n"
449 "src [%08x], len [%d*4], dst [%x:%08x]\n"
450 "comp_addr [%x:%08x], comp_val 0x%08x\n",
451 dmae->opcode, dmae->src_addr_lo >> 2,
452 dmae->len, dmae->dst_addr_hi, dmae->dst_addr_lo,
453 dmae->comp_addr_hi, dmae->comp_addr_lo,
454 dmae->comp_val);
455 break;
456 case DMAE_CMD_DST_GRC:
457 if (src_type == DMAE_CMD_SRC_PCI)
458 DP(msglvl, "DMAE: opcode 0x%08x\n"
459 "src [%x:%08x], len [%d*4], dst_addr [%08x]\n"
460 "comp_addr [%x:%08x], comp_val 0x%08x\n",
461 dmae->opcode, dmae->src_addr_hi, dmae->src_addr_lo,
462 dmae->len, dmae->dst_addr_lo >> 2,
463 dmae->comp_addr_hi, dmae->comp_addr_lo,
464 dmae->comp_val);
465 else
466 DP(msglvl, "DMAE: opcode 0x%08x\n"
467 "src [%08x], len [%d*4], dst [%08x]\n"
468 "comp_addr [%x:%08x], comp_val 0x%08x\n",
469 dmae->opcode, dmae->src_addr_lo >> 2,
470 dmae->len, dmae->dst_addr_lo >> 2,
471 dmae->comp_addr_hi, dmae->comp_addr_lo,
472 dmae->comp_val);
473 break;
474 default:
475 if (src_type == DMAE_CMD_SRC_PCI)
476 DP(msglvl, "DMAE: opcode 0x%08x\n"
477 DP_LEVEL "src_addr [%x:%08x] len [%d * 4] "
478 "dst_addr [none]\n"
479 DP_LEVEL "comp_addr [%x:%08x] comp_val 0x%08x\n",
480 dmae->opcode, dmae->src_addr_hi, dmae->src_addr_lo,
481 dmae->len, dmae->comp_addr_hi, dmae->comp_addr_lo,
482 dmae->comp_val);
483 else
484 DP(msglvl, "DMAE: opcode 0x%08x\n"
485 DP_LEVEL "src_addr [%08x] len [%d * 4] "
486 "dst_addr [none]\n"
487 DP_LEVEL "comp_addr [%x:%08x] comp_val 0x%08x\n",
488 dmae->opcode, dmae->src_addr_lo >> 2,
489 dmae->len, dmae->comp_addr_hi, dmae->comp_addr_lo,
490 dmae->comp_val);
491 break;
492 }
493
494}
495
Dmitry Kravkov6c719d02010-07-27 12:36:15 +0000496const u32 dmae_reg_go_c[] = {
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +0200497 DMAE_REG_GO_C0, DMAE_REG_GO_C1, DMAE_REG_GO_C2, DMAE_REG_GO_C3,
498 DMAE_REG_GO_C4, DMAE_REG_GO_C5, DMAE_REG_GO_C6, DMAE_REG_GO_C7,
499 DMAE_REG_GO_C8, DMAE_REG_GO_C9, DMAE_REG_GO_C10, DMAE_REG_GO_C11,
500 DMAE_REG_GO_C12, DMAE_REG_GO_C13, DMAE_REG_GO_C14, DMAE_REG_GO_C15
501};
502
503/* copy command into DMAE command memory and set DMAE command go */
Dmitry Kravkov6c719d02010-07-27 12:36:15 +0000504void bnx2x_post_dmae(struct bnx2x *bp, struct dmae_command *dmae, int idx)
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +0200505{
506 u32 cmd_offset;
507 int i;
508
509 cmd_offset = (DMAE_REG_CMD_MEM + sizeof(struct dmae_command) * idx);
510 for (i = 0; i < (sizeof(struct dmae_command)/4); i++) {
511 REG_WR(bp, cmd_offset + i*4, *(((u32 *)dmae) + i));
512
Eilon Greensteinad8d3942008-06-23 20:29:02 -0700513 DP(BNX2X_MSG_OFF, "DMAE cmd[%d].%d (0x%08x) : 0x%08x\n",
514 idx, i, cmd_offset + i*4, *(((u32 *)dmae) + i));
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +0200515 }
516 REG_WR(bp, dmae_reg_go_c[idx], 1);
517}
518
Dmitry Kravkovf2e08992010-10-06 03:28:26 +0000519u32 bnx2x_dmae_opcode_add_comp(u32 opcode, u8 comp_type)
520{
521 return opcode | ((comp_type << DMAE_COMMAND_C_DST_SHIFT) |
522 DMAE_CMD_C_ENABLE);
523}
524
525u32 bnx2x_dmae_opcode_clr_src_reset(u32 opcode)
526{
527 return opcode & ~DMAE_CMD_SRC_RESET;
528}
529
530u32 bnx2x_dmae_opcode(struct bnx2x *bp, u8 src_type, u8 dst_type,
531 bool with_comp, u8 comp_type)
532{
533 u32 opcode = 0;
534
535 opcode |= ((src_type << DMAE_COMMAND_SRC_SHIFT) |
536 (dst_type << DMAE_COMMAND_DST_SHIFT));
537
538 opcode |= (DMAE_CMD_SRC_RESET | DMAE_CMD_DST_RESET);
539
540 opcode |= (BP_PORT(bp) ? DMAE_CMD_PORT_1 : DMAE_CMD_PORT_0);
541 opcode |= ((BP_E1HVN(bp) << DMAE_CMD_E1HVN_SHIFT) |
542 (BP_E1HVN(bp) << DMAE_COMMAND_DST_VN_SHIFT));
543 opcode |= (DMAE_COM_SET_ERR << DMAE_COMMAND_ERR_POLICY_SHIFT);
544
545#ifdef __BIG_ENDIAN
546 opcode |= DMAE_CMD_ENDIANITY_B_DW_SWAP;
547#else
548 opcode |= DMAE_CMD_ENDIANITY_DW_SWAP;
549#endif
550 if (with_comp)
551 opcode = bnx2x_dmae_opcode_add_comp(opcode, comp_type);
552 return opcode;
553}
554
stephen hemminger8d962862010-10-21 07:50:56 +0000555static void bnx2x_prep_dmae_with_comp(struct bnx2x *bp,
556 struct dmae_command *dmae,
557 u8 src_type, u8 dst_type)
Dmitry Kravkovf2e08992010-10-06 03:28:26 +0000558{
559 memset(dmae, 0, sizeof(struct dmae_command));
560
561 /* set the opcode */
562 dmae->opcode = bnx2x_dmae_opcode(bp, src_type, dst_type,
563 true, DMAE_COMP_PCI);
564
565 /* fill in the completion parameters */
566 dmae->comp_addr_lo = U64_LO(bnx2x_sp_mapping(bp, wb_comp));
567 dmae->comp_addr_hi = U64_HI(bnx2x_sp_mapping(bp, wb_comp));
568 dmae->comp_val = DMAE_COMP_VAL;
569}
570
571/* issue a dmae command over the init-channel and wailt for completion */
stephen hemminger8d962862010-10-21 07:50:56 +0000572static int bnx2x_issue_dmae_with_comp(struct bnx2x *bp,
573 struct dmae_command *dmae)
Dmitry Kravkovf2e08992010-10-06 03:28:26 +0000574{
575 u32 *wb_comp = bnx2x_sp(bp, wb_comp);
Dmitry Kravkov5e374b52011-05-22 10:09:19 +0000576 int cnt = CHIP_REV_IS_SLOW(bp) ? (400000) : 4000;
Dmitry Kravkovf2e08992010-10-06 03:28:26 +0000577 int rc = 0;
578
579 DP(BNX2X_MSG_OFF, "data before [0x%08x 0x%08x 0x%08x 0x%08x]\n",
580 bp->slowpath->wb_data[0], bp->slowpath->wb_data[1],
581 bp->slowpath->wb_data[2], bp->slowpath->wb_data[3]);
582
583 /* lock the dmae channel */
Vladislav Zolotarov6e30dd42011-02-06 11:25:41 -0800584 spin_lock_bh(&bp->dmae_lock);
Dmitry Kravkovf2e08992010-10-06 03:28:26 +0000585
586 /* reset completion */
587 *wb_comp = 0;
588
589 /* post the command on the channel used for initializations */
590 bnx2x_post_dmae(bp, dmae, INIT_DMAE_C(bp));
591
592 /* wait for completion */
593 udelay(5);
594 while ((*wb_comp & ~DMAE_PCI_ERR_FLAG) != DMAE_COMP_VAL) {
595 DP(BNX2X_MSG_OFF, "wb_comp 0x%08x\n", *wb_comp);
596
597 if (!cnt) {
598 BNX2X_ERR("DMAE timeout!\n");
599 rc = DMAE_TIMEOUT;
600 goto unlock;
601 }
602 cnt--;
603 udelay(50);
604 }
605 if (*wb_comp & DMAE_PCI_ERR_FLAG) {
606 BNX2X_ERR("DMAE PCI error!\n");
607 rc = DMAE_PCI_ERROR;
608 }
609
610 DP(BNX2X_MSG_OFF, "data after [0x%08x 0x%08x 0x%08x 0x%08x]\n",
611 bp->slowpath->wb_data[0], bp->slowpath->wb_data[1],
612 bp->slowpath->wb_data[2], bp->slowpath->wb_data[3]);
613
614unlock:
Vladislav Zolotarov6e30dd42011-02-06 11:25:41 -0800615 spin_unlock_bh(&bp->dmae_lock);
Dmitry Kravkovf2e08992010-10-06 03:28:26 +0000616 return rc;
617}
618
Eilon Greensteinad8d3942008-06-23 20:29:02 -0700619void bnx2x_write_dmae(struct bnx2x *bp, dma_addr_t dma_addr, u32 dst_addr,
620 u32 len32)
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +0200621{
Eilon Greenstein5ff7b6d2009-08-12 08:23:44 +0000622 struct dmae_command dmae;
Eilon Greensteinad8d3942008-06-23 20:29:02 -0700623
624 if (!bp->dmae_ready) {
625 u32 *data = bnx2x_sp(bp, wb_data[0]);
626
627 DP(BNX2X_MSG_OFF, "DMAE is not ready (dst_addr %08x len32 %d)"
628 " using indirect\n", dst_addr, len32);
629 bnx2x_init_ind_wr(bp, dst_addr, data, len32);
630 return;
631 }
632
Dmitry Kravkovf2e08992010-10-06 03:28:26 +0000633 /* set opcode and fixed command fields */
634 bnx2x_prep_dmae_with_comp(bp, &dmae, DMAE_SRC_PCI, DMAE_DST_GRC);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +0200635
Dmitry Kravkovf2e08992010-10-06 03:28:26 +0000636 /* fill in addresses and len */
Eilon Greenstein5ff7b6d2009-08-12 08:23:44 +0000637 dmae.src_addr_lo = U64_LO(dma_addr);
638 dmae.src_addr_hi = U64_HI(dma_addr);
639 dmae.dst_addr_lo = dst_addr >> 2;
640 dmae.dst_addr_hi = 0;
641 dmae.len = len32;
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +0200642
Dmitry Kravkovf2e08992010-10-06 03:28:26 +0000643 bnx2x_dp_dmae(bp, &dmae, BNX2X_MSG_OFF);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +0200644
Dmitry Kravkovf2e08992010-10-06 03:28:26 +0000645 /* issue the command and wait for completion */
646 bnx2x_issue_dmae_with_comp(bp, &dmae);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +0200647}
648
Yaniv Rosnerc18487e2008-06-23 20:27:52 -0700649void bnx2x_read_dmae(struct bnx2x *bp, u32 src_addr, u32 len32)
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +0200650{
Eilon Greenstein5ff7b6d2009-08-12 08:23:44 +0000651 struct dmae_command dmae;
Eilon Greensteinad8d3942008-06-23 20:29:02 -0700652
653 if (!bp->dmae_ready) {
654 u32 *data = bnx2x_sp(bp, wb_data[0]);
655 int i;
656
657 DP(BNX2X_MSG_OFF, "DMAE is not ready (src_addr %08x len32 %d)"
658 " using indirect\n", src_addr, len32);
659 for (i = 0; i < len32; i++)
660 data[i] = bnx2x_reg_rd_ind(bp, src_addr + i*4);
661 return;
662 }
663
Dmitry Kravkovf2e08992010-10-06 03:28:26 +0000664 /* set opcode and fixed command fields */
665 bnx2x_prep_dmae_with_comp(bp, &dmae, DMAE_SRC_GRC, DMAE_DST_PCI);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +0200666
Dmitry Kravkovf2e08992010-10-06 03:28:26 +0000667 /* fill in addresses and len */
Eilon Greenstein5ff7b6d2009-08-12 08:23:44 +0000668 dmae.src_addr_lo = src_addr >> 2;
669 dmae.src_addr_hi = 0;
670 dmae.dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, wb_data));
671 dmae.dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, wb_data));
672 dmae.len = len32;
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +0200673
Dmitry Kravkovf2e08992010-10-06 03:28:26 +0000674 bnx2x_dp_dmae(bp, &dmae, BNX2X_MSG_OFF);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +0200675
Dmitry Kravkovf2e08992010-10-06 03:28:26 +0000676 /* issue the command and wait for completion */
677 bnx2x_issue_dmae_with_comp(bp, &dmae);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +0200678}
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +0200679
stephen hemminger8d962862010-10-21 07:50:56 +0000680static void bnx2x_write_dmae_phys_len(struct bnx2x *bp, dma_addr_t phys_addr,
681 u32 addr, u32 len)
Eilon Greenstein573f2032009-08-12 08:24:14 +0000682{
Vladislav Zolotarov02e3c6c2010-04-19 01:13:33 +0000683 int dmae_wr_max = DMAE_LEN32_WR_MAX(bp);
Eilon Greenstein573f2032009-08-12 08:24:14 +0000684 int offset = 0;
685
Vladislav Zolotarov02e3c6c2010-04-19 01:13:33 +0000686 while (len > dmae_wr_max) {
Eilon Greenstein573f2032009-08-12 08:24:14 +0000687 bnx2x_write_dmae(bp, phys_addr + offset,
Vladislav Zolotarov02e3c6c2010-04-19 01:13:33 +0000688 addr + offset, dmae_wr_max);
689 offset += dmae_wr_max * 4;
690 len -= dmae_wr_max;
Eilon Greenstein573f2032009-08-12 08:24:14 +0000691 }
692
693 bnx2x_write_dmae(bp, phys_addr + offset, addr + offset, len);
694}
695
Eilon Greensteinad8d3942008-06-23 20:29:02 -0700696/* used only for slowpath so not inlined */
697static void bnx2x_wb_wr(struct bnx2x *bp, int reg, u32 val_hi, u32 val_lo)
698{
699 u32 wb_write[2];
700
701 wb_write[0] = val_hi;
702 wb_write[1] = val_lo;
703 REG_WR_DMAE(bp, reg, wb_write, 2);
704}
705
706#ifdef USE_WB_RD
707static u64 bnx2x_wb_rd(struct bnx2x *bp, int reg)
708{
709 u32 wb_data[2];
710
711 REG_RD_DMAE(bp, reg, wb_data, 2);
712
713 return HILO_U64(wb_data[0], wb_data[1]);
714}
715#endif
716
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +0200717static int bnx2x_mc_assert(struct bnx2x *bp)
718{
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +0200719 char last_idx;
Eilon Greenstein34f80b02008-06-23 20:33:01 -0700720 int i, rc = 0;
721 u32 row0, row1, row2, row3;
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +0200722
Eilon Greenstein34f80b02008-06-23 20:33:01 -0700723 /* XSTORM */
724 last_idx = REG_RD8(bp, BAR_XSTRORM_INTMEM +
725 XSTORM_ASSERT_LIST_INDEX_OFFSET);
726 if (last_idx)
727 BNX2X_ERR("XSTORM_ASSERT_LIST_INDEX 0x%x\n", last_idx);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +0200728
Eilon Greenstein34f80b02008-06-23 20:33:01 -0700729 /* print the asserts */
730 for (i = 0; i < STROM_ASSERT_ARRAY_SIZE; i++) {
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +0200731
Eilon Greenstein34f80b02008-06-23 20:33:01 -0700732 row0 = REG_RD(bp, BAR_XSTRORM_INTMEM +
733 XSTORM_ASSERT_LIST_OFFSET(i));
734 row1 = REG_RD(bp, BAR_XSTRORM_INTMEM +
735 XSTORM_ASSERT_LIST_OFFSET(i) + 4);
736 row2 = REG_RD(bp, BAR_XSTRORM_INTMEM +
737 XSTORM_ASSERT_LIST_OFFSET(i) + 8);
738 row3 = REG_RD(bp, BAR_XSTRORM_INTMEM +
739 XSTORM_ASSERT_LIST_OFFSET(i) + 12);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +0200740
Eilon Greenstein34f80b02008-06-23 20:33:01 -0700741 if (row0 != COMMON_ASM_INVALID_ASSERT_OPCODE) {
742 BNX2X_ERR("XSTORM_ASSERT_INDEX 0x%x = 0x%08x"
743 " 0x%08x 0x%08x 0x%08x\n",
744 i, row3, row2, row1, row0);
745 rc++;
746 } else {
747 break;
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +0200748 }
749 }
Eilon Greenstein34f80b02008-06-23 20:33:01 -0700750
751 /* TSTORM */
752 last_idx = REG_RD8(bp, BAR_TSTRORM_INTMEM +
753 TSTORM_ASSERT_LIST_INDEX_OFFSET);
754 if (last_idx)
755 BNX2X_ERR("TSTORM_ASSERT_LIST_INDEX 0x%x\n", last_idx);
756
757 /* print the asserts */
758 for (i = 0; i < STROM_ASSERT_ARRAY_SIZE; i++) {
759
760 row0 = REG_RD(bp, BAR_TSTRORM_INTMEM +
761 TSTORM_ASSERT_LIST_OFFSET(i));
762 row1 = REG_RD(bp, BAR_TSTRORM_INTMEM +
763 TSTORM_ASSERT_LIST_OFFSET(i) + 4);
764 row2 = REG_RD(bp, BAR_TSTRORM_INTMEM +
765 TSTORM_ASSERT_LIST_OFFSET(i) + 8);
766 row3 = REG_RD(bp, BAR_TSTRORM_INTMEM +
767 TSTORM_ASSERT_LIST_OFFSET(i) + 12);
768
769 if (row0 != COMMON_ASM_INVALID_ASSERT_OPCODE) {
770 BNX2X_ERR("TSTORM_ASSERT_INDEX 0x%x = 0x%08x"
771 " 0x%08x 0x%08x 0x%08x\n",
772 i, row3, row2, row1, row0);
773 rc++;
774 } else {
775 break;
776 }
777 }
778
779 /* CSTORM */
780 last_idx = REG_RD8(bp, BAR_CSTRORM_INTMEM +
781 CSTORM_ASSERT_LIST_INDEX_OFFSET);
782 if (last_idx)
783 BNX2X_ERR("CSTORM_ASSERT_LIST_INDEX 0x%x\n", last_idx);
784
785 /* print the asserts */
786 for (i = 0; i < STROM_ASSERT_ARRAY_SIZE; i++) {
787
788 row0 = REG_RD(bp, BAR_CSTRORM_INTMEM +
789 CSTORM_ASSERT_LIST_OFFSET(i));
790 row1 = REG_RD(bp, BAR_CSTRORM_INTMEM +
791 CSTORM_ASSERT_LIST_OFFSET(i) + 4);
792 row2 = REG_RD(bp, BAR_CSTRORM_INTMEM +
793 CSTORM_ASSERT_LIST_OFFSET(i) + 8);
794 row3 = REG_RD(bp, BAR_CSTRORM_INTMEM +
795 CSTORM_ASSERT_LIST_OFFSET(i) + 12);
796
797 if (row0 != COMMON_ASM_INVALID_ASSERT_OPCODE) {
798 BNX2X_ERR("CSTORM_ASSERT_INDEX 0x%x = 0x%08x"
799 " 0x%08x 0x%08x 0x%08x\n",
800 i, row3, row2, row1, row0);
801 rc++;
802 } else {
803 break;
804 }
805 }
806
807 /* USTORM */
808 last_idx = REG_RD8(bp, BAR_USTRORM_INTMEM +
809 USTORM_ASSERT_LIST_INDEX_OFFSET);
810 if (last_idx)
811 BNX2X_ERR("USTORM_ASSERT_LIST_INDEX 0x%x\n", last_idx);
812
813 /* print the asserts */
814 for (i = 0; i < STROM_ASSERT_ARRAY_SIZE; i++) {
815
816 row0 = REG_RD(bp, BAR_USTRORM_INTMEM +
817 USTORM_ASSERT_LIST_OFFSET(i));
818 row1 = REG_RD(bp, BAR_USTRORM_INTMEM +
819 USTORM_ASSERT_LIST_OFFSET(i) + 4);
820 row2 = REG_RD(bp, BAR_USTRORM_INTMEM +
821 USTORM_ASSERT_LIST_OFFSET(i) + 8);
822 row3 = REG_RD(bp, BAR_USTRORM_INTMEM +
823 USTORM_ASSERT_LIST_OFFSET(i) + 12);
824
825 if (row0 != COMMON_ASM_INVALID_ASSERT_OPCODE) {
826 BNX2X_ERR("USTORM_ASSERT_INDEX 0x%x = 0x%08x"
827 " 0x%08x 0x%08x 0x%08x\n",
828 i, row3, row2, row1, row0);
829 rc++;
830 } else {
831 break;
832 }
833 }
834
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +0200835 return rc;
836}
Eliezer Tamirc14423f2008-02-28 11:49:42 -0800837
/**
 * bnx2x_fw_dump_lvl - print the management-CPU (MCP) firmware trace buffer.
 * @bp:  driver instance
 * @lvl: printk severity prefix (e.g. KERN_ERR) used for every emitted line
 *
 * Reads the bootcode trace area out of device memory via REG_RD and prints
 * it as text.  Does nothing when the MCP is not present (BP_NOMCP).
 */
void bnx2x_fw_dump_lvl(struct bnx2x *bp, const char *lvl)
{
	u32 addr, val;
	u32 mark, offset;
	__be32 data[9];
	int word;
	u32 trace_shmem_base;
	if (BP_NOMCP(bp)) {
		BNX2X_ERR("NO MCP - can not dump\n");
		return;
	}
	/* report the bootcode version first, so the dump is self-describing */
	netdev_printk(lvl, bp->dev, "bc %d.%d.%d\n",
		      (bp->common.bc_ver & 0xff0000) >> 16,
		      (bp->common.bc_ver & 0xff00) >> 8,
		      (bp->common.bc_ver & 0xff));

	/* read the MCP program counter twice; only print it when the two
	 * reads agree (i.e. the value is stable enough to be meaningful)
	 */
	val = REG_RD(bp, MCP_REG_MCPR_CPU_PROGRAM_COUNTER);
	if (val == REG_RD(bp, MCP_REG_MCPR_CPU_PROGRAM_COUNTER))
		printk("%s" "MCP PC at 0x%x\n", lvl, val);

	/* pick the shmem region belonging to this PCI path */
	if (BP_PATH(bp) == 0)
		trace_shmem_base = bp->common.shmem_base;
	else
		trace_shmem_base = SHMEM2_RD(bp, other_shmem_base_addr);
	/* NOTE(review): trace buffer presumably sits just below shmem
	 * (base - 0x800); the word at +4 holds the current write mark —
	 * confirm against bootcode layout docs
	 */
	addr = trace_shmem_base - 0x0800 + 4;
	mark = REG_RD(bp, addr);
	/* translate the mark into a scratchpad address, rounded up to a
	 * 4-byte boundary; scratch base differs between E1x and later chips
	 */
	mark = (CHIP_IS_E1x(bp) ? MCP_REG_MCPR_SCRATCH : MCP_A_REG_MCPR_SCRATCH)
		+ ((mark + 0x3) & ~0x3) - 0x08000000;
	printk("%s" "begin fw dump (mark 0x%x)\n", lvl, mark);

	printk("%s", lvl);
	/* the trace is a ring: dump from the mark to the end of the buffer,
	 * then wrap and dump from the start of the buffer up to the mark.
	 * Each iteration reads 8 words and prints them as a NUL-terminated
	 * string (data[8] is the terminator).
	 */
	for (offset = mark; offset <= trace_shmem_base; offset += 0x8*4) {
		for (word = 0; word < 8; word++)
			data[word] = htonl(REG_RD(bp, offset + 4*word));
		data[8] = 0x0;
		pr_cont("%s", (char *)data);
	}
	for (offset = addr + 4; offset <= mark; offset += 0x8*4) {
		for (word = 0; word < 8; word++)
			data[word] = htonl(REG_RD(bp, offset + 4*word));
		data[8] = 0x0;
		pr_cont("%s", (char *)data);
	}
	printk("%s" "end of fw dump\n", lvl);
}
883
/* Convenience wrapper: emit the firmware trace at KERN_ERR severity. */
static inline void bnx2x_fw_dump(struct bnx2x *bp)
{
	bnx2x_fw_dump_lvl(bp, KERN_ERR);
}
888
/**
 * bnx2x_panic_dump - dump driver and device state to the kernel log.
 * @bp: driver instance
 *
 * Called on fatal errors.  Disables statistics collection, then prints:
 * default/attention status-block indices, per-queue Rx/Tx indices, the
 * firmware status-block data read back from device memory, and (when
 * BNX2X_STOP_ON_ERROR is set) windows of the Rx/Tx descriptor rings.
 * Finishes with the firmware trace and storm assert lists.
 */
void bnx2x_panic_dump(struct bnx2x *bp)
{
	int i;
	u16 j;
	struct hc_sp_status_block_data sp_sb_data;
	int func = BP_FUNC(bp);
#ifdef BNX2X_STOP_ON_ERROR
	u16 start = 0, end = 0;
#endif

	/* stop the statistics state machine so it does not race the dump */
	bp->stats_state = STATS_STATE_DISABLED;
	DP(BNX2X_MSG_STATS, "stats_state - DISABLED\n");

	BNX2X_ERR("begin crash dump -----------------\n");

	/* Indices */
	/* Common */
	BNX2X_ERR("def_idx(0x%x) def_att_idx(0x%x) attn_state(0x%x)"
		  " spq_prod_idx(0x%x)\n",
		  bp->def_idx, bp->def_att_idx,
		  bp->attn_state, bp->spq_prod_idx);
	BNX2X_ERR("DSB: attn bits(0x%x) ack(0x%x) id(0x%x) idx(0x%x)\n",
		  bp->def_status_blk->atten_status_block.attn_bits,
		  bp->def_status_blk->atten_status_block.attn_bits_ack,
		  bp->def_status_blk->atten_status_block.status_block_id,
		  bp->def_status_blk->atten_status_block.attn_bits_index);
	BNX2X_ERR(" def (");
	for (i = 0; i < HC_SP_SB_MAX_INDICES; i++)
		pr_cont("0x%x%s",
			bp->def_status_blk->sp_sb.index_values[i],
			(i == HC_SP_SB_MAX_INDICES - 1) ? ") " : " ");

	/* read the slow-path status-block data word by word from CSTORM */
	for (i = 0; i < sizeof(struct hc_sp_status_block_data)/sizeof(u32); i++)
		*((u32 *)&sp_sb_data + i) = REG_RD(bp, BAR_CSTRORM_INTMEM +
			CSTORM_SP_STATUS_BLOCK_DATA_OFFSET(func) +
			i*sizeof(u32));

	pr_cont("igu_sb_id(0x%x) igu_seg_id (0x%x) "
		"pf_id(0x%x) vnic_id(0x%x) "
		"vf_id(0x%x) vf_valid (0x%x)\n",
		sp_sb_data.igu_sb_id,
		sp_sb_data.igu_seg_id,
		sp_sb_data.p_func.pf_id,
		sp_sb_data.p_func.vnic_id,
		sp_sb_data.p_func.vf_id,
		sp_sb_data.p_func.vf_valid);


	for_each_eth_queue(bp, i) {
		struct bnx2x_fastpath *fp = &bp->fp[i];
		int loop;
		struct hc_status_block_data_e2 sb_data_e2;
		struct hc_status_block_data_e1x sb_data_e1x;
		/* point at the chip-appropriate layout; only one of the two
		 * locals is actually filled in below
		 */
		struct hc_status_block_sm *hc_sm_p =
			CHIP_IS_E2(bp) ?
			sb_data_e2.common.state_machine :
			sb_data_e1x.common.state_machine;
		struct hc_index_data *hc_index_p =
			CHIP_IS_E2(bp) ?
			sb_data_e2.index_data :
			sb_data_e1x.index_data;
		int data_size;
		u32 *sb_data_p;

		/* Rx */
		BNX2X_ERR("fp%d: rx_bd_prod(0x%x) rx_bd_cons(0x%x)"
			  " rx_comp_prod(0x%x)"
			  " rx_comp_cons(0x%x) *rx_cons_sb(0x%x)\n",
			  i, fp->rx_bd_prod, fp->rx_bd_cons,
			  fp->rx_comp_prod,
			  fp->rx_comp_cons, le16_to_cpu(*fp->rx_cons_sb));
		BNX2X_ERR(" rx_sge_prod(0x%x) last_max_sge(0x%x)"
			  " fp_hc_idx(0x%x)\n",
			  fp->rx_sge_prod, fp->last_max_sge,
			  le16_to_cpu(fp->fp_hc_idx));

		/* Tx */
		BNX2X_ERR("fp%d: tx_pkt_prod(0x%x) tx_pkt_cons(0x%x)"
			  " tx_bd_prod(0x%x) tx_bd_cons(0x%x)"
			  " *tx_cons_sb(0x%x)\n",
			  i, fp->tx_pkt_prod, fp->tx_pkt_cons, fp->tx_bd_prod,
			  fp->tx_bd_cons, le16_to_cpu(*fp->tx_cons_sb));

		loop = CHIP_IS_E2(bp) ?
			HC_SB_MAX_INDICES_E2 : HC_SB_MAX_INDICES_E1X;

		/* host sb data */

#ifdef BCM_CNIC
		/* FCoE queue has no regular status block to dump */
		if (IS_FCOE_FP(fp))
			continue;
#endif
		BNX2X_ERR(" run indexes (");
		for (j = 0; j < HC_SB_MAX_SM; j++)
			pr_cont("0x%x%s",
				fp->sb_running_index[j],
				(j == HC_SB_MAX_SM - 1) ? ")" : " ");

		BNX2X_ERR(" indexes (");
		for (j = 0; j < loop; j++)
			pr_cont("0x%x%s",
				fp->sb_index_values[j],
				(j == loop - 1) ? ")" : " ");
		/* fw sb data */
		data_size = CHIP_IS_E2(bp) ?
			sizeof(struct hc_status_block_data_e2) :
			sizeof(struct hc_status_block_data_e1x);
		data_size /= sizeof(u32);
		sb_data_p = CHIP_IS_E2(bp) ?
			(u32 *)&sb_data_e2 :
			(u32 *)&sb_data_e1x;
		/* copy sb data in here */
		for (j = 0; j < data_size; j++)
			*(sb_data_p + j) = REG_RD(bp, BAR_CSTRORM_INTMEM +
				CSTORM_STATUS_BLOCK_DATA_OFFSET(fp->fw_sb_id) +
				j * sizeof(u32));

		if (CHIP_IS_E2(bp)) {
			pr_cont("pf_id(0x%x) vf_id (0x%x) vf_valid(0x%x) "
				"vnic_id(0x%x) same_igu_sb_1b(0x%x)\n",
				sb_data_e2.common.p_func.pf_id,
				sb_data_e2.common.p_func.vf_id,
				sb_data_e2.common.p_func.vf_valid,
				sb_data_e2.common.p_func.vnic_id,
				sb_data_e2.common.same_igu_sb_1b);
		} else {
			pr_cont("pf_id(0x%x) vf_id (0x%x) vf_valid(0x%x) "
				"vnic_id(0x%x) same_igu_sb_1b(0x%x)\n",
				sb_data_e1x.common.p_func.pf_id,
				sb_data_e1x.common.p_func.vf_id,
				sb_data_e1x.common.p_func.vf_valid,
				sb_data_e1x.common.p_func.vnic_id,
				sb_data_e1x.common.same_igu_sb_1b);
		}

		/* SB_SMs data */
		for (j = 0; j < HC_SB_MAX_SM; j++) {
			pr_cont("SM[%d] __flags (0x%x) "
				"igu_sb_id (0x%x) igu_seg_id(0x%x) "
				"time_to_expire (0x%x) "
				"timer_value(0x%x)\n", j,
				hc_sm_p[j].__flags,
				hc_sm_p[j].igu_sb_id,
				hc_sm_p[j].igu_seg_id,
				hc_sm_p[j].time_to_expire,
				hc_sm_p[j].timer_value);
		}

		/* Indices data */
		for (j = 0; j < loop; j++) {
			pr_cont("INDEX[%d] flags (0x%x) "
				"timeout (0x%x)\n", j,
				hc_index_p[j].flags,
				hc_index_p[j].timeout);
		}
	}

#ifdef BNX2X_STOP_ON_ERROR
	/* Rings */
	/* Rx: dump a window of descriptors around the current consumer */
	for_each_rx_queue(bp, i) {
		struct bnx2x_fastpath *fp = &bp->fp[i];

		start = RX_BD(le16_to_cpu(*fp->rx_cons_sb) - 10);
		end = RX_BD(le16_to_cpu(*fp->rx_cons_sb) + 503);
		for (j = start; j != end; j = RX_BD(j + 1)) {
			u32 *rx_bd = (u32 *)&fp->rx_desc_ring[j];
			struct sw_rx_bd *sw_bd = &fp->rx_buf_ring[j];

			BNX2X_ERR("fp%d: rx_bd[%x]=[%x:%x] sw_bd=[%p]\n",
				  i, j, rx_bd[1], rx_bd[0], sw_bd->skb);
		}

		start = RX_SGE(fp->rx_sge_prod);
		end = RX_SGE(fp->last_max_sge);
		for (j = start; j != end; j = RX_SGE(j + 1)) {
			u32 *rx_sge = (u32 *)&fp->rx_sge_ring[j];
			struct sw_rx_page *sw_page = &fp->rx_page_ring[j];

			BNX2X_ERR("fp%d: rx_sge[%x]=[%x:%x] sw_page=[%p]\n",
				  i, j, rx_sge[1], rx_sge[0], sw_page->page);
		}

		start = RCQ_BD(fp->rx_comp_cons - 10);
		end = RCQ_BD(fp->rx_comp_cons + 503);
		for (j = start; j != end; j = RCQ_BD(j + 1)) {
			u32 *cqe = (u32 *)&fp->rx_comp_ring[j];

			BNX2X_ERR("fp%d: cqe[%x]=[%x:%x:%x:%x]\n",
				  i, j, cqe[0], cqe[1], cqe[2], cqe[3]);
		}
	}

	/* Tx: dump a window of descriptors around the current consumer */
	for_each_tx_queue(bp, i) {
		struct bnx2x_fastpath *fp = &bp->fp[i];

		start = TX_BD(le16_to_cpu(*fp->tx_cons_sb) - 10);
		end = TX_BD(le16_to_cpu(*fp->tx_cons_sb) + 245);
		for (j = start; j != end; j = TX_BD(j + 1)) {
			struct sw_tx_bd *sw_bd = &fp->tx_buf_ring[j];

			BNX2X_ERR("fp%d: packet[%x]=[%p,%x]\n",
				  i, j, sw_bd->skb, sw_bd->first_bd);
		}

		start = TX_BD(fp->tx_bd_cons - 10);
		end = TX_BD(fp->tx_bd_cons + 254);
		for (j = start; j != end; j = TX_BD(j + 1)) {
			u32 *tx_bd = (u32 *)&fp->tx_desc_ring[j];

			BNX2X_ERR("fp%d: tx_bd[%x]=[%x:%x:%x:%x]\n",
				  i, j, tx_bd[0], tx_bd[1], tx_bd[2], tx_bd[3]);
		}
	}
#endif
	bnx2x_fw_dump(bp);
	bnx2x_mc_assert(bp);
	BNX2X_ERR("end crash dump -----------------\n");
}
1109
/**
 * bnx2x_hc_int_enable - enable interrupts through the HC block.
 * @bp: driver instance
 *
 * Programs the per-port HC_REG_CONFIG register according to the interrupt
 * mode currently in use (MSI-X, MSI or INTx), then programs the
 * leading/trailing edge registers on non-E1 chips.
 */
static void bnx2x_hc_int_enable(struct bnx2x *bp)
{
	int port = BP_PORT(bp);
	u32 addr = port ? HC_REG_CONFIG_1 : HC_REG_CONFIG_0;
	u32 val = REG_RD(bp, addr);
	int msix = (bp->flags & USING_MSIX_FLAG) ? 1 : 0;
	int msi = (bp->flags & USING_MSI_FLAG) ? 1 : 0;

	if (msix) {
		/* MSI-X: per-vector delivery, no single-ISR, no INTx line */
		val &= ~(HC_CONFIG_0_REG_SINGLE_ISR_EN_0 |
			 HC_CONFIG_0_REG_INT_LINE_EN_0);
		val |= (HC_CONFIG_0_REG_MSI_MSIX_INT_EN_0 |
			HC_CONFIG_0_REG_ATTN_BIT_EN_0);
	} else if (msi) {
		/* MSI: single ISR, message-signalled, no INTx line */
		val &= ~HC_CONFIG_0_REG_INT_LINE_EN_0;
		val |= (HC_CONFIG_0_REG_SINGLE_ISR_EN_0 |
			HC_CONFIG_0_REG_MSI_MSIX_INT_EN_0 |
			HC_CONFIG_0_REG_ATTN_BIT_EN_0);
	} else {
		val |= (HC_CONFIG_0_REG_SINGLE_ISR_EN_0 |
			HC_CONFIG_0_REG_MSI_MSIX_INT_EN_0 |
			HC_CONFIG_0_REG_INT_LINE_EN_0 |
			HC_CONFIG_0_REG_ATTN_BIT_EN_0);

		/* NOTE(review): for INTx on non-E1 chips the config is
		 * written once with MSI/MSIX enable still set, then the bit
		 * is cleared for the final write below — presumably a HW
		 * requirement; confirm against chip errata before changing
		 */
		if (!CHIP_IS_E1(bp)) {
			DP(NETIF_MSG_INTR, "write %x to HC %d (addr 0x%x)\n",
			   val, port, addr);

			REG_WR(bp, addr, val);

			val &= ~HC_CONFIG_0_REG_MSI_MSIX_INT_EN_0;
		}
	}

	/* E1 uses an explicit per-port interrupt mask register */
	if (CHIP_IS_E1(bp))
		REG_WR(bp, HC_REG_INT_MASK + port*4, 0x1FFFF);

	DP(NETIF_MSG_INTR, "write %x to HC %d (addr 0x%x) mode %s\n",
	   val, port, addr, (msix ? "MSI-X" : (msi ? "MSI" : "INTx")));

	REG_WR(bp, addr, val);
	/*
	 * Ensure that HC_CONFIG is written before leading/trailing edge config
	 */
	mmiowb();
	barrier();

	if (!CHIP_IS_E1(bp)) {
		/* init leading/trailing edge */
		if (IS_MF(bp)) {
			/* in multi-function mode only this vnic's bit fires */
			val = (0xee0f | (1 << (BP_E1HVN(bp) + 4)));
			if (bp->port.pmf)
				/* enable nig and gpio3 attention */
				val |= 0x1100;
		} else
			val = 0xffff;

		REG_WR(bp, HC_REG_TRAILING_EDGE_0 + port*8, val);
		REG_WR(bp, HC_REG_LEADING_EDGE_0 + port*8, val);
	}

	/* Make sure that interrupts are indeed enabled from here on */
	mmiowb();
}
1174
/**
 * bnx2x_igu_int_enable - enable interrupts through the IGU block.
 * @bp: driver instance
 *
 * Counterpart of bnx2x_hc_int_enable() for chips whose interrupt
 * controller is the IGU: programs IGU_REG_PF_CONFIGURATION according to
 * the interrupt mode in use, then the leading/trailing edge latches.
 */
static void bnx2x_igu_int_enable(struct bnx2x *bp)
{
	u32 val;
	int msix = (bp->flags & USING_MSIX_FLAG) ? 1 : 0;
	int msi = (bp->flags & USING_MSI_FLAG) ? 1 : 0;

	val = REG_RD(bp, IGU_REG_PF_CONFIGURATION);

	if (msix) {
		/* MSI-X: per-vector delivery, no INTx line, no single ISR */
		val &= ~(IGU_PF_CONF_INT_LINE_EN |
			 IGU_PF_CONF_SINGLE_ISR_EN);
		val |= (IGU_PF_CONF_FUNC_EN |
			IGU_PF_CONF_MSI_MSIX_EN |
			IGU_PF_CONF_ATTN_BIT_EN);
	} else if (msi) {
		/* MSI: message-signalled with a single ISR */
		val &= ~IGU_PF_CONF_INT_LINE_EN;
		val |= (IGU_PF_CONF_FUNC_EN |
			IGU_PF_CONF_MSI_MSIX_EN |
			IGU_PF_CONF_ATTN_BIT_EN |
			IGU_PF_CONF_SINGLE_ISR_EN);
	} else {
		/* INTx: legacy line interrupt with a single ISR */
		val &= ~IGU_PF_CONF_MSI_MSIX_EN;
		val |= (IGU_PF_CONF_FUNC_EN |
			IGU_PF_CONF_INT_LINE_EN |
			IGU_PF_CONF_ATTN_BIT_EN |
			IGU_PF_CONF_SINGLE_ISR_EN);
	}

	DP(NETIF_MSG_INTR, "write 0x%x to IGU mode %s\n",
	   val, (msix ? "MSI-X" : (msi ? "MSI" : "INTx")));

	REG_WR(bp, IGU_REG_PF_CONFIGURATION, val);

	barrier();

	/* init leading/trailing edge */
	if (IS_MF(bp)) {
		/* in multi-function mode only this vnic's bit fires */
		val = (0xee0f | (1 << (BP_E1HVN(bp) + 4)));
		if (bp->port.pmf)
			/* enable nig and gpio3 attention */
			val |= 0x1100;
	} else
		val = 0xffff;

	REG_WR(bp, IGU_REG_TRAILING_EDGE_LATCH, val);
	REG_WR(bp, IGU_REG_LEADING_EDGE_LATCH, val);

	/* Make sure that interrupts are indeed enabled from here on */
	mmiowb();
}
1225
1226void bnx2x_int_enable(struct bnx2x *bp)
1227{
1228 if (bp->common.int_block == INT_BLOCK_HC)
1229 bnx2x_hc_int_enable(bp);
1230 else
1231 bnx2x_igu_int_enable(bp);
1232}
1233
/**
 * bnx2x_hc_int_disable - disable interrupts through the HC block.
 * @bp: driver instance
 *
 * Clears the interrupt-enable bits in the per-port HC_REG_CONFIG register
 * and verifies the write by reading it back.
 */
static void bnx2x_hc_int_disable(struct bnx2x *bp)
{
	int port = BP_PORT(bp);
	u32 addr = port ? HC_REG_CONFIG_1 : HC_REG_CONFIG_0;
	u32 val = REG_RD(bp, addr);

	/*
	 * in E1 we must use only PCI configuration space to disable
	 * MSI/MSIX capability
	 * It's forbidden to disable IGU_PF_CONF_MSI_MSIX_EN in HC block
	 */
	if (CHIP_IS_E1(bp)) {
		/* Since IGU_PF_CONF_MSI_MSIX_EN still always on
		 * Use mask register to prevent from HC sending interrupts
		 * after we exit the function
		 */
		REG_WR(bp, HC_REG_INT_MASK + port*4, 0);

		val &= ~(HC_CONFIG_0_REG_SINGLE_ISR_EN_0 |
			 HC_CONFIG_0_REG_INT_LINE_EN_0 |
			 HC_CONFIG_0_REG_ATTN_BIT_EN_0);
	} else
		val &= ~(HC_CONFIG_0_REG_SINGLE_ISR_EN_0 |
			 HC_CONFIG_0_REG_MSI_MSIX_INT_EN_0 |
			 HC_CONFIG_0_REG_INT_LINE_EN_0 |
			 HC_CONFIG_0_REG_ATTN_BIT_EN_0);

	DP(NETIF_MSG_INTR, "write %x to HC %d (addr 0x%x)\n",
	   val, port, addr);

	/* flush all outstanding writes */
	mmiowb();

	REG_WR(bp, addr, val);
	/* read back to confirm the disable actually landed */
	if (REG_RD(bp, addr) != val)
		BNX2X_ERR("BUG! proper val not read from IGU!\n");
}
1271
/**
 * bnx2x_igu_int_disable - disable interrupts through the IGU block.
 * @bp: driver instance
 *
 * Clears the interrupt-enable bits in IGU_REG_PF_CONFIGURATION and
 * verifies the write by reading it back.
 */
static void bnx2x_igu_int_disable(struct bnx2x *bp)
{
	u32 val = REG_RD(bp, IGU_REG_PF_CONFIGURATION);

	val &= ~(IGU_PF_CONF_MSI_MSIX_EN |
		 IGU_PF_CONF_INT_LINE_EN |
		 IGU_PF_CONF_ATTN_BIT_EN);

	DP(NETIF_MSG_INTR, "write %x to IGU\n", val);

	/* flush all outstanding writes */
	mmiowb();

	REG_WR(bp, IGU_REG_PF_CONFIGURATION, val);
	/* read back to confirm the disable actually landed */
	if (REG_RD(bp, IGU_REG_PF_CONFIGURATION) != val)
		BNX2X_ERR("BUG! proper val not read from IGU!\n");
}
1289
stephen hemminger8d962862010-10-21 07:50:56 +00001290static void bnx2x_int_disable(struct bnx2x *bp)
Dmitry Kravkovf2e08992010-10-06 03:28:26 +00001291{
1292 if (bp->common.int_block == INT_BLOCK_HC)
1293 bnx2x_hc_int_disable(bp);
1294 else
1295 bnx2x_igu_int_disable(bp);
1296}
1297
/**
 * bnx2x_int_disable_sync - disable interrupts and wait for handlers to finish.
 * @bp:         driver instance
 * @disable_hw: when non-zero, also disable interrupt generation in HW
 *
 * After this returns, no interrupt handler is running and the slow-path
 * work item has been flushed.  May sleep (synchronize_irq/flush_workqueue).
 */
void bnx2x_int_disable_sync(struct bnx2x *bp, int disable_hw)
{
	int msix = (bp->flags & USING_MSIX_FLAG) ? 1 : 0;
	int i, offset;

	if (disable_hw)
		/* prevent the HW from sending interrupts */
		bnx2x_int_disable(bp);

	/* make sure all ISRs are done */
	if (msix) {
		/* vector 0 is slow-path; CNIC (if built) takes the next one */
		synchronize_irq(bp->msix_table[0].vector);
		offset = 1;
#ifdef BCM_CNIC
		offset++;
#endif
		for_each_eth_queue(bp, i)
			synchronize_irq(bp->msix_table[i + offset].vector);
	} else
		synchronize_irq(bp->pdev->irq);

	/* make sure sp_task is not running */
	cancel_delayed_work(&bp->sp_task);
	flush_workqueue(bnx2x_wq);
}
1323
Eilon Greenstein34f80b02008-06-23 20:33:01 -07001324/* fast path */
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02001325
1326/*
Eilon Greenstein34f80b02008-06-23 20:33:01 -07001327 * General service functions
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02001328 */
1329
Vladislav Zolotarov72fd0712010-04-19 01:13:12 +00001330/* Return true if succeeded to acquire the lock */
1331static bool bnx2x_trylock_hw_lock(struct bnx2x *bp, u32 resource)
1332{
1333 u32 lock_status;
1334 u32 resource_bit = (1 << resource);
1335 int func = BP_FUNC(bp);
1336 u32 hw_lock_control_reg;
1337
1338 DP(NETIF_MSG_HW, "Trying to take a lock on resource %d\n", resource);
1339
1340 /* Validating that the resource is within range */
1341 if (resource > HW_LOCK_MAX_RESOURCE_VALUE) {
1342 DP(NETIF_MSG_HW,
1343 "resource(0x%x) > HW_LOCK_MAX_RESOURCE_VALUE(0x%x)\n",
1344 resource, HW_LOCK_MAX_RESOURCE_VALUE);
Eric Dumazet0fdf4d02010-08-26 22:03:53 -07001345 return false;
Vladislav Zolotarov72fd0712010-04-19 01:13:12 +00001346 }
1347
1348 if (func <= 5)
1349 hw_lock_control_reg = (MISC_REG_DRIVER_CONTROL_1 + func*8);
1350 else
1351 hw_lock_control_reg =
1352 (MISC_REG_DRIVER_CONTROL_7 + (func - 6)*8);
1353
1354 /* Try to acquire the lock */
1355 REG_WR(bp, hw_lock_control_reg + 4, resource_bit);
1356 lock_status = REG_RD(bp, hw_lock_control_reg);
1357 if (lock_status & resource_bit)
1358 return true;
1359
1360 DP(NETIF_MSG_HW, "Failed to get a lock on resource %d\n", resource);
1361 return false;
1362}
1363
Michael Chan993ac7b2009-10-10 13:46:56 +00001364#ifdef BCM_CNIC
1365static void bnx2x_cnic_cfc_comp(struct bnx2x *bp, int cid);
1366#endif
Eilon Greenstein3196a882008-08-13 15:58:49 -07001367
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00001368void bnx2x_sp_event(struct bnx2x_fastpath *fp,
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02001369 union eth_rx_cqe *rr_cqe)
1370{
1371 struct bnx2x *bp = fp->bp;
1372 int cid = SW_CID(rr_cqe->ramrod_cqe.conn_and_cmd_data);
1373 int command = CQE_CMD(rr_cqe->ramrod_cqe.conn_and_cmd_data);
1374
Eilon Greenstein34f80b02008-06-23 20:33:01 -07001375 DP(BNX2X_MSG_SP,
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02001376 "fp %d cid %d got ramrod #%d state is %x type is %d\n",
Eilon Greenstein0626b892009-02-12 08:38:14 +00001377 fp->index, cid, command, bp->state,
Eilon Greenstein34f80b02008-06-23 20:33:01 -07001378 rr_cqe->ramrod_cqe.ramrod_type);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02001379
Dmitry Kravkov523224a2010-10-06 03:23:26 +00001380 switch (command | fp->state) {
1381 case (RAMROD_CMD_ID_ETH_CLIENT_SETUP | BNX2X_FP_STATE_OPENING):
1382 DP(NETIF_MSG_IFUP, "got MULTI[%d] setup ramrod\n", cid);
1383 fp->state = BNX2X_FP_STATE_OPEN;
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02001384 break;
1385
Dmitry Kravkov523224a2010-10-06 03:23:26 +00001386 case (RAMROD_CMD_ID_ETH_HALT | BNX2X_FP_STATE_HALTING):
1387 DP(NETIF_MSG_IFDOWN, "got MULTI[%d] halt ramrod\n", cid);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02001388 fp->state = BNX2X_FP_STATE_HALTED;
1389 break;
1390
Dmitry Kravkov523224a2010-10-06 03:23:26 +00001391 case (RAMROD_CMD_ID_ETH_TERMINATE | BNX2X_FP_STATE_TERMINATING):
1392 DP(NETIF_MSG_IFDOWN, "got MULTI[%d] teminate ramrod\n", cid);
1393 fp->state = BNX2X_FP_STATE_TERMINATED;
Eliezer Tamir49d66772008-02-28 11:53:13 -08001394 break;
1395
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02001396 default:
Dmitry Kravkov523224a2010-10-06 03:23:26 +00001397 BNX2X_ERR("unexpected MC reply (%d) "
1398 "fp[%d] state is %x\n",
1399 command, fp->index, fp->state);
Eilon Greenstein34f80b02008-06-23 20:33:01 -07001400 break;
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02001401 }
Dmitry Kravkov523224a2010-10-06 03:23:26 +00001402
Dmitry Kravkov8fe23fb2010-10-06 03:27:41 +00001403 smp_mb__before_atomic_inc();
Vladislav Zolotarov6e30dd42011-02-06 11:25:41 -08001404 atomic_inc(&bp->cq_spq_left);
Dmitry Kravkov523224a2010-10-06 03:23:26 +00001405 /* push the change in fp->state and towards the memory */
1406 smp_wmb();
1407
1408 return;
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02001409}
1410
/**
 * bnx2x_interrupt - top-half interrupt handler (INTx / MSI, single vector).
 * @irq:          interrupt number (unused)
 * @dev_instance: the net_device this IRQ was registered with
 *
 * Acknowledges the interrupt, then fans out by status bits: one bit per
 * ethernet queue (scheduling NAPI), the CNIC bit (if built), and bit 0 for
 * the slow path (queued to the sp_task workqueue).  Returns IRQ_NONE when
 * the (possibly shared) interrupt was not ours.
 */
irqreturn_t bnx2x_interrupt(int irq, void *dev_instance)
{
	struct bnx2x *bp = netdev_priv(dev_instance);
	u16 status = bnx2x_ack_int(bp);
	u16 mask;
	int i;

	/* Return here if interrupt is shared and it's not for us */
	if (unlikely(status == 0)) {
		DP(NETIF_MSG_INTR, "not our interrupt!\n");
		return IRQ_NONE;
	}
	DP(NETIF_MSG_INTR, "got an interrupt status 0x%x\n", status);

#ifdef BNX2X_STOP_ON_ERROR
	/* after a panic the state is frozen; just ack and exit */
	if (unlikely(bp->panic))
		return IRQ_HANDLED;
#endif

	for_each_eth_queue(bp, i) {
		struct bnx2x_fastpath *fp = &bp->fp[i];

		mask = 0x2 << (fp->index + CNIC_CONTEXT_USE);
		if (status & mask) {
			/* Handle Rx and Tx according to SB id */
			prefetch(fp->rx_cons_sb);
			prefetch(fp->tx_cons_sb);
			prefetch(&fp->sb_running_index[SM_RX_ID]);
			napi_schedule(&bnx2x_fp(bp, fp->index, napi));
			status &= ~mask;
		}
	}

#ifdef BCM_CNIC
	mask = 0x2;
	if (status & (mask | 0x1)) {
		struct cnic_ops *c_ops = NULL;

		/* CNIC handler is published/unpublished via RCU */
		rcu_read_lock();
		c_ops = rcu_dereference(bp->cnic_ops);
		if (c_ops)
			c_ops->cnic_handler(bp->cnic_data, NULL);
		rcu_read_unlock();

		status &= ~mask;
	}
#endif

	if (unlikely(status & 0x1)) {
		/* slow path work is deferred to process context */
		queue_delayed_work(bnx2x_wq, &bp->sp_task, 0);

		status &= ~0x1;
		if (!status)
			return IRQ_HANDLED;
	}

	/* any bits still set were not claimed by a known source */
	if (unlikely(status))
		DP(NETIF_MSG_INTR, "got an unknown interrupt! (status 0x%x)\n",
		   status);

	return IRQ_HANDLED;
}
1473
1474/* end of fast path */
1475
Yaniv Rosnerc18487e2008-06-23 20:27:52 -07001476
1477/* Link */
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02001478
1479/*
1480 * General service functions
1481 */
1482
/**
 * bnx2x_acquire_hw_lock - acquire a HW resource lock in the MISC block
 * @bp:		driver handle
 * @resource:	HW_LOCK_RESOURCE_* index; selects one bit in the lock register
 *
 * Requests the lock by writing the resource bit to the per-function
 * driver-control register at offset +4 and reading the control register
 * back to see whether the bit was granted.  Retries every 5ms for up to
 * ~5 seconds.
 *
 * Returns 0 on success, -EINVAL for an out-of-range resource, -EEXIST if
 * this function already holds the lock, -EAGAIN on timeout.
 */
int bnx2x_acquire_hw_lock(struct bnx2x *bp, u32 resource)
{
	u32 lock_status;
	u32 resource_bit = (1 << resource);
	int func = BP_FUNC(bp);
	u32 hw_lock_control_reg;
	int cnt;

	/* Validating that the resource is within range */
	if (resource > HW_LOCK_MAX_RESOURCE_VALUE) {
		DP(NETIF_MSG_HW,
		   "resource(0x%x) > HW_LOCK_MAX_RESOURCE_VALUE(0x%x)\n",
		   resource, HW_LOCK_MAX_RESOURCE_VALUE);
		return -EINVAL;
	}

	/* Functions 0-5 use DRIVER_CONTROL_1..6, functions 6-7 use
	 * DRIVER_CONTROL_7..8 (each register pair is 8 bytes apart).
	 */
	if (func <= 5) {
		hw_lock_control_reg = (MISC_REG_DRIVER_CONTROL_1 + func*8);
	} else {
		hw_lock_control_reg =
				(MISC_REG_DRIVER_CONTROL_7 + (func - 6)*8);
	}

	/* Validating that the resource is not already taken */
	lock_status = REG_RD(bp, hw_lock_control_reg);
	if (lock_status & resource_bit) {
		DP(NETIF_MSG_HW, "lock_status 0x%x resource_bit 0x%x\n",
		   lock_status, resource_bit);
		return -EEXIST;
	}

	/* Try for 5 second every 5ms */
	for (cnt = 0; cnt < 1000; cnt++) {
		/* Try to acquire the lock */
		REG_WR(bp, hw_lock_control_reg + 4, resource_bit);
		lock_status = REG_RD(bp, hw_lock_control_reg);
		if (lock_status & resource_bit)
			return 0;

		msleep(5);
	}
	DP(NETIF_MSG_HW, "Timeout\n");
	return -EAGAIN;
}
1527
/**
 * bnx2x_release_hw_lock - release a HW resource lock in the MISC block
 * @bp:		driver handle
 * @resource:	HW_LOCK_RESOURCE_* index; selects one bit in the lock register
 *
 * Clears the resource bit by writing it to the per-function driver-control
 * register (counterpart of bnx2x_acquire_hw_lock()).
 *
 * Returns 0 on success, -EINVAL for an out-of-range resource, -EFAULT if
 * the lock was not actually held.
 */
int bnx2x_release_hw_lock(struct bnx2x *bp, u32 resource)
{
	u32 lock_status;
	u32 resource_bit = (1 << resource);
	int func = BP_FUNC(bp);
	u32 hw_lock_control_reg;

	DP(NETIF_MSG_HW, "Releasing a lock on resource %d\n", resource);

	/* Validating that the resource is within range */
	if (resource > HW_LOCK_MAX_RESOURCE_VALUE) {
		DP(NETIF_MSG_HW,
		   "resource(0x%x) > HW_LOCK_MAX_RESOURCE_VALUE(0x%x)\n",
		   resource, HW_LOCK_MAX_RESOURCE_VALUE);
		return -EINVAL;
	}

	/* Same per-function register layout as in bnx2x_acquire_hw_lock() */
	if (func <= 5) {
		hw_lock_control_reg = (MISC_REG_DRIVER_CONTROL_1 + func*8);
	} else {
		hw_lock_control_reg =
				(MISC_REG_DRIVER_CONTROL_7 + (func - 6)*8);
	}

	/* Validating that the resource is currently taken */
	lock_status = REG_RD(bp, hw_lock_control_reg);
	if (!(lock_status & resource_bit)) {
		DP(NETIF_MSG_HW, "lock_status 0x%x resource_bit 0x%x\n",
		   lock_status, resource_bit);
		return -EFAULT;
	}

	REG_WR(bp, hw_lock_control_reg, resource_bit);
	return 0;
}
1563
Yaniv Rosnerc18487e2008-06-23 20:27:52 -07001564
Eilon Greenstein4acac6a2009-02-12 08:36:52 +00001565int bnx2x_get_gpio(struct bnx2x *bp, int gpio_num, u8 port)
1566{
1567 /* The GPIO should be swapped if swap register is set and active */
1568 int gpio_port = (REG_RD(bp, NIG_REG_PORT_SWAP) &&
1569 REG_RD(bp, NIG_REG_STRAP_OVERRIDE)) ^ port;
1570 int gpio_shift = gpio_num +
1571 (gpio_port ? MISC_REGISTERS_GPIO_PORT_SHIFT : 0);
1572 u32 gpio_mask = (1 << gpio_shift);
1573 u32 gpio_reg;
1574 int value;
1575
1576 if (gpio_num > MISC_REGISTERS_GPIO_3) {
1577 BNX2X_ERR("Invalid GPIO %d\n", gpio_num);
1578 return -EINVAL;
1579 }
1580
1581 /* read GPIO value */
1582 gpio_reg = REG_RD(bp, MISC_REG_GPIO);
1583
1584 /* get the requested pin value */
1585 if ((gpio_reg & gpio_mask) == gpio_mask)
1586 value = 1;
1587 else
1588 value = 0;
1589
1590 DP(NETIF_MSG_LINK, "pin %d value 0x%x\n", gpio_num, value);
1591
1592 return value;
1593}
1594
/**
 * bnx2x_set_gpio - drive one GPIO pin low/high or float it
 * @bp:		driver handle
 * @gpio_num:	pin number (0..MISC_REGISTERS_GPIO_3)
 * @mode:	MISC_REGISTERS_GPIO_OUTPUT_LOW/OUTPUT_HIGH/INPUT_HI_Z
 * @port:	port the pin logically belongs to
 *
 * Performs a read-modify-write of MISC_REG_GPIO under the GPIO HW lock.
 * Unknown modes fall through and leave the pin state unchanged.
 * Returns 0 on success or -EINVAL for an invalid pin.
 */
int bnx2x_set_gpio(struct bnx2x *bp, int gpio_num, u32 mode, u8 port)
{
	/* The GPIO should be swapped if swap register is set and active */
	int gpio_port = (REG_RD(bp, NIG_REG_PORT_SWAP) &&
			 REG_RD(bp, NIG_REG_STRAP_OVERRIDE)) ^ port;
	int gpio_shift = gpio_num +
			(gpio_port ? MISC_REGISTERS_GPIO_PORT_SHIFT : 0);
	u32 gpio_mask = (1 << gpio_shift);
	u32 gpio_reg;

	if (gpio_num > MISC_REGISTERS_GPIO_3) {
		BNX2X_ERR("Invalid GPIO %d\n", gpio_num);
		return -EINVAL;
	}

	/* Serialize against other functions touching the shared register */
	bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_GPIO);
	/* read GPIO and mask except the float bits */
	gpio_reg = (REG_RD(bp, MISC_REG_GPIO) & MISC_REGISTERS_GPIO_FLOAT);

	switch (mode) {
	case MISC_REGISTERS_GPIO_OUTPUT_LOW:
		DP(NETIF_MSG_LINK, "Set GPIO %d (shift %d) -> output low\n",
		   gpio_num, gpio_shift);
		/* clear FLOAT and set CLR */
		gpio_reg &= ~(gpio_mask << MISC_REGISTERS_GPIO_FLOAT_POS);
		gpio_reg |= (gpio_mask << MISC_REGISTERS_GPIO_CLR_POS);
		break;

	case MISC_REGISTERS_GPIO_OUTPUT_HIGH:
		DP(NETIF_MSG_LINK, "Set GPIO %d (shift %d) -> output high\n",
		   gpio_num, gpio_shift);
		/* clear FLOAT and set SET */
		gpio_reg &= ~(gpio_mask << MISC_REGISTERS_GPIO_FLOAT_POS);
		gpio_reg |= (gpio_mask << MISC_REGISTERS_GPIO_SET_POS);
		break;

	case MISC_REGISTERS_GPIO_INPUT_HI_Z:
		DP(NETIF_MSG_LINK, "Set GPIO %d (shift %d) -> input\n",
		   gpio_num, gpio_shift);
		/* set FLOAT */
		gpio_reg |= (gpio_mask << MISC_REGISTERS_GPIO_FLOAT_POS);
		break;

	default:
		break;
	}

	REG_WR(bp, MISC_REG_GPIO, gpio_reg);
	bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_GPIO);

	return 0;
}
1647
/**
 * bnx2x_set_gpio_int - set or clear the interrupt state of one GPIO pin
 * @bp:		driver handle
 * @gpio_num:	pin number (0..MISC_REGISTERS_GPIO_3)
 * @mode:	MISC_REGISTERS_GPIO_INT_OUTPUT_CLR or _SET
 * @port:	port the pin logically belongs to
 *
 * Read-modify-write of MISC_REG_GPIO_INT under the GPIO HW lock; unknown
 * modes leave the register unchanged.  Returns 0 or -EINVAL.
 */
int bnx2x_set_gpio_int(struct bnx2x *bp, int gpio_num, u32 mode, u8 port)
{
	/* The GPIO should be swapped if swap register is set and active */
	int gpio_port = (REG_RD(bp, NIG_REG_PORT_SWAP) &&
			 REG_RD(bp, NIG_REG_STRAP_OVERRIDE)) ^ port;
	int gpio_shift = gpio_num +
			(gpio_port ? MISC_REGISTERS_GPIO_PORT_SHIFT : 0);
	u32 gpio_mask = (1 << gpio_shift);
	u32 gpio_reg;

	if (gpio_num > MISC_REGISTERS_GPIO_3) {
		BNX2X_ERR("Invalid GPIO %d\n", gpio_num);
		return -EINVAL;
	}

	/* Serialize against other functions touching the shared register */
	bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_GPIO);
	/* read GPIO int */
	gpio_reg = REG_RD(bp, MISC_REG_GPIO_INT);

	switch (mode) {
	case MISC_REGISTERS_GPIO_INT_OUTPUT_CLR:
		DP(NETIF_MSG_LINK, "Clear GPIO INT %d (shift %d) -> "
				   "output low\n", gpio_num, gpio_shift);
		/* clear SET and set CLR */
		gpio_reg &= ~(gpio_mask << MISC_REGISTERS_GPIO_INT_SET_POS);
		gpio_reg |= (gpio_mask << MISC_REGISTERS_GPIO_INT_CLR_POS);
		break;

	case MISC_REGISTERS_GPIO_INT_OUTPUT_SET:
		DP(NETIF_MSG_LINK, "Set GPIO INT %d (shift %d) -> "
				   "output high\n", gpio_num, gpio_shift);
		/* clear CLR and set SET */
		gpio_reg &= ~(gpio_mask << MISC_REGISTERS_GPIO_INT_CLR_POS);
		gpio_reg |= (gpio_mask << MISC_REGISTERS_GPIO_INT_SET_POS);
		break;

	default:
		break;
	}

	REG_WR(bp, MISC_REG_GPIO_INT, gpio_reg);
	bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_GPIO);

	return 0;
}
1693
/**
 * bnx2x_set_spio - drive one SPIO pin low/high or float it
 * @bp:		driver handle
 * @spio_num:	pin number (MISC_REGISTERS_SPIO_4..MISC_REGISTERS_SPIO_7)
 * @mode:	MISC_REGISTERS_SPIO_OUTPUT_LOW/OUTPUT_HIGH/INPUT_HI_Z
 *
 * SPIO pins are chip-global (not per-port), hence no port swap handling.
 * Read-modify-write of MISC_REG_SPIO under the SPIO HW lock; unknown
 * modes leave the pin unchanged.  Returns 0 or -EINVAL.
 */
static int bnx2x_set_spio(struct bnx2x *bp, int spio_num, u32 mode)
{
	u32 spio_mask = (1 << spio_num);
	u32 spio_reg;

	if ((spio_num < MISC_REGISTERS_SPIO_4) ||
	    (spio_num > MISC_REGISTERS_SPIO_7)) {
		BNX2X_ERR("Invalid SPIO %d\n", spio_num);
		return -EINVAL;
	}

	/* Serialize against other functions touching the shared register */
	bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_SPIO);
	/* read SPIO and mask except the float bits */
	spio_reg = (REG_RD(bp, MISC_REG_SPIO) & MISC_REGISTERS_SPIO_FLOAT);

	switch (mode) {
	case MISC_REGISTERS_SPIO_OUTPUT_LOW:
		DP(NETIF_MSG_LINK, "Set SPIO %d -> output low\n", spio_num);
		/* clear FLOAT and set CLR */
		spio_reg &= ~(spio_mask << MISC_REGISTERS_SPIO_FLOAT_POS);
		spio_reg |= (spio_mask << MISC_REGISTERS_SPIO_CLR_POS);
		break;

	case MISC_REGISTERS_SPIO_OUTPUT_HIGH:
		DP(NETIF_MSG_LINK, "Set SPIO %d -> output high\n", spio_num);
		/* clear FLOAT and set SET */
		spio_reg &= ~(spio_mask << MISC_REGISTERS_SPIO_FLOAT_POS);
		spio_reg |= (spio_mask << MISC_REGISTERS_SPIO_SET_POS);
		break;

	case MISC_REGISTERS_SPIO_INPUT_HI_Z:
		DP(NETIF_MSG_LINK, "Set SPIO %d -> input\n", spio_num);
		/* set FLOAT */
		spio_reg |= (spio_mask << MISC_REGISTERS_SPIO_FLOAT_POS);
		break;

	default:
		break;
	}

	REG_WR(bp, MISC_REG_SPIO, spio_reg);
	bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_SPIO);

	return 0;
}
1739
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00001740void bnx2x_calc_fc_adv(struct bnx2x *bp)
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02001741{
Yaniv Rosnera22f0782010-09-07 11:41:20 +00001742 u8 cfg_idx = bnx2x_get_link_cfg_idx(bp);
Eilon Greensteinad33ea32009-01-14 21:24:57 -08001743 switch (bp->link_vars.ieee_fc &
1744 MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_MASK) {
Yaniv Rosnerc18487e2008-06-23 20:27:52 -07001745 case MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_NONE:
Yaniv Rosnera22f0782010-09-07 11:41:20 +00001746 bp->port.advertising[cfg_idx] &= ~(ADVERTISED_Asym_Pause |
Dmitry Kravkovf85582f2010-10-06 03:34:21 +00001747 ADVERTISED_Pause);
Eliezer Tamirf1410642008-02-28 11:51:50 -08001748 break;
Eilon Greenstein356e2382009-02-12 08:38:32 +00001749
Yaniv Rosnerc18487e2008-06-23 20:27:52 -07001750 case MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_BOTH:
Yaniv Rosnera22f0782010-09-07 11:41:20 +00001751 bp->port.advertising[cfg_idx] |= (ADVERTISED_Asym_Pause |
Dmitry Kravkovf85582f2010-10-06 03:34:21 +00001752 ADVERTISED_Pause);
Eliezer Tamirf1410642008-02-28 11:51:50 -08001753 break;
Eilon Greenstein356e2382009-02-12 08:38:32 +00001754
Yaniv Rosnerc18487e2008-06-23 20:27:52 -07001755 case MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_ASYMMETRIC:
Yaniv Rosnera22f0782010-09-07 11:41:20 +00001756 bp->port.advertising[cfg_idx] |= ADVERTISED_Asym_Pause;
Eliezer Tamirf1410642008-02-28 11:51:50 -08001757 break;
Eilon Greenstein356e2382009-02-12 08:38:32 +00001758
Eliezer Tamirf1410642008-02-28 11:51:50 -08001759 default:
Yaniv Rosnera22f0782010-09-07 11:41:20 +00001760 bp->port.advertising[cfg_idx] &= ~(ADVERTISED_Asym_Pause |
Dmitry Kravkovf85582f2010-10-06 03:34:21 +00001761 ADVERTISED_Pause);
Eliezer Tamirf1410642008-02-28 11:51:50 -08001762 break;
1763 }
1764}
1765
/**
 * bnx2x_initial_phy_init - bring up the PHY/link for the first time
 * @bp:		driver handle
 * @load_mode:	LOAD_* mode; LOAD_DIAG forces XGXS loopback at 10G
 *
 * Returns the bnx2x_phy_init() result, or -EINVAL when no bootcode (MCP)
 * is present.  NOTE(review): the u8 return type truncates -EINVAL; callers
 * presumably only test for non-zero - confirm before relying on the value.
 */
u8 bnx2x_initial_phy_init(struct bnx2x *bp, int load_mode)
{
	if (!BP_NOMCP(bp)) {
		u8 rc;
		int cfx_idx = bnx2x_get_link_cfg_idx(bp);
		/* saved so the DIAG override below can be undone */
		u16 req_line_speed = bp->link_params.req_line_speed[cfx_idx];
		/* Initialize link parameters structure variables */
		/* It is recommended to turn off RX FC for jumbo frames
		   for better performance */
		if ((CHIP_IS_E1x(bp)) && (bp->dev->mtu > 5000))
			bp->link_params.req_fc_auto_adv = BNX2X_FLOW_CTRL_TX;
		else
			bp->link_params.req_fc_auto_adv = BNX2X_FLOW_CTRL_BOTH;

		bnx2x_acquire_phy_lock(bp);

		if (load_mode == LOAD_DIAG) {
			bp->link_params.loopback_mode = LOOPBACK_XGXS;
			bp->link_params.req_line_speed[cfx_idx] = SPEED_10000;
		}

		rc = bnx2x_phy_init(&bp->link_params, &bp->link_vars);

		bnx2x_release_phy_lock(bp);

		bnx2x_calc_fc_adv(bp);

		if (CHIP_REV_IS_SLOW(bp) && bp->link_vars.link_up) {
			bnx2x_stats_handle(bp, STATS_EVENT_LINK_UP);
			bnx2x_link_report(bp);
		}
		/* restore the requested speed possibly clobbered for DIAG */
		bp->link_params.req_line_speed[cfx_idx] = req_line_speed;
		return rc;
	}
	BNX2X_ERR("Bootcode is missing - can not initialize link\n");
	return -EINVAL;
}
1803
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00001804void bnx2x_link_set(struct bnx2x *bp)
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02001805{
Eilon Greenstein19680c42008-08-13 15:47:33 -07001806 if (!BP_NOMCP(bp)) {
Yitchak Gertner4a37fb62008-08-13 15:50:23 -07001807 bnx2x_acquire_phy_lock(bp);
Yaniv Rosner54c2fb72010-09-01 09:51:23 +00001808 bnx2x_link_reset(&bp->link_params, &bp->link_vars, 1);
Eilon Greenstein19680c42008-08-13 15:47:33 -07001809 bnx2x_phy_init(&bp->link_params, &bp->link_vars);
Yitchak Gertner4a37fb62008-08-13 15:50:23 -07001810 bnx2x_release_phy_lock(bp);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02001811
Eilon Greenstein19680c42008-08-13 15:47:33 -07001812 bnx2x_calc_fc_adv(bp);
1813 } else
Eilon Greensteinf5372252009-02-12 08:38:30 +00001814 BNX2X_ERR("Bootcode is missing - can not set link\n");
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02001815}
1816
Yaniv Rosnerc18487e2008-06-23 20:27:52 -07001817static void bnx2x__link_reset(struct bnx2x *bp)
1818{
Eilon Greenstein19680c42008-08-13 15:47:33 -07001819 if (!BP_NOMCP(bp)) {
Yitchak Gertner4a37fb62008-08-13 15:50:23 -07001820 bnx2x_acquire_phy_lock(bp);
Eilon Greenstein589abe32009-02-12 08:36:55 +00001821 bnx2x_link_reset(&bp->link_params, &bp->link_vars, 1);
Yitchak Gertner4a37fb62008-08-13 15:50:23 -07001822 bnx2x_release_phy_lock(bp);
Eilon Greenstein19680c42008-08-13 15:47:33 -07001823 } else
Eilon Greensteinf5372252009-02-12 08:38:30 +00001824 BNX2X_ERR("Bootcode is missing - can not reset link\n");
Yaniv Rosnerc18487e2008-06-23 20:27:52 -07001825}
1826
Yaniv Rosnera22f0782010-09-07 11:41:20 +00001827u8 bnx2x_link_test(struct bnx2x *bp, u8 is_serdes)
Yaniv Rosnerc18487e2008-06-23 20:27:52 -07001828{
Vladislav Zolotarov2145a922010-04-19 01:13:49 +00001829 u8 rc = 0;
Yaniv Rosnerc18487e2008-06-23 20:27:52 -07001830
Vladislav Zolotarov2145a922010-04-19 01:13:49 +00001831 if (!BP_NOMCP(bp)) {
1832 bnx2x_acquire_phy_lock(bp);
Yaniv Rosnera22f0782010-09-07 11:41:20 +00001833 rc = bnx2x_test_link(&bp->link_params, &bp->link_vars,
1834 is_serdes);
Vladislav Zolotarov2145a922010-04-19 01:13:49 +00001835 bnx2x_release_phy_lock(bp);
1836 } else
1837 BNX2X_ERR("Bootcode is missing - can not test link\n");
Yaniv Rosnerc18487e2008-06-23 20:27:52 -07001838
1839 return rc;
1840}
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02001841
/* Initialize the per-port rate-shaping and fairness contexts
 * (bp->cmng.rs_vars / bp->cmng.fair_vars) from the current line speed.
 * r_param is line_speed / 8 - presumably bytes per usec with line_speed
 * in Mbps (TODO confirm units against the firmware interface).
 */
static void bnx2x_init_port_minmax(struct bnx2x *bp)
{
	u32 r_param = bp->link_vars.line_speed / 8;
	u32 fair_periodic_timeout_usec;
	u32 t_fair;

	memset(&(bp->cmng.rs_vars), 0,
	       sizeof(struct rate_shaping_vars_per_port));
	memset(&(bp->cmng.fair_vars), 0, sizeof(struct fairness_vars_per_port));

	/* 100 usec in SDM ticks = 25 since each tick is 4 usec */
	bp->cmng.rs_vars.rs_periodic_timeout = RS_PERIODIC_TIMEOUT_USEC / 4;

	/* this is the threshold below which no timer arming will occur
	   1.25 coefficient is for the threshold to be a little bigger
	   than the real time, to compensate for timer in-accuracy */
	bp->cmng.rs_vars.rs_threshold =
				(RS_PERIODIC_TIMEOUT_USEC * r_param * 5) / 4;

	/* resolution of fairness timer */
	fair_periodic_timeout_usec = QM_ARB_BYTES / r_param;
	/* for 10G it is 1000usec. for 1G it is 10000usec. */
	t_fair = T_FAIR_COEF / bp->link_vars.line_speed;

	/* this is the threshold below which we won't arm the timer anymore */
	bp->cmng.fair_vars.fair_threshold = QM_ARB_BYTES;

	/* we multiply by 1e3/8 to get bytes/msec.
	   We don't want the credits to pass a credit
	   of the t_fair*FAIR_MEM (algorithm resolution) */
	bp->cmng.fair_vars.upper_bound = r_param * t_fair * FAIR_MEM;
	/* since each tick is 4 usec */
	bp->cmng.fair_vars.fairness_timeout = fair_periodic_timeout_usec / 4;
}
1876
/* Calculates the sum of vn_min_rates.
   It's needed for further normalizing of the min_rates.
   Returns:
     sum of vn_min_rates.
       or
     0 - if all the min_rates are 0.
   In the latter case the fairness algorithm should be deactivated.
   If not all min_rates are zero then those that are zeroes will be set to 1.
 */
static void bnx2x_calc_vn_weight_sum(struct bnx2x *bp)
{
	/* stays 1 only while every visible vn has a zero min rate */
	int all_zero = 1;
	int vn;

	bp->vn_weight_sum = 0;
	for (vn = VN_0; vn < E1HVN_MAX; vn++) {
		u32 vn_cfg = bp->mf_config[vn];
		/* configured MIN_BW field scaled by 100 */
		u32 vn_min_rate = ((vn_cfg & FUNC_MF_CFG_MIN_BW_MASK) >>
				   FUNC_MF_CFG_MIN_BW_SHIFT) * 100;

		/* Skip hidden vns */
		if (vn_cfg & FUNC_MF_CFG_FUNC_HIDE)
			continue;

		/* If min rate is zero - set it to 1 */
		if (!vn_min_rate)
			vn_min_rate = DEF_MIN_RATE;
		else
			all_zero = 0;

		bp->vn_weight_sum += vn_min_rate;
	}

	/* if ETS or all min rates are zeros - disable fairness */
	if (BNX2X_IS_ETS_ENABLED(bp)) {
		bp->cmng.flags.cmng_enables &=
					~CMNG_FLAGS_PER_PORT_FAIRNESS_VN;
		DP(NETIF_MSG_IFUP, "Fairness will be disabled due to ETS\n");
	} else if (all_zero) {
		bp->cmng.flags.cmng_enables &=
					~CMNG_FLAGS_PER_PORT_FAIRNESS_VN;
		DP(NETIF_MSG_IFUP, "All MIN values are zeroes"
				   " fairness will be disabled\n");
	} else
		bp->cmng.flags.cmng_enables |=
					CMNG_FLAGS_PER_PORT_FAIRNESS_VN;
}
1924
/* Program the per-vn rate-shaping and fairness contexts into XSTORM
 * internal memory.  Hidden functions get zero min/max; otherwise min
 * comes from the MIN_BW config field and max from the extracted max
 * config (percent of line speed in MF-SI mode, absolute 100Mb units
 * otherwise).
 */
static void bnx2x_init_vn_minmax(struct bnx2x *bp, int vn)
{
	struct rate_shaping_vars_per_vn m_rs_vn;
	struct fairness_vars_per_vn m_fair_vn;
	u32 vn_cfg = bp->mf_config[vn];
	int func = 2*vn + BP_PORT(bp);
	u16 vn_min_rate, vn_max_rate;
	int i;

	/* If function is hidden - set min and max to zeroes */
	if (vn_cfg & FUNC_MF_CFG_FUNC_HIDE) {
		vn_min_rate = 0;
		vn_max_rate = 0;

	} else {
		u32 maxCfg = bnx2x_extract_max_cfg(bp, vn_cfg);

		vn_min_rate = ((vn_cfg & FUNC_MF_CFG_MIN_BW_MASK) >>
				FUNC_MF_CFG_MIN_BW_SHIFT) * 100;
		/* If fairness is enabled (not all min rates are zeroes) and
		   if current min rate is zero - set it to 1.
		   This is a requirement of the algorithm. */
		if (bp->vn_weight_sum && (vn_min_rate == 0))
			vn_min_rate = DEF_MIN_RATE;

		if (IS_MF_SI(bp))
			/* maxCfg in percents of linkspeed */
			vn_max_rate = (bp->link_vars.line_speed * maxCfg) / 100;
		else
			/* maxCfg is absolute in 100Mb units */
			vn_max_rate = maxCfg * 100;
	}

	DP(NETIF_MSG_IFUP,
	   "func %d: vn_min_rate %d vn_max_rate %d vn_weight_sum %d\n",
	   func, vn_min_rate, vn_max_rate, bp->vn_weight_sum);

	memset(&m_rs_vn, 0, sizeof(struct rate_shaping_vars_per_vn));
	memset(&m_fair_vn, 0, sizeof(struct fairness_vars_per_vn));

	/* global vn counter - maximal Mbps for this vn */
	m_rs_vn.vn_counter.rate = vn_max_rate;

	/* quota - number of bytes transmitted in this period */
	m_rs_vn.vn_counter.quota =
				(vn_max_rate * RS_PERIODIC_TIMEOUT_USEC) / 8;

	if (bp->vn_weight_sum) {
		/* credit for each period of the fairness algorithm:
		   number of bytes in T_FAIR (the vn share the port rate).
		   vn_weight_sum should not be larger than 10000, thus
		   T_FAIR_COEF / (8 * vn_weight_sum) will always be greater
		   than zero */
		m_fair_vn.vn_credit_delta =
			max_t(u32, (vn_min_rate * (T_FAIR_COEF /
						   (8 * bp->vn_weight_sum))),
			      (bp->cmng.fair_vars.fair_threshold +
							MIN_ABOVE_THRESH));
		DP(NETIF_MSG_IFUP, "m_fair_vn.vn_credit_delta %d\n",
		   m_fair_vn.vn_credit_delta);
	}

	/* Store it to internal memory */
	for (i = 0; i < sizeof(struct rate_shaping_vars_per_vn)/4; i++)
		REG_WR(bp, BAR_XSTRORM_INTMEM +
		       XSTORM_RATE_SHAPING_PER_VN_VARS_OFFSET(func) + i * 4,
		       ((u32 *)(&m_rs_vn))[i]);

	for (i = 0; i < sizeof(struct fairness_vars_per_vn)/4; i++)
		REG_WR(bp, BAR_XSTRORM_INTMEM +
		       XSTORM_FAIRNESS_PER_VN_VARS_OFFSET(func) + i * 4,
		       ((u32 *)(&m_fair_vn))[i]);
}
Dmitry Kravkovf85582f2010-10-06 03:34:21 +00001998
Dmitry Kravkov523224a2010-10-06 03:23:26 +00001999static int bnx2x_get_cmng_fns_mode(struct bnx2x *bp)
2000{
2001 if (CHIP_REV_IS_SLOW(bp))
2002 return CMNG_FNS_NONE;
Dmitry Kravkovfb3bff12010-10-06 03:26:40 +00002003 if (IS_MF(bp))
Dmitry Kravkov523224a2010-10-06 03:23:26 +00002004 return CMNG_FNS_MINMAX;
Eilon Greenstein34f80b02008-06-23 20:33:01 -07002005
Dmitry Kravkov523224a2010-10-06 03:23:26 +00002006 return CMNG_FNS_NONE;
2007}
2008
/**
 * bnx2x_read_mf_cfg - cache each vn's multi-function config word from
 *	shared memory into bp->mf_config[]
 * @bp:		driver handle
 *
 * Does nothing when no MCP bootcode is present.
 */
void bnx2x_read_mf_cfg(struct bnx2x *bp)
{
	int vn, n = (CHIP_MODE_IS_4_PORT(bp) ? 2 : 1);

	if (BP_NOMCP(bp))
		return; /* what should be the default value in this case */

	/* For 2 port configuration the absolute function number formula
	 * is:
	 *      abs_func = 2 * vn + BP_PORT + BP_PATH
	 *
	 *      and there are 4 functions per port
	 *
	 * For 4 port configuration it is
	 *      abs_func = 4 * vn + 2 * BP_PORT + BP_PATH
	 *
	 *      and there are 2 functions per port
	 */
	for (vn = VN_0; vn < E1HVN_MAX; vn++) {
		int /*abs*/func = n * (2 * vn + BP_PORT(bp)) + BP_PATH(bp);

		if (func >= E1H_FUNC_MAX)
			break;

		bp->mf_config[vn] =
			MF_CFG_RD(bp, func_mf_config[func].config);
	}
}
2037
/* Initialize the congestion-management (rate shaping + fairness) state
 * for the given mode; only CMNG_FNS_MINMAX actually programs anything.
 * @read_cfg requests a fresh read of the MF config from shared memory.
 */
static void bnx2x_cmng_fns_init(struct bnx2x *bp, u8 read_cfg, u8 cmng_type)
{

	if (cmng_type == CMNG_FNS_MINMAX) {
		int vn;

		/* clear cmng_enables */
		bp->cmng.flags.cmng_enables = 0;

		/* read mf conf from shmem */
		if (read_cfg)
			bnx2x_read_mf_cfg(bp);

		/* Init rate shaping and fairness contexts */
		bnx2x_init_port_minmax(bp);

		/* vn_weight_sum and enable fairness if not 0 */
		bnx2x_calc_vn_weight_sum(bp);

		/* calculate and set min-max rate for each vn */
		if (bp->port.pmf)
			for (vn = VN_0; vn < E1HVN_MAX; vn++)
				bnx2x_init_vn_minmax(bp, vn);

		/* always enable rate shaping and fairness */
		bp->cmng.flags.cmng_enables |=
					CMNG_FLAGS_PER_PORT_RATE_SHAPING_VN;
		if (!bp->vn_weight_sum)
			DP(NETIF_MSG_IFUP, "All MIN values are zeroes"
				   " fairness will be disabled\n");
		return;
	}

	/* rate shaping and fairness are disabled */
	DP(NETIF_MSG_IFUP,
	   "rate shaping and fairness are disabled\n");
}
2075
2076static inline void bnx2x_link_sync_notify(struct bnx2x *bp)
2077{
2078 int port = BP_PORT(bp);
2079 int func;
2080 int vn;
2081
2082 /* Set the attention towards other drivers on the same port */
2083 for (vn = VN_0; vn < E1HVN_MAX; vn++) {
2084 if (vn == BP_E1HVN(bp))
2085 continue;
2086
2087 func = ((vn << 1) | port);
2088 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_0 +
2089 (LINK_SYNC_ATTENTION_BIT_FUNC_0 + func)*4, 1);
2090 }
2091}
Eilon Greenstein8a1c38d2009-02-12 08:36:40 +00002092
/* This function is called upon link interrupt: refresh link state,
 * (re)program dropless flow control and BMAC stats, re-init congestion
 * management if the link is up, report the link and notify sibling
 * functions in MF mode.
 */
static void bnx2x_link_attn(struct bnx2x *bp)
{
	/* Make sure that we are synced with the current statistics */
	bnx2x_stats_handle(bp, STATS_EVENT_STOP);

	bnx2x_link_update(&bp->link_params, &bp->link_vars);

	if (bp->link_vars.link_up) {

		/* dropless flow control */
		if (!CHIP_IS_E1(bp) && bp->dropless_fc) {
			int port = BP_PORT(bp);
			u32 pause_enabled = 0;

			/* tell the USTORM firmware whether TX pause is on */
			if (bp->link_vars.flow_ctrl & BNX2X_FLOW_CTRL_TX)
				pause_enabled = 1;

			REG_WR(bp, BAR_USTRORM_INTMEM +
			       USTORM_ETH_PAUSE_ENABLED_OFFSET(port),
			       pause_enabled);
		}

		if (bp->link_vars.mac_type == MAC_TYPE_BMAC) {
			struct host_port_stats *pstats;

			pstats = bnx2x_sp(bp, port_stats);
			/* reset old bmac stats */
			memset(&(pstats->mac_stx[0]), 0,
			       sizeof(struct mac_stx));
		}
		if (bp->state == BNX2X_STATE_OPEN)
			bnx2x_stats_handle(bp, STATS_EVENT_LINK_UP);
	}

	if (bp->link_vars.link_up && bp->link_vars.line_speed) {
		int cmng_fns = bnx2x_get_cmng_fns_mode(bp);

		if (cmng_fns != CMNG_FNS_NONE) {
			bnx2x_cmng_fns_init(bp, false, cmng_fns);
			storm_memset_cmng(bp, &bp->cmng, BP_PORT(bp));
		} else
			/* rate shaping and fairness are disabled */
			DP(NETIF_MSG_IFUP,
			   "single function mode without fairness\n");
	}

	__bnx2x_link_report(bp);

	if (IS_MF(bp))
		bnx2x_link_sync_notify(bp);
}
2145
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00002146void bnx2x__link_status_update(struct bnx2x *bp)
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02002147{
Vladislav Zolotarov2ae17f62011-05-04 23:48:23 +00002148 if (bp->state != BNX2X_STATE_OPEN)
Yaniv Rosnerc18487e2008-06-23 20:27:52 -07002149 return;
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02002150
Yaniv Rosnerc18487e2008-06-23 20:27:52 -07002151 bnx2x_link_status_update(&bp->link_params, &bp->link_vars);
2152
Yitchak Gertnerbb2a0f72008-06-23 20:33:36 -07002153 if (bp->link_vars.link_up)
2154 bnx2x_stats_handle(bp, STATS_EVENT_LINK_UP);
2155 else
2156 bnx2x_stats_handle(bp, STATS_EVENT_STOP);
2157
Yaniv Rosnerc18487e2008-06-23 20:27:52 -07002158 /* indicate link status */
2159 bnx2x_link_report(bp);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02002160}
2161
/* Mark this function as the port-management function (PMF), enable the
 * NIG attention bits for our vn (register block depends on the interrupt
 * controller: HC on older chips, IGU on E2) and kick the statistics
 * state machine.
 */
static void bnx2x_pmf_update(struct bnx2x *bp)
{
	int port = BP_PORT(bp);
	u32 val;

	bp->port.pmf = 1;
	DP(NETIF_MSG_LINK, "pmf %d\n", bp->port.pmf);

	/* enable nig attention */
	val = (0xff0f | (1 << (BP_E1HVN(bp) + 4)));
	if (bp->common.int_block == INT_BLOCK_HC) {
		REG_WR(bp, HC_REG_TRAILING_EDGE_0 + port*8, val);
		REG_WR(bp, HC_REG_LEADING_EDGE_0 + port*8, val);
	} else if (CHIP_IS_E2(bp)) {
		REG_WR(bp, IGU_REG_TRAILING_EDGE_LATCH, val);
		REG_WR(bp, IGU_REG_LEADING_EDGE_LATCH, val);
	}

	bnx2x_stats_handle(bp, STATS_EVENT_PMF);
}
2182
Yaniv Rosnerc18487e2008-06-23 20:27:52 -07002183/* end of Link */
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02002184
2185/* slow path */
2186
2187/*
2188 * General service functions
2189 */
2190
/* send the MCP a request, block until there is a reply */
/**
 * bnx2x_fw_command - issue a command to the MCP firmware mailbox
 * @bp:		driver handle
 * @command:	DRV_MSG_* command code
 * @param:	command parameter written to drv_mb_param
 *
 * Serialized by bp->fw_mb_mutex; a per-function sequence number tags
 * each request so the reply can be matched.  Polls the firmware mailbox
 * for up to ~5 seconds (500 * 10ms, 100ms steps on emulation).
 *
 * Returns the firmware reply masked by FW_MSG_CODE_MASK, or 0 if the
 * firmware failed to respond (after dumping FW state).
 */
u32 bnx2x_fw_command(struct bnx2x *bp, u32 command, u32 param)
{
	int mb_idx = BP_FW_MB_IDX(bp);
	u32 seq;
	u32 rc = 0;
	u32 cnt = 1;
	u8 delay = CHIP_REV_IS_SLOW(bp) ? 100 : 10;

	mutex_lock(&bp->fw_mb_mutex);
	seq = ++bp->fw_seq;
	SHMEM_WR(bp, func_mb[mb_idx].drv_mb_param, param);
	SHMEM_WR(bp, func_mb[mb_idx].drv_mb_header, (command | seq));

	DP(BNX2X_MSG_MCP, "wrote command (%x) to FW MB\n", (command | seq));

	do {
		/* let the FW do its magic ... */
		msleep(delay);

		rc = SHMEM_RD(bp, func_mb[mb_idx].fw_mb_header);

		/* Give the FW up to 5 second (500*10ms) */
	} while ((seq != (rc & FW_MSG_SEQ_NUMBER_MASK)) && (cnt++ < 500));

	DP(BNX2X_MSG_MCP, "[after %d ms] read (%x) seq is (%x) from FW MB\n",
	   cnt*delay, rc, seq);

	/* is this a reply to our command? */
	if (seq == (rc & FW_MSG_SEQ_NUMBER_MASK))
		rc &= FW_MSG_CODE_MASK;
	else {
		/* FW BUG! */
		BNX2X_ERR("FW failed to respond!\n");
		bnx2x_fw_dump(bp);
		rc = 0;
	}
	mutex_unlock(&bp->fw_mb_mutex);

	return rc;
}
2232
Vladislav Zolotarovec6ba942010-12-13 05:44:01 +00002233static u8 stat_counter_valid(struct bnx2x *bp, struct bnx2x_fastpath *fp)
2234{
2235#ifdef BCM_CNIC
2236 if (IS_FCOE_FP(fp) && IS_MF(bp))
2237 return false;
2238#endif
2239 return true;
2240}
2241
/* must be called under rtnl_lock */
/*
 * bnx2x_rxq_set_mac_filters - translate BNX2X_ACCEPT_* filter flags for one
 * client into the per-type drop-all / accept-all bitmask fields of
 * bp->mac_filters (bit position = cl_id). Only updates the cached software
 * state; the caller is responsible for pushing it to the HW/FW.
 */
static void bnx2x_rxq_set_mac_filters(struct bnx2x *bp, u16 cl_id, u32 filters)
{
	/* this client's bit in every aggregate filter mask */
	u32 mask = (1 << cl_id);

	/* initial setting is BNX2X_ACCEPT_NONE */
	u8 drop_all_ucast = 1, drop_all_bcast = 1, drop_all_mcast = 1;
	u8 accp_all_ucast = 0, accp_all_bcast = 0, accp_all_mcast = 0;
	u8 unmatched_unicast = 0;

	if (filters & BNX2X_ACCEPT_UNMATCHED_UCAST)
		unmatched_unicast = 1;

	if (filters & BNX2X_PROMISCUOUS_MODE) {
		/* promiscuous - accept all, drop none */
		drop_all_ucast = drop_all_bcast = drop_all_mcast = 0;
		accp_all_ucast = accp_all_bcast = accp_all_mcast = 1;
		if (IS_MF_SI(bp)) {
			/*
			 * SI mode defines to accept in promiscuous mode
			 * only unmatched packets
			 */
			unmatched_unicast = 1;
			accp_all_ucast = 0;
		}
	}
	if (filters & BNX2X_ACCEPT_UNICAST) {
		/* accept matched ucast */
		drop_all_ucast = 0;
	}
	if (filters & BNX2X_ACCEPT_MULTICAST)
		/* accept matched mcast */
		drop_all_mcast = 0;

	if (filters & BNX2X_ACCEPT_ALL_UNICAST) {
		/* accept all ucast */
		drop_all_ucast = 0;
		accp_all_ucast = 1;
	}
	if (filters & BNX2X_ACCEPT_ALL_MULTICAST) {
		/* accept all mcast */
		drop_all_mcast = 0;
		accp_all_mcast = 1;
	}
	if (filters & BNX2X_ACCEPT_BROADCAST) {
		/* accept (all) bcast */
		drop_all_bcast = 0;
		accp_all_bcast = 1;
	}

	/* fold this client's decision into each aggregate mask:
	 * set the client's bit when the policy applies, clear it otherwise */
	bp->mac_filters.ucast_drop_all = drop_all_ucast ?
		bp->mac_filters.ucast_drop_all | mask :
		bp->mac_filters.ucast_drop_all & ~mask;

	bp->mac_filters.mcast_drop_all = drop_all_mcast ?
		bp->mac_filters.mcast_drop_all | mask :
		bp->mac_filters.mcast_drop_all & ~mask;

	bp->mac_filters.bcast_drop_all = drop_all_bcast ?
		bp->mac_filters.bcast_drop_all | mask :
		bp->mac_filters.bcast_drop_all & ~mask;

	bp->mac_filters.ucast_accept_all = accp_all_ucast ?
		bp->mac_filters.ucast_accept_all | mask :
		bp->mac_filters.ucast_accept_all & ~mask;

	bp->mac_filters.mcast_accept_all = accp_all_mcast ?
		bp->mac_filters.mcast_accept_all | mask :
		bp->mac_filters.mcast_accept_all & ~mask;

	bp->mac_filters.bcast_accept_all = accp_all_bcast ?
		bp->mac_filters.bcast_accept_all | mask :
		bp->mac_filters.bcast_accept_all & ~mask;

	bp->mac_filters.unmatched_unicast = unmatched_unicast ?
		bp->mac_filters.unmatched_unicast | mask :
		bp->mac_filters.unmatched_unicast & ~mask;
}
2320
/*
 * bnx2x_func_init - program per-function configuration into storm memories.
 *
 * Builds the tstorm common config (TPA enable + RSS mode/capabilities) from
 * @p, enables the function in the FW, and optionally sets up the statistics
 * collection addresses for all four storms and the slow-path queue (SPQ)
 * address/producer, depending on p->func_flgs.
 */
static void bnx2x_func_init(struct bnx2x *bp, struct bnx2x_func_init_params *p)
{
	struct tstorm_eth_function_common_config tcfg = {0};
	u16 rss_flgs;

	/* tpa */
	if (p->func_flgs & FUNC_FLG_TPA)
		tcfg.config_flags |=
			TSTORM_ETH_FUNCTION_COMMON_CONFIG_ENABLE_TPA;

	/* set rss flags */
	rss_flgs = (p->rss->mode <<
		TSTORM_ETH_FUNCTION_COMMON_CONFIG_RSS_MODE_SHIFT);

	if (p->rss->cap & RSS_IPV4_CAP)
		rss_flgs |= RSS_IPV4_CAP_MASK;
	if (p->rss->cap & RSS_IPV4_TCP_CAP)
		rss_flgs |= RSS_IPV4_TCP_CAP_MASK;
	if (p->rss->cap & RSS_IPV6_CAP)
		rss_flgs |= RSS_IPV6_CAP_MASK;
	if (p->rss->cap & RSS_IPV6_TCP_CAP)
		rss_flgs |= RSS_IPV6_TCP_CAP_MASK;

	tcfg.config_flags |= rss_flgs;
	tcfg.rss_result_mask = p->rss->result_mask;

	storm_memset_func_cfg(bp, &tcfg, p->func_id);

	/* Enable the function in the FW */
	storm_memset_vf_to_pf(bp, p->func_id, p->pf_id);
	storm_memset_func_en(bp, p->func_id, 1);

	/* statistics: one flags struct + DMA address per storm */
	if (p->func_flgs & FUNC_FLG_STATS) {
		struct stats_indication_flags stats_flags = {0};
		stats_flags.collect_eth = 1;

		storm_memset_xstats_flags(bp, &stats_flags, p->func_id);
		storm_memset_xstats_addr(bp, p->fw_stat_map, p->func_id);

		storm_memset_tstats_flags(bp, &stats_flags, p->func_id);
		storm_memset_tstats_addr(bp, p->fw_stat_map, p->func_id);

		storm_memset_ustats_flags(bp, &stats_flags, p->func_id);
		storm_memset_ustats_addr(bp, p->fw_stat_map, p->func_id);

		storm_memset_cstats_flags(bp, &stats_flags, p->func_id);
		storm_memset_cstats_addr(bp, p->fw_stat_map, p->func_id);
	}

	/* spq: publish ring address and initial producer to the xstorm */
	if (p->func_flgs & FUNC_FLG_SPQ) {
		storm_memset_spq_addr(bp, p->spq_map, p->func_id);
		REG_WR(bp, XSEM_REG_FAST_MEMORY +
		       XSTORM_SPQ_PROD_OFFSET(p->func_id), p->spq_prod);
	}
}
2378
2379static inline u16 bnx2x_get_cl_flags(struct bnx2x *bp,
2380 struct bnx2x_fastpath *fp)
2381{
2382 u16 flags = 0;
2383
2384 /* calculate queue flags */
2385 flags |= QUEUE_FLG_CACHE_ALIGN;
2386 flags |= QUEUE_FLG_HC;
Dmitry Kravkov0793f83f2010-12-01 12:39:28 -08002387 flags |= IS_MF_SD(bp) ? QUEUE_FLG_OV : 0;
Dmitry Kravkov523224a2010-10-06 03:23:26 +00002388
Dmitry Kravkov523224a2010-10-06 03:23:26 +00002389 flags |= QUEUE_FLG_VLAN;
2390 DP(NETIF_MSG_IFUP, "vlan removal enabled\n");
Dmitry Kravkov523224a2010-10-06 03:23:26 +00002391
2392 if (!fp->disable_tpa)
2393 flags |= QUEUE_FLG_TPA;
2394
Vladislav Zolotarovec6ba942010-12-13 05:44:01 +00002395 flags = stat_counter_valid(bp, fp) ?
2396 (flags | QUEUE_FLG_STATS) : (flags & ~QUEUE_FLG_STATS);
Dmitry Kravkov523224a2010-10-06 03:23:26 +00002397
2398 return flags;
2399}
2400
/*
 * bnx2x_pf_rx_cl_prep - fill in the Rx queue init parameters for one
 * fastpath client.
 *
 * Computes the TPA aggregation sizing (SGE page budget per packet), the
 * per-queue flow-control (pause) thresholds, and copies the DMA ring
 * addresses / IDs from @fp into @rxq_init for the FW queue-setup ramrod.
 */
static void bnx2x_pf_rx_cl_prep(struct bnx2x *bp,
	struct bnx2x_fastpath *fp, struct rxq_pause_params *pause,
	struct bnx2x_rxq_init_params *rxq_init)
{
	u16 max_sge = 0;
	u16 sge_sz = 0;
	u16 tpa_agg_size = 0;

	/* calculate queue flags */
	u16 flags = bnx2x_get_cl_flags(bp, fp);

	if (!fp->disable_tpa) {
		pause->sge_th_hi = 250;
		pause->sge_th_lo = 150;
		/* cap aggregation at min(8, MAX_SKB_FRAGS) SGE pages,
		 * bounded to the 16-bit field */
		tpa_agg_size = min_t(u32,
			(min_t(u32, 8, MAX_SKB_FRAGS) *
			SGE_PAGE_SIZE * PAGES_PER_SGE), 0xffff);
		/* SGEs needed for an MTU-sized frame, rounded up to a
		 * whole multi-page SGE element */
		max_sge = SGE_PAGE_ALIGN(bp->dev->mtu) >>
			SGE_PAGE_SHIFT;
		max_sge = ((max_sge + PAGES_PER_SGE - 1) &
			  (~(PAGES_PER_SGE-1))) >> PAGES_PER_SGE_SHIFT;
		sge_sz = (u16)min_t(u32, SGE_PAGE_SIZE * PAGES_PER_SGE,
			0xffff);
	}

	/* pause - not for e1; note: overrides the SGE thresholds above */
	if (!CHIP_IS_E1(bp)) {
		pause->bd_th_hi = 350;
		pause->bd_th_lo = 250;
		pause->rcq_th_hi = 350;
		pause->rcq_th_lo = 250;
		pause->sge_th_hi = 0;
		pause->sge_th_lo = 0;
		pause->pri_map = 1;
	}

	/* rxq setup */
	rxq_init->flags = flags;
	rxq_init->cxt = &bp->context.vcxt[fp->cid].eth;
	rxq_init->dscr_map = fp->rx_desc_mapping;
	rxq_init->sge_map = fp->rx_sge_mapping;
	rxq_init->rcq_map = fp->rx_comp_mapping;
	/* "next page" half of the completion ring follows the first page */
	rxq_init->rcq_np_map = fp->rx_comp_mapping + BCM_PAGE_SIZE;

	/* Always use mini-jumbo MTU for FCoE L2 ring */
	if (IS_FCOE_FP(fp))
		rxq_init->mtu = BNX2X_FCOE_MINI_JUMBO_MTU;
	else
		rxq_init->mtu = bp->dev->mtu;

	rxq_init->buf_sz = fp->rx_buf_size;
	rxq_init->cl_qzone_id = fp->cl_qzone_id;
	rxq_init->cl_id = fp->cl_id;
	rxq_init->spcl_id = fp->cl_id;
	rxq_init->stat_id = fp->cl_id;
	rxq_init->tpa_agg_sz = tpa_agg_size;
	rxq_init->sge_buf_sz = sge_sz;
	rxq_init->max_sges_pkt = max_sge;
	rxq_init->cache_line_log = BNX2X_RX_ALIGN_SHIFT;
	rxq_init->fw_sb_id = fp->fw_sb_id;

	/* FCoE L2 completions land on a dedicated SP status-block index */
	if (IS_FCOE_FP(fp))
		rxq_init->sb_cq_index = HC_SP_INDEX_ETH_FCOE_RX_CQ_CONS;
	else
		rxq_init->sb_cq_index = U_SB_ETH_RX_CQ_INDEX;

	rxq_init->cid = HW_CID(bp, fp->cid);

	/* interrupts per second -> coalescing rate (0 disables) */
	rxq_init->hc_rate = bp->rx_ticks ? (1000000 / bp->rx_ticks) : 0;
}
2471
2472static void bnx2x_pf_tx_cl_prep(struct bnx2x *bp,
2473 struct bnx2x_fastpath *fp, struct bnx2x_txq_init_params *txq_init)
2474{
2475 u16 flags = bnx2x_get_cl_flags(bp, fp);
2476
2477 txq_init->flags = flags;
2478 txq_init->cxt = &bp->context.vcxt[fp->cid].eth;
2479 txq_init->dscr_map = fp->tx_desc_mapping;
2480 txq_init->stat_id = fp->cl_id;
2481 txq_init->cid = HW_CID(bp, fp->cid);
2482 txq_init->sb_cq_index = C_SB_ETH_TX_CQ_INDEX;
2483 txq_init->traffic_type = LLFC_TRAFFIC_TYPE_NW;
2484 txq_init->fw_sb_id = fp->fw_sb_id;
Vladislav Zolotarovec6ba942010-12-13 05:44:01 +00002485
2486 if (IS_FCOE_FP(fp)) {
2487 txq_init->sb_cq_index = HC_SP_INDEX_ETH_FCOE_TX_CQ_CONS;
2488 txq_init->traffic_type = LLFC_TRAFFIC_TYPE_FCOE;
2489 }
2490
Dmitry Kravkov523224a2010-10-06 03:23:26 +00002491 txq_init->hc_rate = bp->tx_ticks ? (1000000 / bp->tx_ticks) : 0;
2492}
2493
/*
 * bnx2x_pf_init - per-PF FW/storm initialization done at nic-load time.
 *
 * Programs the outer-VLAN tag (non-E1), clears IGU per-PF statistics on E2,
 * runs the common function init (stats, SPQ, RSS config), sets up initial
 * congestion management at an assumed 10G link rate, disables Rx until the
 * link is up, and initializes the event queue data in storm memory.
 */
static void bnx2x_pf_init(struct bnx2x *bp)
{
	struct bnx2x_func_init_params func_init = {0};
	struct bnx2x_rss_params rss = {0};
	struct event_ring_data eq_data = { {0} };
	u16 flags;

	/* pf specific setups */
	if (!CHIP_IS_E1(bp))
		storm_memset_ov(bp, bp->mf_ov, BP_FUNC(bp));

	if (CHIP_IS_E2(bp)) {
		/* reset IGU PF statistics: MSIX + ATTN */
		/* PF */
		REG_WR(bp, IGU_REG_STATISTIC_NUM_MESSAGE_SENT +
			   BNX2X_IGU_STAS_MSG_VF_CNT*4 +
			   (CHIP_MODE_IS_4_PORT(bp) ?
				BP_FUNC(bp) : BP_VN(bp))*4, 0);
		/* ATTN */
		REG_WR(bp, IGU_REG_STATISTIC_NUM_MESSAGE_SENT +
			   BNX2X_IGU_STAS_MSG_VF_CNT*4 +
			   BNX2X_IGU_STAS_MSG_PF_CNT*4 +
			   (CHIP_MODE_IS_4_PORT(bp) ?
				BP_FUNC(bp) : BP_VN(bp))*4, 0);
	}

	/* function setup flags */
	flags = (FUNC_FLG_STATS | FUNC_FLG_LEADING | FUNC_FLG_SPQ);

	if (CHIP_IS_E1x(bp))
		flags |= (bp->flags & TPA_ENABLE_FLAG) ? FUNC_FLG_TPA : 0;
	else
		flags |= FUNC_FLG_TPA;

	/* function setup */

	/**
	 * Although RSS is meaningless when there is a single HW queue we
	 * still need it enabled in order to have HW Rx hash generated.
	 */
	rss.cap = (RSS_IPV4_CAP | RSS_IPV4_TCP_CAP |
		   RSS_IPV6_CAP | RSS_IPV6_TCP_CAP);
	rss.mode = bp->multi_mode;
	rss.result_mask = MULTI_MASK;
	func_init.rss = &rss;

	func_init.func_flgs = flags;
	func_init.pf_id = BP_FUNC(bp);
	func_init.func_id = BP_FUNC(bp);
	func_init.fw_stat_map = bnx2x_sp_mapping(bp, fw_stats);
	func_init.spq_map = bp->spq_mapping;
	func_init.spq_prod = bp->spq_prod_idx;

	bnx2x_func_init(bp, &func_init);

	memset(&(bp->cmng), 0, sizeof(struct cmng_struct_per_port));

	/*
	   Congestion management values depend on the link rate
	   There is no active link so initial link rate is set to 10 Gbps.
	   When the link comes up The congestion management values are
	   re-calculated according to the actual link rate.
	 */
	bp->link_vars.line_speed = SPEED_10000;
	bnx2x_cmng_fns_init(bp, true, bnx2x_get_cmng_fns_mode(bp));

	/* Only the PMF sets the HW */
	if (bp->port.pmf)
		storm_memset_cmng(bp, &bp->cmng, BP_PORT(bp));

	/* no rx until link is up */
	bp->rx_mode = BNX2X_RX_MODE_NONE;
	bnx2x_set_storm_rx_mode(bp);

	/* init Event Queue */
	eq_data.base_addr.hi = U64_HI(bp->eq_mapping);
	eq_data.base_addr.lo = U64_LO(bp->eq_mapping);
	eq_data.producer = bp->eq_prod;
	eq_data.index_id = HC_SP_INDEX_EQ_CONS;
	eq_data.sb_id = DEF_SB_ID;
	storm_memset_eq_data(bp, &eq_data, BP_FUNC(bp));
}
2576
2577
/*
 * bnx2x_e1h_disable - quiesce this E1H function when the MCP disables it.
 *
 * Stops the Tx queues, closes the NIG port-0/1 function-enable gate so no
 * traffic flows through this function, and drops the carrier.
 */
static void bnx2x_e1h_disable(struct bnx2x *bp)
{
	int port = BP_PORT(bp);

	netif_tx_disable(bp->dev);

	REG_WR(bp, NIG_REG_LLH0_FUNC_EN + port*8, 0);

	netif_carrier_off(bp->dev);
}
2588
/*
 * bnx2x_e1h_enable - re-enable this E1H function after an MCP enable event.
 *
 * Reopens the NIG function-enable gate and restarts the Tx queues.
 */
static void bnx2x_e1h_enable(struct bnx2x *bp)
{
	int port = BP_PORT(bp);

	REG_WR(bp, NIG_REG_LLH0_FUNC_EN + port*8, 1);

	/* Tx queue should be only reenabled */
	netif_tx_wake_all_queues(bp->dev);

	/*
	 * Should not call netif_carrier_on since it will be called if the link
	 * is up when checking for link state
	 */
}
2603
/* called due to MCP event (on pmf):
 * reread new bandwidth configuration
 * configure FW
 * notify others function about the change
 */
static inline void bnx2x_config_mf_bw(struct bnx2x *bp)
{
	/* only meaningful with an active link; min/max rates are derived
	 * from the current link speed */
	if (bp->link_vars.link_up) {
		bnx2x_cmng_fns_init(bp, true, CMNG_FNS_MINMAX);
		bnx2x_link_sync_notify(bp);
	}
	/* push the (re)computed congestion-management config to storm mem */
	storm_memset_cmng(bp, &bp->cmng, BP_PORT(bp));
}
2617
/*
 * bnx2x_set_mf_bw - apply a new MF bandwidth configuration and ack the
 * request back to the MCP.
 */
static inline void bnx2x_set_mf_bw(struct bnx2x *bp)
{
	bnx2x_config_mf_bw(bp);
	bnx2x_fw_command(bp, DRV_MSG_CODE_SET_MF_BW_ACK, 0);
}
2623
/*
 * bnx2x_dcc_event - handle a Dynamic Configuration Change event from
 * the MCP.
 *
 * Processes function disable/enable and bandwidth-allocation requests,
 * clearing each handled bit from @dcc_event; any bit left set means an
 * unhandled request and is reported to the MCP as a failure.
 */
static void bnx2x_dcc_event(struct bnx2x *bp, u32 dcc_event)
{
	DP(BNX2X_MSG_MCP, "dcc_event 0x%x\n", dcc_event);

	if (dcc_event & DRV_STATUS_DCC_DISABLE_ENABLE_PF) {

		/*
		 * This is the only place besides the function initialization
		 * where the bp->flags can change so it is done without any
		 * locks
		 */
		if (bp->mf_config[BP_VN(bp)] & FUNC_MF_CFG_FUNC_DISABLED) {
			DP(NETIF_MSG_IFDOWN, "mf_cfg function disabled\n");
			bp->flags |= MF_FUNC_DIS;

			bnx2x_e1h_disable(bp);
		} else {
			DP(NETIF_MSG_IFUP, "mf_cfg function enabled\n");
			bp->flags &= ~MF_FUNC_DIS;

			bnx2x_e1h_enable(bp);
		}
		dcc_event &= ~DRV_STATUS_DCC_DISABLE_ENABLE_PF;
	}
	if (dcc_event & DRV_STATUS_DCC_BANDWIDTH_ALLOCATION) {
		bnx2x_config_mf_bw(bp);
		dcc_event &= ~DRV_STATUS_DCC_BANDWIDTH_ALLOCATION;
	}

	/* Report results to MCP */
	if (dcc_event)
		bnx2x_fw_command(bp, DRV_MSG_CODE_DCC_FAILURE, 0);
	else
		bnx2x_fw_command(bp, DRV_MSG_CODE_DCC_OK, 0);
}
2659
Michael Chan28912902009-10-10 13:46:53 +00002660/* must be called under the spq lock */
2661static inline struct eth_spe *bnx2x_sp_get_next(struct bnx2x *bp)
2662{
2663 struct eth_spe *next_spe = bp->spq_prod_bd;
2664
2665 if (bp->spq_prod_bd == bp->spq_last_bd) {
2666 bp->spq_prod_bd = bp->spq;
2667 bp->spq_prod_idx = 0;
2668 DP(NETIF_MSG_TIMER, "end of spq\n");
2669 } else {
2670 bp->spq_prod_bd++;
2671 bp->spq_prod_idx++;
2672 }
2673 return next_spe;
2674}
2675
/* must be called under the spq lock */
/*
 * bnx2x_sp_prod_update - publish the new SPQ producer index to the xstorm.
 * The wmb() guarantees the BD contents are visible before the producer
 * update; mmiowb() orders the MMIO write before the spinlock release.
 */
static inline void bnx2x_sp_prod_update(struct bnx2x *bp)
{
	int func = BP_FUNC(bp);

	/* Make sure that BD data is updated before writing the producer */
	wmb();

	REG_WR16(bp, BAR_XSTRORM_INTMEM + XSTORM_SPQ_PROD_OFFSET(func),
		 bp->spq_prod_idx);
	mmiowb();
}
2688
/* the slow path queue is odd since completions arrive on the fastpath ring */
/*
 * bnx2x_sp_post - post a slow-path (ramrod) element on the SPQ.
 *
 * @command: ramrod command id
 * @cid:     connection id the ramrod applies to
 * @data_hi/@data_lo: DMA address of the ramrod data
 * @common:  non-zero for common (EQ-completed) ramrods, zero for per-ETH
 *           connection ramrods
 *
 * Checks the appropriate credit counter (eq_spq_left/cq_spq_left), fills
 * the next SPQ BD and publishes the new producer. Returns 0 on success,
 * -EBUSY (after panicking) when the ring is unexpectedly full, -EIO if the
 * driver already panicked.
 */
int bnx2x_sp_post(struct bnx2x *bp, int command, int cid,
		  u32 data_hi, u32 data_lo, int common)
{
	struct eth_spe *spe;
	u16 type;

#ifdef BNX2X_STOP_ON_ERROR
	if (unlikely(bp->panic))
		return -EIO;
#endif

	spin_lock_bh(&bp->spq_lock);

	/* ring-full here is a driver bug: credits are accounted per post */
	if (common) {
		if (!atomic_read(&bp->eq_spq_left)) {
			BNX2X_ERR("BUG! EQ ring full!\n");
			spin_unlock_bh(&bp->spq_lock);
			bnx2x_panic();
			return -EBUSY;
		}
	} else if (!atomic_read(&bp->cq_spq_left)) {
		BNX2X_ERR("BUG! SPQ ring full!\n");
		spin_unlock_bh(&bp->spq_lock);
		bnx2x_panic();
		return -EBUSY;
	}

	spe = bnx2x_sp_get_next(bp);

	/* CID needs port number to be encoded into it */
	spe->hdr.conn_and_cmd_data =
		cpu_to_le32((command << SPE_HDR_CMD_ID_SHIFT) |
			    HW_CID(bp, cid));

	if (common)
		/* Common ramrods:
		 * FUNC_START, FUNC_STOP, CFC_DEL, STATS, SET_MAC
		 * TRAFFIC_STOP, TRAFFIC_START
		 */
		type = (NONE_CONNECTION_TYPE << SPE_HDR_CONN_TYPE_SHIFT)
			& SPE_HDR_CONN_TYPE;
	else
		/* ETH ramrods: SETUP, HALT */
		type = (ETH_CONNECTION_TYPE << SPE_HDR_CONN_TYPE_SHIFT)
			& SPE_HDR_CONN_TYPE;

	type |= ((BP_FUNC(bp) << SPE_HDR_FUNCTION_ID_SHIFT) &
		 SPE_HDR_FUNCTION_ID);

	spe->hdr.type = cpu_to_le16(type);

	spe->data.update_data_addr.hi = cpu_to_le32(data_hi);
	spe->data.update_data_addr.lo = cpu_to_le32(data_lo);

	/* stats ramrod has its own slot on the spq */
	if (command != RAMROD_CMD_ID_COMMON_STAT_QUERY) {
		/* It's ok if the actual decrement is issued towards the memory
		 * somewhere between the spin_lock and spin_unlock. Thus no
		 * more explicit memory barrier is needed.
		 */
		if (common)
			atomic_dec(&bp->eq_spq_left);
		else
			atomic_dec(&bp->cq_spq_left);
	}


	DP(BNX2X_MSG_SP/*NETIF_MSG_TIMER*/,
	   "SPQE[%x] (%x:%x) command %d hw_cid %x data (%x:%x) "
	   "type(0x%x) left (ETH, COMMON) (%x,%x)\n",
	   bp->spq_prod_idx, (u32)U64_HI(bp->spq_mapping),
	   (u32)(U64_LO(bp->spq_mapping) +
	   (void *)bp->spq_prod_bd - (void *)bp->spq), command,
	   HW_CID(bp, cid), data_hi, data_lo, type,
	   atomic_read(&bp->cq_spq_left), atomic_read(&bp->eq_spq_left));

	bnx2x_sp_prod_update(bp);
	spin_unlock_bh(&bp->spq_lock);
	return 0;
}
2770
2771/* acquire split MCP access lock register */
Yitchak Gertner4a37fb62008-08-13 15:50:23 -07002772static int bnx2x_acquire_alr(struct bnx2x *bp)
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02002773{
Vladislav Zolotarov72fd0712010-04-19 01:13:12 +00002774 u32 j, val;
Eilon Greenstein34f80b02008-06-23 20:33:01 -07002775 int rc = 0;
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02002776
2777 might_sleep();
Vladislav Zolotarov72fd0712010-04-19 01:13:12 +00002778 for (j = 0; j < 1000; j++) {
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02002779 val = (1UL << 31);
2780 REG_WR(bp, GRCBASE_MCP + 0x9c, val);
2781 val = REG_RD(bp, GRCBASE_MCP + 0x9c);
2782 if (val & (1L << 31))
2783 break;
2784
2785 msleep(5);
2786 }
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02002787 if (!(val & (1L << 31))) {
Eilon Greenstein19680c42008-08-13 15:47:33 -07002788 BNX2X_ERR("Cannot acquire MCP access lock register\n");
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02002789 rc = -EBUSY;
2790 }
2791
2792 return rc;
2793}
2794
/* release split MCP access lock register */
/* Writing 0 clears the lock bit taken by bnx2x_acquire_alr(). */
static void bnx2x_release_alr(struct bnx2x *bp)
{
	REG_WR(bp, GRCBASE_MCP + 0x9c, 0);
}
2800
/* bits returned by bnx2x_update_dsb_idx() */
#define BNX2X_DEF_SB_ATT_IDX	0x0001
#define BNX2X_DEF_SB_IDX	0x0002

/*
 * bnx2x_update_dsb_idx - sample the default status block indices.
 *
 * Compares the chip-written attention and slow-path running indices with
 * the driver's cached copies, updates the cache, and returns a bitmask of
 * which indices changed (BNX2X_DEF_SB_ATT_IDX / BNX2X_DEF_SB_IDX).
 */
static inline u16 bnx2x_update_dsb_idx(struct bnx2x *bp)
{
	struct host_sp_status_block *def_sb = bp->def_status_blk;
	u16 rc = 0;

	barrier(); /* status block is written to by the chip */
	if (bp->def_att_idx != def_sb->atten_status_block.attn_bits_index) {
		bp->def_att_idx = def_sb->atten_status_block.attn_bits_index;
		rc |= BNX2X_DEF_SB_ATT_IDX;
	}

	if (bp->def_idx != def_sb->sp_sb.running_index) {
		bp->def_idx = def_sb->sp_sb.running_index;
		rc |= BNX2X_DEF_SB_IDX;
	}

	/* Do not reorder: indices reading should complete before handling */
	barrier();
	return rc;
}
2824
2825/*
2826 * slow path service functions
2827 */
2828
/*
 * bnx2x_attn_int_asserted - handle newly-asserted attention bits.
 *
 * Masks the asserted lines in the AEU (under the per-port HW lock), records
 * them in bp->attn_state, services the hard-wired attentions (most notably
 * the NIG link attention, handled under the PHY lock with NIG interrupts
 * temporarily masked), acks the bits to the HC/IGU, and finally restores
 * the NIG mask.
 */
static void bnx2x_attn_int_asserted(struct bnx2x *bp, u32 asserted)
{
	int port = BP_PORT(bp);
	u32 aeu_addr = port ? MISC_REG_AEU_MASK_ATTN_FUNC_1 :
			      MISC_REG_AEU_MASK_ATTN_FUNC_0;
	u32 nig_int_mask_addr = port ? NIG_REG_MASK_INTERRUPT_PORT1 :
				       NIG_REG_MASK_INTERRUPT_PORT0;
	u32 aeu_mask;
	u32 nig_mask = 0;
	u32 reg_addr;

	/* a bit both asserted and already recorded indicates a missed
	 * deassertion */
	if (bp->attn_state & asserted)
		BNX2X_ERR("IGU ERROR\n");

	bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_PORT0_ATT_MASK + port);
	aeu_mask = REG_RD(bp, aeu_addr);

	DP(NETIF_MSG_HW, "aeu_mask %x newly asserted %x\n",
	   aeu_mask, asserted);
	aeu_mask &= ~(asserted & 0x3ff);
	DP(NETIF_MSG_HW, "new mask %x\n", aeu_mask);

	REG_WR(bp, aeu_addr, aeu_mask);
	bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_PORT0_ATT_MASK + port);

	DP(NETIF_MSG_HW, "attn_state %x\n", bp->attn_state);
	bp->attn_state |= asserted;
	DP(NETIF_MSG_HW, "new state %x\n", bp->attn_state);

	if (asserted & ATTN_HARD_WIRED_MASK) {
		if (asserted & ATTN_NIG_FOR_FUNC) {

			bnx2x_acquire_phy_lock(bp);

			/* save nig interrupt mask */
			nig_mask = REG_RD(bp, nig_int_mask_addr);

			/* If nig_mask is not set, no need to call the update
			 * function.
			 */
			if (nig_mask) {
				REG_WR(bp, nig_int_mask_addr, 0);

				bnx2x_link_attn(bp);
			}

			/* handle unicore attn? */
		}
		if (asserted & ATTN_SW_TIMER_4_FUNC)
			DP(NETIF_MSG_HW, "ATTN_SW_TIMER_4_FUNC!\n");

		if (asserted & GPIO_2_FUNC)
			DP(NETIF_MSG_HW, "GPIO_2_FUNC!\n");

		if (asserted & GPIO_3_FUNC)
			DP(NETIF_MSG_HW, "GPIO_3_FUNC!\n");

		if (asserted & GPIO_4_FUNC)
			DP(NETIF_MSG_HW, "GPIO_4_FUNC!\n");

		/* clear the port's general attention latches */
		if (port == 0) {
			if (asserted & ATTN_GENERAL_ATTN_1) {
				DP(NETIF_MSG_HW, "ATTN_GENERAL_ATTN_1!\n");
				REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_1, 0x0);
			}
			if (asserted & ATTN_GENERAL_ATTN_2) {
				DP(NETIF_MSG_HW, "ATTN_GENERAL_ATTN_2!\n");
				REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_2, 0x0);
			}
			if (asserted & ATTN_GENERAL_ATTN_3) {
				DP(NETIF_MSG_HW, "ATTN_GENERAL_ATTN_3!\n");
				REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_3, 0x0);
			}
		} else {
			if (asserted & ATTN_GENERAL_ATTN_4) {
				DP(NETIF_MSG_HW, "ATTN_GENERAL_ATTN_4!\n");
				REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_4, 0x0);
			}
			if (asserted & ATTN_GENERAL_ATTN_5) {
				DP(NETIF_MSG_HW, "ATTN_GENERAL_ATTN_5!\n");
				REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_5, 0x0);
			}
			if (asserted & ATTN_GENERAL_ATTN_6) {
				DP(NETIF_MSG_HW, "ATTN_GENERAL_ATTN_6!\n");
				REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_6, 0x0);
			}
		}

	} /* if hardwired */

	/* ack the attention bits to the interrupt controller */
	if (bp->common.int_block == INT_BLOCK_HC)
		reg_addr = (HC_REG_COMMAND_REG + port*32 +
			    COMMAND_REG_ATTN_BITS_SET);
	else
		reg_addr = (BAR_IGU_INTMEM + IGU_CMD_ATTN_BIT_SET_UPPER*8);

	DP(NETIF_MSG_HW, "about to mask 0x%08x at %s addr 0x%x\n", asserted,
	   (bp->common.int_block == INT_BLOCK_HC) ? "HC" : "IGU", reg_addr);
	REG_WR(bp, reg_addr, asserted);

	/* now set back the mask */
	if (asserted & ATTN_NIG_FOR_FUNC) {
		REG_WR(bp, nig_int_mask_addr, nig_mask);
		bnx2x_release_phy_lock(bp);
	}
}
2935
/*
 * bnx2x_fan_failure - record and report a fan failure.
 *
 * Marks the external PHY type as FAILURE in shared memory (so the failure
 * persists across driver reloads) and logs a shutdown warning to the user.
 */
static inline void bnx2x_fan_failure(struct bnx2x *bp)
{
	int port = BP_PORT(bp);
	u32 ext_phy_config;
	/* mark the failure */
	ext_phy_config =
		SHMEM_RD(bp,
			 dev_info.port_hw_config[port].external_phy_config);

	ext_phy_config &= ~PORT_HW_CFG_XGXS_EXT_PHY_TYPE_MASK;
	ext_phy_config |= PORT_HW_CFG_XGXS_EXT_PHY_TYPE_FAILURE;
	SHMEM_WR(bp, dev_info.port_hw_config[port].external_phy_config,
		 ext_phy_config);

	/* log the failure */
	netdev_err(bp->dev, "Fan Failure on Network Controller has caused"
		   " the driver to shutdown the card to prevent permanent"
		   " damage. Please contact OEM Support for assistance\n");
}
Eilon Greensteinab6ad5a2009-08-12 08:24:29 +00002955
Eliezer Tamir877e9aa2008-02-28 11:55:53 -08002956static inline void bnx2x_attn_int_deasserted0(struct bnx2x *bp, u32 attn)
2957{
Eilon Greenstein34f80b02008-06-23 20:33:01 -07002958 int port = BP_PORT(bp);
Eliezer Tamir877e9aa2008-02-28 11:55:53 -08002959 int reg_offset;
Yaniv Rosnerd90d96b2010-09-07 11:41:04 +00002960 u32 val;
Eliezer Tamir877e9aa2008-02-28 11:55:53 -08002961
Eilon Greenstein34f80b02008-06-23 20:33:01 -07002962 reg_offset = (port ? MISC_REG_AEU_ENABLE1_FUNC_1_OUT_0 :
2963 MISC_REG_AEU_ENABLE1_FUNC_0_OUT_0);
Eliezer Tamir877e9aa2008-02-28 11:55:53 -08002964
Eilon Greenstein34f80b02008-06-23 20:33:01 -07002965 if (attn & AEU_INPUTS_ATTN_BITS_SPIO5) {
Eliezer Tamir877e9aa2008-02-28 11:55:53 -08002966
2967 val = REG_RD(bp, reg_offset);
2968 val &= ~AEU_INPUTS_ATTN_BITS_SPIO5;
2969 REG_WR(bp, reg_offset, val);
2970
2971 BNX2X_ERR("SPIO5 hw attention\n");
2972
Eilon Greensteinfd4ef402009-07-21 05:47:27 +00002973 /* Fan failure attention */
Yaniv Rosnerd90d96b2010-09-07 11:41:04 +00002974 bnx2x_hw_reset_phy(&bp->link_params);
Eilon Greensteinfd4ef402009-07-21 05:47:27 +00002975 bnx2x_fan_failure(bp);
Eliezer Tamir877e9aa2008-02-28 11:55:53 -08002976 }
Eilon Greenstein34f80b02008-06-23 20:33:01 -07002977
Eilon Greenstein589abe32009-02-12 08:36:55 +00002978 if (attn & (AEU_INPUTS_ATTN_BITS_GPIO3_FUNCTION_0 |
2979 AEU_INPUTS_ATTN_BITS_GPIO3_FUNCTION_1)) {
2980 bnx2x_acquire_phy_lock(bp);
2981 bnx2x_handle_module_detect_int(&bp->link_params);
2982 bnx2x_release_phy_lock(bp);
2983 }
2984
Eilon Greenstein34f80b02008-06-23 20:33:01 -07002985 if (attn & HW_INTERRUT_ASSERT_SET_0) {
2986
2987 val = REG_RD(bp, reg_offset);
2988 val &= ~(attn & HW_INTERRUT_ASSERT_SET_0);
2989 REG_WR(bp, reg_offset, val);
2990
2991 BNX2X_ERR("FATAL HW block attention set0 0x%x\n",
Eilon Greenstein0fc5d002009-08-12 08:24:05 +00002992 (u32)(attn & HW_INTERRUT_ASSERT_SET_0));
Eilon Greenstein34f80b02008-06-23 20:33:01 -07002993 bnx2x_panic();
2994 }
Eliezer Tamir877e9aa2008-02-28 11:55:53 -08002995}
2996
/*
 * Handle deasserted attention bits of AEU set 1: doorbell queue (DORQ)
 * HW interrupt and the fatal HW-block attention bits of set 1.
 */
static inline void bnx2x_attn_int_deasserted1(struct bnx2x *bp, u32 attn)
{
	u32 val;

	if (attn & AEU_INPUTS_ATTN_BITS_DOORBELLQ_HW_INTERRUPT) {

		val = REG_RD(bp, DORQ_REG_DORQ_INT_STS_CLR);
		BNX2X_ERR("DB hw attention 0x%x\n", val);
		/* DORQ discard attention */
		if (val & 0x2)
			BNX2X_ERR("FATAL error from DORQ\n");
	}

	if (attn & HW_INTERRUT_ASSERT_SET_1) {

		int port = BP_PORT(bp);
		int reg_offset;

		/* Per-port AEU enable register for attention output 1 */
		reg_offset = (port ? MISC_REG_AEU_ENABLE1_FUNC_1_OUT_1 :
				     MISC_REG_AEU_ENABLE1_FUNC_0_OUT_1);

		/* Mask the fatal bits that fired, then log and panic */
		val = REG_RD(bp, reg_offset);
		val &= ~(attn & HW_INTERRUT_ASSERT_SET_1);
		REG_WR(bp, reg_offset, val);

		BNX2X_ERR("FATAL HW block attention set1 0x%x\n",
			  (u32)(attn & HW_INTERRUT_ASSERT_SET_1));
		bnx2x_panic();
	}
}
3027
/*
 * Handle deasserted attention bits of AEU set 2: CFC and PXP HW
 * interrupts and the fatal HW-block attention bits of set 2.
 */
static inline void bnx2x_attn_int_deasserted2(struct bnx2x *bp, u32 attn)
{
	u32 val;

	if (attn & AEU_INPUTS_ATTN_BITS_CFC_HW_INTERRUPT) {

		val = REG_RD(bp, CFC_REG_CFC_INT_STS_CLR);
		BNX2X_ERR("CFC hw attention 0x%x\n", val);
		/* CFC error attention */
		if (val & 0x2)
			BNX2X_ERR("FATAL error from CFC\n");
	}

	if (attn & AEU_INPUTS_ATTN_BITS_PXP_HW_INTERRUPT) {

		val = REG_RD(bp, PXP_REG_PXP_INT_STS_CLR_0);
		BNX2X_ERR("PXP hw attention 0x%x\n", val);
		/* RQ_USDMDP_FIFO_OVERFLOW */
		if (val & 0x18000)
			BNX2X_ERR("FATAL error from PXP\n");
		/* E2 chips have a second PXP interrupt status register */
		if (CHIP_IS_E2(bp)) {
			val = REG_RD(bp, PXP_REG_PXP_INT_STS_CLR_1);
			BNX2X_ERR("PXP hw attention-1 0x%x\n", val);
		}
	}

	if (attn & HW_INTERRUT_ASSERT_SET_2) {

		int port = BP_PORT(bp);
		int reg_offset;

		/* Per-port AEU enable register for attention output 2 */
		reg_offset = (port ? MISC_REG_AEU_ENABLE1_FUNC_1_OUT_2 :
				     MISC_REG_AEU_ENABLE1_FUNC_0_OUT_2);

		/* Mask the fatal bits that fired, then log and panic */
		val = REG_RD(bp, reg_offset);
		val &= ~(attn & HW_INTERRUT_ASSERT_SET_2);
		REG_WR(bp, reg_offset, val);

		BNX2X_ERR("FATAL HW block attention set2 0x%x\n",
			  (u32)(attn & HW_INTERRUT_ASSERT_SET_2));
		bnx2x_panic();
	}
}
3071
/*
 * Handle deasserted attention bits of AEU set 3: Everest general
 * attentions (PMF link/firmware events, MC and MCP asserts) and the
 * latched attentions (GRC timeout / reserved).
 */
static inline void bnx2x_attn_int_deasserted3(struct bnx2x *bp, u32 attn)
{
	u32 val;

	if (attn & EVEREST_GEN_ATTN_IN_USE_MASK) {

		if (attn & BNX2X_PMF_LINK_ASSERT) {
			int func = BP_FUNC(bp);

			/* Clear the general attention that signalled us */
			REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_12 + func*4, 0);
			/* Re-read the MF configuration and the firmware's
			 * driver status word to see what happened.
			 */
			bp->mf_config[BP_VN(bp)] = MF_CFG_RD(bp,
					func_mf_config[BP_ABS_FUNC(bp)].config);
			val = SHMEM_RD(bp,
				       func_mb[BP_FW_MB_IDX(bp)].drv_status);
			if (val & DRV_STATUS_DCC_EVENT_MASK)
				bnx2x_dcc_event(bp,
					    (val & DRV_STATUS_DCC_EVENT_MASK));

			if (val & DRV_STATUS_SET_MF_BW)
				bnx2x_set_mf_bw(bp);

			/* Become the PMF if the firmware says so and we
			 * are not one already.
			 */
			if ((bp->port.pmf == 0) && (val & DRV_STATUS_PMF))
				bnx2x_pmf_update(bp);

			/* Always call it here: bnx2x_link_report() will
			 * prevent the link indication duplication.
			 */
			bnx2x__link_status_update(bp);

			if (bp->port.pmf &&
			    (val & DRV_STATUS_DCBX_NEGOTIATION_RESULTS) &&
				bp->dcbx_enabled > 0)
				/* start dcbx state machine */
				bnx2x_dcbx_set_params(bp,
					BNX2X_DCBX_STATE_NEG_RECEIVED);
		} else if (attn & BNX2X_MC_ASSERT_BITS) {

			/* Clear all four MC assert general attentions,
			 * then bring the driver down.
			 */
			BNX2X_ERR("MC assert!\n");
			REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_10, 0);
			REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_9, 0);
			REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_8, 0);
			REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_7, 0);
			bnx2x_panic();

		} else if (attn & BNX2X_MCP_ASSERT) {

			/* Management CPU asserted - dump its state */
			BNX2X_ERR("MCP assert!\n");
			REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_11, 0);
			bnx2x_fw_dump(bp);

		} else
			BNX2X_ERR("Unknown HW assert! (attn 0x%x)\n", attn);
	}

	if (attn & EVEREST_LATCHED_ATTN_IN_USE_MASK) {
		BNX2X_ERR("LATCHED attention 0x%08x (masked)\n", attn);
		if (attn & BNX2X_GRC_TIMEOUT) {
			/* The GRC attention registers do not exist on E1 */
			val = CHIP_IS_E1(bp) ? 0 :
					REG_RD(bp, MISC_REG_GRC_TIMEOUT_ATTN);
			BNX2X_ERR("GRC time-out 0x%08x\n", val);
		}
		if (attn & BNX2X_GRC_RSV) {
			val = CHIP_IS_E1(bp) ? 0 :
					REG_RD(bp, MISC_REG_GRC_RSV_ATTN);
			BNX2X_ERR("GRC reserved 0x%08x\n", val);
		}
		/* Clear the latched attention signals */
		REG_WR(bp, MISC_REG_AEU_CLR_LATCH_SIGNAL, 0x7ff);
	}
}
3141
/*
 * Global reset/load bookkeeping kept in a generic MISC register shared
 * by all functions: the low LOAD_COUNTER_BITS bits hold a load counter
 * and the bit at RESET_DONE_FLAG_SHIFT is the "reset in progress" flag
 * (set by bnx2x_set_reset_in_progress(), cleared by
 * bnx2x_set_reset_done()).
 */
#define BNX2X_MISC_GEN_REG MISC_REG_GENERIC_POR_1
#define LOAD_COUNTER_BITS 16 /* Number of bits for load counter */
#define LOAD_COUNTER_MASK (((u32)0x1 << LOAD_COUNTER_BITS) - 1)
#define RESET_DONE_FLAG_MASK (~LOAD_COUNTER_MASK)
#define RESET_DONE_FLAG_SHIFT LOAD_COUNTER_BITS
Dmitry Kravkovf85582f2010-10-06 03:34:21 +00003147
Vladislav Zolotarov72fd0712010-04-19 01:13:12 +00003148/*
3149 * should be run under rtnl lock
3150 */
3151static inline void bnx2x_set_reset_done(struct bnx2x *bp)
3152{
3153 u32 val = REG_RD(bp, BNX2X_MISC_GEN_REG);
3154 val &= ~(1 << RESET_DONE_FLAG_SHIFT);
3155 REG_WR(bp, BNX2X_MISC_GEN_REG, val);
3156 barrier();
3157 mmiowb();
3158}
3159
3160/*
3161 * should be run under rtnl lock
3162 */
3163static inline void bnx2x_set_reset_in_progress(struct bnx2x *bp)
3164{
3165 u32 val = REG_RD(bp, BNX2X_MISC_GEN_REG);
3166 val |= (1 << 16);
3167 REG_WR(bp, BNX2X_MISC_GEN_REG, val);
3168 barrier();
3169 mmiowb();
3170}
3171
3172/*
3173 * should be run under rtnl lock
3174 */
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00003175bool bnx2x_reset_is_done(struct bnx2x *bp)
Vladislav Zolotarov72fd0712010-04-19 01:13:12 +00003176{
3177 u32 val = REG_RD(bp, BNX2X_MISC_GEN_REG);
3178 DP(NETIF_MSG_HW, "GEN_REG_VAL=0x%08x\n", val);
3179 return (val & RESET_DONE_FLAG_MASK) ? false : true;
3180}
3181
3182/*
3183 * should be run under rtnl lock
3184 */
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00003185inline void bnx2x_inc_load_cnt(struct bnx2x *bp)
Vladislav Zolotarov72fd0712010-04-19 01:13:12 +00003186{
3187 u32 val1, val = REG_RD(bp, BNX2X_MISC_GEN_REG);
3188
3189 DP(NETIF_MSG_HW, "Old GEN_REG_VAL=0x%08x\n", val);
3190
3191 val1 = ((val & LOAD_COUNTER_MASK) + 1) & LOAD_COUNTER_MASK;
3192 REG_WR(bp, BNX2X_MISC_GEN_REG, (val & RESET_DONE_FLAG_MASK) | val1);
3193 barrier();
3194 mmiowb();
3195}
3196
3197/*
3198 * should be run under rtnl lock
3199 */
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00003200u32 bnx2x_dec_load_cnt(struct bnx2x *bp)
Vladislav Zolotarov72fd0712010-04-19 01:13:12 +00003201{
3202 u32 val1, val = REG_RD(bp, BNX2X_MISC_GEN_REG);
3203
3204 DP(NETIF_MSG_HW, "Old GEN_REG_VAL=0x%08x\n", val);
3205
3206 val1 = ((val & LOAD_COUNTER_MASK) - 1) & LOAD_COUNTER_MASK;
3207 REG_WR(bp, BNX2X_MISC_GEN_REG, (val & RESET_DONE_FLAG_MASK) | val1);
3208 barrier();
3209 mmiowb();
3210
3211 return val1;
3212}
3213
3214/*
3215 * should be run under rtnl lock
3216 */
3217static inline u32 bnx2x_get_load_cnt(struct bnx2x *bp)
3218{
3219 return REG_RD(bp, BNX2X_MISC_GEN_REG) & LOAD_COUNTER_MASK;
3220}
3221
3222static inline void bnx2x_clear_load_cnt(struct bnx2x *bp)
3223{
3224 u32 val = REG_RD(bp, BNX2X_MISC_GEN_REG);
3225 REG_WR(bp, BNX2X_MISC_GEN_REG, val & (~LOAD_COUNTER_MASK));
3226}
3227
/*
 * Emit one block name in a running comma-separated list; @idx is the
 * number of names already printed (0 means no leading separator).
 */
static inline void _print_next_block(int idx, const char *blk)
{
	pr_cont("%s%s", idx ? ", " : "", blk);
}
3234
3235static inline int bnx2x_print_blocks_with_parity0(u32 sig, int par_num)
3236{
3237 int i = 0;
3238 u32 cur_bit = 0;
3239 for (i = 0; sig; i++) {
3240 cur_bit = ((u32)0x1 << i);
3241 if (sig & cur_bit) {
3242 switch (cur_bit) {
3243 case AEU_INPUTS_ATTN_BITS_BRB_PARITY_ERROR:
3244 _print_next_block(par_num++, "BRB");
3245 break;
3246 case AEU_INPUTS_ATTN_BITS_PARSER_PARITY_ERROR:
3247 _print_next_block(par_num++, "PARSER");
3248 break;
3249 case AEU_INPUTS_ATTN_BITS_TSDM_PARITY_ERROR:
3250 _print_next_block(par_num++, "TSDM");
3251 break;
3252 case AEU_INPUTS_ATTN_BITS_SEARCHER_PARITY_ERROR:
3253 _print_next_block(par_num++, "SEARCHER");
3254 break;
3255 case AEU_INPUTS_ATTN_BITS_TSEMI_PARITY_ERROR:
3256 _print_next_block(par_num++, "TSEMI");
3257 break;
3258 }
3259
3260 /* Clear the bit */
3261 sig &= ~cur_bit;
3262 }
3263 }
3264
3265 return par_num;
3266}
3267
3268static inline int bnx2x_print_blocks_with_parity1(u32 sig, int par_num)
3269{
3270 int i = 0;
3271 u32 cur_bit = 0;
3272 for (i = 0; sig; i++) {
3273 cur_bit = ((u32)0x1 << i);
3274 if (sig & cur_bit) {
3275 switch (cur_bit) {
3276 case AEU_INPUTS_ATTN_BITS_PBCLIENT_PARITY_ERROR:
3277 _print_next_block(par_num++, "PBCLIENT");
3278 break;
3279 case AEU_INPUTS_ATTN_BITS_QM_PARITY_ERROR:
3280 _print_next_block(par_num++, "QM");
3281 break;
3282 case AEU_INPUTS_ATTN_BITS_XSDM_PARITY_ERROR:
3283 _print_next_block(par_num++, "XSDM");
3284 break;
3285 case AEU_INPUTS_ATTN_BITS_XSEMI_PARITY_ERROR:
3286 _print_next_block(par_num++, "XSEMI");
3287 break;
3288 case AEU_INPUTS_ATTN_BITS_DOORBELLQ_PARITY_ERROR:
3289 _print_next_block(par_num++, "DOORBELLQ");
3290 break;
3291 case AEU_INPUTS_ATTN_BITS_VAUX_PCI_CORE_PARITY_ERROR:
3292 _print_next_block(par_num++, "VAUX PCI CORE");
3293 break;
3294 case AEU_INPUTS_ATTN_BITS_DEBUG_PARITY_ERROR:
3295 _print_next_block(par_num++, "DEBUG");
3296 break;
3297 case AEU_INPUTS_ATTN_BITS_USDM_PARITY_ERROR:
3298 _print_next_block(par_num++, "USDM");
3299 break;
3300 case AEU_INPUTS_ATTN_BITS_USEMI_PARITY_ERROR:
3301 _print_next_block(par_num++, "USEMI");
3302 break;
3303 case AEU_INPUTS_ATTN_BITS_UPB_PARITY_ERROR:
3304 _print_next_block(par_num++, "UPB");
3305 break;
3306 case AEU_INPUTS_ATTN_BITS_CSDM_PARITY_ERROR:
3307 _print_next_block(par_num++, "CSDM");
3308 break;
3309 }
3310
3311 /* Clear the bit */
3312 sig &= ~cur_bit;
3313 }
3314 }
3315
3316 return par_num;
3317}
3318
3319static inline int bnx2x_print_blocks_with_parity2(u32 sig, int par_num)
3320{
3321 int i = 0;
3322 u32 cur_bit = 0;
3323 for (i = 0; sig; i++) {
3324 cur_bit = ((u32)0x1 << i);
3325 if (sig & cur_bit) {
3326 switch (cur_bit) {
3327 case AEU_INPUTS_ATTN_BITS_CSEMI_PARITY_ERROR:
3328 _print_next_block(par_num++, "CSEMI");
3329 break;
3330 case AEU_INPUTS_ATTN_BITS_PXP_PARITY_ERROR:
3331 _print_next_block(par_num++, "PXP");
3332 break;
3333 case AEU_IN_ATTN_BITS_PXPPCICLOCKCLIENT_PARITY_ERROR:
3334 _print_next_block(par_num++,
3335 "PXPPCICLOCKCLIENT");
3336 break;
3337 case AEU_INPUTS_ATTN_BITS_CFC_PARITY_ERROR:
3338 _print_next_block(par_num++, "CFC");
3339 break;
3340 case AEU_INPUTS_ATTN_BITS_CDU_PARITY_ERROR:
3341 _print_next_block(par_num++, "CDU");
3342 break;
3343 case AEU_INPUTS_ATTN_BITS_IGU_PARITY_ERROR:
3344 _print_next_block(par_num++, "IGU");
3345 break;
3346 case AEU_INPUTS_ATTN_BITS_MISC_PARITY_ERROR:
3347 _print_next_block(par_num++, "MISC");
3348 break;
3349 }
3350
3351 /* Clear the bit */
3352 sig &= ~cur_bit;
3353 }
3354 }
3355
3356 return par_num;
3357}
3358
3359static inline int bnx2x_print_blocks_with_parity3(u32 sig, int par_num)
3360{
3361 int i = 0;
3362 u32 cur_bit = 0;
3363 for (i = 0; sig; i++) {
3364 cur_bit = ((u32)0x1 << i);
3365 if (sig & cur_bit) {
3366 switch (cur_bit) {
3367 case AEU_INPUTS_ATTN_BITS_MCP_LATCHED_ROM_PARITY:
3368 _print_next_block(par_num++, "MCP ROM");
3369 break;
3370 case AEU_INPUTS_ATTN_BITS_MCP_LATCHED_UMP_RX_PARITY:
3371 _print_next_block(par_num++, "MCP UMP RX");
3372 break;
3373 case AEU_INPUTS_ATTN_BITS_MCP_LATCHED_UMP_TX_PARITY:
3374 _print_next_block(par_num++, "MCP UMP TX");
3375 break;
3376 case AEU_INPUTS_ATTN_BITS_MCP_LATCHED_SCPAD_PARITY:
3377 _print_next_block(par_num++, "MCP SCPAD");
3378 break;
3379 }
3380
3381 /* Clear the bit */
3382 sig &= ~cur_bit;
3383 }
3384 }
3385
3386 return par_num;
3387}
3388
/*
 * Check the four attention signal words for HW block parity errors.
 * If any parity bit is set, print the list of affected block names to
 * the console and return true; otherwise return false.
 */
static inline bool bnx2x_parity_attn(struct bnx2x *bp, u32 sig0, u32 sig1,
				     u32 sig2, u32 sig3)
{
	if ((sig0 & HW_PRTY_ASSERT_SET_0) || (sig1 & HW_PRTY_ASSERT_SET_1) ||
	    (sig2 & HW_PRTY_ASSERT_SET_2) || (sig3 & HW_PRTY_ASSERT_SET_3)) {
		int par_num = 0;
		DP(NETIF_MSG_HW, "Was parity error: HW block parity attention: "
			"[0]:0x%08x [1]:0x%08x "
			"[2]:0x%08x [3]:0x%08x\n",
			sig0 & HW_PRTY_ASSERT_SET_0,
			sig1 & HW_PRTY_ASSERT_SET_1,
			sig2 & HW_PRTY_ASSERT_SET_2,
			sig3 & HW_PRTY_ASSERT_SET_3);
		printk(KERN_ERR"%s: Parity errors detected in blocks: ",
		       bp->dev->name);
		/* Each helper appends the block names for one signal word;
		 * par_num threads through so _print_next_block() knows
		 * when to emit a comma separator.
		 */
		par_num = bnx2x_print_blocks_with_parity0(
			sig0 & HW_PRTY_ASSERT_SET_0, par_num);
		par_num = bnx2x_print_blocks_with_parity1(
			sig1 & HW_PRTY_ASSERT_SET_1, par_num);
		par_num = bnx2x_print_blocks_with_parity2(
			sig2 & HW_PRTY_ASSERT_SET_2, par_num);
		par_num = bnx2x_print_blocks_with_parity3(
			sig3 & HW_PRTY_ASSERT_SET_3, par_num);
		printk("\n");
		return true;
	} else
		return false;
}
3417
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00003418bool bnx2x_chk_parity_attn(struct bnx2x *bp)
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02003419{
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02003420 struct attn_route attn;
Vladislav Zolotarov72fd0712010-04-19 01:13:12 +00003421 int port = BP_PORT(bp);
3422
3423 attn.sig[0] = REG_RD(bp,
3424 MISC_REG_AEU_AFTER_INVERT_1_FUNC_0 +
3425 port*4);
3426 attn.sig[1] = REG_RD(bp,
3427 MISC_REG_AEU_AFTER_INVERT_2_FUNC_0 +
3428 port*4);
3429 attn.sig[2] = REG_RD(bp,
3430 MISC_REG_AEU_AFTER_INVERT_3_FUNC_0 +
3431 port*4);
3432 attn.sig[3] = REG_RD(bp,
3433 MISC_REG_AEU_AFTER_INVERT_4_FUNC_0 +
3434 port*4);
3435
3436 return bnx2x_parity_attn(bp, attn.sig[0], attn.sig[1], attn.sig[2],
3437 attn.sig[3]);
3438}
3439
Dmitry Kravkovf2e08992010-10-06 03:28:26 +00003440
/*
 * Handle deasserted attention bits of AEU set 4: decode and log the
 * PGLUE_B and ATC interrupt status bits, and report set-4 parity
 * errors. Only invoked with a non-zero mask on E2 chips (sig[4] is
 * forced to 0 elsewhere for older chips).
 */
static inline void bnx2x_attn_int_deasserted4(struct bnx2x *bp, u32 attn)
{
	u32 val;
	if (attn & AEU_INPUTS_ATTN_BITS_PGLUE_HW_INTERRUPT) {

		/* Decode every PGLUE_B error cause bit into its own log line */
		val = REG_RD(bp, PGLUE_B_REG_PGLUE_B_INT_STS_CLR);
		BNX2X_ERR("PGLUE hw attention 0x%x\n", val);
		if (val & PGLUE_B_PGLUE_B_INT_STS_REG_ADDRESS_ERROR)
			BNX2X_ERR("PGLUE_B_PGLUE_B_INT_STS_REG_"
				  "ADDRESS_ERROR\n");
		if (val & PGLUE_B_PGLUE_B_INT_STS_REG_INCORRECT_RCV_BEHAVIOR)
			BNX2X_ERR("PGLUE_B_PGLUE_B_INT_STS_REG_"
				  "INCORRECT_RCV_BEHAVIOR\n");
		if (val & PGLUE_B_PGLUE_B_INT_STS_REG_WAS_ERROR_ATTN)
			BNX2X_ERR("PGLUE_B_PGLUE_B_INT_STS_REG_"
				  "WAS_ERROR_ATTN\n");
		if (val & PGLUE_B_PGLUE_B_INT_STS_REG_VF_LENGTH_VIOLATION_ATTN)
			BNX2X_ERR("PGLUE_B_PGLUE_B_INT_STS_REG_"
				  "VF_LENGTH_VIOLATION_ATTN\n");
		if (val &
		    PGLUE_B_PGLUE_B_INT_STS_REG_VF_GRC_SPACE_VIOLATION_ATTN)
			BNX2X_ERR("PGLUE_B_PGLUE_B_INT_STS_REG_"
				  "VF_GRC_SPACE_VIOLATION_ATTN\n");
		if (val &
		    PGLUE_B_PGLUE_B_INT_STS_REG_VF_MSIX_BAR_VIOLATION_ATTN)
			BNX2X_ERR("PGLUE_B_PGLUE_B_INT_STS_REG_"
				  "VF_MSIX_BAR_VIOLATION_ATTN\n");
		if (val & PGLUE_B_PGLUE_B_INT_STS_REG_TCPL_ERROR_ATTN)
			BNX2X_ERR("PGLUE_B_PGLUE_B_INT_STS_REG_"
				  "TCPL_ERROR_ATTN\n");
		if (val & PGLUE_B_PGLUE_B_INT_STS_REG_TCPL_IN_TWO_RCBS_ATTN)
			BNX2X_ERR("PGLUE_B_PGLUE_B_INT_STS_REG_"
				  "TCPL_IN_TWO_RCBS_ATTN\n");
		if (val & PGLUE_B_PGLUE_B_INT_STS_REG_CSSNOOP_FIFO_OVERFLOW)
			BNX2X_ERR("PGLUE_B_PGLUE_B_INT_STS_REG_"
				  "CSSNOOP_FIFO_OVERFLOW\n");
	}
	if (attn & AEU_INPUTS_ATTN_BITS_ATC_HW_INTERRUPT) {
		/* Decode every ATC error cause bit into its own log line */
		val = REG_RD(bp, ATC_REG_ATC_INT_STS_CLR);
		BNX2X_ERR("ATC hw attention 0x%x\n", val);
		if (val & ATC_ATC_INT_STS_REG_ADDRESS_ERROR)
			BNX2X_ERR("ATC_ATC_INT_STS_REG_ADDRESS_ERROR\n");
		if (val & ATC_ATC_INT_STS_REG_ATC_TCPL_TO_NOT_PEND)
			BNX2X_ERR("ATC_ATC_INT_STS_REG"
				  "_ATC_TCPL_TO_NOT_PEND\n");
		if (val & ATC_ATC_INT_STS_REG_ATC_GPA_MULTIPLE_HITS)
			BNX2X_ERR("ATC_ATC_INT_STS_REG_"
				  "ATC_GPA_MULTIPLE_HITS\n");
		if (val & ATC_ATC_INT_STS_REG_ATC_RCPL_TO_EMPTY_CNT)
			BNX2X_ERR("ATC_ATC_INT_STS_REG_"
				  "ATC_RCPL_TO_EMPTY_CNT\n");
		if (val & ATC_ATC_INT_STS_REG_ATC_TCPL_ERROR)
			BNX2X_ERR("ATC_ATC_INT_STS_REG_ATC_TCPL_ERROR\n");
		if (val & ATC_ATC_INT_STS_REG_ATC_IREQ_LESS_THAN_STU)
			BNX2X_ERR("ATC_ATC_INT_STS_REG_"
				  "ATC_IREQ_LESS_THAN_STU\n");
	}

	if (attn & (AEU_INPUTS_ATTN_BITS_PGLUE_PARITY_ERROR |
		    AEU_INPUTS_ATTN_BITS_ATC_PARITY_ERROR)) {
		BNX2X_ERR("FATAL parity attention set4 0x%x\n",
		(u32)(attn & (AEU_INPUTS_ATTN_BITS_PGLUE_PARITY_ERROR |
		    AEU_INPUTS_ATTN_BITS_ATC_PARITY_ERROR)));
	}

}
3507
/*
 * Handle a "deasserted" attention event. If a HW parity error is
 * pending, start the recovery flow and return without processing
 * anything else; otherwise read the after-invert attention signals and
 * dispatch them to the per-set handlers for every dynamic attention
 * group flagged in @deasserted, then re-enable the bits in the AEU mask.
 */
static void bnx2x_attn_int_deasserted(struct bnx2x *bp, u32 deasserted)
{
	struct attn_route attn, *group_mask;
	int port = BP_PORT(bp);
	int index;
	u32 reg_addr;
	u32 val;
	u32 aeu_mask;

	/* need to take HW lock because MCP or other port might also
	   try to handle this event */
	bnx2x_acquire_alr(bp);

	if (CHIP_PARITY_ENABLED(bp) && bnx2x_chk_parity_attn(bp)) {
		/* Kick off the recovery flow from process context */
		bp->recovery_state = BNX2X_RECOVERY_INIT;
		bnx2x_set_reset_in_progress(bp);
		schedule_delayed_work(&bp->reset_task, 0);
		/* Disable HW interrupts */
		bnx2x_int_disable(bp);
		bnx2x_release_alr(bp);
		/* In case of parity errors don't handle attentions so that
		 * other function would "see" parity errors.
		 */
		return;
	}

	attn.sig[0] = REG_RD(bp, MISC_REG_AEU_AFTER_INVERT_1_FUNC_0 + port*4);
	attn.sig[1] = REG_RD(bp, MISC_REG_AEU_AFTER_INVERT_2_FUNC_0 + port*4);
	attn.sig[2] = REG_RD(bp, MISC_REG_AEU_AFTER_INVERT_3_FUNC_0 + port*4);
	attn.sig[3] = REG_RD(bp, MISC_REG_AEU_AFTER_INVERT_4_FUNC_0 + port*4);
	/* The fifth attention word exists only on E2 chips */
	if (CHIP_IS_E2(bp))
		attn.sig[4] =
		      REG_RD(bp, MISC_REG_AEU_AFTER_INVERT_5_FUNC_0 + port*4);
	else
		attn.sig[4] = 0;

	DP(NETIF_MSG_HW, "attn: %08x %08x %08x %08x %08x\n",
	   attn.sig[0], attn.sig[1], attn.sig[2], attn.sig[3], attn.sig[4]);

	for (index = 0; index < MAX_DYNAMIC_ATTN_GRPS; index++) {
		if (deasserted & (1 << index)) {
			group_mask = &bp->attn_group[index];

			DP(NETIF_MSG_HW, "group[%d]: %08x %08x "
					 "%08x %08x %08x\n",
			   index,
			   group_mask->sig[0], group_mask->sig[1],
			   group_mask->sig[2], group_mask->sig[3],
			   group_mask->sig[4]);

			/* Each handler only sees the attention bits that
			 * belong to this group.
			 */
			bnx2x_attn_int_deasserted4(bp,
					attn.sig[4] & group_mask->sig[4]);
			bnx2x_attn_int_deasserted3(bp,
					attn.sig[3] & group_mask->sig[3]);
			bnx2x_attn_int_deasserted1(bp,
					attn.sig[1] & group_mask->sig[1]);
			bnx2x_attn_int_deasserted2(bp,
					attn.sig[2] & group_mask->sig[2]);
			bnx2x_attn_int_deasserted0(bp,
					attn.sig[0] & group_mask->sig[0]);
		}
	}

	bnx2x_release_alr(bp);

	/* ACK the deasserted bits via HC or IGU depending on the chip's
	 * interrupt block.
	 */
	if (bp->common.int_block == INT_BLOCK_HC)
		reg_addr = (HC_REG_COMMAND_REG + port*32 +
			    COMMAND_REG_ATTN_BITS_CLR);
	else
		reg_addr = (BAR_IGU_INTMEM + IGU_CMD_ATTN_BIT_CLR_UPPER*8);

	val = ~deasserted;
	DP(NETIF_MSG_HW, "about to mask 0x%08x at %s addr 0x%x\n", val,
	   (bp->common.int_block == INT_BLOCK_HC) ? "HC" : "IGU", reg_addr);
	REG_WR(bp, reg_addr, val);

	/* Sanity: every deasserted bit must have been marked asserted */
	if (~bp->attn_state & deasserted)
		BNX2X_ERR("IGU ERROR\n");

	reg_addr = port ? MISC_REG_AEU_MASK_ATTN_FUNC_1 :
			  MISC_REG_AEU_MASK_ATTN_FUNC_0;

	/* Re-enable the handled attention lines in the AEU mask under
	 * the HW lock shared with the other port/MCP.
	 */
	bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_PORT0_ATT_MASK + port);
	aeu_mask = REG_RD(bp, reg_addr);

	DP(NETIF_MSG_HW, "aeu_mask %x newly deasserted %x\n",
	   aeu_mask, deasserted);
	aeu_mask |= (deasserted & 0x3ff);
	DP(NETIF_MSG_HW, "new mask %x\n", aeu_mask);

	REG_WR(bp, reg_addr, aeu_mask);
	bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_PORT0_ATT_MASK + port);

	DP(NETIF_MSG_HW, "attn_state %x\n", bp->attn_state);
	bp->attn_state &= ~deasserted;
	DP(NETIF_MSG_HW, "new state %x\n", bp->attn_state);
}
3605
/*
 * Top-level attention handler: compare the attention bits reported in
 * the default status block against the driver's view (attn_state) and
 * dispatch newly asserted and newly deasserted bits.
 */
static void bnx2x_attn_int(struct bnx2x *bp)
{
	/* read local copy of bits */
	u32 attn_bits = le32_to_cpu(bp->def_status_blk->atten_status_block.
				attn_bits);
	u32 attn_ack = le32_to_cpu(bp->def_status_blk->atten_status_block.
				attn_bits_ack);
	u32 attn_state = bp->attn_state;

	/* look for changed bits: asserted = set in HW but not yet acked or
	 * tracked; deasserted = cleared in HW but still acked and tracked.
	 */
	u32 asserted = attn_bits & ~attn_ack & ~attn_state;
	u32 deasserted = ~attn_bits & attn_ack & attn_state;

	DP(NETIF_MSG_HW,
	   "attn_bits %x attn_ack %x asserted %x deasserted %x\n",
	   attn_bits, attn_ack, asserted, deasserted);

	/* A bit whose HW value and ack agree must match our state too */
	if (~(attn_bits ^ attn_ack) & (attn_bits ^ attn_state))
		BNX2X_ERR("BAD attention state\n");

	/* handle bits that were raised */
	if (asserted)
		bnx2x_attn_int_asserted(bp, asserted);

	if (deasserted)
		bnx2x_attn_int_deasserted(bp, deasserted);
}
3633
/*
 * Publish the new event-queue producer index to the chip for this
 * function.
 */
static inline void bnx2x_update_eq_prod(struct bnx2x *bp, u16 prod)
{
	/* No memory barriers */
	storm_memset_eq_prod(bp, prod, BP_FUNC(bp));
	mmiowb(); /* keep prod updates ordered */
}
3640
3641#ifdef BCM_CNIC
/*
 * Check whether a CFC-delete event belongs to a CNIC (iSCSI/FCoE)
 * connection and, if so, complete it towards the CNIC layer.
 *
 * Returns 0 when the event was consumed here, 1 when the CID is a
 * regular ethernet connection the caller should handle itself.
 */
static int bnx2x_cnic_handle_cfc_del(struct bnx2x *bp, u32 cid,
				      union event_ring_elem *elem)
{
	/* Not a CNIC CID: either CNIC is not started, or the CID is below
	 * the CNIC range and is not the iSCSI L2 CID.
	 */
	if (!bp->cnic_eth_dev.starting_cid ||
	    (cid < bp->cnic_eth_dev.starting_cid &&
	    cid != bp->cnic_eth_dev.iscsi_l2_cid))
		return 1;

	DP(BNX2X_MSG_SP, "got delete ramrod for CNIC CID %d\n", cid);

	if (unlikely(elem->message.data.cfc_del_event.error)) {
		BNX2X_ERR("got delete ramrod for CNIC CID %d with error!\n",
			  cid);
		bnx2x_panic_dump(bp);
	}
	bnx2x_cnic_cfc_comp(bp, cid);
	return 0;
}
3660#endif
3661
/*
 * Drain the slowpath event queue (EQ): walk from the software consumer
 * up to the hardware consumer, handle each completion (statistics,
 * CFC delete, DCBX traffic start/stop, function start/stop, set-MAC),
 * return the consumed credits to eq_spq_left and publish the new
 * producer to the chip.
 */
static void bnx2x_eq_int(struct bnx2x *bp)
{
	u16 hw_cons, sw_cons, sw_prod;
	union event_ring_elem *elem;
	u32 cid;
	u8 opcode;
	int spqe_cnt = 0;

	hw_cons = le16_to_cpu(*bp->eq_cons_sb);

	/* The hw_cons range is 1-255, 257 - the sw_cons range is 0-254, 256.
	 * When we get the next-page we need to adjust so the loop
	 * condition below will be met. The next element is the size of a
	 * regular element and hence incrementing by 1
	 */
	if ((hw_cons & EQ_DESC_MAX_PAGE) == EQ_DESC_MAX_PAGE)
		hw_cons++;

	/* This function may never run in parallel with itself for a
	 * specific bp, thus there is no need in "paired" read memory
	 * barrier here.
	 */
	sw_cons = bp->eq_cons;
	sw_prod = bp->eq_prod;

	DP(BNX2X_MSG_SP, "EQ: hw_cons %u sw_cons %u bp->cq_spq_left %u\n",
			hw_cons, sw_cons, atomic_read(&bp->eq_spq_left));

	for (; sw_cons != hw_cons;
	      sw_prod = NEXT_EQ_IDX(sw_prod), sw_cons = NEXT_EQ_IDX(sw_cons)) {


		elem = &bp->eq_ring[EQ_DESC(sw_cons)];

		/* The CID field sits in the same place for all opcodes */
		cid = SW_CID(elem->message.data.cfc_del_event.cid);
		opcode = elem->message.opcode;


		/* handle eq element */
		switch (opcode) {
		case EVENT_RING_OPCODE_STAT_QUERY:
			DP(NETIF_MSG_TIMER, "got statistics comp event\n");
			/* nothing to do with stats comp */
			continue;

		case EVENT_RING_OPCODE_CFC_DEL:
			/* handle according to cid range */
			/*
			 * we may want to verify here that the bp state is
			 * HALTING
			 */
			DP(NETIF_MSG_IFDOWN,
			   "got delete ramrod for MULTI[%d]\n", cid);
#ifdef BCM_CNIC
			if (!bnx2x_cnic_handle_cfc_del(bp, cid, elem))
				goto next_spqe;
			if (cid == BNX2X_FCOE_ETH_CID)
				bnx2x_fcoe(bp, state) = BNX2X_FP_STATE_CLOSED;
			else
#endif
				bnx2x_fp(bp, cid, state) =
						BNX2X_FP_STATE_CLOSED;

			goto next_spqe;

		case EVENT_RING_OPCODE_STOP_TRAFFIC:
			DP(NETIF_MSG_IFUP, "got STOP TRAFFIC\n");
			bnx2x_dcbx_set_params(bp, BNX2X_DCBX_STATE_TX_PAUSED);
			goto next_spqe;
		case EVENT_RING_OPCODE_START_TRAFFIC:
			DP(NETIF_MSG_IFUP, "got START TRAFFIC\n");
			bnx2x_dcbx_set_params(bp, BNX2X_DCBX_STATE_TX_RELEASED);
			goto next_spqe;
		}

		/* The remaining opcodes are only valid in specific driver
		 * states, so dispatch on the (opcode, state) combination.
		 */
		switch (opcode | bp->state) {
		case (EVENT_RING_OPCODE_FUNCTION_START |
		      BNX2X_STATE_OPENING_WAIT4_PORT):
			DP(NETIF_MSG_IFUP, "got setup ramrod\n");
			bp->state = BNX2X_STATE_FUNC_STARTED;
			break;

		case (EVENT_RING_OPCODE_FUNCTION_STOP |
		      BNX2X_STATE_CLOSING_WAIT4_HALT):
			DP(NETIF_MSG_IFDOWN, "got halt ramrod\n");
			bp->state = BNX2X_STATE_CLOSING_WAIT4_UNLOAD;
			break;

		case (EVENT_RING_OPCODE_SET_MAC | BNX2X_STATE_OPEN):
		case (EVENT_RING_OPCODE_SET_MAC | BNX2X_STATE_DIAG):
			DP(NETIF_MSG_IFUP, "got set mac ramrod\n");
			if (elem->message.data.set_mac_event.echo)
				bp->set_mac_pending = 0;
			break;

		case (EVENT_RING_OPCODE_SET_MAC |
		      BNX2X_STATE_CLOSING_WAIT4_HALT):
			DP(NETIF_MSG_IFDOWN, "got (un)set mac ramrod\n");
			if (elem->message.data.set_mac_event.echo)
				bp->set_mac_pending = 0;
			break;
		default:
			/* unknown event log error and continue */
			BNX2X_ERR("Unknown EQ event %d\n",
				  elem->message.opcode);
		}
next_spqe:
		spqe_cnt++;
	} /* for */

	/* Return the consumed ring credits before updating the consumer */
	smp_mb__before_atomic_inc();
	atomic_add(spqe_cnt, &bp->eq_spq_left);

	bp->eq_cons = sw_cons;
	bp->eq_prod = sw_prod;
	/* Make sure that above mem writes were issued towards the memory */
	smp_wmb();

	/* update producer */
	bnx2x_update_eq_prod(bp, bp->eq_prod);
}
3783
/*
 * Slowpath work handler: runs from the workqueue after a slowpath
 * interrupt. Processes pending HW attentions and slowpath (EQ)
 * completions, then re-enables the default status block interrupt.
 */
static void bnx2x_sp_task(struct work_struct *work)
{
	struct bnx2x *bp = container_of(work, struct bnx2x, sp_task.work);
	u16 status;

	/* Returns a bitmask of status-block indices that changed */
	status = bnx2x_update_dsb_idx(bp);
/*	if (status == 0)				     */
/*		BNX2X_ERR("spurious slowpath interrupt!\n"); */

	DP(NETIF_MSG_INTR, "got a slowpath interrupt (status 0x%x)\n", status);

	/* HW attentions */
	if (status & BNX2X_DEF_SB_ATT_IDX) {
		bnx2x_attn_int(bp);
		status &= ~BNX2X_DEF_SB_ATT_IDX;
	}

	/* SP events: STAT_QUERY and others */
	if (status & BNX2X_DEF_SB_IDX) {
#ifdef BCM_CNIC
		struct bnx2x_fastpath *fp = bnx2x_fcoe_fp(bp);

		/* Poll the FCoE queue if it has pending work */
		if ((!NO_FCOE(bp)) &&
			(bnx2x_has_rx_work(fp) || bnx2x_has_tx_work(fp)))
			napi_schedule(&bnx2x_fcoe(bp, napi));
#endif
		/* Handle EQ completions */
		bnx2x_eq_int(bp);

		bnx2x_ack_sb(bp, bp->igu_dsb_id, USTORM_ID,
			le16_to_cpu(bp->def_idx), IGU_INT_NOP, 1);

		status &= ~BNX2X_DEF_SB_IDX;
	}

	if (unlikely(status))
		DP(NETIF_MSG_INTR, "got an unknown interrupt! (status 0x%x)\n",
		   status);

	/* Re-enable the attention line interrupt */
	bnx2x_ack_sb(bp, bp->igu_dsb_id, ATTENTION_ID,
	     le16_to_cpu(bp->def_att_idx), IGU_INT_ENABLE, 1);
}
3826
/* bnx2x_msix_sp_int - MSI-X interrupt handler for the slow-path vector.
 *
 * Masks the default SB in the IGU, gives the CNIC driver (if attached)
 * a chance to inspect the event, and defers the real processing to
 * bnx2x_sp_task on the driver workqueue.
 *
 * Returns IRQ_HANDLED unconditionally - this vector is dedicated.
 */
irqreturn_t bnx2x_msix_sp_int(int irq, void *dev_instance)
{
	struct net_device *dev = dev_instance;
	struct bnx2x *bp = netdev_priv(dev);

	/* disable further default-SB interrupts until sp_task re-enables */
	bnx2x_ack_sb(bp, bp->igu_dsb_id, USTORM_ID, 0,
		     IGU_INT_DISABLE, 0);

#ifdef BNX2X_STOP_ON_ERROR
	if (unlikely(bp->panic))
		return IRQ_HANDLED;
#endif

#ifdef BCM_CNIC
	{
		struct cnic_ops *c_ops;

		/* cnic_ops is RCU-protected; the handler may be torn down
		 * concurrently with this interrupt */
		rcu_read_lock();
		c_ops = rcu_dereference(bp->cnic_ops);
		if (c_ops)
			c_ops->cnic_handler(bp->cnic_data, NULL);
		rcu_read_unlock();
	}
#endif
	queue_delayed_work(bnx2x_wq, &bp->sp_task, 0);

	return IRQ_HANDLED;
}
3855
3856/* end of slow path */
3857
/* bnx2x_timer - periodic driver timer.
 *
 * While the interface is up this timer:
 *  - in poll mode (module parameter), services queue 0 Tx/Rx directly;
 *  - maintains the driver<->MCP heartbeat through shared memory and
 *    complains if the management CPU stops answering;
 *  - kicks a periodic statistics update.
 * Re-arms itself with bp->current_interval.
 */
static void bnx2x_timer(unsigned long data)
{
	struct bnx2x *bp = (struct bnx2x *) data;

	if (!netif_running(bp->dev))
		return;

	if (poll) {
		struct bnx2x_fastpath *fp = &bp->fp[0];

		bnx2x_tx_int(fp);
		bnx2x_rx_int(fp, 1000);
	}

	if (!BP_NOMCP(bp)) {
		int mb_idx = BP_FW_MB_IDX(bp);
		u32 drv_pulse;
		u32 mcp_pulse;

		/* advance our heartbeat sequence and publish it to the MCP */
		++bp->fw_drv_pulse_wr_seq;
		bp->fw_drv_pulse_wr_seq &= DRV_PULSE_SEQ_MASK;
		/* TBD - add SYSTEM_TIME */
		drv_pulse = bp->fw_drv_pulse_wr_seq;
		SHMEM_WR(bp, func_mb[mb_idx].drv_pulse_mb, drv_pulse);

		mcp_pulse = (SHMEM_RD(bp, func_mb[mb_idx].mcp_pulse_mb) &
			     MCP_PULSE_SEQ_MASK);
		/* The delta between driver pulse and mcp response
		 * should be 1 (before mcp response) or 0 (after mcp response)
		 */
		if ((drv_pulse != mcp_pulse) &&
		    (drv_pulse != ((mcp_pulse + 1) & MCP_PULSE_SEQ_MASK))) {
			/* someone lost a heartbeat... */
			BNX2X_ERR("drv_pulse (0x%x) != mcp_pulse (0x%x)\n",
				  drv_pulse, mcp_pulse);
		}
	}

	if (bp->state == BNX2X_STATE_OPEN)
		bnx2x_stats_handle(bp, STATS_EVENT_UPDATE);

	mod_timer(&bp->timer, jiffies + bp->current_interval);
}
3901
3902/* end of Statistics */
3903
3904/* nic init */
3905
3906/*
3907 * nic init service functions
3908 */
3909
Dmitry Kravkov523224a2010-10-06 03:23:26 +00003910static inline void bnx2x_fill(struct bnx2x *bp, u32 addr, int fill, u32 len)
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02003911{
Dmitry Kravkov523224a2010-10-06 03:23:26 +00003912 u32 i;
3913 if (!(len%4) && !(addr%4))
3914 for (i = 0; i < len; i += 4)
3915 REG_WR(bp, addr + i, fill);
3916 else
3917 for (i = 0; i < len; i++)
3918 REG_WR8(bp, addr + i, fill);
Eilon Greenstein34f80b02008-06-23 20:33:01 -07003919
Eilon Greenstein34f80b02008-06-23 20:33:01 -07003920}
3921
Dmitry Kravkov523224a2010-10-06 03:23:26 +00003922/* helper: writes FP SP data to FW - data_size in dwords */
3923static inline void bnx2x_wr_fp_sb_data(struct bnx2x *bp,
3924 int fw_sb_id,
3925 u32 *sb_data_p,
3926 u32 data_size)
Eilon Greenstein34f80b02008-06-23 20:33:01 -07003927{
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02003928 int index;
Dmitry Kravkov523224a2010-10-06 03:23:26 +00003929 for (index = 0; index < data_size; index++)
3930 REG_WR(bp, BAR_CSTRORM_INTMEM +
3931 CSTORM_STATUS_BLOCK_DATA_OFFSET(fw_sb_id) +
3932 sizeof(u32)*index,
3933 *(sb_data_p + index));
Eilon Greenstein34f80b02008-06-23 20:33:01 -07003934}
3935
/* bnx2x_zero_fp_sb - disable and clear one fastpath status block.
 *
 * Writes a zeroed SB data image (chip-revision specific layout) with the
 * function marked disabled, then clears the status block and its sync
 * lines in CSTORM internal memory.
 */
static inline void bnx2x_zero_fp_sb(struct bnx2x *bp, int fw_sb_id)
{
	u32 *sb_data_p;
	u32 data_size = 0;
	struct hc_status_block_data_e2 sb_data_e2;
	struct hc_status_block_data_e1x sb_data_e1x;

	/* disable the function first */
	if (CHIP_IS_E2(bp)) {
		memset(&sb_data_e2, 0, sizeof(struct hc_status_block_data_e2));
		sb_data_e2.common.p_func.pf_id = HC_FUNCTION_DISABLED;
		sb_data_e2.common.p_func.vf_id = HC_FUNCTION_DISABLED;
		sb_data_e2.common.p_func.vf_valid = false;
		sb_data_p = (u32 *)&sb_data_e2;
		data_size = sizeof(struct hc_status_block_data_e2)/sizeof(u32);
	} else {
		memset(&sb_data_e1x, 0,
		       sizeof(struct hc_status_block_data_e1x));
		sb_data_e1x.common.p_func.pf_id = HC_FUNCTION_DISABLED;
		sb_data_e1x.common.p_func.vf_id = HC_FUNCTION_DISABLED;
		sb_data_e1x.common.p_func.vf_valid = false;
		sb_data_p = (u32 *)&sb_data_e1x;
		data_size = sizeof(struct hc_status_block_data_e1x)/sizeof(u32);
	}
	bnx2x_wr_fp_sb_data(bp, fw_sb_id, sb_data_p, data_size);

	/* wipe the SB itself and its sync lines */
	bnx2x_fill(bp, BAR_CSTRORM_INTMEM +
			CSTORM_STATUS_BLOCK_OFFSET(fw_sb_id), 0,
			CSTORM_STATUS_BLOCK_SIZE);
	bnx2x_fill(bp, BAR_CSTRORM_INTMEM +
			CSTORM_SYNC_BLOCK_OFFSET(fw_sb_id), 0,
			CSTORM_SYNC_BLOCK_SIZE);
}
3969
3970/* helper: writes SP SB data to FW */
3971static inline void bnx2x_wr_sp_sb_data(struct bnx2x *bp,
3972 struct hc_sp_status_block_data *sp_sb_data)
Eilon Greenstein34f80b02008-06-23 20:33:01 -07003973{
3974 int func = BP_FUNC(bp);
Dmitry Kravkov523224a2010-10-06 03:23:26 +00003975 int i;
3976 for (i = 0; i < sizeof(struct hc_sp_status_block_data)/sizeof(u32); i++)
3977 REG_WR(bp, BAR_CSTRORM_INTMEM +
3978 CSTORM_SP_STATUS_BLOCK_DATA_OFFSET(func) +
3979 i*sizeof(u32),
3980 *((u32 *)sp_sb_data + i));
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02003981}
3982
Dmitry Kravkov523224a2010-10-06 03:23:26 +00003983static inline void bnx2x_zero_sp_sb(struct bnx2x *bp)
3984{
3985 int func = BP_FUNC(bp);
3986 struct hc_sp_status_block_data sp_sb_data;
3987 memset(&sp_sb_data, 0, sizeof(struct hc_sp_status_block_data));
3988
3989 sp_sb_data.p_func.pf_id = HC_FUNCTION_DISABLED;
3990 sp_sb_data.p_func.vf_id = HC_FUNCTION_DISABLED;
3991 sp_sb_data.p_func.vf_valid = false;
3992
3993 bnx2x_wr_sp_sb_data(bp, &sp_sb_data);
3994
3995 bnx2x_fill(bp, BAR_CSTRORM_INTMEM +
3996 CSTORM_SP_STATUS_BLOCK_OFFSET(func), 0,
3997 CSTORM_SP_STATUS_BLOCK_SIZE);
3998 bnx2x_fill(bp, BAR_CSTRORM_INTMEM +
3999 CSTORM_SP_SYNC_BLOCK_OFFSET(func), 0,
4000 CSTORM_SP_SYNC_BLOCK_SIZE);
4001
4002}
4003
4004
4005static inline
4006void bnx2x_setup_ndsb_state_machine(struct hc_status_block_sm *hc_sm,
4007 int igu_sb_id, int igu_seg_id)
4008{
4009 hc_sm->igu_sb_id = igu_sb_id;
4010 hc_sm->igu_seg_id = igu_seg_id;
4011 hc_sm->timer_value = 0xFF;
4012 hc_sm->time_to_expire = 0xFFFFFFFF;
4013}
4014
/* bnx2x_init_sb - initialize one fastpath status block in the FW/HW.
 *
 * @mapping:   DMA address of the host-side status block
 * @vfid:      VF id to program (E2 only; E1x always writes 0xff)
 * @vf_valid:  whether @vfid is meaningful (E2 only)
 * @fw_sb_id:  FW-side status block id
 * @igu_sb_id: IGU-side status block id
 *
 * Disables/zeroes the old SB, builds the chip-revision specific SB data
 * image (host address, function identity, state machines) and writes it
 * to CSTORM internal memory.
 */
static void bnx2x_init_sb(struct bnx2x *bp, dma_addr_t mapping, int vfid,
			  u8 vf_valid, int fw_sb_id, int igu_sb_id)
{
	int igu_seg_id;

	struct hc_status_block_data_e2 sb_data_e2;
	struct hc_status_block_data_e1x sb_data_e1x;
	struct hc_status_block_sm *hc_sm_p;
	int data_size;
	u32 *sb_data_p;

	/* segment depends on whether the IGU works in backward-compatible
	 * (HC-like) mode or in normal IGU mode */
	if (CHIP_INT_MODE_IS_BC(bp))
		igu_seg_id = HC_SEG_ACCESS_NORM;
	else
		igu_seg_id = IGU_SEG_ACCESS_NORM;

	bnx2x_zero_fp_sb(bp, fw_sb_id);

	if (CHIP_IS_E2(bp)) {
		memset(&sb_data_e2, 0, sizeof(struct hc_status_block_data_e2));
		sb_data_e2.common.p_func.pf_id = BP_FUNC(bp);
		sb_data_e2.common.p_func.vf_id = vfid;
		sb_data_e2.common.p_func.vf_valid = vf_valid;
		sb_data_e2.common.p_func.vnic_id = BP_VN(bp);
		sb_data_e2.common.same_igu_sb_1b = true;
		sb_data_e2.common.host_sb_addr.hi = U64_HI(mapping);
		sb_data_e2.common.host_sb_addr.lo = U64_LO(mapping);
		hc_sm_p = sb_data_e2.common.state_machine;
		sb_data_p = (u32 *)&sb_data_e2;
		data_size = sizeof(struct hc_status_block_data_e2)/sizeof(u32);
	} else {
		memset(&sb_data_e1x, 0,
		       sizeof(struct hc_status_block_data_e1x));
		sb_data_e1x.common.p_func.pf_id = BP_FUNC(bp);
		sb_data_e1x.common.p_func.vf_id = 0xff;
		sb_data_e1x.common.p_func.vf_valid = false;
		sb_data_e1x.common.p_func.vnic_id = BP_VN(bp);
		sb_data_e1x.common.same_igu_sb_1b = true;
		sb_data_e1x.common.host_sb_addr.hi = U64_HI(mapping);
		sb_data_e1x.common.host_sb_addr.lo = U64_LO(mapping);
		hc_sm_p = sb_data_e1x.common.state_machine;
		sb_data_p = (u32 *)&sb_data_e1x;
		data_size = sizeof(struct hc_status_block_data_e1x)/sizeof(u32);
	}

	/* program both the Rx and Tx state machines of this SB */
	bnx2x_setup_ndsb_state_machine(&hc_sm_p[SM_RX_ID],
				       igu_sb_id, igu_seg_id);
	bnx2x_setup_ndsb_state_machine(&hc_sm_p[SM_TX_ID],
				       igu_sb_id, igu_seg_id);

	DP(NETIF_MSG_HW, "Init FW SB %d\n", fw_sb_id);

	/* write indices to HW */
	bnx2x_wr_fp_sb_data(bp, fw_sb_id, sb_data_p, data_size);
}
4070
4071static void bnx2x_update_coalesce_sb_index(struct bnx2x *bp, u16 fw_sb_id,
4072 u8 sb_index, u8 disable, u16 usec)
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02004073{
Eilon Greenstein34f80b02008-06-23 20:33:01 -07004074 int port = BP_PORT(bp);
Dmitry Kravkov523224a2010-10-06 03:23:26 +00004075 u8 ticks = usec / BNX2X_BTR;
4076
4077 storm_memset_hc_timeout(bp, port, fw_sb_id, sb_index, ticks);
4078
4079 disable = disable ? 1 : (usec ? 0 : 1);
4080 storm_memset_hc_disable(bp, port, fw_sb_id, sb_index, disable);
4081}
4082
4083static void bnx2x_update_coalesce_sb(struct bnx2x *bp, u16 fw_sb_id,
4084 u16 tx_usec, u16 rx_usec)
4085{
4086 bnx2x_update_coalesce_sb_index(bp, fw_sb_id, U_SB_ETH_RX_CQ_INDEX,
4087 false, rx_usec);
4088 bnx2x_update_coalesce_sb_index(bp, fw_sb_id, C_SB_ETH_TX_CQ_INDEX,
4089 false, tx_usec);
4090}
Dmitry Kravkovf2e08992010-10-06 03:28:26 +00004091
/* bnx2x_init_def_sb - initialize the default (slow-path) status block.
 *
 * Sets up the attention status block (including reading the AEU enable
 * registers into the per-group attention masks and pointing the HC/IGU
 * at the host-side attention SB), then builds and writes the slow-path
 * SB data image, and finally enables the default SB in the IGU.
 */
static void bnx2x_init_def_sb(struct bnx2x *bp)
{
	struct host_sp_status_block *def_sb = bp->def_status_blk;
	dma_addr_t mapping = bp->def_status_blk_mapping;
	int igu_sp_sb_index;
	int igu_seg_id;
	int port = BP_PORT(bp);
	int func = BP_FUNC(bp);
	int reg_offset;
	u64 section;
	int index;
	struct hc_sp_status_block_data sp_sb_data;
	memset(&sp_sb_data, 0, sizeof(struct hc_sp_status_block_data));

	/* backward-compatible (HC-like) mode uses the fixed default-SB id */
	if (CHIP_INT_MODE_IS_BC(bp)) {
		igu_sp_sb_index = DEF_SB_IGU_ID;
		igu_seg_id = HC_SEG_ACCESS_DEF;
	} else {
		igu_sp_sb_index = bp->igu_dsb_id;
		igu_seg_id = IGU_SEG_ACCESS_DEF;
	}

	/* ATTN */
	section = ((u64)mapping) + offsetof(struct host_sp_status_block,
					    atten_status_block);
	def_sb->atten_status_block.status_block_id = igu_sp_sb_index;

	bp->attn_state = 0;

	/* snapshot the AEU enable bits for each dynamic attention group */
	reg_offset = (port ? MISC_REG_AEU_ENABLE1_FUNC_1_OUT_0 :
			     MISC_REG_AEU_ENABLE1_FUNC_0_OUT_0);
	for (index = 0; index < MAX_DYNAMIC_ATTN_GRPS; index++) {
		int sindex;
		/* take care of sig[0]..sig[4] */
		for (sindex = 0; sindex < 4; sindex++)
			bp->attn_group[index].sig[sindex] =
			   REG_RD(bp, reg_offset + sindex*0x4 + 0x10*index);

		if (CHIP_IS_E2(bp))
			/*
			 * enable5 is separate from the rest of the registers,
			 * and therefore the address skip is 4
			 * and not 16 between the different groups
			 */
			bp->attn_group[index].sig[4] = REG_RD(bp,
					reg_offset + 0x10 + 0x4*index);
		else
			bp->attn_group[index].sig[4] = 0;
	}

	/* tell the interrupt block where the attention SB lives in host
	 * memory (HC register pair on E1x/HC, IGU registers on E2) */
	if (bp->common.int_block == INT_BLOCK_HC) {
		reg_offset = (port ? HC_REG_ATTN_MSG1_ADDR_L :
				     HC_REG_ATTN_MSG0_ADDR_L);

		REG_WR(bp, reg_offset, U64_LO(section));
		REG_WR(bp, reg_offset + 4, U64_HI(section));
	} else if (CHIP_IS_E2(bp)) {
		REG_WR(bp, IGU_REG_ATTN_MSG_ADDR_L, U64_LO(section));
		REG_WR(bp, IGU_REG_ATTN_MSG_ADDR_H, U64_HI(section));
	}

	section = ((u64)mapping) + offsetof(struct host_sp_status_block,
					    sp_sb);

	/* disable the old image before programming the new one */
	bnx2x_zero_sp_sb(bp);

	sp_sb_data.host_sb_addr.lo	= U64_LO(section);
	sp_sb_data.host_sb_addr.hi	= U64_HI(section);
	sp_sb_data.igu_sb_id		= igu_sp_sb_index;
	sp_sb_data.igu_seg_id		= igu_seg_id;
	sp_sb_data.p_func.pf_id		= func;
	sp_sb_data.p_func.vnic_id	= BP_VN(bp);
	sp_sb_data.p_func.vf_id		= 0xff;

	bnx2x_wr_sp_sb_data(bp, &sp_sb_data);

	bp->stats_pending = 0;
	bp->set_mac_pending = 0;

	bnx2x_ack_sb(bp, bp->igu_dsb_id, USTORM_ID, 0, IGU_INT_ENABLE, 0);
}
4173
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00004174void bnx2x_update_coalesce(struct bnx2x *bp)
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02004175{
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02004176 int i;
4177
Vladislav Zolotarovec6ba942010-12-13 05:44:01 +00004178 for_each_eth_queue(bp, i)
Dmitry Kravkov523224a2010-10-06 03:23:26 +00004179 bnx2x_update_coalesce_sb(bp, bp->fp[i].fw_sb_id,
Ariel Elior423cfa7e2011-03-14 13:43:22 -07004180 bp->tx_ticks, bp->rx_ticks);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02004181}
4182
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02004183static void bnx2x_init_sp_ring(struct bnx2x *bp)
4184{
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02004185 spin_lock_init(&bp->spq_lock);
Vladislav Zolotarov6e30dd42011-02-06 11:25:41 -08004186 atomic_set(&bp->cq_spq_left, MAX_SPQ_PENDING);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02004187
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02004188 bp->spq_prod_idx = 0;
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02004189 bp->dsb_sp_prod = BNX2X_SP_DSB_INDEX;
4190 bp->spq_prod_bd = bp->spq;
4191 bp->spq_last_bd = bp->spq_prod_bd + MAX_SP_DESC_CNT;
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02004192}
4193
Dmitry Kravkov523224a2010-10-06 03:23:26 +00004194static void bnx2x_init_eq_ring(struct bnx2x *bp)
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02004195{
4196 int i;
Dmitry Kravkov523224a2010-10-06 03:23:26 +00004197 for (i = 1; i <= NUM_EQ_PAGES; i++) {
4198 union event_ring_elem *elem =
4199 &bp->eq_ring[EQ_DESC_CNT_PAGE * i - 1];
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02004200
Dmitry Kravkov523224a2010-10-06 03:23:26 +00004201 elem->next_page.addr.hi =
4202 cpu_to_le32(U64_HI(bp->eq_mapping +
4203 BCM_PAGE_SIZE * (i % NUM_EQ_PAGES)));
4204 elem->next_page.addr.lo =
4205 cpu_to_le32(U64_LO(bp->eq_mapping +
4206 BCM_PAGE_SIZE*(i % NUM_EQ_PAGES)));
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02004207 }
Dmitry Kravkov523224a2010-10-06 03:23:26 +00004208 bp->eq_cons = 0;
4209 bp->eq_prod = NUM_EQ_DESC;
4210 bp->eq_cons_sb = BNX2X_EQ_INDEX;
Vladislav Zolotarov6e30dd42011-02-06 11:25:41 -08004211 /* we want a warning message before it gets rought... */
4212 atomic_set(&bp->eq_spq_left,
4213 min_t(int, MAX_SP_DESC_CNT - MAX_SPQ_PENDING, NUM_EQ_DESC) - 1);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02004214}
4215
Tom Herbertab532cf2011-02-16 10:27:02 +00004216void bnx2x_push_indir_table(struct bnx2x *bp)
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02004217{
Eilon Greenstein26c8fa42009-01-14 21:29:55 -08004218 int func = BP_FUNC(bp);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02004219 int i;
4220
Eilon Greenstein555f6c72009-02-12 08:36:11 +00004221 if (bp->multi_mode == ETH_RSS_MODE_DISABLED)
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02004222 return;
4223
4224 for (i = 0; i < TSTORM_INDIRECTION_TABLE_SIZE; i++)
Eilon Greenstein34f80b02008-06-23 20:33:01 -07004225 REG_WR8(bp, BAR_TSTRORM_INTMEM +
Eilon Greenstein26c8fa42009-01-14 21:29:55 -08004226 TSTORM_INDIRECTION_TABLE_OFFSET(func) + i,
Tom Herbertab532cf2011-02-16 10:27:02 +00004227 bp->fp->cl_id + bp->rx_indir_table[i]);
4228}
4229
4230static void bnx2x_init_ind_table(struct bnx2x *bp)
4231{
4232 int i;
4233
4234 for (i = 0; i < TSTORM_INDIRECTION_TABLE_SIZE; i++)
4235 bp->rx_indir_table[i] = i % BNX2X_NUM_ETH_QUEUES(bp);
4236
4237 bnx2x_push_indir_table(bp);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02004238}
4239
/* bnx2x_set_storm_rx_mode - apply bp->rx_mode to the chip.
 *
 * Translates the driver rx mode (none/normal/allmulti/promisc) into
 * per-client MAC filter flags (including a separate policy for the FCoE
 * L2 client when CNIC is built in), programs the NIG LLH drive mask and
 * writes the resulting filter set to storm internal memory.
 */
void bnx2x_set_storm_rx_mode(struct bnx2x *bp)
{
	int mode = bp->rx_mode;
	int port = BP_PORT(bp);
	u16 cl_id;
	u32 def_q_filters = 0;

	/* All but management unicast packets should pass to the host as well */
	u32 llh_mask =
		NIG_LLH0_BRB1_DRV_MASK_REG_LLH0_BRB1_DRV_MASK_BRCST |
		NIG_LLH0_BRB1_DRV_MASK_REG_LLH0_BRB1_DRV_MASK_MLCST |
		NIG_LLH0_BRB1_DRV_MASK_REG_LLH0_BRB1_DRV_MASK_VLAN |
		NIG_LLH0_BRB1_DRV_MASK_REG_LLH0_BRB1_DRV_MASK_NO_VLAN;

	switch (mode) {
	case BNX2X_RX_MODE_NONE: /* no Rx */
		def_q_filters = BNX2X_ACCEPT_NONE;
#ifdef BCM_CNIC
		if (!NO_FCOE(bp)) {
			cl_id = bnx2x_fcoe(bp, cl_id);
			bnx2x_rxq_set_mac_filters(bp, cl_id, BNX2X_ACCEPT_NONE);
		}
#endif
		break;

	case BNX2X_RX_MODE_NORMAL:
		def_q_filters |= BNX2X_ACCEPT_UNICAST | BNX2X_ACCEPT_BROADCAST |
				BNX2X_ACCEPT_MULTICAST;
#ifdef BCM_CNIC
		if (!NO_FCOE(bp)) {
			cl_id = bnx2x_fcoe(bp, cl_id);
			bnx2x_rxq_set_mac_filters(bp, cl_id,
						  BNX2X_ACCEPT_UNICAST |
						  BNX2X_ACCEPT_MULTICAST);
		}
#endif
		break;

	case BNX2X_RX_MODE_ALLMULTI:
		def_q_filters |= BNX2X_ACCEPT_UNICAST | BNX2X_ACCEPT_BROADCAST |
				BNX2X_ACCEPT_ALL_MULTICAST;
#ifdef BCM_CNIC
		/*
		 * Prevent duplication of multicast packets by configuring FCoE
		 * L2 Client to receive only matched unicast frames.
		 */
		if (!NO_FCOE(bp)) {
			cl_id = bnx2x_fcoe(bp, cl_id);
			bnx2x_rxq_set_mac_filters(bp, cl_id,
						  BNX2X_ACCEPT_UNICAST);
		}
#endif
		break;

	case BNX2X_RX_MODE_PROMISC:
		def_q_filters |= BNX2X_PROMISCUOUS_MODE;
#ifdef BCM_CNIC
		/*
		 * Prevent packets duplication by configuring DROP_ALL for FCoE
		 * L2 Client.
		 */
		if (!NO_FCOE(bp)) {
			cl_id = bnx2x_fcoe(bp, cl_id);
			bnx2x_rxq_set_mac_filters(bp, cl_id, BNX2X_ACCEPT_NONE);
		}
#endif
		/* pass management unicast packets as well */
		llh_mask |= NIG_LLH0_BRB1_DRV_MASK_REG_LLH0_BRB1_DRV_MASK_UNCST;
		break;

	default:
		BNX2X_ERR("BAD rx mode (%d)\n", mode);
		break;
	}

	/* apply the default-queue policy to the leading client */
	cl_id = BP_L_ID(bp);
	bnx2x_rxq_set_mac_filters(bp, cl_id, def_q_filters);

	REG_WR(bp,
	       (port ? NIG_REG_LLH1_BRB1_DRV_MASK :
		       NIG_REG_LLH0_BRB1_DRV_MASK), llh_mask);

	DP(NETIF_MSG_IFUP, "rx mode %d\n"
		"drop_ucast 0x%x\ndrop_mcast 0x%x\ndrop_bcast 0x%x\n"
		"accp_ucast 0x%x\naccp_mcast 0x%x\naccp_bcast 0x%x\n"
		"unmatched_ucast 0x%x\n", mode,
		bp->mac_filters.ucast_drop_all,
		bp->mac_filters.mcast_drop_all,
		bp->mac_filters.bcast_drop_all,
		bp->mac_filters.ucast_accept_all,
		bp->mac_filters.mcast_accept_all,
		bp->mac_filters.bcast_accept_all,
		bp->mac_filters.unmatched_unicast
		);

	storm_memset_mac_filters(bp, &bp->mac_filters, BP_FUNC(bp));
}
4337
/* bnx2x_init_internal_common - chip-wide storm internal-memory init.
 *
 * Runs only on the path that loaded with a COMMON load code: publishes
 * the multi-function mode to all four storms, enables acceptance of
 * classification failures in switch-independent MF mode, zeroes the
 * USTORM aggregation data area and selects the IGU mode on E2.
 */
static void bnx2x_init_internal_common(struct bnx2x *bp)
{
	int i;

	if (!CHIP_IS_E1(bp)) {

		/* xstorm needs to know whether to add ovlan to packets or not,
		 * in switch-independent we'll write 0 to here... */
		REG_WR8(bp, BAR_XSTRORM_INTMEM + XSTORM_FUNCTION_MODE_OFFSET,
			bp->mf_mode);
		REG_WR8(bp, BAR_TSTRORM_INTMEM + TSTORM_FUNCTION_MODE_OFFSET,
			bp->mf_mode);
		REG_WR8(bp, BAR_CSTRORM_INTMEM + CSTORM_FUNCTION_MODE_OFFSET,
			bp->mf_mode);
		REG_WR8(bp, BAR_USTRORM_INTMEM + USTORM_FUNCTION_MODE_OFFSET,
			bp->mf_mode);
	}

	if (IS_MF_SI(bp))
		/*
		 * In switch independent mode, the TSTORM needs to accept
		 * packets that failed classification, since approximate match
		 * mac addresses aren't written to NIG LLH
		 */
		REG_WR8(bp, BAR_TSTRORM_INTMEM +
			    TSTORM_ACCEPT_CLASSIFY_FAILED_OFFSET, 2);

	/* Zero this manually as its initialization is
	   currently missing in the initTool */
	for (i = 0; i < (USTORM_AGG_DATA_SIZE >> 2); i++)
		REG_WR(bp, BAR_USTRORM_INTMEM +
		       USTORM_AGG_DATA_OFFSET + i * 4, 0);
	if (CHIP_IS_E2(bp)) {
		/* tell CSTORM whether the IGU runs in backward-compatible
		 * or normal mode */
		REG_WR8(bp, BAR_CSTRORM_INTMEM + CSTORM_IGU_MODE_OFFSET,
			CHIP_INT_MODE_IS_BC(bp) ?
			HC_IGU_BC_MODE : HC_IGU_NBC_MODE);
	}
}
4376
/* bnx2x_init_internal_port - per-port storm internal-memory init.
 *
 * Currently only sets up the DCB/PFC related internal memory.
 */
static void bnx2x_init_internal_port(struct bnx2x *bp)
{
	/* port */
	bnx2x_dcb_init_intmem_pfc(bp);
}
4382
/* bnx2x_init_internal - run storm internal-memory init per MCP load code.
 *
 * The cases deliberately fall through: a COMMON load performs common,
 * port and function init; a PORT load performs port and function init;
 * a FUNCTION load performs function init only (which itself happens
 * inside bnx2x_pf_init).
 */
static void bnx2x_init_internal(struct bnx2x *bp, u32 load_code)
{
	switch (load_code) {
	case FW_MSG_CODE_DRV_LOAD_COMMON:
	case FW_MSG_CODE_DRV_LOAD_COMMON_CHIP:
		bnx2x_init_internal_common(bp);
		/* no break */

	case FW_MSG_CODE_DRV_LOAD_PORT:
		bnx2x_init_internal_port(bp);
		/* no break */

	case FW_MSG_CODE_DRV_LOAD_FUNCTION:
		/* internal memory per function is
		   initialized inside bnx2x_pf_init */
		break;

	default:
		BNX2X_ERR("Unknown load_code (0x%x) from MCP\n", load_code);
		break;
	}
}
4405
/* bnx2x_init_fp_sb - initialize one fastpath queue's ids and status block.
 *
 * Derives the connection/client/FW-SB/IGU-SB ids of queue @fp_idx from
 * the function's base ids, caches the USTORM Rx-producers shortcut
 * offset, programs the status block in HW and refreshes the driver's
 * SB index copy.
 */
static void bnx2x_init_fp_sb(struct bnx2x *bp, int fp_idx)
{
	struct bnx2x_fastpath *fp = &bp->fp[fp_idx];

	fp->state = BNX2X_FP_STATE_CLOSED;

	fp->cid = fp_idx;
	fp->cl_id = BP_L_ID(bp) + fp_idx;
	fp->fw_sb_id = bp->base_fw_ndsb + fp->cl_id + CNIC_CONTEXT_USE;
	fp->igu_sb_id = bp->igu_base_sb + fp_idx + CNIC_CONTEXT_USE;
	/* qZone id equals to FW (per path) client id */
	fp->cl_qzone_id = fp->cl_id +
			   BP_PORT(bp)*(CHIP_IS_E2(bp) ? ETH_MAX_RX_CLIENTS_E2 :
				ETH_MAX_RX_CLIENTS_E1H);
	/* init shortcut */
	fp->ustorm_rx_prods_offset = CHIP_IS_E2(bp) ?
			    USTORM_RX_PRODS_E2_OFFSET(fp->cl_qzone_id) :
			    USTORM_RX_PRODS_E1X_OFFSET(BP_PORT(bp), fp->cl_id);
	/* Setup SB indices */
	fp->rx_cons_sb = BNX2X_RX_SB_INDEX;
	fp->tx_cons_sb = BNX2X_TX_SB_INDEX;

	DP(NETIF_MSG_IFUP, "queue[%d]:  bnx2x_init_sb(%p,%p)  "
				   "cl_id %d  fw_sb %d  igu_sb %d\n",
		   fp_idx, bp, fp->status_blk.e1x_sb, fp->cl_id, fp->fw_sb_id,
		   fp->igu_sb_id);
	bnx2x_init_sb(bp, fp->status_blk_mapping, BNX2X_VF_ID_INVALID, false,
		      fp->fw_sb_id, fp->igu_sb_id);

	bnx2x_update_fpsb_idx(fp);
}
4437
/* bnx2x_nic_init - top-level NIC initialization after firmware load.
 *
 * Initializes all fastpath status blocks (plus FCoE/CNIC ones when
 * built in), the default SB, the Rx/Tx/SPQ/EQ rings, storm internal
 * memory (per @load_code), the PF context, the RSS indirection table
 * and statistics - then enables interrupts.  The sequence is
 * order-critical and ends with memory barriers before the interrupt
 * enable.
 */
void bnx2x_nic_init(struct bnx2x *bp, u32 load_code)
{
	int i;

	for_each_eth_queue(bp, i)
		bnx2x_init_fp_sb(bp, i);
#ifdef BCM_CNIC
	if (!NO_FCOE(bp))
		bnx2x_init_fcoe_fp(bp);

	bnx2x_init_sb(bp, bp->cnic_sb_mapping,
		      BNX2X_VF_ID_INVALID, false,
		      CNIC_SB_ID(bp), CNIC_IGU_SB_ID(bp));

#endif

	/* Initialize MOD_ABS interrupts */
	bnx2x_init_mod_abs_int(bp, &bp->link_vars, bp->common.chip_id,
			       bp->common.shmem_base, bp->common.shmem2_base,
			       BP_PORT(bp));
	/* ensure status block indices were read */
	rmb();

	bnx2x_init_def_sb(bp);
	bnx2x_update_dsb_idx(bp);
	bnx2x_init_rx_rings(bp);
	bnx2x_init_tx_rings(bp);
	bnx2x_init_sp_ring(bp);
	bnx2x_init_eq_ring(bp);
	bnx2x_init_internal(bp, load_code);
	bnx2x_pf_init(bp);
	bnx2x_init_ind_table(bp);
	bnx2x_stats_init(bp);

	/* flush all before enabling interrupts */
	mb();
	mmiowb();

	bnx2x_int_enable(bp);

	/* Check for SPIO5 */
	bnx2x_attn_int_deasserted0(bp,
		REG_RD(bp, MISC_REG_AEU_AFTER_INVERT_1_FUNC_0 + BP_PORT(bp)*4) &
				   AEU_INPUTS_ATTN_BITS_SPIO5);
}
4483
4484/* end of nic init */
4485
4486/*
4487 * gzip service functions
4488 */
4489
4490static int bnx2x_gunzip_init(struct bnx2x *bp)
4491{
FUJITA Tomonori1a983142010-04-04 01:51:03 +00004492 bp->gunzip_buf = dma_alloc_coherent(&bp->pdev->dev, FW_BUF_SIZE,
4493 &bp->gunzip_mapping, GFP_KERNEL);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02004494 if (bp->gunzip_buf == NULL)
4495 goto gunzip_nomem1;
4496
4497 bp->strm = kmalloc(sizeof(*bp->strm), GFP_KERNEL);
4498 if (bp->strm == NULL)
4499 goto gunzip_nomem2;
4500
4501 bp->strm->workspace = kmalloc(zlib_inflate_workspacesize(),
4502 GFP_KERNEL);
4503 if (bp->strm->workspace == NULL)
4504 goto gunzip_nomem3;
4505
4506 return 0;
4507
4508gunzip_nomem3:
4509 kfree(bp->strm);
4510 bp->strm = NULL;
4511
4512gunzip_nomem2:
FUJITA Tomonori1a983142010-04-04 01:51:03 +00004513 dma_free_coherent(&bp->pdev->dev, FW_BUF_SIZE, bp->gunzip_buf,
4514 bp->gunzip_mapping);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02004515 bp->gunzip_buf = NULL;
4516
4517gunzip_nomem1:
Vladislav Zolotarovcdaa7cb2010-04-19 01:13:57 +00004518 netdev_err(bp->dev, "Cannot allocate firmware buffer for"
4519 " un-compression\n");
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02004520 return -ENOMEM;
4521}
4522
4523static void bnx2x_gunzip_end(struct bnx2x *bp)
4524{
Dmitry Kravkovb3b83c32011-05-04 23:50:33 +00004525 if (bp->strm) {
4526 kfree(bp->strm->workspace);
4527 kfree(bp->strm);
4528 bp->strm = NULL;
4529 }
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02004530
4531 if (bp->gunzip_buf) {
FUJITA Tomonori1a983142010-04-04 01:51:03 +00004532 dma_free_coherent(&bp->pdev->dev, FW_BUF_SIZE, bp->gunzip_buf,
4533 bp->gunzip_mapping);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02004534 bp->gunzip_buf = NULL;
4535 }
4536}
4537
/* Decompress a gzip-wrapped firmware blob into bp->gunzip_buf.
 *
 * @bp:   driver instance (bnx2x_gunzip_init() must have run)
 * @zbuf: gzip-formatted input (0x1f 0x8b, deflate method)
 * @len:  length of @zbuf in bytes
 *
 * On success returns 0 and sets bp->gunzip_outlen to the output length
 * in 32-bit words; otherwise returns -EINVAL for a bad header or the
 * zlib error code.
 *
 * NOTE(review): only the FNAME extra field of the gzip header is
 * handled; FEXTRA/FCOMMENT/FHCRC would be mis-parsed.  Presumably the
 * in-tree firmware images never set those flags - confirm if the
 * firmware generation process changes.
 */
static int bnx2x_gunzip(struct bnx2x *bp, const u8 *zbuf, int len)
{
	int n, rc;

	/* check gzip header: magic bytes + compression method */
	if ((zbuf[0] != 0x1f) || (zbuf[1] != 0x8b) || (zbuf[2] != Z_DEFLATED)) {
		BNX2X_ERR("Bad gzip header\n");
		return -EINVAL;
	}

	/* fixed gzip header is 10 bytes */
	n = 10;

#define FNAME 0x8

	/* skip the NUL-terminated original file name, if present */
	if (zbuf[3] & FNAME)
		while ((zbuf[n++] != 0) && (n < len));

	/* hand the raw deflate payload to zlib */
	bp->strm->next_in = (typeof(bp->strm->next_in))zbuf + n;
	bp->strm->avail_in = len - n;
	bp->strm->next_out = bp->gunzip_buf;
	bp->strm->avail_out = FW_BUF_SIZE;

	/* negative windowBits: raw deflate stream, no zlib header */
	rc = zlib_inflateInit2(bp->strm, -MAX_WBITS);
	if (rc != Z_OK)
		return rc;

	rc = zlib_inflate(bp->strm, Z_FINISH);
	if ((rc != Z_OK) && (rc != Z_STREAM_END))
		netdev_err(bp->dev, "Firmware decompression error: %s\n",
			   bp->strm->msg);

	/* output length must be 32-bit aligned; stored in words */
	bp->gunzip_outlen = (FW_BUF_SIZE - bp->strm->avail_out);
	if (bp->gunzip_outlen & 0x3)
		netdev_err(bp->dev, "Firmware decompression error:"
			   " gunzip_outlen (%d) not aligned\n",
			   bp->gunzip_outlen);
	bp->gunzip_outlen >>= 2;

	zlib_inflateEnd(bp->strm);

	if (rc == Z_STREAM_END)
		return 0;

	return rc;
}
4583
4584/* nic load/unload */
4585
4586/*
Eilon Greenstein34f80b02008-06-23 20:33:01 -07004587 * General service functions
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02004588 */
4589
4590/* send a NIG loopback debug packet */
4591static void bnx2x_lb_pckt(struct bnx2x *bp)
4592{
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02004593 u32 wb_write[3];
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02004594
4595 /* Ethernet source and destination addresses */
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02004596 wb_write[0] = 0x55555555;
4597 wb_write[1] = 0x55555555;
Eilon Greenstein34f80b02008-06-23 20:33:01 -07004598 wb_write[2] = 0x20; /* SOP */
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02004599 REG_WR_DMAE(bp, NIG_REG_DEBUG_PACKET_LB, wb_write, 3);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02004600
4601 /* NON-IP protocol */
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02004602 wb_write[0] = 0x09000000;
4603 wb_write[1] = 0x55555555;
Eilon Greenstein34f80b02008-06-23 20:33:01 -07004604 wb_write[2] = 0x10; /* EOP, eop_bvalid = 0 */
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02004605 REG_WR_DMAE(bp, NIG_REG_DEBUG_PACKET_LB, wb_write, 3);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02004606}
4607
4608/* some of the internal memories
4609 * are not directly readable from the driver
4610 * to test them we send debug packets
4611 */
/* Self-test of internal memories that cannot be read directly.
 *
 * Loopback debug packets are pushed through the BRB/PRS path and the
 * NIG/PRS statistics are polled to verify the packets made it through.
 * Returns 0 on success or a negative step-specific code (-1..-4) on
 * timeout/failure.  The whole sequence is strictly ordered: blocks are
 * isolated, exercised, reset and re-initialized between the two parts.
 */
static int bnx2x_int_mem_test(struct bnx2x *bp)
{
	int factor;
	int count, i;
	u32 val = 0;

	/* emulation/FPGA platforms are much slower - scale the timeouts */
	if (CHIP_REV_IS_FPGA(bp))
		factor = 120;
	else if (CHIP_REV_IS_EMUL(bp))
		factor = 200;
	else
		factor = 1;

	/* Disable inputs of parser neighbor blocks */
	REG_WR(bp, TSDM_REG_ENABLE_IN1, 0x0);
	REG_WR(bp, TCM_REG_PRS_IFEN, 0x0);
	REG_WR(bp, CFC_REG_DEBUG0, 0x1);
	REG_WR(bp, NIG_REG_PRS_REQ_IN_EN, 0x0);

	/* Write 0 to parser credits for CFC search request */
	REG_WR(bp, PRS_REG_CFC_SEARCH_INITIAL_CREDIT, 0x0);

	/* send Ethernet packet */
	bnx2x_lb_pckt(bp);

	/* TODO do i reset NIG statistic? */
	/* Wait until NIG register shows 1 packet of size 0x10 */
	count = 1000 * factor;
	while (count) {

		bnx2x_read_dmae(bp, NIG_REG_STAT2_BRB_OCTET, 2);
		val = *bnx2x_sp(bp, wb_data[0]);
		if (val == 0x10)
			break;

		msleep(10);
		count--;
	}
	if (val != 0x10) {
		BNX2X_ERR("NIG timeout val = 0x%x\n", val);
		return -1;
	}

	/* Wait until PRS register shows 1 packet */
	count = 1000 * factor;
	while (count) {
		val = REG_RD(bp, PRS_REG_NUM_OF_PACKETS);
		if (val == 1)
			break;

		msleep(10);
		count--;
	}
	if (val != 0x1) {
		BNX2X_ERR("PRS timeout val = 0x%x\n", val);
		return -2;
	}

	/* Reset and init BRB, PRS before part 2 */
	REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_CLEAR, 0x03);
	msleep(50);
	REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_SET, 0x03);
	msleep(50);
	bnx2x_init_block(bp, BRB1_BLOCK, COMMON_STAGE);
	bnx2x_init_block(bp, PRS_BLOCK, COMMON_STAGE);

	DP(NETIF_MSG_HW, "part2\n");

	/* Disable inputs of parser neighbor blocks */
	REG_WR(bp, TSDM_REG_ENABLE_IN1, 0x0);
	REG_WR(bp, TCM_REG_PRS_IFEN, 0x0);
	REG_WR(bp, CFC_REG_DEBUG0, 0x1);
	REG_WR(bp, NIG_REG_PRS_REQ_IN_EN, 0x0);

	/* Write 0 to parser credits for CFC search request */
	REG_WR(bp, PRS_REG_CFC_SEARCH_INITIAL_CREDIT, 0x0);

	/* send 10 Ethernet packets */
	for (i = 0; i < 10; i++)
		bnx2x_lb_pckt(bp);

	/* Wait until NIG register shows 10 + 1
	   packets of size 11*0x10 = 0xb0 */
	count = 1000 * factor;
	while (count) {

		bnx2x_read_dmae(bp, NIG_REG_STAT2_BRB_OCTET, 2);
		val = *bnx2x_sp(bp, wb_data[0]);
		if (val == 0xb0)
			break;

		msleep(10);
		count--;
	}
	if (val != 0xb0) {
		BNX2X_ERR("NIG timeout val = 0x%x\n", val);
		return -3;
	}

	/* Wait until PRS register shows 2 packets */
	val = REG_RD(bp, PRS_REG_NUM_OF_PACKETS);
	if (val != 2)
		BNX2X_ERR("PRS timeout val = 0x%x\n", val);

	/* Write 1 to parser credits for CFC search request */
	REG_WR(bp, PRS_REG_CFC_SEARCH_INITIAL_CREDIT, 0x1);

	/* Wait until PRS register shows 3 packets */
	msleep(10 * factor);
	/* Wait until NIG register shows 1 packet of size 0x10 */
	val = REG_RD(bp, PRS_REG_NUM_OF_PACKETS);
	if (val != 3)
		BNX2X_ERR("PRS timeout val = 0x%x\n", val);

	/* clear NIG EOP FIFO */
	for (i = 0; i < 11; i++)
		REG_RD(bp, NIG_REG_INGRESS_EOP_LB_FIFO);
	val = REG_RD(bp, NIG_REG_INGRESS_EOP_LB_EMPTY);
	if (val != 1) {
		BNX2X_ERR("clear of NIG failed\n");
		return -4;
	}

	/* Reset and init BRB, PRS, NIG - leave the blocks clean for
	   normal operation */
	REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_CLEAR, 0x03);
	msleep(50);
	REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_SET, 0x03);
	msleep(50);
	bnx2x_init_block(bp, BRB1_BLOCK, COMMON_STAGE);
	bnx2x_init_block(bp, PRS_BLOCK, COMMON_STAGE);
#ifndef BCM_CNIC
	/* set NIC mode */
	REG_WR(bp, PRS_REG_NIC_MODE, 1);
#endif

	/* Enable inputs of parser neighbor blocks */
	REG_WR(bp, TSDM_REG_ENABLE_IN1, 0x7fffffff);
	REG_WR(bp, TCM_REG_PRS_IFEN, 0x1);
	REG_WR(bp, CFC_REG_DEBUG0, 0x0);
	REG_WR(bp, NIG_REG_PRS_REQ_IN_EN, 0x1);

	DP(NETIF_MSG_HW, "done\n");

	return 0; /* OK */
}
4757
/* Unmask attention interrupts of the HW blocks (writing 0 to an
 * INT_MASK register enables all of that block's interrupt sources).
 * Non-zero masks and the commented-out writes keep specific sources
 * masked on purpose; the chosen values differ per chip family
 * (E1/E1H vs E2) and for FPGA platforms.
 */
static void bnx2x_enable_blocks_attention(struct bnx2x *bp)
{
	REG_WR(bp, PXP_REG_PXP_INT_MASK_0, 0);
	if (CHIP_IS_E2(bp))
		REG_WR(bp, PXP_REG_PXP_INT_MASK_1, 0x40);
	else
		REG_WR(bp, PXP_REG_PXP_INT_MASK_1, 0);
	REG_WR(bp, DORQ_REG_DORQ_INT_MASK, 0);
	REG_WR(bp, CFC_REG_CFC_INT_MASK, 0);
	/*
	 * mask read length error interrupts in brb for parser
	 * (parsing unit and 'checksum and crc' unit)
	 * these errors are legal (PU reads fixed length and CAC can cause
	 * read length error on truncated packets)
	 */
	REG_WR(bp, BRB1_REG_BRB1_INT_MASK, 0xFC00);
	REG_WR(bp, QM_REG_QM_INT_MASK, 0);
	REG_WR(bp, TM_REG_TM_INT_MASK, 0);
	REG_WR(bp, XSDM_REG_XSDM_INT_MASK_0, 0);
	REG_WR(bp, XSDM_REG_XSDM_INT_MASK_1, 0);
	REG_WR(bp, XCM_REG_XCM_INT_MASK, 0);
/*	REG_WR(bp, XSEM_REG_XSEM_INT_MASK_0, 0); */
/*	REG_WR(bp, XSEM_REG_XSEM_INT_MASK_1, 0); */
	REG_WR(bp, USDM_REG_USDM_INT_MASK_0, 0);
	REG_WR(bp, USDM_REG_USDM_INT_MASK_1, 0);
	REG_WR(bp, UCM_REG_UCM_INT_MASK, 0);
/*	REG_WR(bp, USEM_REG_USEM_INT_MASK_0, 0); */
/*	REG_WR(bp, USEM_REG_USEM_INT_MASK_1, 0); */
	REG_WR(bp, GRCBASE_UPB + PB_REG_PB_INT_MASK, 0);
	REG_WR(bp, CSDM_REG_CSDM_INT_MASK_0, 0);
	REG_WR(bp, CSDM_REG_CSDM_INT_MASK_1, 0);
	REG_WR(bp, CCM_REG_CCM_INT_MASK, 0);
/*	REG_WR(bp, CSEM_REG_CSEM_INT_MASK_0, 0); */
/*	REG_WR(bp, CSEM_REG_CSEM_INT_MASK_1, 0); */

	/* PXP2 mask depends on the platform/chip */
	if (CHIP_REV_IS_FPGA(bp))
		REG_WR(bp, PXP2_REG_PXP2_INT_MASK_0, 0x580000);
	else if (CHIP_IS_E2(bp))
		REG_WR(bp, PXP2_REG_PXP2_INT_MASK_0,
			(PXP2_PXP2_INT_MASK_0_REG_PGL_CPL_OF
				| PXP2_PXP2_INT_MASK_0_REG_PGL_CPL_AFT
				| PXP2_PXP2_INT_MASK_0_REG_PGL_PCIE_ATTN
				| PXP2_PXP2_INT_MASK_0_REG_PGL_READ_BLOCKED
				| PXP2_PXP2_INT_MASK_0_REG_PGL_WRITE_BLOCKED));
	else
		REG_WR(bp, PXP2_REG_PXP2_INT_MASK_0, 0x480000);
	REG_WR(bp, TSDM_REG_TSDM_INT_MASK_0, 0);
	REG_WR(bp, TSDM_REG_TSDM_INT_MASK_1, 0);
	REG_WR(bp, TCM_REG_TCM_INT_MASK, 0);
/*	REG_WR(bp, TSEM_REG_TSEM_INT_MASK_0, 0); */
/*	REG_WR(bp, TSEM_REG_TSEM_INT_MASK_1, 0); */
	REG_WR(bp, CDU_REG_CDU_INT_MASK, 0);
	REG_WR(bp, DMAE_REG_DMAE_INT_MASK, 0);
/*	REG_WR(bp, MISC_REG_MISC_INT_MASK, 0); */
	REG_WR(bp, PBF_REG_PBF_INT_MASK, 0x18);		/* bit 3,4 masked */
}
4814
Eilon Greenstein81f75bb2009-01-22 03:37:31 +00004815static void bnx2x_reset_common(struct bnx2x *bp)
4816{
4817 /* reset_common */
4818 REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_CLEAR,
4819 0xd3ffff7f);
4820 REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_2_CLEAR, 0x1403);
4821}
4822
Eilon Greenstein573f2032009-08-12 08:24:14 +00004823static void bnx2x_init_pxp(struct bnx2x *bp)
4824{
4825 u16 devctl;
4826 int r_order, w_order;
4827
4828 pci_read_config_word(bp->pdev,
4829 bp->pcie_cap + PCI_EXP_DEVCTL, &devctl);
4830 DP(NETIF_MSG_HW, "read 0x%x from devctl\n", devctl);
4831 w_order = ((devctl & PCI_EXP_DEVCTL_PAYLOAD) >> 5);
4832 if (bp->mrrs == -1)
4833 r_order = ((devctl & PCI_EXP_DEVCTL_READRQ) >> 12);
4834 else {
4835 DP(NETIF_MSG_HW, "force read order to %d\n", bp->mrrs);
4836 r_order = bp->mrrs;
4837 }
4838
4839 bnx2x_init_pxp_arb(bp, r_order, w_order);
4840}
Eilon Greensteinfd4ef402009-07-21 05:47:27 +00004841
/* Configure fan-failure detection via SPIO 5, if the board needs it.
 *
 * The shared-memory HW config says whether fan failure detection is
 * always required, never required, or depends on the PHY type (in which
 * case each port's PHY is queried).  When required, SPIO 5 is set up as
 * an active-low input whose event is routed to the IGU.  Does nothing
 * when there is no MCP (shared memory contents would be unreliable).
 */
static void bnx2x_setup_fan_failure_detection(struct bnx2x *bp)
{
	int is_required;
	u32 val;
	int port;

	if (BP_NOMCP(bp))
		return;

	is_required = 0;
	val = SHMEM_RD(bp, dev_info.shared_hw_config.config2) &
	      SHARED_HW_CFG_FAN_FAILURE_MASK;

	if (val == SHARED_HW_CFG_FAN_FAILURE_ENABLED)
		is_required = 1;

	/*
	 * The fan failure mechanism is usually related to the PHY type since
	 * the power consumption of the board is affected by the PHY. Currently,
	 * fan is required for most designs with SFX7101, BCM8727 and BCM8481.
	 */
	else if (val == SHARED_HW_CFG_FAN_FAILURE_PHY_TYPE)
		for (port = PORT_0; port < PORT_MAX; port++) {
			/* required if any port's PHY asks for it */
			is_required |=
				bnx2x_fan_failure_det_req(
					bp,
					bp->common.shmem_base,
					bp->common.shmem2_base,
					port);
		}

	DP(NETIF_MSG_HW, "fan detection setting: %d\n", is_required);

	if (is_required == 0)
		return;

	/* Fan failure is indicated by SPIO 5 */
	bnx2x_set_spio(bp, MISC_REGISTERS_SPIO_5,
		       MISC_REGISTERS_SPIO_INPUT_HI_Z);

	/* set to active low mode */
	val = REG_RD(bp, MISC_REG_SPIO_INT);
	val |= ((1 << MISC_REGISTERS_SPIO_5) <<
					MISC_REGISTERS_SPIO_INT_OLD_SET_POS);
	REG_WR(bp, MISC_REG_SPIO_INT, val);

	/* enable interrupt to signal the IGU */
	val = REG_RD(bp, MISC_REG_SPIO_EVENT_EN);
	val |= (1 << MISC_REGISTERS_SPIO_5);
	REG_WR(bp, MISC_REG_SPIO_EVENT_EN, val);
}
4893
Dmitry Kravkovf2e08992010-10-06 03:28:26 +00004894static void bnx2x_pretend_func(struct bnx2x *bp, u8 pretend_func_num)
4895{
4896 u32 offset = 0;
4897
4898 if (CHIP_IS_E1(bp))
4899 return;
4900 if (CHIP_IS_E1H(bp) && (pretend_func_num >= E1H_FUNC_MAX))
4901 return;
4902
4903 switch (BP_ABS_FUNC(bp)) {
4904 case 0:
4905 offset = PXP2_REG_PGL_PRETEND_FUNC_F0;
4906 break;
4907 case 1:
4908 offset = PXP2_REG_PGL_PRETEND_FUNC_F1;
4909 break;
4910 case 2:
4911 offset = PXP2_REG_PGL_PRETEND_FUNC_F2;
4912 break;
4913 case 3:
4914 offset = PXP2_REG_PGL_PRETEND_FUNC_F3;
4915 break;
4916 case 4:
4917 offset = PXP2_REG_PGL_PRETEND_FUNC_F4;
4918 break;
4919 case 5:
4920 offset = PXP2_REG_PGL_PRETEND_FUNC_F5;
4921 break;
4922 case 6:
4923 offset = PXP2_REG_PGL_PRETEND_FUNC_F6;
4924 break;
4925 case 7:
4926 offset = PXP2_REG_PGL_PRETEND_FUNC_F7;
4927 break;
4928 default:
4929 return;
4930 }
4931
4932 REG_WR(bp, offset, pretend_func_num);
4933 REG_RD(bp, offset);
4934 DP(NETIF_MSG_HW, "Pretending to func %d\n", pretend_func_num);
4935}
4936
4937static void bnx2x_pf_disable(struct bnx2x *bp)
4938{
4939 u32 val = REG_RD(bp, IGU_REG_PF_CONFIGURATION);
4940 val &= ~IGU_PF_CONF_FUNC_EN;
4941
4942 REG_WR(bp, IGU_REG_PF_CONFIGURATION, val);
4943 REG_WR(bp, PGLUE_B_REG_INTERNAL_PFID_ENABLE_MASTER, 0);
4944 REG_WR(bp, CFC_REG_WEAK_ENABLE_PF, 0);
4945}
4946
Dmitry Kravkov523224a2010-10-06 03:23:26 +00004947static int bnx2x_init_hw_common(struct bnx2x *bp, u32 load_code)
Eilon Greenstein34f80b02008-06-23 20:33:01 -07004948{
4949 u32 val, i;
4950
Dmitry Kravkovf2e08992010-10-06 03:28:26 +00004951 DP(BNX2X_MSG_MCP, "starting common init func %d\n", BP_ABS_FUNC(bp));
Eilon Greenstein34f80b02008-06-23 20:33:01 -07004952
Eilon Greenstein81f75bb2009-01-22 03:37:31 +00004953 bnx2x_reset_common(bp);
Eilon Greenstein34f80b02008-06-23 20:33:01 -07004954 REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_SET, 0xffffffff);
4955 REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_2_SET, 0xfffc);
4956
Vladislav Zolotarov94a78b72009-04-27 03:27:43 -07004957 bnx2x_init_block(bp, MISC_BLOCK, COMMON_STAGE);
Dmitry Kravkovf2e08992010-10-06 03:28:26 +00004958 if (!CHIP_IS_E1(bp))
Dmitry Kravkovfb3bff12010-10-06 03:26:40 +00004959 REG_WR(bp, MISC_REG_E1HMF_MODE, IS_MF(bp));
Eilon Greenstein34f80b02008-06-23 20:33:01 -07004960
Dmitry Kravkovf2e08992010-10-06 03:28:26 +00004961 if (CHIP_IS_E2(bp)) {
4962 u8 fid;
4963
4964 /**
4965 * 4-port mode or 2-port mode we need to turn of master-enable
4966 * for everyone, after that, turn it back on for self.
4967 * so, we disregard multi-function or not, and always disable
4968 * for all functions on the given path, this means 0,2,4,6 for
4969 * path 0 and 1,3,5,7 for path 1
4970 */
4971 for (fid = BP_PATH(bp); fid < E2_FUNC_MAX*2; fid += 2) {
4972 if (fid == BP_ABS_FUNC(bp)) {
4973 REG_WR(bp,
4974 PGLUE_B_REG_INTERNAL_PFID_ENABLE_MASTER,
4975 1);
4976 continue;
4977 }
4978
4979 bnx2x_pretend_func(bp, fid);
4980 /* clear pf enable */
4981 bnx2x_pf_disable(bp);
4982 bnx2x_pretend_func(bp, BP_ABS_FUNC(bp));
4983 }
4984 }
Eilon Greenstein34f80b02008-06-23 20:33:01 -07004985
Vladislav Zolotarov94a78b72009-04-27 03:27:43 -07004986 bnx2x_init_block(bp, PXP_BLOCK, COMMON_STAGE);
Eilon Greenstein34f80b02008-06-23 20:33:01 -07004987 if (CHIP_IS_E1(bp)) {
4988 /* enable HW interrupt from PXP on USDM overflow
4989 bit 16 on INT_MASK_0 */
4990 REG_WR(bp, PXP_REG_PXP_INT_MASK_0, 0);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02004991 }
4992
Vladislav Zolotarov94a78b72009-04-27 03:27:43 -07004993 bnx2x_init_block(bp, PXP2_BLOCK, COMMON_STAGE);
Eilon Greenstein34f80b02008-06-23 20:33:01 -07004994 bnx2x_init_pxp(bp);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02004995
4996#ifdef __BIG_ENDIAN
Eilon Greenstein34f80b02008-06-23 20:33:01 -07004997 REG_WR(bp, PXP2_REG_RQ_QM_ENDIAN_M, 1);
4998 REG_WR(bp, PXP2_REG_RQ_TM_ENDIAN_M, 1);
4999 REG_WR(bp, PXP2_REG_RQ_SRC_ENDIAN_M, 1);
5000 REG_WR(bp, PXP2_REG_RQ_CDU_ENDIAN_M, 1);
5001 REG_WR(bp, PXP2_REG_RQ_DBG_ENDIAN_M, 1);
Eilon Greenstein8badd272009-02-12 08:36:15 +00005002 /* make sure this value is 0 */
5003 REG_WR(bp, PXP2_REG_RQ_HC_ENDIAN_M, 0);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02005004
Eilon Greenstein34f80b02008-06-23 20:33:01 -07005005/* REG_WR(bp, PXP2_REG_RD_PBF_SWAP_MODE, 1); */
5006 REG_WR(bp, PXP2_REG_RD_QM_SWAP_MODE, 1);
5007 REG_WR(bp, PXP2_REG_RD_TM_SWAP_MODE, 1);
5008 REG_WR(bp, PXP2_REG_RD_SRC_SWAP_MODE, 1);
5009 REG_WR(bp, PXP2_REG_RD_CDURD_SWAP_MODE, 1);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02005010#endif
5011
Dmitry Kravkov523224a2010-10-06 03:23:26 +00005012 bnx2x_ilt_init_page_size(bp, INITOP_SET);
5013
Eilon Greenstein34f80b02008-06-23 20:33:01 -07005014 if (CHIP_REV_IS_FPGA(bp) && CHIP_IS_E1H(bp))
5015 REG_WR(bp, PXP2_REG_PGL_TAGS_LIMIT, 0x1);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02005016
Eilon Greenstein34f80b02008-06-23 20:33:01 -07005017 /* let the HW do it's magic ... */
5018 msleep(100);
5019 /* finish PXP init */
5020 val = REG_RD(bp, PXP2_REG_RQ_CFG_DONE);
5021 if (val != 1) {
5022 BNX2X_ERR("PXP2 CFG failed\n");
5023 return -EBUSY;
5024 }
5025 val = REG_RD(bp, PXP2_REG_RD_INIT_DONE);
5026 if (val != 1) {
5027 BNX2X_ERR("PXP2 RD_INIT failed\n");
5028 return -EBUSY;
5029 }
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02005030
Dmitry Kravkovf2e08992010-10-06 03:28:26 +00005031 /* Timers bug workaround E2 only. We need to set the entire ILT to
5032 * have entries with value "0" and valid bit on.
5033 * This needs to be done by the first PF that is loaded in a path
5034 * (i.e. common phase)
5035 */
5036 if (CHIP_IS_E2(bp)) {
5037 struct ilt_client_info ilt_cli;
5038 struct bnx2x_ilt ilt;
5039 memset(&ilt_cli, 0, sizeof(struct ilt_client_info));
5040 memset(&ilt, 0, sizeof(struct bnx2x_ilt));
5041
Uwe Kleine-Königb5950762010-11-01 15:38:34 -04005042 /* initialize dummy TM client */
Dmitry Kravkovf2e08992010-10-06 03:28:26 +00005043 ilt_cli.start = 0;
5044 ilt_cli.end = ILT_NUM_PAGE_ENTRIES - 1;
5045 ilt_cli.client_num = ILT_CLIENT_TM;
5046
5047 /* Step 1: set zeroes to all ilt page entries with valid bit on
5048 * Step 2: set the timers first/last ilt entry to point
5049 * to the entire range to prevent ILT range error for 3rd/4th
Lucas De Marchi25985ed2011-03-30 22:57:33 -03005050 * vnic (this code assumes existence of the vnic)
Dmitry Kravkovf2e08992010-10-06 03:28:26 +00005051 *
5052 * both steps performed by call to bnx2x_ilt_client_init_op()
5053 * with dummy TM client
5054 *
5055 * we must use pretend since PXP2_REG_RQ_##blk##_FIRST_ILT
5056 * and his brother are split registers
5057 */
5058 bnx2x_pretend_func(bp, (BP_PATH(bp) + 6));
5059 bnx2x_ilt_client_init_op_ilt(bp, &ilt, &ilt_cli, INITOP_CLEAR);
5060 bnx2x_pretend_func(bp, BP_ABS_FUNC(bp));
5061
5062 REG_WR(bp, PXP2_REG_RQ_DRAM_ALIGN, BNX2X_PXP_DRAM_ALIGN);
5063 REG_WR(bp, PXP2_REG_RQ_DRAM_ALIGN_RD, BNX2X_PXP_DRAM_ALIGN);
5064 REG_WR(bp, PXP2_REG_RQ_DRAM_ALIGN_SEL, 1);
5065 }
5066
5067
Eilon Greenstein34f80b02008-06-23 20:33:01 -07005068 REG_WR(bp, PXP2_REG_RQ_DISABLE_INPUTS, 0);
5069 REG_WR(bp, PXP2_REG_RD_DISABLE_INPUTS, 0);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02005070
Dmitry Kravkovf2e08992010-10-06 03:28:26 +00005071 if (CHIP_IS_E2(bp)) {
5072 int factor = CHIP_REV_IS_EMUL(bp) ? 1000 :
5073 (CHIP_REV_IS_FPGA(bp) ? 400 : 0);
5074 bnx2x_init_block(bp, PGLUE_B_BLOCK, COMMON_STAGE);
5075
5076 bnx2x_init_block(bp, ATC_BLOCK, COMMON_STAGE);
5077
5078 /* let the HW do it's magic ... */
5079 do {
5080 msleep(200);
5081 val = REG_RD(bp, ATC_REG_ATC_INIT_DONE);
5082 } while (factor-- && (val != 1));
5083
5084 if (val != 1) {
5085 BNX2X_ERR("ATC_INIT failed\n");
5086 return -EBUSY;
5087 }
5088 }
5089
Vladislav Zolotarov94a78b72009-04-27 03:27:43 -07005090 bnx2x_init_block(bp, DMAE_BLOCK, COMMON_STAGE);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02005091
Eilon Greenstein34f80b02008-06-23 20:33:01 -07005092 /* clean the DMAE memory */
5093 bp->dmae_ready = 1;
5094 bnx2x_init_fill(bp, TSEM_REG_PRAM, 0, 8);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02005095
Vladislav Zolotarov94a78b72009-04-27 03:27:43 -07005096 bnx2x_init_block(bp, TCM_BLOCK, COMMON_STAGE);
5097 bnx2x_init_block(bp, UCM_BLOCK, COMMON_STAGE);
5098 bnx2x_init_block(bp, CCM_BLOCK, COMMON_STAGE);
5099 bnx2x_init_block(bp, XCM_BLOCK, COMMON_STAGE);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02005100
Eilon Greenstein34f80b02008-06-23 20:33:01 -07005101 bnx2x_read_dmae(bp, XSEM_REG_PASSIVE_BUFFER, 3);
5102 bnx2x_read_dmae(bp, CSEM_REG_PASSIVE_BUFFER, 3);
5103 bnx2x_read_dmae(bp, TSEM_REG_PASSIVE_BUFFER, 3);
5104 bnx2x_read_dmae(bp, USEM_REG_PASSIVE_BUFFER, 3);
5105
Vladislav Zolotarov94a78b72009-04-27 03:27:43 -07005106 bnx2x_init_block(bp, QM_BLOCK, COMMON_STAGE);
Michael Chan37b091b2009-10-10 13:46:55 +00005107
Dmitry Kravkovf2e08992010-10-06 03:28:26 +00005108 if (CHIP_MODE_IS_4_PORT(bp))
5109 bnx2x_init_block(bp, QM_4PORT_BLOCK, COMMON_STAGE);
Dmitry Kravkovf85582f2010-10-06 03:34:21 +00005110
Dmitry Kravkov523224a2010-10-06 03:23:26 +00005111 /* QM queues pointers table */
5112 bnx2x_qm_init_ptr_table(bp, bp->qm_cid_count, INITOP_SET);
Michael Chan37b091b2009-10-10 13:46:55 +00005113
Eilon Greenstein34f80b02008-06-23 20:33:01 -07005114 /* soft reset pulse */
5115 REG_WR(bp, QM_REG_SOFT_RESET, 1);
5116 REG_WR(bp, QM_REG_SOFT_RESET, 0);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02005117
Michael Chan37b091b2009-10-10 13:46:55 +00005118#ifdef BCM_CNIC
Vladislav Zolotarov94a78b72009-04-27 03:27:43 -07005119 bnx2x_init_block(bp, TIMERS_BLOCK, COMMON_STAGE);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02005120#endif
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02005121
Vladislav Zolotarov94a78b72009-04-27 03:27:43 -07005122 bnx2x_init_block(bp, DQ_BLOCK, COMMON_STAGE);
Dmitry Kravkov523224a2010-10-06 03:23:26 +00005123 REG_WR(bp, DORQ_REG_DPM_CID_OFST, BNX2X_DB_SHIFT);
5124
Eilon Greenstein34f80b02008-06-23 20:33:01 -07005125 if (!CHIP_REV_IS_SLOW(bp)) {
5126 /* enable hw interrupt from doorbell Q */
5127 REG_WR(bp, DORQ_REG_DORQ_INT_MASK, 0);
5128 }
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02005129
Vladislav Zolotarov94a78b72009-04-27 03:27:43 -07005130 bnx2x_init_block(bp, BRB1_BLOCK, COMMON_STAGE);
Dmitry Kravkovf2e08992010-10-06 03:28:26 +00005131 if (CHIP_MODE_IS_4_PORT(bp)) {
5132 REG_WR(bp, BRB1_REG_FULL_LB_XOFF_THRESHOLD, 248);
5133 REG_WR(bp, BRB1_REG_FULL_LB_XON_THRESHOLD, 328);
5134 }
5135
Vladislav Zolotarov94a78b72009-04-27 03:27:43 -07005136 bnx2x_init_block(bp, PRS_BLOCK, COMMON_STAGE);
Eilon Greenstein26c8fa42009-01-14 21:29:55 -08005137 REG_WR(bp, PRS_REG_A_PRSU_20, 0xf);
Michael Chan37b091b2009-10-10 13:46:55 +00005138#ifndef BCM_CNIC
Eilon Greenstein3196a882008-08-13 15:58:49 -07005139 /* set NIC mode */
5140 REG_WR(bp, PRS_REG_NIC_MODE, 1);
Michael Chan37b091b2009-10-10 13:46:55 +00005141#endif
Dmitry Kravkovf2e08992010-10-06 03:28:26 +00005142 if (!CHIP_IS_E1(bp))
Dmitry Kravkov0793f83f2010-12-01 12:39:28 -08005143 REG_WR(bp, PRS_REG_E1HOV_MODE, IS_MF_SD(bp));
Dmitry Kravkovf85582f2010-10-06 03:34:21 +00005144
Dmitry Kravkovf2e08992010-10-06 03:28:26 +00005145 if (CHIP_IS_E2(bp)) {
5146 /* Bit-map indicating which L2 hdrs may appear after the
5147 basic Ethernet header */
Dmitry Kravkov0793f83f2010-12-01 12:39:28 -08005148 int has_ovlan = IS_MF_SD(bp);
Dmitry Kravkovf2e08992010-10-06 03:28:26 +00005149 REG_WR(bp, PRS_REG_HDRS_AFTER_BASIC, (has_ovlan ? 7 : 6));
5150 REG_WR(bp, PRS_REG_MUST_HAVE_HDRS, (has_ovlan ? 1 : 0));
5151 }
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02005152
Vladislav Zolotarov94a78b72009-04-27 03:27:43 -07005153 bnx2x_init_block(bp, TSDM_BLOCK, COMMON_STAGE);
5154 bnx2x_init_block(bp, CSDM_BLOCK, COMMON_STAGE);
5155 bnx2x_init_block(bp, USDM_BLOCK, COMMON_STAGE);
5156 bnx2x_init_block(bp, XSDM_BLOCK, COMMON_STAGE);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02005157
Eilon Greensteinca003922009-08-12 22:53:28 -07005158 bnx2x_init_fill(bp, TSEM_REG_FAST_MEMORY, 0, STORM_INTMEM_SIZE(bp));
5159 bnx2x_init_fill(bp, USEM_REG_FAST_MEMORY, 0, STORM_INTMEM_SIZE(bp));
5160 bnx2x_init_fill(bp, CSEM_REG_FAST_MEMORY, 0, STORM_INTMEM_SIZE(bp));
5161 bnx2x_init_fill(bp, XSEM_REG_FAST_MEMORY, 0, STORM_INTMEM_SIZE(bp));
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02005162
Vladislav Zolotarov94a78b72009-04-27 03:27:43 -07005163 bnx2x_init_block(bp, TSEM_BLOCK, COMMON_STAGE);
5164 bnx2x_init_block(bp, USEM_BLOCK, COMMON_STAGE);
5165 bnx2x_init_block(bp, CSEM_BLOCK, COMMON_STAGE);
5166 bnx2x_init_block(bp, XSEM_BLOCK, COMMON_STAGE);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02005167
Dmitry Kravkovf2e08992010-10-06 03:28:26 +00005168 if (CHIP_MODE_IS_4_PORT(bp))
5169 bnx2x_init_block(bp, XSEM_4PORT_BLOCK, COMMON_STAGE);
5170
Eilon Greenstein34f80b02008-06-23 20:33:01 -07005171 /* sync semi rtc */
5172 REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_CLEAR,
5173 0x80000000);
5174 REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_SET,
5175 0x80000000);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02005176
Vladislav Zolotarov94a78b72009-04-27 03:27:43 -07005177 bnx2x_init_block(bp, UPB_BLOCK, COMMON_STAGE);
5178 bnx2x_init_block(bp, XPB_BLOCK, COMMON_STAGE);
5179 bnx2x_init_block(bp, PBF_BLOCK, COMMON_STAGE);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02005180
Dmitry Kravkovf2e08992010-10-06 03:28:26 +00005181 if (CHIP_IS_E2(bp)) {
Dmitry Kravkov0793f83f2010-12-01 12:39:28 -08005182 int has_ovlan = IS_MF_SD(bp);
Dmitry Kravkovf2e08992010-10-06 03:28:26 +00005183 REG_WR(bp, PBF_REG_HDRS_AFTER_BASIC, (has_ovlan ? 7 : 6));
5184 REG_WR(bp, PBF_REG_MUST_HAVE_HDRS, (has_ovlan ? 1 : 0));
5185 }
5186
Eilon Greenstein34f80b02008-06-23 20:33:01 -07005187 REG_WR(bp, SRC_REG_SOFT_RST, 1);
Tom Herbertc68ed252010-04-23 00:10:52 -07005188 for (i = SRC_REG_KEYRSS0_0; i <= SRC_REG_KEYRSS1_9; i += 4)
5189 REG_WR(bp, i, random32());
Dmitry Kravkovf85582f2010-10-06 03:34:21 +00005190
Vladislav Zolotarov94a78b72009-04-27 03:27:43 -07005191 bnx2x_init_block(bp, SRCH_BLOCK, COMMON_STAGE);
Michael Chan37b091b2009-10-10 13:46:55 +00005192#ifdef BCM_CNIC
5193 REG_WR(bp, SRC_REG_KEYSEARCH_0, 0x63285672);
5194 REG_WR(bp, SRC_REG_KEYSEARCH_1, 0x24b8f2cc);
5195 REG_WR(bp, SRC_REG_KEYSEARCH_2, 0x223aef9b);
5196 REG_WR(bp, SRC_REG_KEYSEARCH_3, 0x26001e3a);
5197 REG_WR(bp, SRC_REG_KEYSEARCH_4, 0x7ae91116);
5198 REG_WR(bp, SRC_REG_KEYSEARCH_5, 0x5ce5230b);
5199 REG_WR(bp, SRC_REG_KEYSEARCH_6, 0x298d8adf);
5200 REG_WR(bp, SRC_REG_KEYSEARCH_7, 0x6eb0ff09);
5201 REG_WR(bp, SRC_REG_KEYSEARCH_8, 0x1830f82f);
5202 REG_WR(bp, SRC_REG_KEYSEARCH_9, 0x01e46be7);
5203#endif
Eilon Greenstein34f80b02008-06-23 20:33:01 -07005204 REG_WR(bp, SRC_REG_SOFT_RST, 0);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02005205
Eilon Greenstein34f80b02008-06-23 20:33:01 -07005206 if (sizeof(union cdu_context) != 1024)
5207 /* we currently assume that a context is 1024 bytes */
Vladislav Zolotarovcdaa7cb2010-04-19 01:13:57 +00005208 dev_alert(&bp->pdev->dev, "please adjust the size "
5209 "of cdu_context(%ld)\n",
Joe Perches7995c642010-02-17 15:01:52 +00005210 (long)sizeof(union cdu_context));
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02005211
Vladislav Zolotarov94a78b72009-04-27 03:27:43 -07005212 bnx2x_init_block(bp, CDU_BLOCK, COMMON_STAGE);
Eilon Greenstein34f80b02008-06-23 20:33:01 -07005213 val = (4 << 24) + (0 << 12) + 1024;
5214 REG_WR(bp, CDU_REG_CDU_GLOBAL_PARAMS, val);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02005215
Vladislav Zolotarov94a78b72009-04-27 03:27:43 -07005216 bnx2x_init_block(bp, CFC_BLOCK, COMMON_STAGE);
Eilon Greenstein34f80b02008-06-23 20:33:01 -07005217 REG_WR(bp, CFC_REG_INIT_REG, 0x7FF);
Eilon Greenstein8d9c5f32009-02-15 23:24:08 -08005218 /* enable context validation interrupt from CFC */
5219 REG_WR(bp, CFC_REG_CFC_INT_MASK, 0);
5220
5221 /* set the thresholds to prevent CFC/CDU race */
5222 REG_WR(bp, CFC_REG_DEBUG0, 0x20020000);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02005223
Vladislav Zolotarov94a78b72009-04-27 03:27:43 -07005224 bnx2x_init_block(bp, HC_BLOCK, COMMON_STAGE);
Dmitry Kravkovf2e08992010-10-06 03:28:26 +00005225
5226 if (CHIP_IS_E2(bp) && BP_NOMCP(bp))
5227 REG_WR(bp, IGU_REG_RESET_MEMORIES, 0x36);
5228
5229 bnx2x_init_block(bp, IGU_BLOCK, COMMON_STAGE);
Vladislav Zolotarov94a78b72009-04-27 03:27:43 -07005230 bnx2x_init_block(bp, MISC_AEU_BLOCK, COMMON_STAGE);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02005231
Vladislav Zolotarov94a78b72009-04-27 03:27:43 -07005232 bnx2x_init_block(bp, PXPCS_BLOCK, COMMON_STAGE);
Eilon Greenstein34f80b02008-06-23 20:33:01 -07005233 /* Reset PCIE errors for debug */
5234 REG_WR(bp, 0x2814, 0xffffffff);
5235 REG_WR(bp, 0x3820, 0xffffffff);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02005236
Dmitry Kravkovf2e08992010-10-06 03:28:26 +00005237 if (CHIP_IS_E2(bp)) {
5238 REG_WR(bp, PCICFG_OFFSET + PXPCS_TL_CONTROL_5,
5239 (PXPCS_TL_CONTROL_5_ERR_UNSPPORT1 |
5240 PXPCS_TL_CONTROL_5_ERR_UNSPPORT));
5241 REG_WR(bp, PCICFG_OFFSET + PXPCS_TL_FUNC345_STAT,
5242 (PXPCS_TL_FUNC345_STAT_ERR_UNSPPORT4 |
5243 PXPCS_TL_FUNC345_STAT_ERR_UNSPPORT3 |
5244 PXPCS_TL_FUNC345_STAT_ERR_UNSPPORT2));
5245 REG_WR(bp, PCICFG_OFFSET + PXPCS_TL_FUNC678_STAT,
5246 (PXPCS_TL_FUNC678_STAT_ERR_UNSPPORT7 |
5247 PXPCS_TL_FUNC678_STAT_ERR_UNSPPORT6 |
5248 PXPCS_TL_FUNC678_STAT_ERR_UNSPPORT5));
5249 }
5250
Vladislav Zolotarov94a78b72009-04-27 03:27:43 -07005251 bnx2x_init_block(bp, EMAC0_BLOCK, COMMON_STAGE);
Vladislav Zolotarov94a78b72009-04-27 03:27:43 -07005252 bnx2x_init_block(bp, EMAC1_BLOCK, COMMON_STAGE);
Vladislav Zolotarov94a78b72009-04-27 03:27:43 -07005253 bnx2x_init_block(bp, DBU_BLOCK, COMMON_STAGE);
Vladislav Zolotarov94a78b72009-04-27 03:27:43 -07005254 bnx2x_init_block(bp, DBG_BLOCK, COMMON_STAGE);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02005255
Vladislav Zolotarov94a78b72009-04-27 03:27:43 -07005256 bnx2x_init_block(bp, NIG_BLOCK, COMMON_STAGE);
Dmitry Kravkovf2e08992010-10-06 03:28:26 +00005257 if (!CHIP_IS_E1(bp)) {
Dmitry Kravkovfb3bff12010-10-06 03:26:40 +00005258 REG_WR(bp, NIG_REG_LLH_MF_MODE, IS_MF(bp));
Dmitry Kravkov0793f83f2010-12-01 12:39:28 -08005259 REG_WR(bp, NIG_REG_LLH_E1HOV_MODE, IS_MF_SD(bp));
Eilon Greenstein34f80b02008-06-23 20:33:01 -07005260 }
Dmitry Kravkovf2e08992010-10-06 03:28:26 +00005261 if (CHIP_IS_E2(bp)) {
5262 /* Bit-map indicating which L2 hdrs may appear after the
5263 basic Ethernet header */
Dmitry Kravkov0793f83f2010-12-01 12:39:28 -08005264 REG_WR(bp, NIG_REG_P0_HDRS_AFTER_BASIC, (IS_MF_SD(bp) ? 7 : 6));
Dmitry Kravkovf2e08992010-10-06 03:28:26 +00005265 }
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02005266
Eilon Greenstein34f80b02008-06-23 20:33:01 -07005267 if (CHIP_REV_IS_SLOW(bp))
5268 msleep(200);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02005269
Eilon Greenstein34f80b02008-06-23 20:33:01 -07005270 /* finish CFC init */
5271 val = reg_poll(bp, CFC_REG_LL_INIT_DONE, 1, 100, 10);
5272 if (val != 1) {
5273 BNX2X_ERR("CFC LL_INIT failed\n");
5274 return -EBUSY;
5275 }
5276 val = reg_poll(bp, CFC_REG_AC_INIT_DONE, 1, 100, 10);
5277 if (val != 1) {
5278 BNX2X_ERR("CFC AC_INIT failed\n");
5279 return -EBUSY;
5280 }
5281 val = reg_poll(bp, CFC_REG_CAM_INIT_DONE, 1, 100, 10);
5282 if (val != 1) {
5283 BNX2X_ERR("CFC CAM_INIT failed\n");
5284 return -EBUSY;
5285 }
5286 REG_WR(bp, CFC_REG_DEBUG0, 0);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02005287
Dmitry Kravkovf2e08992010-10-06 03:28:26 +00005288 if (CHIP_IS_E1(bp)) {
5289 /* read NIG statistic
5290 to see if this is our first up since powerup */
5291 bnx2x_read_dmae(bp, NIG_REG_STAT2_BRB_OCTET, 2);
5292 val = *bnx2x_sp(bp, wb_data[0]);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02005293
Dmitry Kravkovf2e08992010-10-06 03:28:26 +00005294 /* do internal memory self test */
5295 if ((val == 0) && bnx2x_int_mem_test(bp)) {
5296 BNX2X_ERR("internal mem self test failed\n");
5297 return -EBUSY;
5298 }
Eilon Greenstein34f80b02008-06-23 20:33:01 -07005299 }
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02005300
Eilon Greensteinfd4ef402009-07-21 05:47:27 +00005301 bnx2x_setup_fan_failure_detection(bp);
5302
Eilon Greenstein34f80b02008-06-23 20:33:01 -07005303 /* clear PXP2 attentions */
5304 REG_RD(bp, PXP2_REG_PXP2_INT_STS_CLR_0);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02005305
Vladislav Zolotarov4a33bc02011-01-09 02:20:04 +00005306 bnx2x_enable_blocks_attention(bp);
5307 if (CHIP_PARITY_ENABLED(bp))
5308 bnx2x_enable_blocks_parity(bp);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02005309
Yaniv Rosner6bbca912008-08-13 15:57:28 -07005310 if (!BP_NOMCP(bp)) {
Dmitry Kravkovf2e08992010-10-06 03:28:26 +00005311 /* In E2 2-PORT mode, same ext phy is used for the two paths */
5312 if ((load_code == FW_MSG_CODE_DRV_LOAD_COMMON_CHIP) ||
5313 CHIP_IS_E1x(bp)) {
5314 u32 shmem_base[2], shmem2_base[2];
5315 shmem_base[0] = bp->common.shmem_base;
5316 shmem2_base[0] = bp->common.shmem2_base;
5317 if (CHIP_IS_E2(bp)) {
5318 shmem_base[1] =
5319 SHMEM2_RD(bp, other_shmem_base_addr);
5320 shmem2_base[1] =
5321 SHMEM2_RD(bp, other_shmem2_base_addr);
5322 }
5323 bnx2x_acquire_phy_lock(bp);
5324 bnx2x_common_init_phy(bp, shmem_base, shmem2_base,
5325 bp->common.chip_id);
5326 bnx2x_release_phy_lock(bp);
5327 }
Yaniv Rosner6bbca912008-08-13 15:57:28 -07005328 } else
5329 BNX2X_ERR("Bootcode is missing - can not initialize link\n");
5330
Eilon Greenstein34f80b02008-06-23 20:33:01 -07005331 return 0;
5332}
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02005333
/*
 * bnx2x_init_hw_port() - per-port hardware initialization.
 *
 * Runs the PORT0/PORT1 init stage of every HW block and programs the
 * port-scoped registers: BRB pause watermarks, PBF arbiter thresholds
 * and credits, NIG classification/LLFC setup and the AEU attention
 * masks.  Invoked from bnx2x_init_hw() for FW_MSG_CODE_DRV_LOAD_PORT
 * and for the COMMON load codes that fall through to it.
 *
 * @bp: driver instance; BP_PORT(bp) selects which port is initialized.
 *
 * Returns 0 (this stage has no failure paths).
 */
static int bnx2x_init_hw_port(struct bnx2x *bp)
{
	int port = BP_PORT(bp);
	int init_stage = port ? PORT1_STAGE : PORT0_STAGE;
	u32 low, high;
	u32 val;

	DP(BNX2X_MSG_MCP, "starting port init port %d\n", port);

	/* NOTE(review): zeroes this port's NIG interrupt mask register —
	 * presumably keeps NIG interrupts off until init completes; confirm
	 * polarity against the register spec. */
	REG_WR(bp, NIG_REG_MASK_INTERRUPT_PORT0 + port*4, 0);

	bnx2x_init_block(bp, PXP_BLOCK, init_stage);
	bnx2x_init_block(bp, PXP2_BLOCK, init_stage);

	/* Timers bug workaround: disables the pf_master bit in pglue at
	 * common phase, we need to enable it here before any dmae access are
	 * attempted. Therefore we manually added the enable-master to the
	 * port phase (it also happens in the function phase)
	 */
	if (CHIP_IS_E2(bp))
		REG_WR(bp, PGLUE_B_REG_INTERNAL_PFID_ENABLE_MASTER, 1);

	bnx2x_init_block(bp, TCM_BLOCK, init_stage);
	bnx2x_init_block(bp, UCM_BLOCK, init_stage);
	bnx2x_init_block(bp, CCM_BLOCK, init_stage);
	bnx2x_init_block(bp, XCM_BLOCK, init_stage);

	/* QM cid (connection) count */
	bnx2x_qm_init_cid_count(bp, bp->qm_cid_count, INITOP_SET);

#ifdef BCM_CNIC
	/* CNIC timers: linear scan time and max active CIDs for this port */
	bnx2x_init_block(bp, TIMERS_BLOCK, init_stage);
	REG_WR(bp, TM_REG_LIN0_SCAN_TIME + port*4, 20);
	REG_WR(bp, TM_REG_LIN0_MAX_ACTIVE_CID + port*4, 31);
#endif

	bnx2x_init_block(bp, DQ_BLOCK, init_stage);

	if (CHIP_MODE_IS_4_PORT(bp))
		bnx2x_init_block(bp, QM_4PORT_BLOCK, init_stage);

	if (CHIP_IS_E1(bp) || CHIP_IS_E1H(bp)) {
		/* BRB pause watermarks, in 256-byte blocks, derived from the
		 * operating mode (MF / one-port) and the MTU */
		bnx2x_init_block(bp, BRB1_BLOCK, init_stage);
		if (CHIP_REV_IS_SLOW(bp) && CHIP_IS_E1(bp)) {
			/* no pause for emulation and FPGA */
			low = 0;
			high = 513;
		} else {
			if (IS_MF(bp))
				low = ((bp->flags & ONE_PORT_FLAG) ? 160 : 246);
			else if (bp->dev->mtu > 4096) {
				if (bp->flags & ONE_PORT_FLAG)
					low = 160;
				else {
					val = bp->dev->mtu;
					/* (24*1024 + val*4)/256 */
					low = 96 + (val/64) +
					      ((val % 64) ? 1 : 0);
				}
			} else
				low = ((bp->flags & ONE_PORT_FLAG) ? 80 : 160);
			high = low + 56;	/* 14*1024/256 */
		}
		REG_WR(bp, BRB1_REG_PAUSE_LOW_THRESHOLD_0 + port*4, low);
		REG_WR(bp, BRB1_REG_PAUSE_HIGH_THRESHOLD_0 + port*4, high);
	}

	if (CHIP_MODE_IS_4_PORT(bp)) {
		/* fixed XOFF/XON thresholds and guaranteed MAC credit in
		 * 4-port mode */
		REG_WR(bp, BRB1_REG_PAUSE_0_XOFF_THRESHOLD_0 + port*8, 248);
		REG_WR(bp, BRB1_REG_PAUSE_0_XON_THRESHOLD_0 + port*8, 328);
		REG_WR(bp, (BP_PORT(bp) ? BRB1_REG_MAC_GUARANTIED_1 :
					  BRB1_REG_MAC_GUARANTIED_0), 40);
	}

	bnx2x_init_block(bp, PRS_BLOCK, init_stage);

	bnx2x_init_block(bp, TSDM_BLOCK, init_stage);
	bnx2x_init_block(bp, CSDM_BLOCK, init_stage);
	bnx2x_init_block(bp, USDM_BLOCK, init_stage);
	bnx2x_init_block(bp, XSDM_BLOCK, init_stage);

	bnx2x_init_block(bp, TSEM_BLOCK, init_stage);
	bnx2x_init_block(bp, USEM_BLOCK, init_stage);
	bnx2x_init_block(bp, CSEM_BLOCK, init_stage);
	bnx2x_init_block(bp, XSEM_BLOCK, init_stage);
	if (CHIP_MODE_IS_4_PORT(bp))
		bnx2x_init_block(bp, XSEM_4PORT_BLOCK, init_stage);

	bnx2x_init_block(bp, UPB_BLOCK, init_stage);
	bnx2x_init_block(bp, XPB_BLOCK, init_stage);

	bnx2x_init_block(bp, PBF_BLOCK, init_stage);

	if (!CHIP_IS_E2(bp)) {
		/* configure PBF to work without PAUSE mtu 9000 */
		REG_WR(bp, PBF_REG_P0_PAUSE_ENABLE + port*4, 0);

		/* update threshold */
		REG_WR(bp, PBF_REG_P0_ARB_THRSH + port*4, (9040/16));
		/* update init credit */
		REG_WR(bp, PBF_REG_P0_INIT_CRD + port*4, (9040/16) + 553 - 22);

		/* probe changes */
		REG_WR(bp, PBF_REG_INIT_P0 + port*4, 1);
		udelay(50);
		REG_WR(bp, PBF_REG_INIT_P0 + port*4, 0);
	}

#ifdef BCM_CNIC
	bnx2x_init_block(bp, SRCH_BLOCK, init_stage);
#endif
	bnx2x_init_block(bp, CDU_BLOCK, init_stage);
	bnx2x_init_block(bp, CFC_BLOCK, init_stage);

	if (CHIP_IS_E1(bp)) {
		REG_WR(bp, HC_REG_LEADING_EDGE_0 + port*8, 0);
		REG_WR(bp, HC_REG_TRAILING_EDGE_0 + port*8, 0);
	}
	bnx2x_init_block(bp, HC_BLOCK, init_stage);

	bnx2x_init_block(bp, IGU_BLOCK, init_stage);

	bnx2x_init_block(bp, MISC_AEU_BLOCK, init_stage);
	/* init aeu_mask_attn_func_0/1:
	 *  - SF mode: bits 3-7 are masked. only bits 0-2 are in use
	 *  - MF mode: bit 3 is masked. bits 0-2 are in use as in SF
	 *             bits 4-7 are used for "per vn group attention" */
	val = IS_MF(bp) ? 0xF7 : 0x7;
	/* Enable DCBX attention for all but E1 */
	val |= CHIP_IS_E1(bp) ? 0 : 0x10;
	REG_WR(bp, MISC_REG_AEU_MASK_ATTN_FUNC_0 + port*4, val);

	bnx2x_init_block(bp, PXPCS_BLOCK, init_stage);
	bnx2x_init_block(bp, EMAC0_BLOCK, init_stage);
	bnx2x_init_block(bp, EMAC1_BLOCK, init_stage);
	bnx2x_init_block(bp, DBU_BLOCK, init_stage);
	bnx2x_init_block(bp, DBG_BLOCK, init_stage);

	bnx2x_init_block(bp, NIG_BLOCK, init_stage);

	REG_WR(bp, NIG_REG_XGXS_SERDES0_MODE_SEL + port*4, 1);

	if (!CHIP_IS_E1(bp)) {
		/* 0x2 disable mf_ov, 0x1 enable */
		REG_WR(bp, NIG_REG_LLH0_BRB1_DRV_MASK_MF + port*4,
		       (IS_MF_SD(bp) ? 0x1 : 0x2));

		if (CHIP_IS_E2(bp)) {
			/* LLH classification type: 0 = default,
			 * 1 = switch-dependent MF, 2 = switch-independent MF */
			val = 0;
			switch (bp->mf_mode) {
			case MULTI_FUNCTION_SD:
				val = 1;
				break;
			case MULTI_FUNCTION_SI:
				val = 2;
				break;
			}

			REG_WR(bp, (BP_PORT(bp) ? NIG_REG_LLH1_CLS_TYPE :
						  NIG_REG_LLH0_CLS_TYPE), val);
		}
		{
			REG_WR(bp, NIG_REG_LLFC_ENABLE_0 + port*4, 0);
			REG_WR(bp, NIG_REG_LLFC_OUT_EN_0 + port*4, 0);
			REG_WR(bp, NIG_REG_PAUSE_ENABLE_0 + port*4, 1);
		}
	}

	bnx2x_init_block(bp, MCP_BLOCK, init_stage);
	bnx2x_init_block(bp, DMAE_BLOCK, init_stage);
	/* boards with fan-failure detection route SPIO5 into the AEU so a
	 * fan failure raises an attention */
	if (bnx2x_fan_failure_det_req(bp, bp->common.shmem_base,
				      bp->common.shmem2_base, port)) {
		u32 reg_addr = (port ? MISC_REG_AEU_ENABLE1_FUNC_1_OUT_0 :
				       MISC_REG_AEU_ENABLE1_FUNC_0_OUT_0);
		val = REG_RD(bp, reg_addr);
		val |= AEU_INPUTS_ATTN_BITS_SPIO5;
		REG_WR(bp, reg_addr, val);
	}
	bnx2x__link_reset(bp);

	return 0;
}
5516
Eilon Greenstein34f80b02008-06-23 20:33:01 -07005517static void bnx2x_ilt_wr(struct bnx2x *bp, u32 index, dma_addr_t addr)
5518{
5519 int reg;
5520
Dmitry Kravkovf2e08992010-10-06 03:28:26 +00005521 if (CHIP_IS_E1(bp))
Eilon Greenstein34f80b02008-06-23 20:33:01 -07005522 reg = PXP2_REG_RQ_ONCHIP_AT + index*8;
Dmitry Kravkovf2e08992010-10-06 03:28:26 +00005523 else
5524 reg = PXP2_REG_RQ_ONCHIP_AT_B0 + index*8;
Eilon Greenstein34f80b02008-06-23 20:33:01 -07005525
5526 bnx2x_wb_wr(bp, reg, ONCHIP_ADDR1(addr), ONCHIP_ADDR2(addr));
5527}
5528
/* Clear the given IGU status block on behalf of the PF (as opposed to a VF);
 * thin wrapper over the generic helper. */
static inline void bnx2x_igu_clear_sb(struct bnx2x *bp, u8 idu_sb_id)
{
	bnx2x_igu_clear_sb_gen(bp, idu_sb_id, true /*PF*/);
}
5533
5534static inline void bnx2x_clear_func_ilt(struct bnx2x *bp, u32 func)
5535{
5536 u32 i, base = FUNC_ILT_BASE(func);
5537 for (i = base; i < base + ILT_PER_FUNC; i++)
5538 bnx2x_ilt_wr(bp, i, 0);
5539}
5540
Dmitry Kravkov523224a2010-10-06 03:23:26 +00005541static int bnx2x_init_hw_func(struct bnx2x *bp)
Eilon Greenstein34f80b02008-06-23 20:33:01 -07005542{
5543 int port = BP_PORT(bp);
5544 int func = BP_FUNC(bp);
Dmitry Kravkov523224a2010-10-06 03:23:26 +00005545 struct bnx2x_ilt *ilt = BP_ILT(bp);
5546 u16 cdu_ilt_start;
Eilon Greenstein8badd272009-02-12 08:36:15 +00005547 u32 addr, val;
Vladislav Zolotarovf4a66892010-10-19 05:13:09 +00005548 u32 main_mem_base, main_mem_size, main_mem_prty_clr;
5549 int i, main_mem_width;
Eilon Greenstein34f80b02008-06-23 20:33:01 -07005550
Vladislav Zolotarovcdaa7cb2010-04-19 01:13:57 +00005551 DP(BNX2X_MSG_MCP, "starting func init func %d\n", func);
Eilon Greenstein34f80b02008-06-23 20:33:01 -07005552
Eilon Greenstein8badd272009-02-12 08:36:15 +00005553 /* set MSI reconfigure capability */
Dmitry Kravkovf2e08992010-10-06 03:28:26 +00005554 if (bp->common.int_block == INT_BLOCK_HC) {
5555 addr = (port ? HC_REG_CONFIG_1 : HC_REG_CONFIG_0);
5556 val = REG_RD(bp, addr);
5557 val |= HC_CONFIG_0_REG_MSI_ATTN_EN_0;
5558 REG_WR(bp, addr, val);
5559 }
Eilon Greenstein8badd272009-02-12 08:36:15 +00005560
Dmitry Kravkov523224a2010-10-06 03:23:26 +00005561 ilt = BP_ILT(bp);
5562 cdu_ilt_start = ilt->clients[ILT_CLIENT_CDU].start;
Eilon Greenstein34f80b02008-06-23 20:33:01 -07005563
Dmitry Kravkov523224a2010-10-06 03:23:26 +00005564 for (i = 0; i < L2_ILT_LINES(bp); i++) {
5565 ilt->lines[cdu_ilt_start + i].page =
5566 bp->context.vcxt + (ILT_PAGE_CIDS * i);
5567 ilt->lines[cdu_ilt_start + i].page_mapping =
5568 bp->context.cxt_mapping + (CDU_ILT_PAGE_SZ * i);
5569 /* cdu ilt pages are allocated manually so there's no need to
5570 set the size */
5571 }
5572 bnx2x_ilt_init_op(bp, INITOP_SET);
Dmitry Kravkovf85582f2010-10-06 03:34:21 +00005573
Michael Chan37b091b2009-10-10 13:46:55 +00005574#ifdef BCM_CNIC
Dmitry Kravkov523224a2010-10-06 03:23:26 +00005575 bnx2x_src_init_t2(bp, bp->t2, bp->t2_mapping, SRC_CONN_NUM);
Michael Chan37b091b2009-10-10 13:46:55 +00005576
Dmitry Kravkov523224a2010-10-06 03:23:26 +00005577 /* T1 hash bits value determines the T1 number of entries */
5578 REG_WR(bp, SRC_REG_NUMBER_HASH_BITS0 + port*4, SRC_HASH_BITS);
Michael Chan37b091b2009-10-10 13:46:55 +00005579#endif
Eilon Greenstein34f80b02008-06-23 20:33:01 -07005580
Dmitry Kravkov523224a2010-10-06 03:23:26 +00005581#ifndef BCM_CNIC
5582 /* set NIC mode */
5583 REG_WR(bp, PRS_REG_NIC_MODE, 1);
5584#endif /* BCM_CNIC */
Eilon Greenstein34f80b02008-06-23 20:33:01 -07005585
Dmitry Kravkovf2e08992010-10-06 03:28:26 +00005586 if (CHIP_IS_E2(bp)) {
5587 u32 pf_conf = IGU_PF_CONF_FUNC_EN;
5588
5589 /* Turn on a single ISR mode in IGU if driver is going to use
5590 * INT#x or MSI
5591 */
5592 if (!(bp->flags & USING_MSIX_FLAG))
5593 pf_conf |= IGU_PF_CONF_SINGLE_ISR_EN;
5594 /*
5595 * Timers workaround bug: function init part.
5596 * Need to wait 20msec after initializing ILT,
5597 * needed to make sure there are no requests in
5598 * one of the PXP internal queues with "old" ILT addresses
5599 */
5600 msleep(20);
5601 /*
5602 * Master enable - Due to WB DMAE writes performed before this
5603 * register is re-initialized as part of the regular function
5604 * init
5605 */
5606 REG_WR(bp, PGLUE_B_REG_INTERNAL_PFID_ENABLE_MASTER, 1);
5607 /* Enable the function in IGU */
5608 REG_WR(bp, IGU_REG_PF_CONFIGURATION, pf_conf);
5609 }
5610
Dmitry Kravkov523224a2010-10-06 03:23:26 +00005611 bp->dmae_ready = 1;
5612
5613 bnx2x_init_block(bp, PGLUE_B_BLOCK, FUNC0_STAGE + func);
5614
Dmitry Kravkovf2e08992010-10-06 03:28:26 +00005615 if (CHIP_IS_E2(bp))
5616 REG_WR(bp, PGLUE_B_REG_WAS_ERROR_PF_7_0_CLR, func);
5617
Dmitry Kravkov523224a2010-10-06 03:23:26 +00005618 bnx2x_init_block(bp, MISC_BLOCK, FUNC0_STAGE + func);
5619 bnx2x_init_block(bp, TCM_BLOCK, FUNC0_STAGE + func);
5620 bnx2x_init_block(bp, UCM_BLOCK, FUNC0_STAGE + func);
5621 bnx2x_init_block(bp, CCM_BLOCK, FUNC0_STAGE + func);
5622 bnx2x_init_block(bp, XCM_BLOCK, FUNC0_STAGE + func);
5623 bnx2x_init_block(bp, TSEM_BLOCK, FUNC0_STAGE + func);
5624 bnx2x_init_block(bp, USEM_BLOCK, FUNC0_STAGE + func);
5625 bnx2x_init_block(bp, CSEM_BLOCK, FUNC0_STAGE + func);
5626 bnx2x_init_block(bp, XSEM_BLOCK, FUNC0_STAGE + func);
5627
Dmitry Kravkovf2e08992010-10-06 03:28:26 +00005628 if (CHIP_IS_E2(bp)) {
5629 REG_WR(bp, BAR_XSTRORM_INTMEM + XSTORM_PATH_ID_OFFSET,
5630 BP_PATH(bp));
5631 REG_WR(bp, BAR_CSTRORM_INTMEM + CSTORM_PATH_ID_OFFSET,
5632 BP_PATH(bp));
5633 }
5634
5635 if (CHIP_MODE_IS_4_PORT(bp))
5636 bnx2x_init_block(bp, XSEM_4PORT_BLOCK, FUNC0_STAGE + func);
5637
5638 if (CHIP_IS_E2(bp))
5639 REG_WR(bp, QM_REG_PF_EN, 1);
5640
Dmitry Kravkov523224a2010-10-06 03:23:26 +00005641 bnx2x_init_block(bp, QM_BLOCK, FUNC0_STAGE + func);
Dmitry Kravkovf2e08992010-10-06 03:28:26 +00005642
5643 if (CHIP_MODE_IS_4_PORT(bp))
5644 bnx2x_init_block(bp, QM_4PORT_BLOCK, FUNC0_STAGE + func);
5645
Dmitry Kravkov523224a2010-10-06 03:23:26 +00005646 bnx2x_init_block(bp, TIMERS_BLOCK, FUNC0_STAGE + func);
5647 bnx2x_init_block(bp, DQ_BLOCK, FUNC0_STAGE + func);
5648 bnx2x_init_block(bp, BRB1_BLOCK, FUNC0_STAGE + func);
5649 bnx2x_init_block(bp, PRS_BLOCK, FUNC0_STAGE + func);
5650 bnx2x_init_block(bp, TSDM_BLOCK, FUNC0_STAGE + func);
5651 bnx2x_init_block(bp, CSDM_BLOCK, FUNC0_STAGE + func);
5652 bnx2x_init_block(bp, USDM_BLOCK, FUNC0_STAGE + func);
5653 bnx2x_init_block(bp, XSDM_BLOCK, FUNC0_STAGE + func);
5654 bnx2x_init_block(bp, UPB_BLOCK, FUNC0_STAGE + func);
5655 bnx2x_init_block(bp, XPB_BLOCK, FUNC0_STAGE + func);
5656 bnx2x_init_block(bp, PBF_BLOCK, FUNC0_STAGE + func);
Dmitry Kravkovf2e08992010-10-06 03:28:26 +00005657 if (CHIP_IS_E2(bp))
5658 REG_WR(bp, PBF_REG_DISABLE_PF, 0);
5659
Dmitry Kravkov523224a2010-10-06 03:23:26 +00005660 bnx2x_init_block(bp, CDU_BLOCK, FUNC0_STAGE + func);
5661
5662 bnx2x_init_block(bp, CFC_BLOCK, FUNC0_STAGE + func);
5663
Dmitry Kravkovf2e08992010-10-06 03:28:26 +00005664 if (CHIP_IS_E2(bp))
5665 REG_WR(bp, CFC_REG_WEAK_ENABLE_PF, 1);
5666
Dmitry Kravkovfb3bff12010-10-06 03:26:40 +00005667 if (IS_MF(bp)) {
Eilon Greenstein34f80b02008-06-23 20:33:01 -07005668 REG_WR(bp, NIG_REG_LLH0_FUNC_EN + port*8, 1);
Dmitry Kravkovfb3bff12010-10-06 03:26:40 +00005669 REG_WR(bp, NIG_REG_LLH0_FUNC_VLAN_ID + port*8, bp->mf_ov);
Eilon Greenstein34f80b02008-06-23 20:33:01 -07005670 }
5671
Dmitry Kravkov523224a2010-10-06 03:23:26 +00005672 bnx2x_init_block(bp, MISC_AEU_BLOCK, FUNC0_STAGE + func);
5673
Eilon Greenstein34f80b02008-06-23 20:33:01 -07005674 /* HC init per function */
Dmitry Kravkovf2e08992010-10-06 03:28:26 +00005675 if (bp->common.int_block == INT_BLOCK_HC) {
5676 if (CHIP_IS_E1H(bp)) {
5677 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_12 + func*4, 0);
5678
5679 REG_WR(bp, HC_REG_LEADING_EDGE_0 + port*8, 0);
5680 REG_WR(bp, HC_REG_TRAILING_EDGE_0 + port*8, 0);
5681 }
5682 bnx2x_init_block(bp, HC_BLOCK, FUNC0_STAGE + func);
5683
5684 } else {
5685 int num_segs, sb_idx, prod_offset;
5686
Eilon Greenstein34f80b02008-06-23 20:33:01 -07005687 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_12 + func*4, 0);
5688
Dmitry Kravkovf2e08992010-10-06 03:28:26 +00005689 if (CHIP_IS_E2(bp)) {
5690 REG_WR(bp, IGU_REG_LEADING_EDGE_LATCH, 0);
5691 REG_WR(bp, IGU_REG_TRAILING_EDGE_LATCH, 0);
5692 }
5693
5694 bnx2x_init_block(bp, IGU_BLOCK, FUNC0_STAGE + func);
5695
5696 if (CHIP_IS_E2(bp)) {
5697 int dsb_idx = 0;
5698 /**
5699 * Producer memory:
5700 * E2 mode: address 0-135 match to the mapping memory;
5701 * 136 - PF0 default prod; 137 - PF1 default prod;
5702 * 138 - PF2 default prod; 139 - PF3 default prod;
5703 * 140 - PF0 attn prod; 141 - PF1 attn prod;
5704 * 142 - PF2 attn prod; 143 - PF3 attn prod;
5705 * 144-147 reserved.
5706 *
5707 * E1.5 mode - In backward compatible mode;
5708 * for non default SB; each even line in the memory
5709 * holds the U producer and each odd line hold
5710 * the C producer. The first 128 producers are for
5711 * NDSB (PF0 - 0-31; PF1 - 32-63 and so on). The last 20
5712 * producers are for the DSB for each PF.
5713 * Each PF has five segments: (the order inside each
5714 * segment is PF0; PF1; PF2; PF3) - 128-131 U prods;
5715 * 132-135 C prods; 136-139 X prods; 140-143 T prods;
5716 * 144-147 attn prods;
5717 */
5718 /* non-default-status-blocks */
5719 num_segs = CHIP_INT_MODE_IS_BC(bp) ?
5720 IGU_BC_NDSB_NUM_SEGS : IGU_NORM_NDSB_NUM_SEGS;
5721 for (sb_idx = 0; sb_idx < bp->igu_sb_cnt; sb_idx++) {
5722 prod_offset = (bp->igu_base_sb + sb_idx) *
5723 num_segs;
5724
5725 for (i = 0; i < num_segs; i++) {
5726 addr = IGU_REG_PROD_CONS_MEMORY +
5727 (prod_offset + i) * 4;
5728 REG_WR(bp, addr, 0);
5729 }
5730 /* send consumer update with value 0 */
5731 bnx2x_ack_sb(bp, bp->igu_base_sb + sb_idx,
5732 USTORM_ID, 0, IGU_INT_NOP, 1);
5733 bnx2x_igu_clear_sb(bp,
5734 bp->igu_base_sb + sb_idx);
5735 }
5736
5737 /* default-status-blocks */
5738 num_segs = CHIP_INT_MODE_IS_BC(bp) ?
5739 IGU_BC_DSB_NUM_SEGS : IGU_NORM_DSB_NUM_SEGS;
5740
5741 if (CHIP_MODE_IS_4_PORT(bp))
5742 dsb_idx = BP_FUNC(bp);
5743 else
5744 dsb_idx = BP_E1HVN(bp);
5745
5746 prod_offset = (CHIP_INT_MODE_IS_BC(bp) ?
5747 IGU_BC_BASE_DSB_PROD + dsb_idx :
5748 IGU_NORM_BASE_DSB_PROD + dsb_idx);
5749
5750 for (i = 0; i < (num_segs * E1HVN_MAX);
5751 i += E1HVN_MAX) {
5752 addr = IGU_REG_PROD_CONS_MEMORY +
5753 (prod_offset + i)*4;
5754 REG_WR(bp, addr, 0);
5755 }
5756 /* send consumer update with 0 */
5757 if (CHIP_INT_MODE_IS_BC(bp)) {
5758 bnx2x_ack_sb(bp, bp->igu_dsb_id,
5759 USTORM_ID, 0, IGU_INT_NOP, 1);
5760 bnx2x_ack_sb(bp, bp->igu_dsb_id,
5761 CSTORM_ID, 0, IGU_INT_NOP, 1);
5762 bnx2x_ack_sb(bp, bp->igu_dsb_id,
5763 XSTORM_ID, 0, IGU_INT_NOP, 1);
5764 bnx2x_ack_sb(bp, bp->igu_dsb_id,
5765 TSTORM_ID, 0, IGU_INT_NOP, 1);
5766 bnx2x_ack_sb(bp, bp->igu_dsb_id,
5767 ATTENTION_ID, 0, IGU_INT_NOP, 1);
5768 } else {
5769 bnx2x_ack_sb(bp, bp->igu_dsb_id,
5770 USTORM_ID, 0, IGU_INT_NOP, 1);
5771 bnx2x_ack_sb(bp, bp->igu_dsb_id,
5772 ATTENTION_ID, 0, IGU_INT_NOP, 1);
5773 }
5774 bnx2x_igu_clear_sb(bp, bp->igu_dsb_id);
5775
5776 /* !!! these should become driver const once
5777 rf-tool supports split-68 const */
5778 REG_WR(bp, IGU_REG_SB_INT_BEFORE_MASK_LSB, 0);
5779 REG_WR(bp, IGU_REG_SB_INT_BEFORE_MASK_MSB, 0);
5780 REG_WR(bp, IGU_REG_SB_MASK_LSB, 0);
5781 REG_WR(bp, IGU_REG_SB_MASK_MSB, 0);
5782 REG_WR(bp, IGU_REG_PBA_STATUS_LSB, 0);
5783 REG_WR(bp, IGU_REG_PBA_STATUS_MSB, 0);
5784 }
Eilon Greenstein34f80b02008-06-23 20:33:01 -07005785 }
Eilon Greenstein34f80b02008-06-23 20:33:01 -07005786
Eliezer Tamirc14423f2008-02-28 11:49:42 -08005787 /* Reset PCIE errors for debug */
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02005788 REG_WR(bp, 0x2114, 0xffffffff);
5789 REG_WR(bp, 0x2120, 0xffffffff);
Dmitry Kravkov523224a2010-10-06 03:23:26 +00005790
5791 bnx2x_init_block(bp, EMAC0_BLOCK, FUNC0_STAGE + func);
5792 bnx2x_init_block(bp, EMAC1_BLOCK, FUNC0_STAGE + func);
5793 bnx2x_init_block(bp, DBU_BLOCK, FUNC0_STAGE + func);
5794 bnx2x_init_block(bp, DBG_BLOCK, FUNC0_STAGE + func);
5795 bnx2x_init_block(bp, MCP_BLOCK, FUNC0_STAGE + func);
5796 bnx2x_init_block(bp, DMAE_BLOCK, FUNC0_STAGE + func);
5797
Vladislav Zolotarovf4a66892010-10-19 05:13:09 +00005798 if (CHIP_IS_E1x(bp)) {
5799 main_mem_size = HC_REG_MAIN_MEMORY_SIZE / 2; /*dwords*/
5800 main_mem_base = HC_REG_MAIN_MEMORY +
5801 BP_PORT(bp) * (main_mem_size * 4);
5802 main_mem_prty_clr = HC_REG_HC_PRTY_STS_CLR;
5803 main_mem_width = 8;
5804
5805 val = REG_RD(bp, main_mem_prty_clr);
5806 if (val)
5807 DP(BNX2X_MSG_MCP, "Hmmm... Parity errors in HC "
5808 "block during "
5809 "function init (0x%x)!\n", val);
5810
5811 /* Clear "false" parity errors in MSI-X table */
5812 for (i = main_mem_base;
5813 i < main_mem_base + main_mem_size * 4;
5814 i += main_mem_width) {
5815 bnx2x_read_dmae(bp, i, main_mem_width / 4);
5816 bnx2x_write_dmae(bp, bnx2x_sp_mapping(bp, wb_data),
5817 i, main_mem_width / 4);
5818 }
5819 /* Clear HC parity attention */
5820 REG_RD(bp, main_mem_prty_clr);
5821 }
5822
Yaniv Rosnerb7737c92010-09-07 11:40:54 +00005823 bnx2x_phy_probe(&bp->link_params);
Dmitry Kravkovf85582f2010-10-06 03:34:21 +00005824
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02005825 return 0;
5826}
5827
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00005828int bnx2x_init_hw(struct bnx2x *bp, u32 load_code)
Eilon Greenstein34f80b02008-06-23 20:33:01 -07005829{
Dmitry Kravkov523224a2010-10-06 03:23:26 +00005830 int rc = 0;
Eilon Greenstein34f80b02008-06-23 20:33:01 -07005831
5832 DP(BNX2X_MSG_MCP, "function %d load_code %x\n",
Dmitry Kravkovf2e08992010-10-06 03:28:26 +00005833 BP_ABS_FUNC(bp), load_code);
Eilon Greenstein34f80b02008-06-23 20:33:01 -07005834
5835 bp->dmae_ready = 0;
Vladislav Zolotarov6e30dd42011-02-06 11:25:41 -08005836 spin_lock_init(&bp->dmae_lock);
Eilon Greenstein34f80b02008-06-23 20:33:01 -07005837
5838 switch (load_code) {
5839 case FW_MSG_CODE_DRV_LOAD_COMMON:
Dmitry Kravkovf2e08992010-10-06 03:28:26 +00005840 case FW_MSG_CODE_DRV_LOAD_COMMON_CHIP:
Dmitry Kravkov523224a2010-10-06 03:23:26 +00005841 rc = bnx2x_init_hw_common(bp, load_code);
Eilon Greenstein34f80b02008-06-23 20:33:01 -07005842 if (rc)
5843 goto init_hw_err;
5844 /* no break */
5845
5846 case FW_MSG_CODE_DRV_LOAD_PORT:
Dmitry Kravkov523224a2010-10-06 03:23:26 +00005847 rc = bnx2x_init_hw_port(bp);
Eilon Greenstein34f80b02008-06-23 20:33:01 -07005848 if (rc)
5849 goto init_hw_err;
5850 /* no break */
5851
5852 case FW_MSG_CODE_DRV_LOAD_FUNCTION:
Dmitry Kravkov523224a2010-10-06 03:23:26 +00005853 rc = bnx2x_init_hw_func(bp);
Eilon Greenstein34f80b02008-06-23 20:33:01 -07005854 if (rc)
5855 goto init_hw_err;
5856 break;
5857
5858 default:
5859 BNX2X_ERR("Unknown load_code (0x%x) from MCP\n", load_code);
5860 break;
5861 }
5862
5863 if (!BP_NOMCP(bp)) {
Dmitry Kravkovf2e08992010-10-06 03:28:26 +00005864 int mb_idx = BP_FW_MB_IDX(bp);
Eilon Greenstein34f80b02008-06-23 20:33:01 -07005865
5866 bp->fw_drv_pulse_wr_seq =
Dmitry Kravkovf2e08992010-10-06 03:28:26 +00005867 (SHMEM_RD(bp, func_mb[mb_idx].drv_pulse_mb) &
Eilon Greenstein34f80b02008-06-23 20:33:01 -07005868 DRV_PULSE_SEQ_MASK);
Eilon Greenstein6fe49bb2009-08-12 08:23:17 +00005869 DP(BNX2X_MSG_MCP, "drv_pulse 0x%x\n", bp->fw_drv_pulse_wr_seq);
5870 }
Eilon Greenstein34f80b02008-06-23 20:33:01 -07005871
Eilon Greenstein34f80b02008-06-23 20:33:01 -07005872init_hw_err:
5873 bnx2x_gunzip_end(bp);
5874
5875 return rc;
5876}
5877
/**
 * bnx2x_free_mem - release all driver memory allocated by bnx2x_alloc_mem().
 *
 * @bp:	driver handle
 *
 * Frees fastpath queue memory, the slowpath/default status blocks, the
 * CDU context area, ILT lines, CNIC status blocks (when compiled in),
 * the slow path ring, the event queue and the RSS indirection table.
 * Mirrors the allocation order in bnx2x_alloc_mem().
 */
void bnx2x_free_mem(struct bnx2x *bp)
{
	bnx2x_gunzip_end(bp);

	/* fastpath */
	bnx2x_free_fp_mem(bp);
	/* end of fastpath */

	BNX2X_PCI_FREE(bp->def_status_blk, bp->def_status_blk_mapping,
		       sizeof(struct host_sp_status_block));

	BNX2X_PCI_FREE(bp->slowpath, bp->slowpath_mapping,
		       sizeof(struct bnx2x_slowpath));

	/* CDU context memory sized at alloc time in bp->context.size */
	BNX2X_PCI_FREE(bp->context.vcxt, bp->context.cxt_mapping,
		       bp->context.size);

	bnx2x_ilt_mem_op(bp, ILT_MEMOP_FREE);

	BNX2X_FREE(bp->ilt->lines);

#ifdef BCM_CNIC
	/* CNIC status block layout differs between E2 and E1x chips */
	if (CHIP_IS_E2(bp))
		BNX2X_PCI_FREE(bp->cnic_sb.e2_sb, bp->cnic_sb_mapping,
			       sizeof(struct host_hc_status_block_e2));
	else
		BNX2X_PCI_FREE(bp->cnic_sb.e1x_sb, bp->cnic_sb_mapping,
			       sizeof(struct host_hc_status_block_e1x));

	BNX2X_PCI_FREE(bp->t2, bp->t2_mapping, SRC_T2_SZ);
#endif

	BNX2X_PCI_FREE(bp->spq, bp->spq_mapping, BCM_PAGE_SIZE);

	BNX2X_PCI_FREE(bp->eq_ring, bp->eq_mapping,
		       BCM_PAGE_SIZE * NUM_EQ_PAGES);

	BNX2X_FREE(bp->rx_indir_table);
}
5917
Dmitry Kravkovf2e08992010-10-06 03:28:26 +00005918
/**
 * bnx2x_alloc_mem - allocate all driver memory needed for device bring-up.
 *
 * @bp:	driver handle
 *
 * Returns 0 on success, -ENOMEM on any allocation failure (all partial
 * allocations are released via bnx2x_free_mem() on the error path).
 *
 * NOTE(review): the BNX2X_PCI_ALLOC/BNX2X_ALLOC macros are defined
 * elsewhere; they presumably jump to the local alloc_mem_err label on
 * failure, which is why most calls here have no explicit error check —
 * confirm against the macro definitions.
 */
int bnx2x_alloc_mem(struct bnx2x *bp)
{
	if (bnx2x_gunzip_init(bp))
		return -ENOMEM;

#ifdef BCM_CNIC
	/* CNIC status block layout differs between E2 and E1x chips */
	if (CHIP_IS_E2(bp))
		BNX2X_PCI_ALLOC(bp->cnic_sb.e2_sb, &bp->cnic_sb_mapping,
				sizeof(struct host_hc_status_block_e2));
	else
		BNX2X_PCI_ALLOC(bp->cnic_sb.e1x_sb, &bp->cnic_sb_mapping,
				sizeof(struct host_hc_status_block_e1x));

	/* allocate searcher T2 table */
	BNX2X_PCI_ALLOC(bp->t2, &bp->t2_mapping, SRC_T2_SZ);
#endif


	BNX2X_PCI_ALLOC(bp->def_status_blk, &bp->def_status_blk_mapping,
			sizeof(struct host_sp_status_block));

	BNX2X_PCI_ALLOC(bp->slowpath, &bp->slowpath_mapping,
			sizeof(struct bnx2x_slowpath));

	/* one CDU context per L2 connection */
	bp->context.size = sizeof(union cdu_context) * bp->l2_cid_count;

	BNX2X_PCI_ALLOC(bp->context.vcxt, &bp->context.cxt_mapping,
			bp->context.size);

	BNX2X_ALLOC(bp->ilt->lines, sizeof(struct ilt_line) * ILT_MAX_LINES);

	if (bnx2x_ilt_mem_op(bp, ILT_MEMOP_ALLOC))
		goto alloc_mem_err;

	/* Slow path ring */
	BNX2X_PCI_ALLOC(bp->spq, &bp->spq_mapping, BCM_PAGE_SIZE);

	/* EQ */
	BNX2X_PCI_ALLOC(bp->eq_ring, &bp->eq_mapping,
			BCM_PAGE_SIZE * NUM_EQ_PAGES);

	BNX2X_ALLOC(bp->rx_indir_table, sizeof(bp->rx_indir_table[0]) *
		    TSTORM_INDIRECTION_TABLE_SIZE);

	/* fastpath */
	/* need to be done at the end, since it's self adjusting to amount
	 * of memory available for RSS queues
	 */
	if (bnx2x_alloc_fp_mem(bp))
		goto alloc_mem_err;
	return 0;

alloc_mem_err:
	bnx2x_free_mem(bp);
	return -ENOMEM;
}
5975
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02005976/*
5977 * Init service functions
5978 */
stephen hemminger8d962862010-10-21 07:50:56 +00005979static int bnx2x_wait_ramrod(struct bnx2x *bp, int state, int idx,
5980 int *state_p, int flags);
5981
Dmitry Kravkov523224a2010-10-06 03:23:26 +00005982int bnx2x_func_start(struct bnx2x *bp)
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02005983{
Dmitry Kravkov523224a2010-10-06 03:23:26 +00005984 bnx2x_sp_post(bp, RAMROD_CMD_ID_COMMON_FUNCTION_START, 0, 0, 0, 1);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02005985
Dmitry Kravkov523224a2010-10-06 03:23:26 +00005986 /* Wait for completion */
5987 return bnx2x_wait_ramrod(bp, BNX2X_STATE_FUNC_STARTED, 0, &(bp->state),
5988 WAIT_RAMROD_COMMON);
5989}
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02005990
stephen hemminger8d962862010-10-21 07:50:56 +00005991static int bnx2x_func_stop(struct bnx2x *bp)
Dmitry Kravkov523224a2010-10-06 03:23:26 +00005992{
5993 bnx2x_sp_post(bp, RAMROD_CMD_ID_COMMON_FUNCTION_STOP, 0, 0, 0, 1);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02005994
Dmitry Kravkov523224a2010-10-06 03:23:26 +00005995 /* Wait for completion */
5996 return bnx2x_wait_ramrod(bp, BNX2X_STATE_CLOSING_WAIT4_UNLOAD,
5997 0, &(bp->state), WAIT_RAMROD_COMMON);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02005998}
5999
/**
 * bnx2x_set_mac_addr_gen - set a MAC in a CAM for a few L2 Clients for E1x chips
 *
 * @bp:		driver handle
 * @set:	set or clear an entry (1 or 0)
 * @mac:	pointer to a buffer containing a MAC
 * @cl_bit_vec:	bit vector of clients to register a MAC for
 * @cam_offset:	offset in a CAM to use
 * @is_bcast:	is the set MAC a broadcast address (for E1 only)
 *
 * Builds a single-entry mac_configuration_cmd in the slowpath buffer,
 * posts a SET_MAC ramrod and busy-waits for its completion via
 * bp->set_mac_pending.
 */
static void bnx2x_set_mac_addr_gen(struct bnx2x *bp, int set, const u8 *mac,
				   u32 cl_bit_vec, u8 cam_offset,
				   u8 is_bcast)
{
	struct mac_configuration_cmd *config =
		(struct mac_configuration_cmd *)bnx2x_sp(bp, mac_config);
	int ramrod_flags = WAIT_RAMROD_COMMON;

	/* completion flag cleared by the ramrod handler */
	bp->set_mac_pending = 1;

	config->hdr.length = 1;
	config->hdr.offset = cam_offset;
	config->hdr.client_id = 0xff;
	/* Mark the single MAC configuration ramrod as opposed to a
	 * UC/MC list configuration).
	 */
	config->hdr.echo = 1;

	/* primary MAC; CAM entries hold the address as three
	 * byte-swapped 16-bit words */
	config->config_table[0].msb_mac_addr =
					swab16(*(u16 *)&mac[0]);
	config->config_table[0].middle_mac_addr =
					swab16(*(u16 *)&mac[2]);
	config->config_table[0].lsb_mac_addr =
					swab16(*(u16 *)&mac[4]);
	config->config_table[0].clients_bit_vector =
					cpu_to_le32(cl_bit_vec);
	config->config_table[0].vlan_id = 0;
	config->config_table[0].pf_id = BP_FUNC(bp);
	if (set)
		SET_FLAG(config->config_table[0].flags,
			MAC_CONFIGURATION_ENTRY_ACTION_TYPE,
			T_ETH_MAC_COMMAND_SET);
	else
		SET_FLAG(config->config_table[0].flags,
			MAC_CONFIGURATION_ENTRY_ACTION_TYPE,
			T_ETH_MAC_COMMAND_INVALIDATE);

	if (is_bcast)
		SET_FLAG(config->config_table[0].flags,
			MAC_CONFIGURATION_ENTRY_BROADCAST, 1);

	DP(NETIF_MSG_IFUP, "%s MAC (%04x:%04x:%04x) PF_ID %d CLID mask %d\n",
	   (set ? "setting" : "clearing"),
	   config->config_table[0].msb_mac_addr,
	   config->config_table[0].middle_mac_addr,
	   config->config_table[0].lsb_mac_addr, BP_FUNC(bp), cl_bit_vec);

	/* ensure the table is written before the ramrod is posted */
	mb();

	bnx2x_sp_post(bp, RAMROD_CMD_ID_COMMON_SET_MAC, 0,
		      U64_HI(bnx2x_sp_mapping(bp, mac_config)),
		      U64_LO(bnx2x_sp_mapping(bp, mac_config)), 1);

	/* Wait for a completion */
	bnx2x_wait_ramrod(bp, 0, 0, &bp->set_mac_pending, ramrod_flags);
}
6067
/**
 * bnx2x_wait_ramrod - wait until *state_p reaches an expected value.
 *
 * @bp:		driver handle
 * @state:	value of *state_p that signals completion
 * @idx:	fastpath queue index carrying the reply (0 = default queue)
 * @state_p:	state variable updated by the slowpath event handler
 * @flags:	WAIT_RAMROD_POLL to actively drain events,
 *		WAIT_RAMROD_COMMON to poll the event queue instead of
 *		the Rx ring
 *
 * Returns 0 on success, -EIO if the driver paniced meanwhile and
 * -EBUSY on timeout (~5 seconds in 1 ms steps).  May sleep.
 */
static int bnx2x_wait_ramrod(struct bnx2x *bp, int state, int idx,
			     int *state_p, int flags)
{
	/* can take a while if any port is running */
	int cnt = 5000;
	u8 poll = flags & WAIT_RAMROD_POLL;
	u8 common = flags & WAIT_RAMROD_COMMON;

	DP(NETIF_MSG_IFUP, "%s for state to become %x on IDX [%d]\n",
	   poll ? "polling" : "waiting", state, idx);

	might_sleep();
	while (cnt--) {
		if (poll) {
			/* actively drain pending completions */
			if (common)
				bnx2x_eq_int(bp);
			else {
				bnx2x_rx_int(bp->fp, 10);
				/* if index is different from 0
				 * the reply for some commands will
				 * be on the non default queue
				 */
				if (idx)
					bnx2x_rx_int(&bp->fp[idx], 10);
			}
		}

		mb(); /* state is changed by bnx2x_sp_event() */
		if (*state_p == state) {
#ifdef BNX2X_STOP_ON_ERROR
			DP(NETIF_MSG_IFUP, "exit (cnt %d)\n", 5000 - cnt);
#endif
			return 0;
		}

		msleep(1);

		/* abort the wait if the driver paniced */
		if (bp->panic)
			return -EIO;
	}

	/* timeout! */
	BNX2X_ERR("timeout %s for state %x on IDX [%d]\n",
		  poll ? "polling" : "waiting", state, idx);
#ifdef BNX2X_STOP_ON_ERROR
	bnx2x_panic();
#endif

	return -EBUSY;
}
6118
stephen hemminger8d962862010-10-21 07:50:56 +00006119static u8 bnx2x_e1h_cam_offset(struct bnx2x *bp, u8 rel_offset)
Michael Chane665bfd2009-10-10 13:46:54 +00006120{
Dmitry Kravkovf2e08992010-10-06 03:28:26 +00006121 if (CHIP_IS_E1H(bp))
6122 return E1H_FUNC_MAX * rel_offset + BP_FUNC(bp);
6123 else if (CHIP_MODE_IS_4_PORT(bp))
Vladislav Zolotarov6e30dd42011-02-06 11:25:41 -08006124 return E2_FUNC_MAX * rel_offset + BP_FUNC(bp);
Dmitry Kravkovf2e08992010-10-06 03:28:26 +00006125 else
Vladislav Zolotarov6e30dd42011-02-06 11:25:41 -08006126 return E2_FUNC_MAX * rel_offset + BP_VN(bp);
Michael Chane665bfd2009-10-10 13:46:54 +00006127}
6128
/*
 * LLH CAM line allocations: currently only iSCSI and ETH macs are
 * relevant. In addition, current implementation is tuned for a
 * single ETH MAC.
 */
enum {
	LLH_CAM_ISCSI_ETH_LINE = 0,		/* line used for the iSCSI MAC */
	LLH_CAM_ETH_LINE,			/* line used for the primary ETH MAC */
	LLH_CAM_MAX_PF_LINE = NIG_REG_LLH1_FUNC_MEM_SIZE	/* per-PF line limit */
};
6139
/**
 * bnx2x_set_mac_in_nig - program/enable a MAC in the NIG LLH CAM.
 *
 * @bp:		driver handle
 * @set:	non-zero to write and enable the entry, zero to disable it
 * @dev_addr:	MAC address to program (written only when @set is non-zero)
 * @index:	LLH CAM line (see the LLH_CAM_* enum)
 *
 * Only relevant in MF-SI mode; silently returns otherwise or when
 * @index exceeds LLH_CAM_MAX_PF_LINE.
 */
static void bnx2x_set_mac_in_nig(struct bnx2x *bp,
				 int set,
				 unsigned char *dev_addr,
				 int index)
{
	u32 wb_data[2];
	u32 mem_offset, ena_offset, mem_index;
	/*
	 * indexes mapping:
	 * 0..7 - goes to MEM
	 * 8..15 - goes to MEM2
	 */

	if (!IS_MF_SI(bp) || index > LLH_CAM_MAX_PF_LINE)
		return;

	/* calculate memory start offset according to the mapping
	 * and index in the memory */
	if (index < NIG_LLH_FUNC_MEM_MAX_OFFSET) {
		mem_offset = BP_PORT(bp) ? NIG_REG_LLH1_FUNC_MEM :
			     NIG_REG_LLH0_FUNC_MEM;
		ena_offset = BP_PORT(bp) ? NIG_REG_LLH1_FUNC_MEM_ENABLE :
			     NIG_REG_LLH0_FUNC_MEM_ENABLE;
		mem_index = index;
	} else {
		mem_offset = BP_PORT(bp) ? NIG_REG_P1_LLH_FUNC_MEM2 :
			     NIG_REG_P0_LLH_FUNC_MEM2;
		ena_offset = BP_PORT(bp) ? NIG_REG_P1_LLH_FUNC_MEM2_ENABLE :
			     NIG_REG_P0_LLH_FUNC_MEM2_ENABLE;
		mem_index = index - NIG_LLH_FUNC_MEM_MAX_OFFSET;
	}

	if (set) {
		/* LLH_FUNC_MEM is a u64 WB register */
		mem_offset += 8*mem_index;

		/* low word: MAC bytes 2..5, high word: MAC bytes 0..1 */
		wb_data[0] = ((dev_addr[2] << 24) | (dev_addr[3] << 16) |
			      (dev_addr[4] << 8) | dev_addr[5]);
		wb_data[1] = ((dev_addr[0] << 8) | dev_addr[1]);

		REG_WR_DMAE(bp, mem_offset, wb_data, 2);
	}

	/* enable/disable the entry */
	REG_WR(bp, ena_offset + 4*mem_index, set);

}
6187
Dmitry Kravkov523224a2010-10-06 03:23:26 +00006188void bnx2x_set_eth_mac(struct bnx2x *bp, int set)
Michael Chane665bfd2009-10-10 13:46:54 +00006189{
Dmitry Kravkov523224a2010-10-06 03:23:26 +00006190 u8 cam_offset = (CHIP_IS_E1(bp) ? (BP_PORT(bp) ? 32 : 0) :
6191 bnx2x_e1h_cam_offset(bp, CAM_ETH_LINE));
6192
6193 /* networking MAC */
6194 bnx2x_set_mac_addr_gen(bp, set, bp->dev->dev_addr,
6195 (1 << bp->fp->cl_id), cam_offset , 0);
6196
Dmitry Kravkov0793f83f2010-12-01 12:39:28 -08006197 bnx2x_set_mac_in_nig(bp, set, bp->dev->dev_addr, LLH_CAM_ETH_LINE);
6198
Dmitry Kravkov523224a2010-10-06 03:23:26 +00006199 if (CHIP_IS_E1(bp)) {
6200 /* broadcast MAC */
Joe Perches215faf92010-12-21 02:16:10 -08006201 static const u8 bcast[ETH_ALEN] = {
6202 0xff, 0xff, 0xff, 0xff, 0xff, 0xff
6203 };
Dmitry Kravkov523224a2010-10-06 03:23:26 +00006204 bnx2x_set_mac_addr_gen(bp, set, bcast, 0, cam_offset + 1, 1);
6205 }
6206}
Vladislav Zolotarov6e30dd42011-02-06 11:25:41 -08006207
6208static inline u8 bnx2x_e1_cam_mc_offset(struct bnx2x *bp)
6209{
6210 return CHIP_REV_IS_SLOW(bp) ?
6211 (BNX2X_MAX_EMUL_MULTI * (1 + BP_PORT(bp))) :
6212 (BNX2X_MAX_MULTICAST * (1 + BP_PORT(bp)));
6213}
6214
/* set mc list, do not wait as wait implies sleep and
 * set_rx_mode can be invoked from non-sleepable context.
 *
 * Instead we use the same ramrod data buffer each time we need
 * to configure a list of addresses, and use the fact that the
 * list of MACs is changed in an incremental way and that the
 * function is called under the netif_addr_lock. A temporary
 * inconsistent CAM configuration (possible in case of a very fast
 * sequence of add/del/add on the host side) will shortly be
 * restored by the handler of the last ramrod.
 *
 * Returns 0 on success (or the bnx2x_sp_post() result), -EINVAL if the
 * device's MC list exceeds the E1 CAM capacity.
 */
static int bnx2x_set_e1_mc_list(struct bnx2x *bp)
{
	int i = 0, old;
	struct net_device *dev = bp->dev;
	u8 offset = bnx2x_e1_cam_mc_offset(bp);
	struct netdev_hw_addr *ha;
	struct mac_configuration_cmd *config_cmd = bnx2x_sp(bp, mcast_config);
	dma_addr_t config_cmd_map = bnx2x_sp_mapping(bp, mcast_config);

	if (netdev_mc_count(dev) > BNX2X_MAX_MULTICAST)
		return -EINVAL;

	netdev_for_each_mc_addr(ha, dev) {
		/* copy mac */
		config_cmd->config_table[i].msb_mac_addr =
			swab16(*(u16 *)&bnx2x_mc_addr(ha)[0]);
		config_cmd->config_table[i].middle_mac_addr =
			swab16(*(u16 *)&bnx2x_mc_addr(ha)[2]);
		config_cmd->config_table[i].lsb_mac_addr =
			swab16(*(u16 *)&bnx2x_mc_addr(ha)[4]);

		config_cmd->config_table[i].vlan_id = 0;
		config_cmd->config_table[i].pf_id = BP_FUNC(bp);
		config_cmd->config_table[i].clients_bit_vector =
			cpu_to_le32(1 << BP_L_ID(bp));

		SET_FLAG(config_cmd->config_table[i].flags,
			MAC_CONFIGURATION_ENTRY_ACTION_TYPE,
			T_ETH_MAC_COMMAND_SET);

		DP(NETIF_MSG_IFUP,
		   "setting MCAST[%d] (%04x:%04x:%04x)\n", i,
		   config_cmd->config_table[i].msb_mac_addr,
		   config_cmd->config_table[i].middle_mac_addr,
		   config_cmd->config_table[i].lsb_mac_addr);
		i++;
	}
	/* invalidate trailing entries left over from a longer
	 * previous list (stop at the first already-invalid one) */
	old = config_cmd->hdr.length;
	if (old > i) {
		for (; i < old; i++) {
			if (CAM_IS_INVALID(config_cmd->
					   config_table[i])) {
				/* already invalidated */
				break;
			}
			/* invalidate */
			SET_FLAG(config_cmd->config_table[i].flags,
				MAC_CONFIGURATION_ENTRY_ACTION_TYPE,
				T_ETH_MAC_COMMAND_INVALIDATE);
		}
	}

	/* make sure the table entries are written before the header
	 * that publishes the new length */
	wmb();

	config_cmd->hdr.length = i;
	config_cmd->hdr.offset = offset;
	config_cmd->hdr.client_id = 0xff;
	/* Mark that this ramrod doesn't use bp->set_mac_pending for
	 * synchronization.
	 */
	config_cmd->hdr.echo = 0;

	mb();

	return bnx2x_sp_post(bp, RAMROD_CMD_ID_COMMON_SET_MAC, 0,
		   U64_HI(config_cmd_map), U64_LO(config_cmd_map), 1);
}
Vladislav Zolotarov6e30dd42011-02-06 11:25:41 -08006293
/**
 * bnx2x_invalidate_e1_mc_list - invalidate the whole E1 multicast CAM region.
 *
 * @bp:	driver handle
 *
 * Marks all BNX2X_MAX_MULTICAST entries invalid, posts the SET_MAC
 * ramrod and waits for its completion (unlike bnx2x_set_e1_mc_list()).
 */
void bnx2x_invalidate_e1_mc_list(struct bnx2x *bp)
{
	int i;
	struct mac_configuration_cmd *config_cmd = bnx2x_sp(bp, mcast_config);
	dma_addr_t config_cmd_map = bnx2x_sp_mapping(bp, mcast_config);
	int ramrod_flags = WAIT_RAMROD_COMMON;
	u8 offset = bnx2x_e1_cam_mc_offset(bp);

	for (i = 0; i < BNX2X_MAX_MULTICAST; i++)
		SET_FLAG(config_cmd->config_table[i].flags,
			MAC_CONFIGURATION_ENTRY_ACTION_TYPE,
			T_ETH_MAC_COMMAND_INVALIDATE);

	/* table entries before the header that publishes them */
	wmb();

	config_cmd->hdr.length = BNX2X_MAX_MULTICAST;
	config_cmd->hdr.offset = offset;
	config_cmd->hdr.client_id = 0xff;
	/* We'll wait for a completion this time... */
	config_cmd->hdr.echo = 1;

	bp->set_mac_pending = 1;

	mb();

	bnx2x_sp_post(bp, RAMROD_CMD_ID_COMMON_SET_MAC, 0,
		      U64_HI(config_cmd_map), U64_LO(config_cmd_map), 1);

	/* Wait for a completion */
	bnx2x_wait_ramrod(bp, 0, 0, &bp->set_mac_pending,
				ramrod_flags);

}
6327
Vladislav Zolotarov6e30dd42011-02-06 11:25:41 -08006328/* Accept one or more multicasts */
6329static int bnx2x_set_e1h_mc_list(struct bnx2x *bp)
6330{
6331 struct net_device *dev = bp->dev;
6332 struct netdev_hw_addr *ha;
6333 u32 mc_filter[MC_HASH_SIZE];
6334 u32 crc, bit, regidx;
6335 int i;
6336
6337 memset(mc_filter, 0, 4 * MC_HASH_SIZE);
6338
6339 netdev_for_each_mc_addr(ha, dev) {
6340 DP(NETIF_MSG_IFUP, "Adding mcast MAC: %pM\n",
6341 bnx2x_mc_addr(ha));
6342
6343 crc = crc32c_le(0, bnx2x_mc_addr(ha),
6344 ETH_ALEN);
6345 bit = (crc >> 24) & 0xff;
6346 regidx = bit >> 5;
6347 bit &= 0x1f;
6348 mc_filter[regidx] |= (1 << bit);
6349 }
6350
6351 for (i = 0; i < MC_HASH_SIZE; i++)
6352 REG_WR(bp, MC_HASH_OFFSET(bp, i),
6353 mc_filter[i]);
6354
6355 return 0;
6356}
6357
6358void bnx2x_invalidate_e1h_mc_list(struct bnx2x *bp)
6359{
6360 int i;
6361
6362 for (i = 0; i < MC_HASH_SIZE; i++)
6363 REG_WR(bp, MC_HASH_OFFSET(bp, i), 0);
6364}
6365
Michael Chan993ac7b2009-10-10 13:46:56 +00006366#ifdef BCM_CNIC
/**
 * bnx2x_set_iscsi_eth_mac_addr - set iSCSI MAC(s).
 *
 * @bp:		driver handle
 * @set:	set or clear the CAM entry
 *
 * This function will wait until the ramrod completion returns.
 * Return 0 if success, -ENODEV if ramrod doesn't return.
 */
static int bnx2x_set_iscsi_eth_mac_addr(struct bnx2x *bp, int set)
{
	/* E1 uses fixed per-port offsets (+2 past the ETH/bcast pair);
	 * later chips use the generic CAM line map */
	u8 cam_offset = (CHIP_IS_E1(bp) ? ((BP_PORT(bp) ? 32 : 0) + 2) :
			 bnx2x_e1h_cam_offset(bp, CAM_ISCSI_ETH_LINE));
	u32 iscsi_l2_cl_id = BNX2X_ISCSI_ETH_CL_ID +
		BP_E1HVN(bp) * NONE_ETH_CONTEXT_USE;
	u32 cl_bit_vec = (1 << iscsi_l2_cl_id);
	u8 *iscsi_mac = bp->cnic_eth_dev.iscsi_mac;

	/* Send a SET_MAC ramrod */
	bnx2x_set_mac_addr_gen(bp, set, iscsi_mac, cl_bit_vec,
			       cam_offset, 0);

	bnx2x_set_mac_in_nig(bp, set, iscsi_mac, LLH_CAM_ISCSI_ETH_LINE);

	return 0;
}
6393
6394/**
Dmitry Kravkove8920672011-05-04 23:52:40 +00006395 * bnx2x_set_fip_eth_mac_addr - set FCoE L2 MAC(s)
Vladislav Zolotarovec6ba942010-12-13 05:44:01 +00006396 *
Dmitry Kravkove8920672011-05-04 23:52:40 +00006397 * @bp: driver handle
6398 * @set: set or clear the CAM entry
Vladislav Zolotarovec6ba942010-12-13 05:44:01 +00006399 *
Dmitry Kravkove8920672011-05-04 23:52:40 +00006400 * This function will wait until the ramrod completion returns.
6401 * Returns 0 if success, -ENODEV if ramrod doesn't return.
Vladislav Zolotarovec6ba942010-12-13 05:44:01 +00006402 */
6403int bnx2x_set_fip_eth_mac_addr(struct bnx2x *bp, int set)
6404{
6405 u32 cl_bit_vec = (1 << bnx2x_fcoe(bp, cl_id));
6406 /**
6407 * CAM allocation for E1H
6408 * eth unicasts: by func number
6409 * iscsi: by func number
6410 * fip unicast: by func number
6411 * fip multicast: by func number
6412 */
6413 bnx2x_set_mac_addr_gen(bp, set, bp->fip_mac,
6414 cl_bit_vec, bnx2x_e1h_cam_offset(bp, CAM_FIP_ETH_LINE), 0);
6415
6416 return 0;
6417}
6418
6419int bnx2x_set_all_enode_macs(struct bnx2x *bp, int set)
6420{
6421 u32 cl_bit_vec = (1 << bnx2x_fcoe(bp, cl_id));
6422
6423 /**
6424 * CAM allocation for E1H
6425 * eth unicasts: by func number
6426 * iscsi: by func number
6427 * fip unicast: by func number
6428 * fip multicast: by func number
6429 */
6430 bnx2x_set_mac_addr_gen(bp, set, ALL_ENODE_MACS, cl_bit_vec,
6431 bnx2x_e1h_cam_offset(bp, CAM_FIP_MCAST_LINE), 0);
6432
Michael Chan993ac7b2009-10-10 13:46:56 +00006433 return 0;
6434}
6435#endif
6436
/**
 * bnx2x_fill_cl_init_data - fill a CLIENT_SETUP ramrod data buffer.
 *
 * @bp:		driver handle
 * @params:	aggregated client init parameters (general/Rx/Tx/pause)
 * @activate:	non-zero to mark the client active
 * @data:	firmware ramrod data buffer to fill (cleared first)
 *
 * Translates the driver-side client parameters into the firmware's
 * client_init_ramrod_data layout; multi-byte fields are converted to
 * little-endian for the firmware.
 */
static void bnx2x_fill_cl_init_data(struct bnx2x *bp,
				    struct bnx2x_client_init_params *params,
				    u8 activate,
				    struct client_init_ramrod_data *data)
{
	/* Clear the buffer */
	memset(data, 0, sizeof(*data));

	/* general */
	data->general.client_id = params->rxq_params.cl_id;
	data->general.statistics_counter_id = params->rxq_params.stat_id;
	data->general.statistics_en_flg =
		(params->rxq_params.flags & QUEUE_FLG_STATS) ? 1 : 0;
	data->general.is_fcoe_flg =
		(params->ramrod_params.flags & CLIENT_IS_FCOE) ? 1 : 0;
	data->general.activate_flg = activate;
	data->general.sp_client_id = params->rxq_params.spcl_id;

	/* Rx data */
	data->rx.tpa_en_flg =
		(params->rxq_params.flags & QUEUE_FLG_TPA) ? 1 : 0;
	data->rx.vmqueue_mode_en_flg = 0;
	data->rx.cache_line_alignment_log_size =
		params->rxq_params.cache_line_log;
	data->rx.enable_dynamic_hc =
		(params->rxq_params.flags & QUEUE_FLG_DHC) ? 1 : 0;
	data->rx.max_sges_for_packet = params->rxq_params.max_sges_pkt;
	data->rx.client_qzone_id = params->rxq_params.cl_qzone_id;
	data->rx.max_agg_size = params->rxq_params.tpa_agg_sz;

	/* We don't set drop flags */
	data->rx.drop_ip_cs_err_flg = 0;
	data->rx.drop_tcp_cs_err_flg = 0;
	data->rx.drop_ttl0_flg = 0;
	data->rx.drop_udp_cs_err_flg = 0;

	data->rx.inner_vlan_removal_enable_flg =
		(params->rxq_params.flags & QUEUE_FLG_VLAN) ? 1 : 0;
	data->rx.outer_vlan_removal_enable_flg =
		(params->rxq_params.flags & QUEUE_FLG_OV) ? 1 : 0;
	data->rx.status_block_id = params->rxq_params.fw_sb_id;
	data->rx.rx_sb_index_number = params->rxq_params.sb_cq_index;
	data->rx.bd_buff_size = cpu_to_le16(params->rxq_params.buf_sz);
	data->rx.sge_buff_size = cpu_to_le16(params->rxq_params.sge_buf_sz);
	data->rx.mtu = cpu_to_le16(params->rxq_params.mtu);
	/* DMA addresses of the BD, SGE and CQE rings */
	data->rx.bd_page_base.lo =
		cpu_to_le32(U64_LO(params->rxq_params.dscr_map));
	data->rx.bd_page_base.hi =
		cpu_to_le32(U64_HI(params->rxq_params.dscr_map));
	data->rx.sge_page_base.lo =
		cpu_to_le32(U64_LO(params->rxq_params.sge_map));
	data->rx.sge_page_base.hi =
		cpu_to_le32(U64_HI(params->rxq_params.sge_map));
	data->rx.cqe_page_base.lo =
		cpu_to_le32(U64_LO(params->rxq_params.rcq_map));
	data->rx.cqe_page_base.hi =
		cpu_to_le32(U64_HI(params->rxq_params.rcq_map));
	data->rx.is_leading_rss =
		(params->ramrod_params.flags & CLIENT_IS_LEADING_RSS) ? 1 : 0;
	data->rx.is_approx_mcast = data->rx.is_leading_rss;

	/* Tx data */
	data->tx.enforce_security_flg = 0; /* VF specific */
	data->tx.tx_status_block_id = params->txq_params.fw_sb_id;
	data->tx.tx_sb_index_number = params->txq_params.sb_cq_index;
	data->tx.mtu = 0; /* VF specific */
	data->tx.tx_bd_page_base.lo =
		cpu_to_le32(U64_LO(params->txq_params.dscr_map));
	data->tx.tx_bd_page_base.hi =
		cpu_to_le32(U64_HI(params->txq_params.dscr_map));

	/* flow control data */
	data->fc.cqe_pause_thr_low = cpu_to_le16(params->pause.rcq_th_lo);
	data->fc.cqe_pause_thr_high = cpu_to_le16(params->pause.rcq_th_hi);
	data->fc.bd_pause_thr_low = cpu_to_le16(params->pause.bd_th_lo);
	data->fc.bd_pause_thr_high = cpu_to_le16(params->pause.bd_th_hi);
	data->fc.sge_pause_thr_low = cpu_to_le16(params->pause.sge_th_lo);
	data->fc.sge_pause_thr_high = cpu_to_le16(params->pause.sge_th_hi);
	data->fc.rx_cos_mask = cpu_to_le16(params->pause.pri_map);

	data->fc.safc_group_num = params->txq_params.cos;
	data->fc.safc_group_en_flg =
		(params->txq_params.flags & QUEUE_FLG_COS) ? 1 : 0;
	data->fc.traffic_type =
		(params->ramrod_params.flags & CLIENT_IS_FCOE) ?
		LLFC_TRAFFIC_TYPE_FCOE : LLFC_TRAFFIC_TYPE_NW;
}
6524
6525static inline void bnx2x_set_ctx_validation(struct eth_context *cxt, u32 cid)
6526{
6527 /* ustorm cxt validation */
6528 cxt->ustorm_ag_context.cdu_usage =
6529 CDU_RSRVD_VALUE_TYPE_A(cid, CDU_REGION_NUMBER_UCM_AG,
6530 ETH_CONNECTION_TYPE);
6531 /* xcontext validation */
6532 cxt->xstorm_ag_context.cdu_reserved =
6533 CDU_RSRVD_VALUE_TYPE_A(cid, CDU_REGION_NUMBER_XCM_AG,
6534 ETH_CONNECTION_TYPE);
6535}
6536
/**
 * bnx2x_setup_fw_client - configure HC/context and post a CLIENT_SETUP ramrod.
 *
 * @bp:		driver handle
 * @params:	aggregated client init parameters
 * @activate:	non-zero to mark the client active in the ramrod data
 * @data:	ramrod data buffer to fill and hand to the firmware
 * @data_mapping: DMA address of @data
 *
 * Programs host coalescing for the Tx and Rx status block indices, sets
 * context validation, zeroes the client statistics, fills @data and
 * posts the SETUP ramrod, then waits for the requested state change.
 * Returns the bnx2x_wait_ramrod() result.
 */
static int bnx2x_setup_fw_client(struct bnx2x *bp,
				 struct bnx2x_client_init_params *params,
				 u8 activate,
				 struct client_init_ramrod_data *data,
				 dma_addr_t data_mapping)
{
	u16 hc_usec;
	int ramrod = RAMROD_CMD_ID_ETH_CLIENT_SETUP;
	int ramrod_flags = 0, rc;

	/* HC and context validation values; hc_rate is in interrupts
	 * per second, the HC period is in microseconds */
	hc_usec = params->txq_params.hc_rate ?
		1000000 / params->txq_params.hc_rate : 0;
	bnx2x_update_coalesce_sb_index(bp,
			params->txq_params.fw_sb_id,
			params->txq_params.sb_cq_index,
			!(params->txq_params.flags & QUEUE_FLG_HC),
			hc_usec);

	*(params->ramrod_params.pstate) = BNX2X_FP_STATE_OPENING;

	hc_usec = params->rxq_params.hc_rate ?
		1000000 / params->rxq_params.hc_rate : 0;
	bnx2x_update_coalesce_sb_index(bp,
			params->rxq_params.fw_sb_id,
			params->rxq_params.sb_cq_index,
			!(params->rxq_params.flags & QUEUE_FLG_HC),
			hc_usec);

	bnx2x_set_ctx_validation(params->rxq_params.cxt,
				 params->rxq_params.cid);

	/* zero stats */
	if (params->txq_params.flags & QUEUE_FLG_STATS)
		storm_memset_xstats_zero(bp, BP_PORT(bp),
					 params->txq_params.stat_id);

	if (params->rxq_params.flags & QUEUE_FLG_STATS) {
		storm_memset_ustats_zero(bp, BP_PORT(bp),
					 params->rxq_params.stat_id);
		storm_memset_tstats_zero(bp, BP_PORT(bp),
					 params->rxq_params.stat_id);
	}

	/* Fill the ramrod data */
	bnx2x_fill_cl_init_data(bp, params, activate, data);

	/* SETUP ramrod.
	 *
	 * bnx2x_sp_post() takes a spin_lock thus no other explict memory
	 * barrier except from mmiowb() is needed to impose a
	 * proper ordering of memory operations.
	 */
	mmiowb();


	bnx2x_sp_post(bp, ramrod, params->ramrod_params.cid,
		      U64_HI(data_mapping), U64_LO(data_mapping), 0);

	/* Wait for completion */
	rc = bnx2x_wait_ramrod(bp, params->ramrod_params.state,
				 params->ramrod_params.index,
				 params->ramrod_params.pstate,
				 ramrod_flags);
	return rc;
}
6603
/**
 * bnx2x_set_int_mode - configure interrupt mode
 *
 * @bp:		driver handle
 *
 * Selects the interrupt mode requested via the int_mode module parameter
 * (MSI, INTx or the default MSI-X) and sets bp->num_queues accordingly.
 * In case of MSI-X it will also try to enable MSI-X, falling back to
 * MSI or legacy INTx with a single queue when that fails.
 */
static void __devinit bnx2x_set_int_mode(struct bnx2x *bp)
{

	switch (int_mode) {
	case INT_MODE_MSI:
		bnx2x_enable_msi(bp);
		/* falling through... */
	case INT_MODE_INTx:
		/* MSI/INTx support only a single ethernet queue */
		bp->num_queues = 1 + NONE_ETH_CONTEXT_USE;
		DP(NETIF_MSG_IFUP, "set number of queues to 1\n");
		break;
	default:
		/* Set number of queues according to bp->multi_mode value */
		bnx2x_set_num_queues(bp);

		DP(NETIF_MSG_IFUP, "set number of queues to %d\n",
		   bp->num_queues);

		/* if we can't use MSI-X we only need one fp,
		 * so try to enable MSI-X with the requested number of fp's
		 * and fallback to MSI or legacy INTx with one fp
		 */
		if (bnx2x_enable_msix(bp)) {
			/* failed to enable MSI-X */
			if (bp->multi_mode)
				DP(NETIF_MSG_IFUP,
					  "Multi requested but failed to "
					  "enable MSI-X (%d), "
					  "set number of queues to %d\n",
					  bp->num_queues,
					  1 + NONE_ETH_CONTEXT_USE);
			bp->num_queues = 1 + NONE_ETH_CONTEXT_USE;

			/* Try to enable MSI */
			if (!(bp->flags & DISABLE_MSI_FLAG))
				bnx2x_enable_msi(bp);
		}
		break;
	}
}
6651
/* Number of ILT lines needed for the L2 connection contexts.
 * Must be called prior to any HW initializations.
 */
static inline u16 bnx2x_cid_ilt_lines(struct bnx2x *bp)
{
	return L2_ILT_LINES(bp);
}
6657
/* Build the per-function ILT (Internal Lookup Table) layout: assign a
 * contiguous range of ILT lines to each HW client (CDU, QM, SRC, TM)
 * starting at this function's base line.  Clients not used in this
 * configuration are marked to skip both init and memory allocation.
 */
void bnx2x_ilt_set_info(struct bnx2x *bp)
{
	struct ilt_client_info *ilt_client;
	struct bnx2x_ilt *ilt = BP_ILT(bp);
	u16 line = 0;

	ilt->start_line = FUNC_ILT_BASE(BP_FUNC(bp));
	DP(BNX2X_MSG_SP, "ilt starts at line %d\n", ilt->start_line);

	/* CDU: connection contexts (L2 and, if compiled in, CNIC).
	 * Memory is managed elsewhere, hence ILT_CLIENT_SKIP_MEM. */
	ilt_client = &ilt->clients[ILT_CLIENT_CDU];
	ilt_client->client_num = ILT_CLIENT_CDU;
	ilt_client->page_size = CDU_ILT_PAGE_SZ;
	ilt_client->flags = ILT_CLIENT_SKIP_MEM;
	ilt_client->start = line;
	line += L2_ILT_LINES(bp);
#ifdef BCM_CNIC
	line += CNIC_ILT_LINES;
#endif
	ilt_client->end = line - 1;

	DP(BNX2X_MSG_SP, "ilt client[CDU]: start %d, end %d, psz 0x%x, "
					 "flags 0x%x, hw psz %d\n",
	   ilt_client->start,
	   ilt_client->end,
	   ilt_client->page_size,
	   ilt_client->flags,
	   ilog2(ilt_client->page_size >> 12));

	/* QM: queue manager, only when QM init is required for this
	 * cid count */
	if (QM_INIT(bp->qm_cid_count)) {
		ilt_client = &ilt->clients[ILT_CLIENT_QM];
		ilt_client->client_num = ILT_CLIENT_QM;
		ilt_client->page_size = QM_ILT_PAGE_SZ;
		ilt_client->flags = 0;
		ilt_client->start = line;

		/* 4 bytes for each cid */
		line += DIV_ROUND_UP(bp->qm_cid_count * QM_QUEUES_PER_FUNC * 4,
							 QM_ILT_PAGE_SZ);

		ilt_client->end = line - 1;

		DP(BNX2X_MSG_SP, "ilt client[QM]: start %d, end %d, psz 0x%x, "
						 "flags 0x%x, hw psz %d\n",
		   ilt_client->start,
		   ilt_client->end,
		   ilt_client->page_size,
		   ilt_client->flags,
		   ilog2(ilt_client->page_size >> 12));

	}
	/* SRC: searcher, used by CNIC offload only */
	ilt_client = &ilt->clients[ILT_CLIENT_SRC];
#ifdef BCM_CNIC
	ilt_client->client_num = ILT_CLIENT_SRC;
	ilt_client->page_size = SRC_ILT_PAGE_SZ;
	ilt_client->flags = 0;
	ilt_client->start = line;
	line += SRC_ILT_LINES;
	ilt_client->end = line - 1;

	DP(BNX2X_MSG_SP, "ilt client[SRC]: start %d, end %d, psz 0x%x, "
					 "flags 0x%x, hw psz %d\n",
	   ilt_client->start,
	   ilt_client->end,
	   ilt_client->page_size,
	   ilt_client->flags,
	   ilog2(ilt_client->page_size >> 12));

#else
	ilt_client->flags = (ILT_CLIENT_SKIP_INIT | ILT_CLIENT_SKIP_MEM);
#endif

	/* TM: timers, used by CNIC offload only */
	ilt_client = &ilt->clients[ILT_CLIENT_TM];
#ifdef BCM_CNIC
	ilt_client->client_num = ILT_CLIENT_TM;
	ilt_client->page_size = TM_ILT_PAGE_SZ;
	ilt_client->flags = 0;
	ilt_client->start = line;
	line += TM_ILT_LINES;
	ilt_client->end = line - 1;

	DP(BNX2X_MSG_SP, "ilt client[TM]: start %d, end %d, psz 0x%x, "
					 "flags 0x%x, hw psz %d\n",
	   ilt_client->start,
	   ilt_client->end,
	   ilt_client->page_size,
	   ilt_client->flags,
	   ilog2(ilt_client->page_size >> 12));

#else
	ilt_client->flags = (ILT_CLIENT_SKIP_INIT | ILT_CLIENT_SKIP_MEM);
#endif
}
Dmitry Kravkovf85582f2010-10-06 03:34:21 +00006754
/* Bring up an L2 client for fastpath @fp: ack/enable its IGU SB (except
 * for the FCoE L2 queue), prepare the rx/tx queue parameters and send
 * the CLIENT_SETUP ramrod via bnx2x_setup_fw_client().
 *
 * @is_leading: non-zero marks this client as the leading RSS client.
 *
 * Returns 0 on success or a bnx2x_wait_ramrod() error code.
 */
int bnx2x_setup_client(struct bnx2x *bp, struct bnx2x_fastpath *fp,
		       int is_leading)
{
	struct bnx2x_client_init_params params = { {0} };
	int rc;

	/* reset IGU state skip FCoE L2 queue */
	if (!IS_FCOE_FP(fp))
		bnx2x_ack_sb(bp, fp->igu_sb_id, USTORM_ID, 0,
			     IGU_INT_ENABLE, 0);

	params.ramrod_params.pstate = &fp->state;
	params.ramrod_params.state = BNX2X_FP_STATE_OPEN;
	params.ramrod_params.index = fp->index;
	params.ramrod_params.cid = fp->cid;

#ifdef BCM_CNIC
	if (IS_FCOE_FP(fp))
		params.ramrod_params.flags |= CLIENT_IS_FCOE;

#endif

	if (is_leading)
		params.ramrod_params.flags |= CLIENT_IS_LEADING_RSS;

	/* Fill pause thresholds and rx queue parameters */
	bnx2x_pf_rx_cl_prep(bp, fp, &params.pause, &params.rxq_params);

	bnx2x_pf_tx_cl_prep(bp, fp, &params.txq_params);

	rc = bnx2x_setup_fw_client(bp, &params, 1,
					bnx2x_sp(bp, client_init_data),
					bnx2x_sp_mapping(bp, client_init_data));
	return rc;
}
6789
/* Tear down a FW client in three ordered steps, waiting for each ramrod
 * to complete before issuing the next:
 *   1. ETH_HALT       - stop traffic on the connection
 *   2. ETH_TERMINATE  - terminate the connection in the FW
 *   3. COMMON_CFC_DEL - remove the CFC entry
 *
 * Returns 0 on success or a bnx2x_wait_ramrod() timeout error.
 */
static int bnx2x_stop_fw_client(struct bnx2x *bp,
				struct bnx2x_client_ramrod_params *p)
{
	int rc;

	/* p->poll selects busy-polling for completions instead of
	 * interrupt-driven waiting */
	int poll_flag = p->poll ? WAIT_RAMROD_POLL : 0;

	/* halt the connection */
	*p->pstate = BNX2X_FP_STATE_HALTING;
	bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_HALT, p->cid, 0,
		      p->cl_id, 0);

	/* Wait for completion */
	rc = bnx2x_wait_ramrod(bp, BNX2X_FP_STATE_HALTED, p->index,
			       p->pstate, poll_flag);
	if (rc) /* timeout */
		return rc;

	/* terminate the connection */
	*p->pstate = BNX2X_FP_STATE_TERMINATING;
	bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_TERMINATE, p->cid, 0,
		      p->cl_id, 0);
	/* Wait for completion */
	rc = bnx2x_wait_ramrod(bp, BNX2X_FP_STATE_TERMINATED, p->index,
			       p->pstate, poll_flag);
	if (rc) /* timeout */
		return rc;


	/* delete cfc entry */
	bnx2x_sp_post(bp, RAMROD_CMD_ID_COMMON_CFC_DEL, p->cid, 0, 0, 1);

	/* Wait for completion */
	rc = bnx2x_wait_ramrod(bp, BNX2X_FP_STATE_CLOSED, p->index,
			       p->pstate, WAIT_RAMROD_COMMON);
	return rc;
}
6826
Dmitry Kravkov523224a2010-10-06 03:23:26 +00006827static int bnx2x_stop_client(struct bnx2x *bp, int index)
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02006828{
Dmitry Kravkov523224a2010-10-06 03:23:26 +00006829 struct bnx2x_client_ramrod_params client_stop = {0};
6830 struct bnx2x_fastpath *fp = &bp->fp[index];
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02006831
Dmitry Kravkov523224a2010-10-06 03:23:26 +00006832 client_stop.index = index;
6833 client_stop.cid = fp->cid;
6834 client_stop.cl_id = fp->cl_id;
6835 client_stop.pstate = &(fp->state);
6836 client_stop.poll = 0;
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02006837
Dmitry Kravkov523224a2010-10-06 03:23:26 +00006838 return bnx2x_stop_fw_client(bp, &client_stop);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02006839}
6840
Dmitry Kravkov523224a2010-10-06 03:23:26 +00006841
/* Reset the per-function HW/FW state: disable the function in all storm
 * FW memories, mark its status blocks disabled, clear SPQ data, reset
 * IGU/HC edge registers, stop CNIC timers (if compiled in) and clear the
 * function's ILT range.  Assumes bnx2x_reset_port() ran first (E2).
 */
static void bnx2x_reset_func(struct bnx2x *bp)
{
	int port = BP_PORT(bp);
	int func = BP_FUNC(bp);
	int i;
	/* Offset of the p_func field inside an FP status block; the SB
	 * data layout differs between E2 and E1x chips */
	int pfunc_offset_fp = offsetof(struct hc_sb_data, p_func) +
			(CHIP_IS_E2(bp) ?
			 offsetof(struct hc_status_block_data_e2, common) :
			 offsetof(struct hc_status_block_data_e1x, common));
	int pfunc_offset_sp = offsetof(struct hc_sp_status_block_data, p_func);
	int pfid_offset = offsetof(struct pci_entity, pf_id);

	/* Disable the function in the FW */
	REG_WR8(bp, BAR_XSTRORM_INTMEM + XSTORM_FUNC_EN_OFFSET(func), 0);
	REG_WR8(bp, BAR_CSTRORM_INTMEM + CSTORM_FUNC_EN_OFFSET(func), 0);
	REG_WR8(bp, BAR_TSTRORM_INTMEM + TSTORM_FUNC_EN_OFFSET(func), 0);
	REG_WR8(bp, BAR_USTRORM_INTMEM + USTORM_FUNC_EN_OFFSET(func), 0);

	/* FP SBs */
	for_each_eth_queue(bp, i) {
		struct bnx2x_fastpath *fp = &bp->fp[i];
		REG_WR8(bp,
			BAR_CSTRORM_INTMEM +
			CSTORM_STATUS_BLOCK_DATA_OFFSET(fp->fw_sb_id)
			+ pfunc_offset_fp + pfid_offset,
			HC_FUNCTION_DISABLED);
	}

	/* SP SB */
	REG_WR8(bp,
		BAR_CSTRORM_INTMEM +
		CSTORM_SP_STATUS_BLOCK_DATA_OFFSET(func) +
		pfunc_offset_sp + pfid_offset,
		HC_FUNCTION_DISABLED);


	/* Clear the slow-path queue data for this function */
	for (i = 0; i < XSTORM_SPQ_DATA_SIZE / 4; i++)
		REG_WR(bp, BAR_XSTRORM_INTMEM + XSTORM_SPQ_DATA_OFFSET(func),
		       0);

	/* Configure IGU */
	if (bp->common.int_block == INT_BLOCK_HC) {
		REG_WR(bp, HC_REG_LEADING_EDGE_0 + port*8, 0);
		REG_WR(bp, HC_REG_TRAILING_EDGE_0 + port*8, 0);
	} else {
		REG_WR(bp, IGU_REG_LEADING_EDGE_LATCH, 0);
		REG_WR(bp, IGU_REG_TRAILING_EDGE_LATCH, 0);
	}

#ifdef BCM_CNIC
	/* Disable Timer scan */
	REG_WR(bp, TM_REG_EN_LINEAR0_TIMER + port*4, 0);
	/*
	 * Wait for at least 10ms and up to 2 second for the timers scan to
	 * complete
	 */
	for (i = 0; i < 200; i++) {
		msleep(10);
		if (!REG_RD(bp, TM_REG_LIN0_SCAN_ON + port*4))
			break;
	}
#endif
	/* Clear ILT */
	bnx2x_clear_func_ilt(bp, func);

	/* Timers workaround bug for E2: if this is vnic-3,
	 * we need to set the entire ilt range for this timers.
	 */
	if (CHIP_IS_E2(bp) && BP_VN(bp) == 3) {
		struct ilt_client_info ilt_cli;
		/* use dummy TM client */
		memset(&ilt_cli, 0, sizeof(struct ilt_client_info));
		ilt_cli.start = 0;
		ilt_cli.end = ILT_NUM_PAGE_ENTRIES - 1;
		ilt_cli.client_num = ILT_CLIENT_TM;

		bnx2x_ilt_boundry_init_op(bp, &ilt_cli, 0, INITOP_CLEAR);
	}

	/* this assumes that reset_port() called before reset_func()*/
	if (CHIP_IS_E2(bp))
		bnx2x_pf_disable(bp);

	/* DMAE is no longer usable after the function reset */
	bp->dmae_ready = 0;
}
6927
/* Reset the per-port HW state: mask NIG interrupts, stop packet
 * reception into the BRB, mask AEU attentions and verify the BRB has
 * drained.  A non-empty BRB after 100ms is reported but not fatal.
 */
static void bnx2x_reset_port(struct bnx2x *bp)
{
	int port = BP_PORT(bp);
	u32 val;

	REG_WR(bp, NIG_REG_MASK_INTERRUPT_PORT0 + port*4, 0);

	/* Do not rcv packets to BRB */
	REG_WR(bp, NIG_REG_LLH0_BRB1_DRV_MASK + port*4, 0x0);
	/* Do not direct rcv packets that are not for MCP to the BRB */
	REG_WR(bp, (port ? NIG_REG_LLH1_BRB1_NOT_MCP :
			   NIG_REG_LLH0_BRB1_NOT_MCP), 0x0);

	/* Configure AEU */
	REG_WR(bp, MISC_REG_AEU_MASK_ATTN_FUNC_0 + port*4, 0);

	/* Give in-flight packets time to drain from the BRB */
	msleep(100);
	/* Check for BRB port occupancy */
	val = REG_RD(bp, BRB1_REG_PORT_NUM_OCC_BLOCKS_0 + port*4);
	if (val)
		DP(NETIF_MSG_IFDOWN,
		   "BRB1 is not empty  %d blocks are occupied\n", val);

	/* TODO: Close Doorbell port? */
}
6953
Eilon Greenstein34f80b02008-06-23 20:33:01 -07006954static void bnx2x_reset_chip(struct bnx2x *bp, u32 reset_code)
6955{
6956 DP(BNX2X_MSG_MCP, "function %d reset_code %x\n",
Dmitry Kravkovf2e08992010-10-06 03:28:26 +00006957 BP_ABS_FUNC(bp), reset_code);
Eilon Greenstein34f80b02008-06-23 20:33:01 -07006958
6959 switch (reset_code) {
6960 case FW_MSG_CODE_DRV_UNLOAD_COMMON:
6961 bnx2x_reset_port(bp);
6962 bnx2x_reset_func(bp);
6963 bnx2x_reset_common(bp);
6964 break;
6965
6966 case FW_MSG_CODE_DRV_UNLOAD_PORT:
6967 bnx2x_reset_port(bp);
6968 bnx2x_reset_func(bp);
6969 break;
6970
6971 case FW_MSG_CODE_DRV_UNLOAD_FUNCTION:
6972 bnx2x_reset_func(bp);
6973 break;
6974
6975 default:
6976 BNX2X_ERR("Unknown reset_code (0x%x) from MCP\n", reset_code);
6977 break;
6978 }
6979}
6980
#ifdef BCM_CNIC
/* Remove the FCoE MAC addresses configured on this function, if any,
 * and clear the FCOE_MACS_SET flag.
 */
static inline void bnx2x_del_fcoe_eth_macs(struct bnx2x *bp)
{
	if (!(bp->flags & FCOE_MACS_SET))
		return;

	/* The FIP MAC is configured only outside switch-dependent
	 * multi-function mode */
	if (!IS_MF_SD(bp))
		bnx2x_set_fip_eth_mac_addr(bp, 0);

	bnx2x_set_all_enode_macs(bp, 0);

	bp->flags &= ~FCOE_MACS_SET;
}
#endif
6994
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00006995void bnx2x_chip_cleanup(struct bnx2x *bp, int unload_mode)
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02006996{
Vladislav Zolotarovda5a6622008-08-13 15:50:00 -07006997 int port = BP_PORT(bp);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02006998 u32 reset_code = 0;
Vladislav Zolotarovda5a6622008-08-13 15:50:00 -07006999 int i, cnt, rc;
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02007000
Eilon Greenstein555f6c72009-02-12 08:36:11 +00007001 /* Wait until tx fastpath tasks complete */
Vladislav Zolotarovec6ba942010-12-13 05:44:01 +00007002 for_each_tx_queue(bp, i) {
Eliezer Tamir228241e2008-02-28 11:56:57 -08007003 struct bnx2x_fastpath *fp = &bp->fp[i];
7004
Eilon Greenstein34f80b02008-06-23 20:33:01 -07007005 cnt = 1000;
Vladislav Zolotarove8b5fc52009-01-26 12:36:42 -08007006 while (bnx2x_has_tx_work_unload(fp)) {
Vladislav Zolotarovda5a6622008-08-13 15:50:00 -07007007
Eilon Greenstein34f80b02008-06-23 20:33:01 -07007008 if (!cnt) {
7009 BNX2X_ERR("timeout waiting for queue[%d]\n",
7010 i);
7011#ifdef BNX2X_STOP_ON_ERROR
7012 bnx2x_panic();
7013 return -EBUSY;
7014#else
7015 break;
7016#endif
7017 }
7018 cnt--;
Vladislav Zolotarovda5a6622008-08-13 15:50:00 -07007019 msleep(1);
Eilon Greenstein34f80b02008-06-23 20:33:01 -07007020 }
Eliezer Tamir228241e2008-02-28 11:56:57 -08007021 }
Vladislav Zolotarovda5a6622008-08-13 15:50:00 -07007022 /* Give HW time to discard old tx messages */
7023 msleep(1);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02007024
Vladislav Zolotarov6e30dd42011-02-06 11:25:41 -08007025 bnx2x_set_eth_mac(bp, 0);
Yitchak Gertner65abd742008-08-25 15:26:24 -07007026
Vladislav Zolotarov6e30dd42011-02-06 11:25:41 -08007027 bnx2x_invalidate_uc_list(bp);
7028
7029 if (CHIP_IS_E1(bp))
7030 bnx2x_invalidate_e1_mc_list(bp);
7031 else {
7032 bnx2x_invalidate_e1h_mc_list(bp);
Yitchak Gertner65abd742008-08-25 15:26:24 -07007033 REG_WR(bp, NIG_REG_LLH0_FUNC_EN + port*8, 0);
Yitchak Gertner65abd742008-08-25 15:26:24 -07007034 }
Dmitry Kravkov523224a2010-10-06 03:23:26 +00007035
Michael Chan993ac7b2009-10-10 13:46:56 +00007036#ifdef BCM_CNIC
Vladislav Zolotarovec6ba942010-12-13 05:44:01 +00007037 bnx2x_del_fcoe_eth_macs(bp);
Michael Chan993ac7b2009-10-10 13:46:56 +00007038#endif
Yitchak Gertner65abd742008-08-25 15:26:24 -07007039
Vladislav Zolotarovda5a6622008-08-13 15:50:00 -07007040 if (unload_mode == UNLOAD_NORMAL)
7041 reset_code = DRV_MSG_CODE_UNLOAD_REQ_WOL_DIS;
Eliezer Tamir228241e2008-02-28 11:56:57 -08007042
Eilon Greenstein7d0446c2009-07-29 00:20:10 +00007043 else if (bp->flags & NO_WOL_FLAG)
Vladislav Zolotarovda5a6622008-08-13 15:50:00 -07007044 reset_code = DRV_MSG_CODE_UNLOAD_REQ_WOL_MCP;
Vladislav Zolotarovda5a6622008-08-13 15:50:00 -07007045
Eilon Greenstein7d0446c2009-07-29 00:20:10 +00007046 else if (bp->wol) {
Vladislav Zolotarovda5a6622008-08-13 15:50:00 -07007047 u32 emac_base = port ? GRCBASE_EMAC1 : GRCBASE_EMAC0;
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02007048 u8 *mac_addr = bp->dev->dev_addr;
Eilon Greenstein34f80b02008-06-23 20:33:01 -07007049 u32 val;
Eilon Greenstein34f80b02008-06-23 20:33:01 -07007050 /* The mac address is written to entries 1-4 to
7051 preserve entry 0 which is used by the PMF */
Vladislav Zolotarovda5a6622008-08-13 15:50:00 -07007052 u8 entry = (BP_E1HVN(bp) + 1)*8;
7053
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02007054 val = (mac_addr[0] << 8) | mac_addr[1];
Eilon Greenstein3196a882008-08-13 15:58:49 -07007055 EMAC_WR(bp, EMAC_REG_EMAC_MAC_MATCH + entry, val);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02007056
7057 val = (mac_addr[2] << 24) | (mac_addr[3] << 16) |
7058 (mac_addr[4] << 8) | mac_addr[5];
Eilon Greenstein3196a882008-08-13 15:58:49 -07007059 EMAC_WR(bp, EMAC_REG_EMAC_MAC_MATCH + entry + 4, val);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02007060
7061 reset_code = DRV_MSG_CODE_UNLOAD_REQ_WOL_EN;
Eliezer Tamir228241e2008-02-28 11:56:57 -08007062
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02007063 } else
7064 reset_code = DRV_MSG_CODE_UNLOAD_REQ_WOL_DIS;
7065
Eilon Greenstein34f80b02008-06-23 20:33:01 -07007066 /* Close multi and leading connections
7067 Completions for ramrods are collected in a synchronous way */
Dmitry Kravkov523224a2010-10-06 03:23:26 +00007068 for_each_queue(bp, i)
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02007069
Dmitry Kravkov523224a2010-10-06 03:23:26 +00007070 if (bnx2x_stop_client(bp, i))
Vladislav Zolotarovda5a6622008-08-13 15:50:00 -07007071#ifdef BNX2X_STOP_ON_ERROR
Dmitry Kravkov523224a2010-10-06 03:23:26 +00007072 return;
7073#else
7074 goto unload_error;
7075#endif
7076
7077 rc = bnx2x_func_stop(bp);
7078 if (rc) {
7079 BNX2X_ERR("Function stop failed!\n");
7080#ifdef BNX2X_STOP_ON_ERROR
7081 return;
Vladislav Zolotarovda5a6622008-08-13 15:50:00 -07007082#else
7083 goto unload_error;
Eilon Greenstein34f80b02008-06-23 20:33:01 -07007084#endif
Eliezer Tamir228241e2008-02-28 11:56:57 -08007085 }
Dmitry Kravkov523224a2010-10-06 03:23:26 +00007086#ifndef BNX2X_STOP_ON_ERROR
Eliezer Tamir228241e2008-02-28 11:56:57 -08007087unload_error:
Dmitry Kravkov523224a2010-10-06 03:23:26 +00007088#endif
Eilon Greenstein34f80b02008-06-23 20:33:01 -07007089 if (!BP_NOMCP(bp))
Yaniv Rosnera22f0782010-09-07 11:41:20 +00007090 reset_code = bnx2x_fw_command(bp, reset_code, 0);
Eilon Greenstein34f80b02008-06-23 20:33:01 -07007091 else {
Dmitry Kravkovf2e08992010-10-06 03:28:26 +00007092 DP(NETIF_MSG_IFDOWN, "NO MCP - load counts[%d] "
7093 "%d, %d, %d\n", BP_PATH(bp),
7094 load_count[BP_PATH(bp)][0],
7095 load_count[BP_PATH(bp)][1],
7096 load_count[BP_PATH(bp)][2]);
7097 load_count[BP_PATH(bp)][0]--;
7098 load_count[BP_PATH(bp)][1 + port]--;
7099 DP(NETIF_MSG_IFDOWN, "NO MCP - new load counts[%d] "
7100 "%d, %d, %d\n", BP_PATH(bp),
7101 load_count[BP_PATH(bp)][0], load_count[BP_PATH(bp)][1],
7102 load_count[BP_PATH(bp)][2]);
7103 if (load_count[BP_PATH(bp)][0] == 0)
Eilon Greenstein34f80b02008-06-23 20:33:01 -07007104 reset_code = FW_MSG_CODE_DRV_UNLOAD_COMMON;
Dmitry Kravkovf2e08992010-10-06 03:28:26 +00007105 else if (load_count[BP_PATH(bp)][1 + port] == 0)
Eilon Greenstein34f80b02008-06-23 20:33:01 -07007106 reset_code = FW_MSG_CODE_DRV_UNLOAD_PORT;
7107 else
7108 reset_code = FW_MSG_CODE_DRV_UNLOAD_FUNCTION;
7109 }
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02007110
Eilon Greenstein34f80b02008-06-23 20:33:01 -07007111 if ((reset_code == FW_MSG_CODE_DRV_UNLOAD_COMMON) ||
7112 (reset_code == FW_MSG_CODE_DRV_UNLOAD_PORT))
7113 bnx2x__link_reset(bp);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02007114
Dmitry Kravkov523224a2010-10-06 03:23:26 +00007115 /* Disable HW interrupts, NAPI */
7116 bnx2x_netif_stop(bp, 1);
7117
7118 /* Release IRQs */
Dmitry Kravkovd6214d72010-10-06 03:32:10 +00007119 bnx2x_free_irq(bp);
Dmitry Kravkov523224a2010-10-06 03:23:26 +00007120
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02007121 /* Reset the chip */
Eliezer Tamir228241e2008-02-28 11:56:57 -08007122 bnx2x_reset_chip(bp, reset_code);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02007123
7124 /* Report UNLOAD_DONE to MCP */
Eilon Greenstein34f80b02008-06-23 20:33:01 -07007125 if (!BP_NOMCP(bp))
Yaniv Rosnera22f0782010-09-07 11:41:20 +00007126 bnx2x_fw_command(bp, DRV_MSG_CODE_UNLOAD_DONE, 0);
Eilon Greenstein356e2382009-02-12 08:38:32 +00007127
Vladislav Zolotarov72fd0712010-04-19 01:13:12 +00007128}
7129
/* Disable the "close the gates" HW feature by masking the relevant
 * attention bits: per-port AEU mask bits on E1, the global PXP/NIG
 * close masks on E1H.  (E2 and later are not handled here.)
 */
void bnx2x_disable_close_the_gate(struct bnx2x *bp)
{
	u32 val;

	DP(NETIF_MSG_HW, "Disabling \"close the gates\"\n");

	if (CHIP_IS_E1(bp)) {
		int port = BP_PORT(bp);
		u32 addr = port ? MISC_REG_AEU_MASK_ATTN_FUNC_1 :
			MISC_REG_AEU_MASK_ATTN_FUNC_0;

		val = REG_RD(bp, addr);
		/* clear bits 8-9: the close-the-gates attentions */
		val &= ~(0x300);
		REG_WR(bp, addr, val);
	} else if (CHIP_IS_E1H(bp)) {
		val = REG_RD(bp, MISC_REG_AEU_GENERAL_MASK);
		val &= ~(MISC_AEU_GENERAL_MASK_REG_AEU_PXP_CLOSE_MASK |
			 MISC_AEU_GENERAL_MASK_REG_AEU_NIG_CLOSE_MASK);
		REG_WR(bp, MISC_REG_AEU_GENERAL_MASK, val);
	}
}
7151
/* Close (@close true) or open (@close false) gates #2, #3 and #4,
 * isolating the chip from new host activity during a "process kill"
 * recovery:
 *   #2 - PXP discard of internal writes   (not on E1)
 *   #3 - HC interrupt enable bit
 *   #4 - PXP discard of doorbells         (not on E1)
 */
static void bnx2x_set_234_gates(struct bnx2x *bp, bool close)
{
	u32 val, addr;

	/* Gates #2 and #4a are closed/opened for "not E1" only */
	if (!CHIP_IS_E1(bp)) {
		/* #4 */
		val = REG_RD(bp, PXP_REG_HST_DISCARD_DOORBELLS);
		REG_WR(bp, PXP_REG_HST_DISCARD_DOORBELLS,
		       close ? (val | 0x1) : (val & (~(u32)1)));
		/* #2 */
		val = REG_RD(bp, PXP_REG_HST_DISCARD_INTERNAL_WRITES);
		REG_WR(bp, PXP_REG_HST_DISCARD_INTERNAL_WRITES,
		       close ? (val | 0x1) : (val & (~(u32)1)));
	}

	/* #3: note the inverted sense - bit set means gate OPEN */
	addr = BP_PORT(bp) ? HC_REG_CONFIG_1 : HC_REG_CONFIG_0;
	val = REG_RD(bp, addr);
	REG_WR(bp, addr, (!close) ? (val | 0x1) : (val & (~(u32)1)));

	DP(NETIF_MSG_HW, "%s gates #2, #3 and #4\n",
	   close ? "closing" : "opening");
	mmiowb();
}
7178
7179#define SHARED_MF_CLP_MAGIC 0x80000000 /* `magic' bit */
7180
/* Save the current value of the CLP `magic' bit into @magic_val and set
 * the bit in shared MF config so the MF configuration survives an MCP
 * reset.  Pair with bnx2x_clp_reset_done() to restore it.
 */
static void bnx2x_clp_reset_prep(struct bnx2x *bp, u32 *magic_val)
{
	/* Do some magic... */
	u32 val = MF_CFG_RD(bp, shared_mf_config.clp_mb);
	*magic_val = val & SHARED_MF_CLP_MAGIC;
	MF_CFG_WR(bp, shared_mf_config.clp_mb, val | SHARED_MF_CLP_MAGIC);
}
7188
/**
 * bnx2x_clp_reset_done - restore the value of the `magic' bit.
 *
 * @bp:		driver handle
 * @magic_val:	old value of the `magic' bit, as saved by
 *		bnx2x_clp_reset_prep().
 */
static void bnx2x_clp_reset_done(struct bnx2x *bp, u32 magic_val)
{
	/* Restore the `magic' bit value... */
	u32 val = MF_CFG_RD(bp, shared_mf_config.clp_mb);
	MF_CFG_WR(bp, shared_mf_config.clp_mb,
		  (val & (~SHARED_MF_CLP_MAGIC)) | magic_val);
}
7202
/**
 * bnx2x_reset_mcp_prep - prepare for MCP reset.
 *
 * @bp:		driver handle
 * @magic_val:	old value of 'magic' bit.
 *
 * Takes care of CLP configurations: saves/sets the CLP `magic' bit
 * (non-E1 only) and clears the shmem validity map so the reset is
 * detected by bnx2x_init_shmem() afterwards.
 */
static void bnx2x_reset_mcp_prep(struct bnx2x *bp, u32 *magic_val)
{
	u32 shmem;
	u32 validity_offset;

	DP(NETIF_MSG_HW, "Starting\n");

	/* Set `magic' bit in order to save MF config */
	if (!CHIP_IS_E1(bp))
		bnx2x_clp_reset_prep(bp, magic_val);

	/* Get shmem offset */
	shmem = REG_RD(bp, MISC_REG_SHARED_MEM_ADDR);
	validity_offset = offsetof(struct shmem_region, validity_map[0]);

	/* Clear validity map flags */
	if (shmem > 0)
		REG_WR(bp, shmem + validity_offset, 0);
}
7230
7231#define MCP_TIMEOUT 5000 /* 5 seconds (in ms) */
7232#define MCP_ONE_TIMEOUT 100 /* 100 ms */
7233
Dmitry Kravkove8920672011-05-04 23:52:40 +00007234/**
7235 * bnx2x_mcp_wait_one - wait for MCP_ONE_TIMEOUT
Vladislav Zolotarov72fd0712010-04-19 01:13:12 +00007236 *
Dmitry Kravkove8920672011-05-04 23:52:40 +00007237 * @bp: driver handle
Vladislav Zolotarov72fd0712010-04-19 01:13:12 +00007238 */
7239static inline void bnx2x_mcp_wait_one(struct bnx2x *bp)
7240{
7241 /* special handling for emulation and FPGA,
7242 wait 10 times longer */
7243 if (CHIP_REV_IS_SLOW(bp))
7244 msleep(MCP_ONE_TIMEOUT*10);
7245 else
7246 msleep(MCP_ONE_TIMEOUT);
7247}
7248
/*
 * initializes bp->common.shmem_base and waits for validity signature to appear
 *
 * Polls every MCP_ONE_TIMEOUT for up to MCP_TIMEOUT total.  Returns 0
 * once the MB validity bit is seen, -ENODEV on timeout.
 */
static int bnx2x_init_shmem(struct bnx2x *bp)
{
	int cnt = 0;
	u32 val = 0;

	do {
		bp->common.shmem_base = REG_RD(bp, MISC_REG_SHARED_MEM_ADDR);
		if (bp->common.shmem_base) {
			val = SHMEM_RD(bp, validity_map[BP_PORT(bp)]);
			if (val & SHR_MEM_VALIDITY_MB)
				return 0;
		}

		bnx2x_mcp_wait_one(bp);

	} while (cnt++ < (MCP_TIMEOUT / MCP_ONE_TIMEOUT));

	BNX2X_ERR("BAD MCP validity signature\n");

	return -ENODEV;
}
7273
7274static int bnx2x_reset_mcp_comp(struct bnx2x *bp, u32 magic_val)
7275{
7276 int rc = bnx2x_init_shmem(bp);
7277
Vladislav Zolotarov72fd0712010-04-19 01:13:12 +00007278 /* Restore the `magic' bit value */
7279 if (!CHIP_IS_E1(bp))
7280 bnx2x_clp_reset_done(bp, magic_val);
7281
7282 return rc;
7283}
7284
/* Prepare the PXP block for (and recover it after) a chip reset by
 * clearing its read-init/done registers.  Not needed on E1.
 */
static void bnx2x_pxp_prep(struct bnx2x *bp)
{
	if (!CHIP_IS_E1(bp)) {
		REG_WR(bp, PXP2_REG_RD_START_INIT, 0);
		REG_WR(bp, PXP2_REG_RQ_RBC_DONE, 0);
		REG_WR(bp, PXP2_REG_RQ_CFG_DONE, 0);
		/* flush the writes before the reset proceeds */
		mmiowb();
	}
}
7294
7295/*
7296 * Reset the whole chip except for:
7297 * - PCIE core
7298 * - PCI Glue, PSWHST, PXP/PXP2 RF (all controlled by
7299 * one reset bit)
7300 * - IGU
7301 * - MISC (including AEU)
7302 * - GRC
7303 * - RBCN, RBCP
7304 */
7305static void bnx2x_process_kill_chip_reset(struct bnx2x *bp)
7306{
7307 u32 not_reset_mask1, reset_mask1, not_reset_mask2, reset_mask2;
7308
7309 not_reset_mask1 =
7310 MISC_REGISTERS_RESET_REG_1_RST_HC |
7311 MISC_REGISTERS_RESET_REG_1_RST_PXPV |
7312 MISC_REGISTERS_RESET_REG_1_RST_PXP;
7313
7314 not_reset_mask2 =
7315 MISC_REGISTERS_RESET_REG_2_RST_MDIO |
7316 MISC_REGISTERS_RESET_REG_2_RST_EMAC0_HARD_CORE |
7317 MISC_REGISTERS_RESET_REG_2_RST_EMAC1_HARD_CORE |
7318 MISC_REGISTERS_RESET_REG_2_RST_MISC_CORE |
7319 MISC_REGISTERS_RESET_REG_2_RST_RBCN |
7320 MISC_REGISTERS_RESET_REG_2_RST_GRC |
7321 MISC_REGISTERS_RESET_REG_2_RST_MCP_N_RESET_REG_HARD_CORE |
7322 MISC_REGISTERS_RESET_REG_2_RST_MCP_N_HARD_CORE_RST_B;
7323
7324 reset_mask1 = 0xffffffff;
7325
7326 if (CHIP_IS_E1(bp))
7327 reset_mask2 = 0xffff;
7328 else
7329 reset_mask2 = 0x1ffff;
7330
7331 REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_CLEAR,
7332 reset_mask1 & (~not_reset_mask1));
7333 REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_2_CLEAR,
7334 reset_mask2 & (~not_reset_mask2));
7335
7336 barrier();
7337 mmiowb();
7338
7339 REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_SET, reset_mask1);
7340 REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_2_SET, reset_mask2);
7341 mmiowb();
7342}
7343
/* The "process kill" recovery flow: wait for the chip to quiesce, close
 * the host gates, reset everything except the PCIe/MCP-critical blocks,
 * wait for the MCP to come back and reopen the gates.
 *
 * Returns 0 on success, -EAGAIN if the chip did not quiesce or the MCP
 * did not recover.
 */
static int bnx2x_process_kill(struct bnx2x *bp)
{
	int cnt = 1000;
	u32 val = 0;
	u32 sr_cnt, blk_cnt, port_is_idle_0, port_is_idle_1, pgl_exp_rom2;


	/* Empty the Tetris buffer, wait for 1s */
	do {
		sr_cnt = REG_RD(bp, PXP2_REG_RD_SR_CNT);
		blk_cnt = REG_RD(bp, PXP2_REG_RD_BLK_CNT);
		port_is_idle_0 = REG_RD(bp, PXP2_REG_RD_PORT_IS_IDLE_0);
		port_is_idle_1 = REG_RD(bp, PXP2_REG_RD_PORT_IS_IDLE_1);
		pgl_exp_rom2 = REG_RD(bp, PXP2_REG_PGL_EXP_ROM2);
		/* expected idle values of the PXP read path */
		if ((sr_cnt == 0x7e) && (blk_cnt == 0xa0) &&
		    ((port_is_idle_0 & 0x1) == 0x1) &&
		    ((port_is_idle_1 & 0x1) == 0x1) &&
		    (pgl_exp_rom2 == 0xffffffff))
			break;
		msleep(1);
	} while (cnt-- > 0);

	if (cnt <= 0) {
		DP(NETIF_MSG_HW, "Tetris buffer didn't get empty or there"
			  " are still"
			  " outstanding read requests after 1s!\n");
		DP(NETIF_MSG_HW, "sr_cnt=0x%08x, blk_cnt=0x%08x,"
			  " port_is_idle_0=0x%08x,"
			  " port_is_idle_1=0x%08x, pgl_exp_rom2=0x%08x\n",
			  sr_cnt, blk_cnt, port_is_idle_0, port_is_idle_1,
			  pgl_exp_rom2);
		return -EAGAIN;
	}

	barrier();

	/* Close gates #2, #3 and #4 */
	bnx2x_set_234_gates(bp, true);

	/* TBD: Indicate that "process kill" is in progress to MCP */

	/* Clear "unprepared" bit */
	REG_WR(bp, MISC_REG_UNPREPARED, 0);
	barrier();

	/* Make sure all is written to the chip before the reset */
	mmiowb();

	/* Wait for 1ms to empty GLUE and PCI-E core queues,
	 * PSWHST, GRC and PSWRD Tetris buffer.
	 */
	msleep(1);

	/* Prepare to chip reset: */
	/* MCP */
	bnx2x_reset_mcp_prep(bp, &val);

	/* PXP */
	bnx2x_pxp_prep(bp);
	barrier();

	/* reset the chip */
	bnx2x_process_kill_chip_reset(bp);
	barrier();

	/* Recover after reset: */
	/* MCP */
	if (bnx2x_reset_mcp_comp(bp, val))
		return -EAGAIN;

	/* PXP */
	bnx2x_pxp_prep(bp);

	/* Open the gates #2, #3 and #4 */
	bnx2x_set_234_gates(bp, false);

	/* TBD: IGU/AEU preparation bring back the AEU/IGU to a
	 * reset state, re-enable attentions. */

	return 0;
}
7425
7426static int bnx2x_leader_reset(struct bnx2x *bp)
7427{
7428 int rc = 0;
7429 /* Try to recover after the failure */
7430 if (bnx2x_process_kill(bp)) {
7431 printk(KERN_ERR "%s: Something bad had happen! Aii!\n",
7432 bp->dev->name);
7433 rc = -EAGAIN;
7434 goto exit_leader_reset;
7435 }
7436
7437 /* Clear "reset is in progress" bit and update the driver state */
7438 bnx2x_set_reset_done(bp);
7439 bp->recovery_state = BNX2X_RECOVERY_DONE;
7440
7441exit_leader_reset:
7442 bp->is_leader = 0;
7443 bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_RESERVED_08);
7444 smp_wmb();
7445 return rc;
7446}
7447
/* Assumption: runs under rtnl lock. This together with the fact
 * that it's called only from bnx2x_reset_task() ensure that it
 * will never be called when netif_running(bp->dev) is false.
 *
 * State machine driving HW parity recovery.  The first function to
 * grab the RESERVED_08 HW lock becomes the "leader" and performs the
 * actual chip reset ("process kill"); every other function unloads
 * itself and polls (via re-scheduled reset_task) until the leader is
 * done, then reloads.
 */
static void bnx2x_parity_recover(struct bnx2x *bp)
{
	DP(NETIF_MSG_HW, "Handling parity\n");
	while (1) {
		switch (bp->recovery_state) {
		case BNX2X_RECOVERY_INIT:
			DP(NETIF_MSG_HW, "State is BNX2X_RECOVERY_INIT\n");
			/* Try to get a LEADER_LOCK HW lock */
			if (bnx2x_trylock_hw_lock(bp,
					HW_LOCK_RESOURCE_RESERVED_08))
				bp->is_leader = 1;

			/* Stop the driver */
			/* If interface has been removed - break */
			if (bnx2x_nic_unload(bp, UNLOAD_RECOVERY))
				return;

			bp->recovery_state = BNX2X_RECOVERY_WAIT;
			/* Ensure "is_leader" and "recovery_state"
			 * update values are seen on other CPUs
			 */
			smp_wmb();
			break;

		case BNX2X_RECOVERY_WAIT:
			DP(NETIF_MSG_HW, "State is BNX2X_RECOVERY_WAIT\n");
			if (bp->is_leader) {
				u32 load_counter = bnx2x_get_load_cnt(bp);
				if (load_counter) {
					/* Wait until all other functions get
					 * down.
					 */
					schedule_delayed_work(&bp->reset_task,
								HZ/10);
					return;
				} else {
					/* If all other functions got down -
					 * try to bring the chip back to
					 * normal. In any case it's an exit
					 * point for a leader.
					 */
					if (bnx2x_leader_reset(bp) ||
					    bnx2x_nic_load(bp, LOAD_NORMAL)) {
						printk(KERN_ERR"%s: Recovery "
						"has failed. Power cycle is "
						"needed.\n", bp->dev->name);
						/* Disconnect this device */
						netif_device_detach(bp->dev);
						/* Block ifup for all function
						 * of this ASIC until
						 * "process kill" or power
						 * cycle.
						 */
						bnx2x_set_reset_in_progress(bp);
						/* Shut down the power */
						bnx2x_set_power_state(bp,
								PCI_D3hot);
						return;
					}

					return;
				}
			} else { /* non-leader */
				if (!bnx2x_reset_is_done(bp)) {
					/* Try to get a LEADER_LOCK HW lock as
					 * long as a former leader may have
					 * been unloaded by the user or
					 * released a leadership by another
					 * reason.
					 */
					if (bnx2x_trylock_hw_lock(bp,
					    HW_LOCK_RESOURCE_RESERVED_08)) {
						/* I'm a leader now! Restart a
						 * switch case.
						 */
						bp->is_leader = 1;
						break;
					}

					schedule_delayed_work(&bp->reset_task,
								HZ/10);
					return;

				} else { /* A leader has completed
					  * the "process kill". It's an exit
					  * point for a non-leader.
					  */
					bnx2x_nic_load(bp, LOAD_NORMAL);
					bp->recovery_state =
						BNX2X_RECOVERY_DONE;
					smp_wmb();
					return;
				}
			}
			/* No runtime fall-through into "default": every
			 * path above returns or breaks out of the switch.
			 */
		default:
			return;
		}
	}
}
7551
7552/* bnx2x_nic_unload() flushes the bnx2x_wq, thus reset task is
7553 * scheduled on a general queue in order to prevent a dead lock.
7554 */
Eilon Greenstein34f80b02008-06-23 20:33:01 -07007555static void bnx2x_reset_task(struct work_struct *work)
7556{
Vladislav Zolotarov72fd0712010-04-19 01:13:12 +00007557 struct bnx2x *bp = container_of(work, struct bnx2x, reset_task.work);
Eilon Greenstein34f80b02008-06-23 20:33:01 -07007558
7559#ifdef BNX2X_STOP_ON_ERROR
7560 BNX2X_ERR("reset task called but STOP_ON_ERROR defined"
7561 " so reset not done to allow debug dump,\n"
Vladislav Zolotarov72fd0712010-04-19 01:13:12 +00007562 KERN_ERR " you will need to reboot when done\n");
Eilon Greenstein34f80b02008-06-23 20:33:01 -07007563 return;
7564#endif
7565
7566 rtnl_lock();
7567
7568 if (!netif_running(bp->dev))
7569 goto reset_task_exit;
7570
Vladislav Zolotarov72fd0712010-04-19 01:13:12 +00007571 if (unlikely(bp->recovery_state != BNX2X_RECOVERY_DONE))
7572 bnx2x_parity_recover(bp);
7573 else {
7574 bnx2x_nic_unload(bp, UNLOAD_NORMAL);
7575 bnx2x_nic_load(bp, LOAD_NORMAL);
7576 }
Eilon Greenstein34f80b02008-06-23 20:33:01 -07007577
7578reset_task_exit:
7579 rtnl_unlock();
7580}
7581
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02007582/* end of nic load/unload */
7583
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02007584/*
7585 * Init service functions
7586 */
7587
stephen hemminger8d962862010-10-21 07:50:56 +00007588static u32 bnx2x_get_pretend_reg(struct bnx2x *bp)
Eilon Greensteinf1ef27e2009-02-12 08:36:23 +00007589{
Dmitry Kravkovf2e08992010-10-06 03:28:26 +00007590 u32 base = PXP2_REG_PGL_PRETEND_FUNC_F0;
7591 u32 stride = PXP2_REG_PGL_PRETEND_FUNC_F1 - base;
7592 return base + (BP_ABS_FUNC(bp)) * stride;
Eilon Greensteinf1ef27e2009-02-12 08:36:23 +00007593}
7594
/* Disable HW interrupts on behalf of (absolute) function 0 on E1H+
 * chips, using the PGL "pretend" register so that the interrupt
 * disable sequence is applied as if we were function 0, then restore
 * our own function identity.
 */
static void bnx2x_undi_int_disable_e1h(struct bnx2x *bp)
{
	u32 reg = bnx2x_get_pretend_reg(bp);

	/* Flush all outstanding writes */
	mmiowb();

	/* Pretend to be function 0 */
	REG_WR(bp, reg, 0);
	REG_RD(bp, reg);	/* Flush the GRC transaction (in the chip) */

	/* From now we are in the "like-E1" mode */
	bnx2x_int_disable(bp);

	/* Flush all outstanding writes */
	mmiowb();

	/* Restore the original function */
	REG_WR(bp, reg, BP_ABS_FUNC(bp));
	REG_RD(bp, reg);	/* flush the restore as well */
}
7616
/* Disable HW interrupts: E1 can do it directly, later chips must go
 * through the function-0 "pretend" sequence.
 */
static inline void bnx2x_undi_int_disable(struct bnx2x *bp)
{
	if (!CHIP_IS_E1(bp))
		bnx2x_undi_int_disable_e1h(bp);
	else
		bnx2x_int_disable(bp);
}
7624
/* Take over the device from a pre-boot (UNDI) driver, if one is active.
 *
 * An active UNDI driver is detected by MISC_REG_UNPREPARED == 1 together
 * with the normal-bell CID offset value it programs (0x7).  If found,
 * the UNDI driver is unloaded through the MCP on both ports, input
 * traffic is closed, the device is reset (the NIG is then taken back
 * out of reset with its port-swap straps restored), and finally this
 * function's own pf_num/fw_seq are restored.
 */
static void __devinit bnx2x_undi_unload(struct bnx2x *bp)
{
	u32 val;

	/* Check if there is any driver already loaded */
	val = REG_RD(bp, MISC_REG_UNPREPARED);
	if (val == 0x1) {
		/* Check if it is the UNDI driver
		 * UNDI driver initializes CID offset for normal bell to 0x7
		 */
		bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_UNDI);
		val = REG_RD(bp, DORQ_REG_NORM_CID_OFST);
		if (val == 0x7) {
			u32 reset_code = DRV_MSG_CODE_UNLOAD_REQ_WOL_DIS;
			/* save our pf_num */
			int orig_pf_num = bp->pf_num;
			u32 swap_en;
			u32 swap_val;

			/* clear the UNDI indication */
			REG_WR(bp, DORQ_REG_NORM_CID_OFST, 0);

			BNX2X_DEV_INFO("UNDI is active! reset device\n");

			/* try unload UNDI on port 0 */
			bp->pf_num = 0;
			bp->fw_seq =
			      (SHMEM_RD(bp, func_mb[bp->pf_num].drv_mb_header) &
				DRV_MSG_SEQ_NUMBER_MASK);
			reset_code = bnx2x_fw_command(bp, reset_code, 0);

			/* if UNDI is loaded on the other port */
			if (reset_code != FW_MSG_CODE_DRV_UNLOAD_COMMON) {

				/* send "DONE" for previous unload */
				bnx2x_fw_command(bp,
					     DRV_MSG_CODE_UNLOAD_DONE, 0);

				/* unload UNDI on port 1 */
				bp->pf_num = 1;
				bp->fw_seq =
			      (SHMEM_RD(bp, func_mb[bp->pf_num].drv_mb_header) &
					DRV_MSG_SEQ_NUMBER_MASK);
				reset_code = DRV_MSG_CODE_UNLOAD_REQ_WOL_DIS;

				bnx2x_fw_command(bp, reset_code, 0);
			}

			/* now it's safe to release the lock */
			bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_UNDI);

			bnx2x_undi_int_disable(bp);

			/* close input traffic and wait for it */
			/* Do not rcv packets to BRB */
			REG_WR(bp,
			      (BP_PORT(bp) ? NIG_REG_LLH1_BRB1_DRV_MASK :
					     NIG_REG_LLH0_BRB1_DRV_MASK), 0x0);
			/* Do not direct rcv packets that are not for MCP to
			 * the BRB */
			REG_WR(bp,
			       (BP_PORT(bp) ? NIG_REG_LLH1_BRB1_NOT_MCP :
					      NIG_REG_LLH0_BRB1_NOT_MCP), 0x0);
			/* clear AEU */
			REG_WR(bp,
			     (BP_PORT(bp) ? MISC_REG_AEU_MASK_ATTN_FUNC_1 :
					    MISC_REG_AEU_MASK_ATTN_FUNC_0), 0);
			/* let in-flight traffic drain before the reset */
			msleep(10);

			/* save NIG port swap info */
			swap_val = REG_RD(bp, NIG_REG_PORT_SWAP);
			swap_en = REG_RD(bp, NIG_REG_STRAP_OVERRIDE);
			/* reset device */
			REG_WR(bp,
			       GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_CLEAR,
			       0xd3ffffff);
			REG_WR(bp,
			       GRCBASE_MISC + MISC_REGISTERS_RESET_REG_2_CLEAR,
			       0x1403);
			/* take the NIG out of reset and restore swap values */
			REG_WR(bp,
			       GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_SET,
			       MISC_REGISTERS_RESET_REG_1_RST_NIG);
			REG_WR(bp, NIG_REG_PORT_SWAP, swap_val);
			REG_WR(bp, NIG_REG_STRAP_OVERRIDE, swap_en);

			/* send unload done to the MCP */
			bnx2x_fw_command(bp, DRV_MSG_CODE_UNLOAD_DONE, 0);

			/* restore our func and fw_seq */
			bp->pf_num = orig_pf_num;
			bp->fw_seq =
			      (SHMEM_RD(bp, func_mb[bp->pf_num].drv_mb_header) &
				DRV_MSG_SEQ_NUMBER_MASK);
		} else
			bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_UNDI);
	}
}
7723
/* Probe the chip's port-common hardware information at probe time:
 * chip id/revision, port mode and pfid, doorbell size, base FW status
 * block id, flash size, shmem bases, bootcode version, feature flags
 * and WoL capability.  Results are stored in bp->common, bp->flags and
 * bp->link_params.  If no MCP is detected, NO_MCP_FLAG is set and the
 * shmem-dependent part is skipped.
 */
static void __devinit bnx2x_get_common_hwinfo(struct bnx2x *bp)
{
	u32 val, val2, val3, val4, id;
	u16 pmc;

	/* Get the chip revision id and number. */
	/* chip num:16-31, rev:12-15, metal:4-11, bond_id:0-3 */
	val = REG_RD(bp, MISC_REG_CHIP_NUM);
	id = ((val & 0xffff) << 16);
	val = REG_RD(bp, MISC_REG_CHIP_REV);
	id |= ((val & 0xf) << 12);
	val = REG_RD(bp, MISC_REG_CHIP_METAL);
	id |= ((val & 0xff) << 4);
	val = REG_RD(bp, MISC_REG_BOND_ID);
	id |= (val & 0xf);
	bp->common.chip_id = id;

	/* Set doorbell size */
	bp->db_size = (1 << BNX2X_DB_SHIFT);

	if (CHIP_IS_E2(bp)) {
		/* Port mode may be overridden; bit 0 of the OVWR register
		 * selects whether the override value (bit 1) is used.
		 */
		val = REG_RD(bp, MISC_REG_PORT4MODE_EN_OVWR);
		if ((val & 1) == 0)
			val = REG_RD(bp, MISC_REG_PORT4MODE_EN);
		else
			val = (val >> 1) & 1;
		BNX2X_DEV_INFO("chip is in %s\n", val ? "4_PORT_MODE" :
						       "2_PORT_MODE");
		bp->common.chip_port_mode = val ? CHIP_4_PORT_MODE :
						 CHIP_2_PORT_MODE;

		if (CHIP_MODE_IS_4_PORT(bp))
			bp->pfid = (bp->pf_num >> 1);	/* 0..3 */
		else
			bp->pfid = (bp->pf_num & 0x6);	/* 0, 2, 4, 6 */
	} else {
		bp->common.chip_port_mode = CHIP_PORT_MODE_NONE; /* N/A */
		bp->pfid = bp->pf_num;			/* 0..7 */
	}

	/*
	 * set base FW non-default (fast path) status block id, this value is
	 * used to initialize the fw_sb_id saved on the fp/queue structure to
	 * determine the id used by the FW.
	 */
	if (CHIP_IS_E1x(bp))
		bp->base_fw_ndsb = BP_PORT(bp) * FP_SB_MAX_E1x;
	else /* E2 */
		bp->base_fw_ndsb = BP_PORT(bp) * FP_SB_MAX_E2;

	bp->link_params.chip_id = bp->common.chip_id;
	BNX2X_DEV_INFO("chip ID is 0x%x\n", id);

	/* Detect single-port devices from strap bits / chip id */
	val = (REG_RD(bp, 0x2874) & 0x55);
	if ((bp->common.chip_id & 0x1) ||
	    (CHIP_IS_E1(bp) && val) || (CHIP_IS_E1H(bp) && (val == 0x55))) {
		bp->flags |= ONE_PORT_FLAG;
		BNX2X_DEV_INFO("single port device\n");
	}

	val = REG_RD(bp, MCP_REG_MCPR_NVM_CFG4);
	bp->common.flash_size = (NVRAM_1MB_SIZE <<
				 (val & MCPR_NVM_CFG4_FLASH_SIZE));
	BNX2X_DEV_INFO("flash_size 0x%x (%d)\n",
		       bp->common.flash_size, bp->common.flash_size);

	/* reads bp->common.shmem_base, among others */
	bnx2x_init_shmem(bp);

	bp->common.shmem2_base = REG_RD(bp, (BP_PATH(bp) ?
					MISC_REG_GENERIC_CR_1 :
					MISC_REG_GENERIC_CR_0));

	bp->link_params.shmem_base = bp->common.shmem_base;
	bp->link_params.shmem2_base = bp->common.shmem2_base;
	BNX2X_DEV_INFO("shmem offset 0x%x shmem2 offset 0x%x\n",
		       bp->common.shmem_base, bp->common.shmem2_base);

	if (!bp->common.shmem_base) {
		/* No shmem means no management CPU - run without MCP */
		BNX2X_DEV_INFO("MCP not active\n");
		bp->flags |= NO_MCP_FLAG;
		return;
	}

	bp->common.hw_config = SHMEM_RD(bp, dev_info.shared_hw_config.config);
	BNX2X_DEV_INFO("hw_config 0x%08x\n", bp->common.hw_config);

	bp->link_params.hw_led_mode = ((bp->common.hw_config &
					SHARED_HW_CFG_LED_MODE_MASK) >>
				       SHARED_HW_CFG_LED_MODE_SHIFT);

	bp->link_params.feature_config_flags = 0;
	val = SHMEM_RD(bp, dev_info.shared_feature_config.config);
	if (val & SHARED_FEAT_CFG_OVERRIDE_PREEMPHASIS_CFG_ENABLED)
		bp->link_params.feature_config_flags |=
				FEATURE_CONFIG_OVERRIDE_PREEMPHASIS_ENABLED;
	else
		bp->link_params.feature_config_flags &=
				~FEATURE_CONFIG_OVERRIDE_PREEMPHASIS_ENABLED;

	/* bootcode version lives in the upper bytes of bc_rev */
	val = SHMEM_RD(bp, dev_info.bc_rev) >> 8;
	bp->common.bc_ver = val;
	BNX2X_DEV_INFO("bc_ver %X\n", val);
	if (val < BNX2X_BC_VER) {
		/* for now only warn
		 * later we might need to enforce this */
		BNX2X_ERR("This driver needs bc_ver %X but found %X, "
			  "please upgrade BC\n", BNX2X_BC_VER, val);
	}
	/* optical-module verification support depends on bootcode version */
	bp->link_params.feature_config_flags |=
				(val >= REQ_BC_VER_4_VRFY_FIRST_PHY_OPT_MDL) ?
				FEATURE_CONFIG_BC_SUPPORTS_OPT_MDL_VRFY : 0;

	bp->link_params.feature_config_flags |=
		(val >= REQ_BC_VER_4_VRFY_SPECIFIC_PHY_OPT_MDL) ?
		FEATURE_CONFIG_BC_SUPPORTS_DUAL_PHY_OPT_MDL_VRFY : 0;

	/* WoL capability: PME from D3cold must be supported by PCI PM */
	pci_read_config_word(bp->pdev, bp->pm_cap + PCI_PM_PMC, &pmc);
	bp->flags |= (pmc & PCI_PM_CAP_PME_D3cold) ? 0 : NO_WOL_FLAG;

	BNX2X_DEV_INFO("%sWoL capable\n",
		       (bp->flags & NO_WOL_FLAG) ? "not " : "");

	val = SHMEM_RD(bp, dev_info.shared_hw_config.part_num);
	val2 = SHMEM_RD(bp, dev_info.shared_hw_config.part_num[4]);
	val3 = SHMEM_RD(bp, dev_info.shared_hw_config.part_num[8]);
	val4 = SHMEM_RD(bp, dev_info.shared_hw_config.part_num[12]);

	dev_info(&bp->pdev->dev, "part number %X-%X-%X-%X\n",
		 val, val2, val3, val4);
}
7854
/* Extract the function-id / vector fields from an IGU mapping-CAM entry */
#define IGU_FID(val)	GET_FIELD((val), IGU_REG_MAPPING_MEMORY_FID)
#define IGU_VEC(val)	GET_FIELD((val), IGU_REG_MAPPING_MEMORY_VECTOR)
7857
7858static void __devinit bnx2x_get_igu_cam_info(struct bnx2x *bp)
7859{
7860 int pfid = BP_FUNC(bp);
7861 int vn = BP_E1HVN(bp);
7862 int igu_sb_id;
7863 u32 val;
7864 u8 fid;
7865
7866 bp->igu_base_sb = 0xff;
7867 bp->igu_sb_cnt = 0;
7868 if (CHIP_INT_MODE_IS_BC(bp)) {
7869 bp->igu_sb_cnt = min_t(u8, FP_SB_MAX_E1x,
Vladislav Zolotarovec6ba942010-12-13 05:44:01 +00007870 NUM_IGU_SB_REQUIRED(bp->l2_cid_count));
Dmitry Kravkovf2e08992010-10-06 03:28:26 +00007871
7872 bp->igu_base_sb = (CHIP_MODE_IS_4_PORT(bp) ? pfid : vn) *
7873 FP_SB_MAX_E1x;
7874
7875 bp->igu_dsb_id = E1HVN_MAX * FP_SB_MAX_E1x +
7876 (CHIP_MODE_IS_4_PORT(bp) ? pfid : vn);
7877
7878 return;
7879 }
7880
7881 /* IGU in normal mode - read CAM */
7882 for (igu_sb_id = 0; igu_sb_id < IGU_REG_MAPPING_MEMORY_SIZE;
7883 igu_sb_id++) {
7884 val = REG_RD(bp, IGU_REG_MAPPING_MEMORY + igu_sb_id * 4);
7885 if (!(val & IGU_REG_MAPPING_MEMORY_VALID))
7886 continue;
7887 fid = IGU_FID(val);
7888 if ((fid & IGU_FID_ENCODE_IS_PF)) {
7889 if ((fid & IGU_FID_PF_NUM_MASK) != pfid)
7890 continue;
7891 if (IGU_VEC(val) == 0)
7892 /* default status block */
7893 bp->igu_dsb_id = igu_sb_id;
7894 else {
7895 if (bp->igu_base_sb == 0xff)
7896 bp->igu_base_sb = igu_sb_id;
7897 bp->igu_sb_cnt++;
7898 }
7899 }
7900 }
Vladislav Zolotarovec6ba942010-12-13 05:44:01 +00007901 bp->igu_sb_cnt = min_t(u8, bp->igu_sb_cnt,
7902 NUM_IGU_SB_REQUIRED(bp->l2_cid_count));
Dmitry Kravkovf2e08992010-10-06 03:28:26 +00007903 if (bp->igu_sb_cnt == 0)
7904 BNX2X_ERR("CAM configuration error\n");
7905}
7906
/* Build bp->port.supported[] (per link configuration) from the PHYs'
 * capabilities, read the PHY address according to switch_cfg, and then
 * mask the supported modes down to what the NVRAM speed_cap_mask
 * allows.  Logs an error and returns early on a bad NVRAM phy config
 * or an unknown switch_cfg.
 */
static void __devinit bnx2x_link_settings_supported(struct bnx2x *bp,
						    u32 switch_cfg)
{
	int cfg_size = 0, idx, port = BP_PORT(bp);

	/* Aggregation of supported attributes of all external phys */
	bp->port.supported[0] = 0;
	bp->port.supported[1] = 0;
	switch (bp->link_params.num_phys) {
	case 1:
		bp->port.supported[0] = bp->link_params.phy[INT_PHY].supported;
		cfg_size = 1;
		break;
	case 2:
		bp->port.supported[0] = bp->link_params.phy[EXT_PHY1].supported;
		cfg_size = 1;
		break;
	case 3:
		/* with two external PHYs the order may be swapped */
		if (bp->link_params.multi_phy_config &
		    PORT_HW_CFG_PHY_SWAPPED_ENABLED) {
			bp->port.supported[1] =
				bp->link_params.phy[EXT_PHY1].supported;
			bp->port.supported[0] =
				bp->link_params.phy[EXT_PHY2].supported;
		} else {
			bp->port.supported[0] =
				bp->link_params.phy[EXT_PHY1].supported;
			bp->port.supported[1] =
				bp->link_params.phy[EXT_PHY2].supported;
		}
		cfg_size = 2;
		break;
	}

	if (!(bp->port.supported[0] || bp->port.supported[1])) {
		BNX2X_ERR("NVRAM config error. BAD phy config."
			  "PHY1 config 0x%x, PHY2 config 0x%x\n",
			   SHMEM_RD(bp,
			   dev_info.port_hw_config[port].external_phy_config),
			   SHMEM_RD(bp,
			   dev_info.port_hw_config[port].external_phy_config2));
		return;
	}

	switch (switch_cfg) {
	case SWITCH_CFG_1G:
		bp->port.phy_addr = REG_RD(bp, NIG_REG_SERDES0_CTRL_PHY_ADDR +
					   port*0x10);
		BNX2X_DEV_INFO("phy_addr 0x%x\n", bp->port.phy_addr);
		break;

	case SWITCH_CFG_10G:
		bp->port.phy_addr = REG_RD(bp, NIG_REG_XGXS0_CTRL_PHY_ADDR +
					   port*0x18);
		BNX2X_DEV_INFO("phy_addr 0x%x\n", bp->port.phy_addr);
		break;

	default:
		BNX2X_ERR("BAD switch_cfg link_config 0x%x\n",
			  bp->port.link_config[0]);
		return;
	}
	/* mask what we support according to speed_cap_mask per configuration */
	for (idx = 0; idx < cfg_size; idx++) {
		if (!(bp->link_params.speed_cap_mask[idx] &
				PORT_HW_CFG_SPEED_CAPABILITY_D0_10M_HALF))
			bp->port.supported[idx] &= ~SUPPORTED_10baseT_Half;

		if (!(bp->link_params.speed_cap_mask[idx] &
				PORT_HW_CFG_SPEED_CAPABILITY_D0_10M_FULL))
			bp->port.supported[idx] &= ~SUPPORTED_10baseT_Full;

		if (!(bp->link_params.speed_cap_mask[idx] &
				PORT_HW_CFG_SPEED_CAPABILITY_D0_100M_HALF))
			bp->port.supported[idx] &= ~SUPPORTED_100baseT_Half;

		if (!(bp->link_params.speed_cap_mask[idx] &
				PORT_HW_CFG_SPEED_CAPABILITY_D0_100M_FULL))
			bp->port.supported[idx] &= ~SUPPORTED_100baseT_Full;

		if (!(bp->link_params.speed_cap_mask[idx] &
					PORT_HW_CFG_SPEED_CAPABILITY_D0_1G))
			bp->port.supported[idx] &= ~(SUPPORTED_1000baseT_Half |
						     SUPPORTED_1000baseT_Full);

		if (!(bp->link_params.speed_cap_mask[idx] &
					PORT_HW_CFG_SPEED_CAPABILITY_D0_2_5G))
			bp->port.supported[idx] &= ~SUPPORTED_2500baseX_Full;

		if (!(bp->link_params.speed_cap_mask[idx] &
					PORT_HW_CFG_SPEED_CAPABILITY_D0_10G))
			bp->port.supported[idx] &= ~SUPPORTED_10000baseT_Full;

	}

	BNX2X_DEV_INFO("supported 0x%x 0x%x\n", bp->port.supported[0],
		       bp->port.supported[1]);
}
8005
Eilon Greenstein34f80b02008-06-23 20:33:01 -07008006static void __devinit bnx2x_link_settings_requested(struct bnx2x *bp)
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02008007{
Yaniv Rosnera22f0782010-09-07 11:41:20 +00008008 u32 link_config, idx, cfg_size = 0;
8009 bp->port.advertising[0] = 0;
8010 bp->port.advertising[1] = 0;
8011 switch (bp->link_params.num_phys) {
8012 case 1:
8013 case 2:
8014 cfg_size = 1;
8015 break;
8016 case 3:
8017 cfg_size = 2;
8018 break;
8019 }
8020 for (idx = 0; idx < cfg_size; idx++) {
8021 bp->link_params.req_duplex[idx] = DUPLEX_FULL;
8022 link_config = bp->port.link_config[idx];
8023 switch (link_config & PORT_FEATURE_LINK_SPEED_MASK) {
Dmitry Kravkovf85582f2010-10-06 03:34:21 +00008024 case PORT_FEATURE_LINK_SPEED_AUTO:
Yaniv Rosnera22f0782010-09-07 11:41:20 +00008025 if (bp->port.supported[idx] & SUPPORTED_Autoneg) {
8026 bp->link_params.req_line_speed[idx] =
8027 SPEED_AUTO_NEG;
8028 bp->port.advertising[idx] |=
8029 bp->port.supported[idx];
Dmitry Kravkovf85582f2010-10-06 03:34:21 +00008030 } else {
8031 /* force 10G, no AN */
Yaniv Rosnera22f0782010-09-07 11:41:20 +00008032 bp->link_params.req_line_speed[idx] =
8033 SPEED_10000;
8034 bp->port.advertising[idx] |=
8035 (ADVERTISED_10000baseT_Full |
Dmitry Kravkovf85582f2010-10-06 03:34:21 +00008036 ADVERTISED_FIBRE);
Yaniv Rosnera22f0782010-09-07 11:41:20 +00008037 continue;
Dmitry Kravkovf85582f2010-10-06 03:34:21 +00008038 }
8039 break;
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02008040
Dmitry Kravkovf85582f2010-10-06 03:34:21 +00008041 case PORT_FEATURE_LINK_SPEED_10M_FULL:
Yaniv Rosnera22f0782010-09-07 11:41:20 +00008042 if (bp->port.supported[idx] & SUPPORTED_10baseT_Full) {
8043 bp->link_params.req_line_speed[idx] =
8044 SPEED_10;
8045 bp->port.advertising[idx] |=
8046 (ADVERTISED_10baseT_Full |
Dmitry Kravkovf85582f2010-10-06 03:34:21 +00008047 ADVERTISED_TP);
8048 } else {
8049 BNX2X_ERROR("NVRAM config error. "
8050 "Invalid link_config 0x%x"
8051 " speed_cap_mask 0x%x\n",
8052 link_config,
Yaniv Rosnera22f0782010-09-07 11:41:20 +00008053 bp->link_params.speed_cap_mask[idx]);
Dmitry Kravkovf85582f2010-10-06 03:34:21 +00008054 return;
8055 }
8056 break;
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02008057
Dmitry Kravkovf85582f2010-10-06 03:34:21 +00008058 case PORT_FEATURE_LINK_SPEED_10M_HALF:
Yaniv Rosnera22f0782010-09-07 11:41:20 +00008059 if (bp->port.supported[idx] & SUPPORTED_10baseT_Half) {
8060 bp->link_params.req_line_speed[idx] =
8061 SPEED_10;
8062 bp->link_params.req_duplex[idx] =
8063 DUPLEX_HALF;
8064 bp->port.advertising[idx] |=
8065 (ADVERTISED_10baseT_Half |
Dmitry Kravkovf85582f2010-10-06 03:34:21 +00008066 ADVERTISED_TP);
8067 } else {
8068 BNX2X_ERROR("NVRAM config error. "
8069 "Invalid link_config 0x%x"
8070 " speed_cap_mask 0x%x\n",
8071 link_config,
8072 bp->link_params.speed_cap_mask[idx]);
8073 return;
8074 }
8075 break;
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02008076
Dmitry Kravkovf85582f2010-10-06 03:34:21 +00008077 case PORT_FEATURE_LINK_SPEED_100M_FULL:
8078 if (bp->port.supported[idx] &
8079 SUPPORTED_100baseT_Full) {
Yaniv Rosnera22f0782010-09-07 11:41:20 +00008080 bp->link_params.req_line_speed[idx] =
8081 SPEED_100;
8082 bp->port.advertising[idx] |=
8083 (ADVERTISED_100baseT_Full |
Dmitry Kravkovf85582f2010-10-06 03:34:21 +00008084 ADVERTISED_TP);
8085 } else {
8086 BNX2X_ERROR("NVRAM config error. "
8087 "Invalid link_config 0x%x"
8088 " speed_cap_mask 0x%x\n",
8089 link_config,
8090 bp->link_params.speed_cap_mask[idx]);
8091 return;
8092 }
8093 break;
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02008094
Dmitry Kravkovf85582f2010-10-06 03:34:21 +00008095 case PORT_FEATURE_LINK_SPEED_100M_HALF:
8096 if (bp->port.supported[idx] &
8097 SUPPORTED_100baseT_Half) {
8098 bp->link_params.req_line_speed[idx] =
8099 SPEED_100;
8100 bp->link_params.req_duplex[idx] =
8101 DUPLEX_HALF;
Yaniv Rosnera22f0782010-09-07 11:41:20 +00008102 bp->port.advertising[idx] |=
8103 (ADVERTISED_100baseT_Half |
Dmitry Kravkovf85582f2010-10-06 03:34:21 +00008104 ADVERTISED_TP);
8105 } else {
8106 BNX2X_ERROR("NVRAM config error. "
Vladislav Zolotarovcdaa7cb2010-04-19 01:13:57 +00008107 "Invalid link_config 0x%x"
8108 " speed_cap_mask 0x%x\n",
Yaniv Rosnera22f0782010-09-07 11:41:20 +00008109 link_config,
8110 bp->link_params.speed_cap_mask[idx]);
Dmitry Kravkovf85582f2010-10-06 03:34:21 +00008111 return;
8112 }
8113 break;
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02008114
Dmitry Kravkovf85582f2010-10-06 03:34:21 +00008115 case PORT_FEATURE_LINK_SPEED_1G:
Yaniv Rosnera22f0782010-09-07 11:41:20 +00008116 if (bp->port.supported[idx] &
8117 SUPPORTED_1000baseT_Full) {
8118 bp->link_params.req_line_speed[idx] =
8119 SPEED_1000;
8120 bp->port.advertising[idx] |=
8121 (ADVERTISED_1000baseT_Full |
Dmitry Kravkovf85582f2010-10-06 03:34:21 +00008122 ADVERTISED_TP);
8123 } else {
8124 BNX2X_ERROR("NVRAM config error. "
Vladislav Zolotarovcdaa7cb2010-04-19 01:13:57 +00008125 "Invalid link_config 0x%x"
8126 " speed_cap_mask 0x%x\n",
Yaniv Rosnera22f0782010-09-07 11:41:20 +00008127 link_config,
8128 bp->link_params.speed_cap_mask[idx]);
Dmitry Kravkovf85582f2010-10-06 03:34:21 +00008129 return;
8130 }
8131 break;
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02008132
Dmitry Kravkovf85582f2010-10-06 03:34:21 +00008133 case PORT_FEATURE_LINK_SPEED_2_5G:
Yaniv Rosnera22f0782010-09-07 11:41:20 +00008134 if (bp->port.supported[idx] &
8135 SUPPORTED_2500baseX_Full) {
8136 bp->link_params.req_line_speed[idx] =
8137 SPEED_2500;
8138 bp->port.advertising[idx] |=
8139 (ADVERTISED_2500baseX_Full |
Eilon Greenstein34f80b02008-06-23 20:33:01 -07008140 ADVERTISED_TP);
Dmitry Kravkovf85582f2010-10-06 03:34:21 +00008141 } else {
8142 BNX2X_ERROR("NVRAM config error. "
Vladislav Zolotarovcdaa7cb2010-04-19 01:13:57 +00008143 "Invalid link_config 0x%x"
8144 " speed_cap_mask 0x%x\n",
Yaniv Rosnera22f0782010-09-07 11:41:20 +00008145 link_config,
Dmitry Kravkovf85582f2010-10-06 03:34:21 +00008146 bp->link_params.speed_cap_mask[idx]);
8147 return;
8148 }
8149 break;
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02008150
Dmitry Kravkovf85582f2010-10-06 03:34:21 +00008151 case PORT_FEATURE_LINK_SPEED_10G_CX4:
8152 case PORT_FEATURE_LINK_SPEED_10G_KX4:
8153 case PORT_FEATURE_LINK_SPEED_10G_KR:
Yaniv Rosnera22f0782010-09-07 11:41:20 +00008154 if (bp->port.supported[idx] &
8155 SUPPORTED_10000baseT_Full) {
8156 bp->link_params.req_line_speed[idx] =
8157 SPEED_10000;
8158 bp->port.advertising[idx] |=
8159 (ADVERTISED_10000baseT_Full |
Eilon Greenstein34f80b02008-06-23 20:33:01 -07008160 ADVERTISED_FIBRE);
Dmitry Kravkovf85582f2010-10-06 03:34:21 +00008161 } else {
8162 BNX2X_ERROR("NVRAM config error. "
Vladislav Zolotarovcdaa7cb2010-04-19 01:13:57 +00008163 "Invalid link_config 0x%x"
8164 " speed_cap_mask 0x%x\n",
Yaniv Rosnera22f0782010-09-07 11:41:20 +00008165 link_config,
Dmitry Kravkovf85582f2010-10-06 03:34:21 +00008166 bp->link_params.speed_cap_mask[idx]);
8167 return;
8168 }
8169 break;
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02008170
Dmitry Kravkovf85582f2010-10-06 03:34:21 +00008171 default:
8172 BNX2X_ERROR("NVRAM config error. "
8173 "BAD link speed link_config 0x%x\n",
8174 link_config);
8175 bp->link_params.req_line_speed[idx] =
8176 SPEED_AUTO_NEG;
8177 bp->port.advertising[idx] =
8178 bp->port.supported[idx];
8179 break;
8180 }
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02008181
Yaniv Rosnera22f0782010-09-07 11:41:20 +00008182 bp->link_params.req_flow_ctrl[idx] = (link_config &
Eilon Greenstein34f80b02008-06-23 20:33:01 -07008183 PORT_FEATURE_FLOW_CONTROL_MASK);
Yaniv Rosnera22f0782010-09-07 11:41:20 +00008184 if ((bp->link_params.req_flow_ctrl[idx] ==
8185 BNX2X_FLOW_CTRL_AUTO) &&
8186 !(bp->port.supported[idx] & SUPPORTED_Autoneg)) {
8187 bp->link_params.req_flow_ctrl[idx] =
8188 BNX2X_FLOW_CTRL_NONE;
8189 }
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02008190
Yaniv Rosnera22f0782010-09-07 11:41:20 +00008191 BNX2X_DEV_INFO("req_line_speed %d req_duplex %d req_flow_ctrl"
8192 " 0x%x advertising 0x%x\n",
8193 bp->link_params.req_line_speed[idx],
8194 bp->link_params.req_duplex[idx],
8195 bp->link_params.req_flow_ctrl[idx],
8196 bp->port.advertising[idx]);
8197 }
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02008198}
8199
Michael Chane665bfd2009-10-10 13:46:54 +00008200static void __devinit bnx2x_set_mac_buf(u8 *mac_buf, u32 mac_lo, u16 mac_hi)
8201{
8202 mac_hi = cpu_to_be16(mac_hi);
8203 mac_lo = cpu_to_be32(mac_lo);
8204 memcpy(mac_buf, &mac_hi, sizeof(mac_hi));
8205 memcpy(mac_buf + sizeof(mac_hi), &mac_lo, sizeof(mac_lo));
8206}
8207
/* bnx2x_get_port_hwinfo - read per-port link configuration from shared memory.
 *
 * Populates bp->link_params (lane config, speed capability masks for both
 * phys, multi-phy config) and bp->port.link_config[], derives the WoL
 * default, probes the PHYs and resolves the requested link settings.
 * Finally determines the MDIO PHY address and whether a HW lock is needed
 * for MDC/MDIO access.
 */
static void __devinit bnx2x_get_port_hwinfo(struct bnx2x *bp)
{
	int port = BP_PORT(bp);
	u32 config;
	u32 ext_phy_type, ext_phy_config;

	bp->link_params.bp = bp;
	bp->link_params.port = port;

	bp->link_params.lane_config =
		SHMEM_RD(bp, dev_info.port_hw_config[port].lane_config);

	/* index 0/1 correspond to the two possible phys on this port */
	bp->link_params.speed_cap_mask[0] =
		SHMEM_RD(bp,
			 dev_info.port_hw_config[port].speed_capability_mask);
	bp->link_params.speed_cap_mask[1] =
		SHMEM_RD(bp,
			 dev_info.port_hw_config[port].speed_capability_mask2);
	bp->port.link_config[0] =
		SHMEM_RD(bp, dev_info.port_feature_config[port].link_config);

	bp->port.link_config[1] =
		SHMEM_RD(bp, dev_info.port_feature_config[port].link_config2);

	bp->link_params.multi_phy_config =
		SHMEM_RD(bp, dev_info.port_hw_config[port].multi_phy_config);
	/* If the device is capable of WoL, set the default state according
	 * to the HW
	 */
	config = SHMEM_RD(bp, dev_info.port_feature_config[port].config);
	bp->wol = (!(bp->flags & NO_WOL_FLAG) &&
		   (config & PORT_FEATURE_WOL_ENABLED));

	BNX2X_DEV_INFO("lane_config 0x%08x "
		       "speed_cap_mask0 0x%08x link_config0 0x%08x\n",
		       bp->link_params.lane_config,
		       bp->link_params.speed_cap_mask[0],
		       bp->port.link_config[0]);

	bp->link_params.switch_cfg = (bp->port.link_config[0] &
				      PORT_FEATURE_CONNECTED_SWITCH_MASK);
	/* must probe the phys before querying supported/requested settings */
	bnx2x_phy_probe(&bp->link_params);
	bnx2x_link_settings_supported(bp, bp->link_params.switch_cfg);

	bnx2x_link_settings_requested(bp);

	/*
	 * If connected directly, work with the internal PHY, otherwise, work
	 * with the external PHY
	 */
	ext_phy_config =
		SHMEM_RD(bp,
			 dev_info.port_hw_config[port].external_phy_config);
	ext_phy_type = XGXS_EXT_PHY_TYPE(ext_phy_config);
	if (ext_phy_type == PORT_HW_CFG_XGXS_EXT_PHY_TYPE_DIRECT)
		bp->mdio.prtad = bp->port.phy_addr;

	else if ((ext_phy_type != PORT_HW_CFG_XGXS_EXT_PHY_TYPE_FAILURE) &&
		 (ext_phy_type != PORT_HW_CFG_XGXS_EXT_PHY_TYPE_NOT_CONN))
		bp->mdio.prtad =
			XGXS_EXT_PHY_ADDR(ext_phy_config);

	/*
	 * Check if hw lock is required to access MDC/MDIO bus to the PHY(s)
	 * In MF mode, it is set to cover self test cases
	 */
	if (IS_MF(bp))
		bp->port.need_hw_lock = 1;
	else
		bp->port.need_hw_lock = bnx2x_hw_lock_required(bp,
					bp->common.shmem_base,
					bp->common.shmem2_base);
}
Eilon Greenstein01cd4522009-08-12 08:23:08 +00008281
#ifdef BCM_CNIC
/* bnx2x_get_cnic_info - read the licensed iSCSI/FCoE connection limits.
 *
 * The license values in shmem are XOR-obfuscated with
 * FW_ENCODE_32BIT_PATTERN; a decoded limit of zero means the feature is
 * not licensed and the corresponding flag(s) are set to disable it.
 */
static void __devinit bnx2x_get_cnic_info(struct bnx2x *bp)
{
	u32 max_iscsi_conn = FW_ENCODE_32BIT_PATTERN ^ SHMEM_RD(bp,
				drv_lic_key[BP_PORT(bp)].max_iscsi_conn);
	u32 max_fcoe_conn = FW_ENCODE_32BIT_PATTERN ^ SHMEM_RD(bp,
				drv_lic_key[BP_PORT(bp)].max_fcoe_conn);

	/* Get the number of maximum allowed iSCSI and FCoE connections */
	bp->cnic_eth_dev.max_iscsi_conn =
		(max_iscsi_conn & BNX2X_MAX_ISCSI_INIT_CONN_MASK) >>
		BNX2X_MAX_ISCSI_INIT_CONN_SHIFT;

	bp->cnic_eth_dev.max_fcoe_conn =
		(max_fcoe_conn & BNX2X_MAX_FCOE_INIT_CONN_MASK) >>
		BNX2X_MAX_FCOE_INIT_CONN_SHIFT;

	BNX2X_DEV_INFO("max_iscsi_conn 0x%x max_fcoe_conn 0x%x\n",
		       bp->cnic_eth_dev.max_iscsi_conn,
		       bp->cnic_eth_dev.max_fcoe_conn);

	/* If maximum allowed number of connections is zero -
	 * disable the feature.
	 */
	if (!bp->cnic_eth_dev.max_iscsi_conn)
		bp->flags |= NO_ISCSI_OOO_FLAG | NO_ISCSI_FLAG;

	if (!bp->cnic_eth_dev.max_fcoe_conn)
		bp->flags |= NO_FCOE_FLAG;
}
#endif
8313
Dmitry Kravkov0793f83f2010-12-01 12:39:28 -08008314static void __devinit bnx2x_get_mac_hwinfo(struct bnx2x *bp)
8315{
8316 u32 val, val2;
8317 int func = BP_ABS_FUNC(bp);
8318 int port = BP_PORT(bp);
Vladislav Zolotarov2ba45142011-01-31 14:39:17 +00008319#ifdef BCM_CNIC
8320 u8 *iscsi_mac = bp->cnic_eth_dev.iscsi_mac;
8321 u8 *fip_mac = bp->fip_mac;
8322#endif
Dmitry Kravkov0793f83f2010-12-01 12:39:28 -08008323
8324 if (BP_NOMCP(bp)) {
8325 BNX2X_ERROR("warning: random MAC workaround active\n");
8326 random_ether_addr(bp->dev->dev_addr);
8327 } else if (IS_MF(bp)) {
8328 val2 = MF_CFG_RD(bp, func_mf_config[func].mac_upper);
8329 val = MF_CFG_RD(bp, func_mf_config[func].mac_lower);
8330 if ((val2 != FUNC_MF_CFG_UPPERMAC_DEFAULT) &&
8331 (val != FUNC_MF_CFG_LOWERMAC_DEFAULT))
8332 bnx2x_set_mac_buf(bp->dev->dev_addr, val, val2);
8333
8334#ifdef BCM_CNIC
Vladislav Zolotarov2ba45142011-01-31 14:39:17 +00008335 /* iSCSI and FCoE NPAR MACs: if there is no either iSCSI or
8336 * FCoE MAC then the appropriate feature should be disabled.
8337 */
Dmitry Kravkov0793f83f2010-12-01 12:39:28 -08008338 if (IS_MF_SI(bp)) {
8339 u32 cfg = MF_CFG_RD(bp, func_ext_config[func].func_cfg);
8340 if (cfg & MACP_FUNC_CFG_FLAGS_ISCSI_OFFLOAD) {
8341 val2 = MF_CFG_RD(bp, func_ext_config[func].
8342 iscsi_mac_addr_upper);
8343 val = MF_CFG_RD(bp, func_ext_config[func].
8344 iscsi_mac_addr_lower);
Vladislav Zolotarov2ba45142011-01-31 14:39:17 +00008345 BNX2X_DEV_INFO("Read iSCSI MAC: "
8346 "0x%x:0x%04x\n", val2, val);
8347 bnx2x_set_mac_buf(iscsi_mac, val, val2);
Vladislav Zolotarov2ba45142011-01-31 14:39:17 +00008348 } else
8349 bp->flags |= NO_ISCSI_OOO_FLAG | NO_ISCSI_FLAG;
8350
8351 if (cfg & MACP_FUNC_CFG_FLAGS_FCOE_OFFLOAD) {
8352 val2 = MF_CFG_RD(bp, func_ext_config[func].
8353 fcoe_mac_addr_upper);
8354 val = MF_CFG_RD(bp, func_ext_config[func].
8355 fcoe_mac_addr_lower);
8356 BNX2X_DEV_INFO("Read FCoE MAC to "
8357 "0x%x:0x%04x\n", val2, val);
8358 bnx2x_set_mac_buf(fip_mac, val, val2);
8359
Vladislav Zolotarov2ba45142011-01-31 14:39:17 +00008360 } else
8361 bp->flags |= NO_FCOE_FLAG;
Dmitry Kravkov0793f83f2010-12-01 12:39:28 -08008362 }
8363#endif
8364 } else {
8365 /* in SF read MACs from port configuration */
8366 val2 = SHMEM_RD(bp, dev_info.port_hw_config[port].mac_upper);
8367 val = SHMEM_RD(bp, dev_info.port_hw_config[port].mac_lower);
8368 bnx2x_set_mac_buf(bp->dev->dev_addr, val, val2);
8369
8370#ifdef BCM_CNIC
8371 val2 = SHMEM_RD(bp, dev_info.port_hw_config[port].
8372 iscsi_mac_upper);
8373 val = SHMEM_RD(bp, dev_info.port_hw_config[port].
8374 iscsi_mac_lower);
Vladislav Zolotarov2ba45142011-01-31 14:39:17 +00008375 bnx2x_set_mac_buf(iscsi_mac, val, val2);
Dmitry Kravkov0793f83f2010-12-01 12:39:28 -08008376#endif
8377 }
8378
Yaniv Rosnerc18487e2008-06-23 20:27:52 -07008379 memcpy(bp->link_params.mac_addr, bp->dev->dev_addr, ETH_ALEN);
8380 memcpy(bp->dev->perm_addr, bp->dev->dev_addr, ETH_ALEN);
Michael Chan37b091b2009-10-10 13:46:55 +00008381
Vladislav Zolotarovec6ba942010-12-13 05:44:01 +00008382#ifdef BCM_CNIC
Vladislav Zolotarov2ba45142011-01-31 14:39:17 +00008383 /* Set the FCoE MAC in modes other then MF_SI */
Vladislav Zolotarovec6ba942010-12-13 05:44:01 +00008384 if (!CHIP_IS_E1x(bp)) {
8385 if (IS_MF_SD(bp))
Vladislav Zolotarov2ba45142011-01-31 14:39:17 +00008386 memcpy(fip_mac, bp->dev->dev_addr, ETH_ALEN);
8387 else if (!IS_MF(bp))
8388 memcpy(fip_mac, iscsi_mac, ETH_ALEN);
Vladislav Zolotarovec6ba942010-12-13 05:44:01 +00008389 }
Dmitry Kravkov426b9242011-05-04 23:49:53 +00008390
8391 /* Disable iSCSI if MAC configuration is
8392 * invalid.
8393 */
8394 if (!is_valid_ether_addr(iscsi_mac)) {
8395 bp->flags |= NO_ISCSI_FLAG;
8396 memset(iscsi_mac, 0, ETH_ALEN);
8397 }
8398
8399 /* Disable FCoE if MAC configuration is
8400 * invalid.
8401 */
8402 if (!is_valid_ether_addr(fip_mac)) {
8403 bp->flags |= NO_FCOE_FLAG;
8404 memset(bp->fip_mac, 0, ETH_ALEN);
8405 }
Vladislav Zolotarovec6ba942010-12-13 05:44:01 +00008406#endif
Eilon Greenstein34f80b02008-06-23 20:33:01 -07008407}
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02008408
/* bnx2x_get_hwinfo - top-level HW/NVRAM probe during device init.
 *
 * Reads the common chip info, sets up the interrupt block (HC on E1x,
 * IGU otherwise), parses the multi-function (MF) configuration from
 * shared memory, adjusts the status-block count for MF/E2 limits, and
 * pulls in the port, MAC and (optionally) CNIC information.
 *
 * Returns 0 on success or -EPERM when the MF configuration is invalid
 * (bad OV tag in SD mode, or a non-zero VN in single-function mode).
 */
static int __devinit bnx2x_get_hwinfo(struct bnx2x *bp)
{
	int /*abs*/func = BP_ABS_FUNC(bp);
	int vn;
	u32 val = 0;
	int rc = 0;

	bnx2x_get_common_hwinfo(bp);

	if (CHIP_IS_E1x(bp)) {
		/* E1x chips use the HC interrupt block */
		bp->common.int_block = INT_BLOCK_HC;

		bp->igu_dsb_id = DEF_SB_IGU_ID;
		bp->igu_base_sb = 0;
		bp->igu_sb_cnt = min_t(u8, FP_SB_MAX_E1x,
				       NUM_IGU_SB_REQUIRED(bp->l2_cid_count));
	} else {
		bp->common.int_block = INT_BLOCK_IGU;
		val = REG_RD(bp, IGU_REG_BLOCK_CONFIGURATION);
		if (val & IGU_BLOCK_CONFIGURATION_REG_BACKWARD_COMP_EN) {
			DP(NETIF_MSG_PROBE, "IGU Backward Compatible Mode\n");
			bp->common.int_block |= INT_BLOCK_MODE_BW_COMP;
		} else
			DP(NETIF_MSG_PROBE, "IGU Normal Mode\n");

		bnx2x_get_igu_cam_info(bp);

	}
	DP(NETIF_MSG_PROBE, "igu_dsb_id %d igu_base_sb %d igu_sb_cnt %d\n",
	   bp->igu_dsb_id, bp->igu_base_sb, bp->igu_sb_cnt);

	/*
	 * Initialize MF configuration
	 */

	bp->mf_ov = 0;
	bp->mf_mode = 0;
	vn = BP_E1HVN(bp);

	if (!CHIP_IS_E1(bp) && !BP_NOMCP(bp)) {
		DP(NETIF_MSG_PROBE,
		   "shmem2base 0x%x, size %d, mfcfg offset %d\n",
		   bp->common.shmem2_base, SHMEM2_RD(bp, size),
		   (u32)offsetof(struct shmem2_region, mf_cfg_addr));
		/* locate the MF config: shmem2 may publish its address,
		 * otherwise it lives at a fixed offset past the func
		 * mailboxes in shmem
		 */
		if (SHMEM2_HAS(bp, mf_cfg_addr))
			bp->common.mf_cfg_base = SHMEM2_RD(bp, mf_cfg_addr);
		else
			bp->common.mf_cfg_base = bp->common.shmem_base +
				offsetof(struct shmem_region, func_mb) +
				E1H_FUNC_MAX * sizeof(struct drv_func_mb);
		/*
		 * get mf configuration:
		 * 1. existence of MF configuration
		 * 2. MAC address must be legal (check only upper bytes)
		 *    for Switch-Independent mode;
		 *    OVLAN must be legal for Switch-Dependent mode
		 * 3. SF_MODE configures specific MF mode
		 */
		if (bp->common.mf_cfg_base != SHMEM_MF_CFG_ADDR_NONE) {
			/* get mf configuration */
			val = SHMEM_RD(bp,
				       dev_info.shared_feature_config.config);
			val &= SHARED_FEAT_CFG_FORCE_SF_MODE_MASK;

			switch (val) {
			case SHARED_FEAT_CFG_FORCE_SF_MODE_SWITCH_INDEPT:
				val = MF_CFG_RD(bp, func_mf_config[func].
						mac_upper);
				/* check for legal mac (upper bytes)*/
				if (val != 0xffff) {
					bp->mf_mode = MULTI_FUNCTION_SI;
					bp->mf_config[vn] = MF_CFG_RD(bp,
						   func_mf_config[func].config);
				} else
					DP(NETIF_MSG_PROBE, "illegal MAC "
						"address for SI\n");
				break;
			case SHARED_FEAT_CFG_FORCE_SF_MODE_MF_ALLOWED:
				/* get OV configuration */
				val = MF_CFG_RD(bp,
					func_mf_config[FUNC_0].e1hov_tag);
				val &= FUNC_MF_CFG_E1HOV_TAG_MASK;

				if (val != FUNC_MF_CFG_E1HOV_TAG_DEFAULT) {
					bp->mf_mode = MULTI_FUNCTION_SD;
					bp->mf_config[vn] = MF_CFG_RD(bp,
						func_mf_config[func].config);
				} else
					DP(NETIF_MSG_PROBE, "illegal OV for "
						"SD\n");
				break;
			default:
				/* Unknown configuration: reset mf_config */
				bp->mf_config[vn] = 0;
				DP(NETIF_MSG_PROBE, "Unknown MF mode 0x%x\n",
				   val);
			}
		}

		BNX2X_DEV_INFO("%s function mode\n",
			       IS_MF(bp) ? "multi" : "single");

		/* validate the detected MF mode and extract the OV tag */
		switch (bp->mf_mode) {
		case MULTI_FUNCTION_SD:
			val = MF_CFG_RD(bp, func_mf_config[func].e1hov_tag) &
			      FUNC_MF_CFG_E1HOV_TAG_MASK;
			if (val != FUNC_MF_CFG_E1HOV_TAG_DEFAULT) {
				bp->mf_ov = val;
				BNX2X_DEV_INFO("MF OV for func %d is %d"
					       " (0x%04x)\n", func,
					       bp->mf_ov, bp->mf_ov);
			} else {
				BNX2X_ERR("No valid MF OV for func %d,"
					  " aborting\n", func);
				rc = -EPERM;
			}
			break;
		case MULTI_FUNCTION_SI:
			BNX2X_DEV_INFO("func %d is in MF "
				       "switch-independent mode\n", func);
			break;
		default:
			if (vn) {
				BNX2X_ERR("VN %d in single function mode,"
					  " aborting\n", vn);
				rc = -EPERM;
			}
			break;
		}

	}

	/* adjust igu_sb_cnt to MF for E1x */
	if (CHIP_IS_E1x(bp) && IS_MF(bp))
		bp->igu_sb_cnt /= E1HVN_MAX;

	/*
	 * adjust E2 sb count: to be removed when FW will support
	 * more then 16 L2 clients
	 */
#define MAX_L2_CLIENTS 16
	if (CHIP_IS_E2(bp))
		bp->igu_sb_cnt = min_t(u8, bp->igu_sb_cnt,
				       MAX_L2_CLIENTS / (IS_MF(bp) ? 4 : 1));

	if (!BP_NOMCP(bp)) {
		bnx2x_get_port_hwinfo(bp);

		/* sync the driver/MCP mailbox sequence number */
		bp->fw_seq =
			(SHMEM_RD(bp, func_mb[BP_FW_MB_IDX(bp)].drv_mb_header) &
			 DRV_MSG_SEQ_NUMBER_MASK);
		BNX2X_DEV_INFO("fw_seq 0x%08x\n", bp->fw_seq);
	}

	/* Get MAC addresses */
	bnx2x_get_mac_hwinfo(bp);

#ifdef BCM_CNIC
	bnx2x_get_cnic_info(bp);
#endif

	return rc;
}
8572
/* bnx2x_read_fwinfo - extract the OEM firmware version string from PCI VPD.
 *
 * Reads the VPD, locates the read-only data tag, and - only when the
 * manufacturer ID matches Dell - copies the VENDOR0 keyword value into
 * bp->fw_ver. bp->fw_ver is always cleared first; on any parse failure
 * the function simply returns, leaving it empty (best-effort).
 */
static void __devinit bnx2x_read_fwinfo(struct bnx2x *bp)
{
	int cnt, i, block_end, rodi;
	char vpd_data[BNX2X_VPD_LEN+1];
	char str_id_reg[VENDOR_ID_LEN+1];
	char str_id_cap[VENDOR_ID_LEN+1];
	u8 len;

	cnt = pci_read_vpd(bp->pdev, 0, BNX2X_VPD_LEN, vpd_data);
	memset(bp->fw_ver, 0, sizeof(bp->fw_ver));

	/* a short or failed (negative) read means no usable VPD */
	if (cnt < BNX2X_VPD_LEN)
		goto out_not_found;

	i = pci_vpd_find_tag(vpd_data, 0, BNX2X_VPD_LEN,
			     PCI_VPD_LRDT_RO_DATA);
	if (i < 0)
		goto out_not_found;


	block_end = i + PCI_VPD_LRDT_TAG_SIZE +
		    pci_vpd_lrdt_size(&vpd_data[i]);

	i += PCI_VPD_LRDT_TAG_SIZE;

	if (block_end > BNX2X_VPD_LEN)
		goto out_not_found;

	rodi = pci_vpd_find_info_keyword(vpd_data, i, block_end,
				   PCI_VPD_RO_KEYWORD_MFR_ID);
	if (rodi < 0)
		goto out_not_found;

	len = pci_vpd_info_field_size(&vpd_data[rodi]);

	if (len != VENDOR_ID_LEN)
		goto out_not_found;

	rodi += PCI_VPD_INFO_FLD_HDR_SIZE;

	/* vendor specific info */
	snprintf(str_id_reg, VENDOR_ID_LEN + 1, "%04x", PCI_VENDOR_ID_DELL);
	snprintf(str_id_cap, VENDOR_ID_LEN + 1, "%04X", PCI_VENDOR_ID_DELL);
	/* the MFR ID may be stored in either lower or upper hex case */
	if (!strncmp(str_id_reg, &vpd_data[rodi], VENDOR_ID_LEN) ||
	    !strncmp(str_id_cap, &vpd_data[rodi], VENDOR_ID_LEN)) {

		rodi = pci_vpd_find_info_keyword(vpd_data, i, block_end,
						PCI_VPD_RO_KEYWORD_VENDOR0);
		if (rodi >= 0) {
			len = pci_vpd_info_field_size(&vpd_data[rodi]);

			rodi += PCI_VPD_INFO_FLD_HDR_SIZE;

			/* bound the copy to both the VPD buffer and fw_ver */
			if (len < 32 && (len + rodi) <= BNX2X_VPD_LEN) {
				memcpy(bp->fw_ver, &vpd_data[rodi], len);
				bp->fw_ver[len] = ' ';
			}
		}
		return;
	}
out_not_found:
	return;
}
8636
/* bnx2x_init_bp - one-time driver-state initialization at probe.
 *
 * Initializes locks and deferred work, reads HW/NVRAM info, allocates
 * the per-bp memory, resets the chip if UNDI was active, and seeds the
 * module-parameter-driven defaults (multi_mode, TPA, dropless FC, MRRS,
 * coalescing ticks, timer, DCBX).
 *
 * Returns 0 on success or the error from bnx2x_get_hwinfo() /
 * bnx2x_alloc_mem_bp().
 */
static int __devinit bnx2x_init_bp(struct bnx2x *bp)
{
	int func;
	int timer_interval;
	int rc;

	mutex_init(&bp->port.phy_mutex);
	mutex_init(&bp->fw_mb_mutex);
	spin_lock_init(&bp->stats_lock);
#ifdef BCM_CNIC
	mutex_init(&bp->cnic_mutex);
#endif

	INIT_DELAYED_WORK(&bp->sp_task, bnx2x_sp_task);
	INIT_DELAYED_WORK(&bp->reset_task, bnx2x_reset_task);

	rc = bnx2x_get_hwinfo(bp);

	/* allocate bp-level memory only when hwinfo succeeded */
	if (!rc)
		rc = bnx2x_alloc_mem_bp(bp);

	bnx2x_read_fwinfo(bp);

	func = BP_FUNC(bp);

	/* need to reset chip if undi was active */
	if (!BP_NOMCP(bp))
		bnx2x_undi_unload(bp);

	if (CHIP_REV_IS_FPGA(bp))
		dev_err(&bp->pdev->dev, "FPGA detected\n");

	if (BP_NOMCP(bp) && (func == 0))
		dev_err(&bp->pdev->dev, "MCP disabled, "
					"must load devices in order!\n");

	bp->multi_mode = multi_mode;

	/* Set TPA flags */
	if (disable_tpa) {
		bp->flags &= ~TPA_ENABLE_FLAG;
		bp->dev->features &= ~NETIF_F_LRO;
	} else {
		bp->flags |= TPA_ENABLE_FLAG;
		bp->dev->features |= NETIF_F_LRO;
	}
	bp->disable_tpa = disable_tpa;

	/* dropless flow control is not supported on E1 */
	if (CHIP_IS_E1(bp))
		bp->dropless_fc = 0;
	else
		bp->dropless_fc = dropless_fc;

	bp->mrrs = mrrs;

	bp->tx_ring_size = MAX_TX_AVAIL;

	/* make sure that the numbers are in the right granularity */
	bp->tx_ticks = (50 / BNX2X_BTR) * BNX2X_BTR;
	bp->rx_ticks = (25 / BNX2X_BTR) * BNX2X_BTR;

	timer_interval = (CHIP_REV_IS_SLOW(bp) ? 5*HZ : HZ);
	bp->current_interval = (poll ? poll : timer_interval);

	init_timer(&bp->timer);
	bp->timer.expires = jiffies + bp->current_interval;
	bp->timer.data = (unsigned long) bp;
	bp->timer.function = bnx2x_timer;

	bnx2x_dcbx_set_state(bp, true, BNX2X_DCBX_ENABLED_ON_NEG_ON);
	bnx2x_dcbx_init_params(bp);

	return rc;
}
8711
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02008712
Dmitry Kravkovde0c62d2010-07-27 12:35:24 +00008713/****************************************************************************
8714* General service functions
8715****************************************************************************/
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02008716
/* called with rtnl_lock */
/* ndo_open handler: power the device up and load the NIC.
 *
 * If a global HW reset ("process kill") is still pending, the first
 * function to load attempts the leader-reset recovery itself; otherwise
 * open fails with -EAGAIN until recovery completes.
 */
static int bnx2x_open(struct net_device *dev)
{
	struct bnx2x *bp = netdev_priv(dev);

	/* no carrier until the link is actually up */
	netif_carrier_off(dev);

	bnx2x_set_power_state(bp, PCI_D0);

	if (!bnx2x_reset_is_done(bp)) {
		/* do/while(0) gives the success path a 'break' out of the
		 * recovery attempt
		 */
		do {
			/* Reset MCP mail box sequence if there is on going
			 * recovery
			 */
			bp->fw_seq = 0;

			/* If it's the first function to load and reset done
			 * is still not cleared it may mean that. We don't
			 * check the attention state here because it may have
			 * already been cleared by a "common" reset but we
			 * shell proceed with "process kill" anyway.
			 */
			if ((bnx2x_get_load_cnt(bp) == 0) &&
				bnx2x_trylock_hw_lock(bp,
				HW_LOCK_RESOURCE_RESERVED_08) &&
				(!bnx2x_leader_reset(bp))) {
				DP(NETIF_MSG_HW, "Recovered in open\n");
				break;
			}

			/* recovery failed - power back down and bail out */
			bnx2x_set_power_state(bp, PCI_D3hot);

			printk(KERN_ERR"%s: Recovery flow hasn't been properly"
			" completed yet. Try again later. If u still see this"
			" message after a few retries then power cycle is"
			" required.\n", bp->dev->name);

			return -EAGAIN;
		} while (0);
	}

	bp->recovery_state = BNX2X_RECOVERY_DONE;

	return bnx2x_nic_load(bp, LOAD_OPEN);
}
8762
/* called with rtnl_lock */
/* ndo_stop handler: unload the NIC (releasing IRQs) and drop to D3hot. */
static int bnx2x_close(struct net_device *dev)
{
	struct bnx2x *bp = netdev_priv(dev);

	/* Unload the driver, release IRQs */
	bnx2x_nic_unload(bp, UNLOAD_CLOSE);
	bnx2x_set_power_state(bp, PCI_D3hot);

	return 0;
}
8774
Vladislav Zolotarov6e30dd42011-02-06 11:25:41 -08008775#define E1_MAX_UC_LIST 29
8776#define E1H_MAX_UC_LIST 30
8777#define E2_MAX_UC_LIST 14
8778static inline u8 bnx2x_max_uc_list(struct bnx2x *bp)
8779{
8780 if (CHIP_IS_E1(bp))
8781 return E1_MAX_UC_LIST;
8782 else if (CHIP_IS_E1H(bp))
8783 return E1H_MAX_UC_LIST;
8784 else
8785 return E2_MAX_UC_LIST;
8786}
8787
8788
8789static inline u8 bnx2x_uc_list_cam_offset(struct bnx2x *bp)
8790{
8791 if (CHIP_IS_E1(bp))
8792 /* CAM Entries for Port0:
8793 * 0 - prim ETH MAC
8794 * 1 - BCAST MAC
8795 * 2 - iSCSI L2 ring ETH MAC
8796 * 3-31 - UC MACs
8797 *
8798 * Port1 entries are allocated the same way starting from
8799 * entry 32.
8800 */
8801 return 3 + 32 * BP_PORT(bp);
8802 else if (CHIP_IS_E1H(bp)) {
8803 /* CAM Entries:
8804 * 0-7 - prim ETH MAC for each function
8805 * 8-15 - iSCSI L2 ring ETH MAC for each function
8806 * 16 till 255 UC MAC lists for each function
8807 *
8808 * Remark: There is no FCoE support for E1H, thus FCoE related
8809 * MACs are not considered.
8810 */
8811 return E1H_FUNC_MAX * (CAM_ISCSI_ETH_LINE + 1) +
8812 bnx2x_max_uc_list(bp) * BP_FUNC(bp);
8813 } else {
8814 /* CAM Entries (there is a separate CAM per engine):
8815 * 0-4 - prim ETH MAC for each function
8816 * 4-7 - iSCSI L2 ring ETH MAC for each function
8817 * 8-11 - FIP ucast L2 MAC for each function
8818 * 12-15 - ALL_ENODE_MACS mcast MAC for each function
8819 * 16 till 71 UC MAC lists for each function
8820 */
8821 u8 func_idx =
8822 (CHIP_MODE_IS_4_PORT(bp) ? BP_FUNC(bp) : BP_VN(bp));
8823
8824 return E2_FUNC_MAX * (CAM_MAX_PF_LINE + 1) +
8825 bnx2x_max_uc_list(bp) * func_idx;
8826 }
8827}
8828
/* set uc list, do not wait as wait implies sleep and
 * set_rx_mode can be invoked from non-sleepable context.
 *
 * Instead we use the same ramrod data buffer each time we need
 * to configure a list of addresses, and use the fact that the
 * list of MACs is changed in an incremental way and that the
 * function is called under the netif_addr_lock. A temporary
 * inconsistent CAM configuration (possible in case of very fast
 * sequence of add/del/add on the host side) will shortly be
 * restored by the handler of the last ramrod.
 *
 * Returns 0 / ramrod-post status, or -EINVAL when the device's UC list
 * exceeds the per-chip CAM capacity.
 */
static int bnx2x_set_uc_list(struct bnx2x *bp)
{
	int i = 0, old;
	struct net_device *dev = bp->dev;
	u8 offset = bnx2x_uc_list_cam_offset(bp);
	struct netdev_hw_addr *ha;
	struct mac_configuration_cmd *config_cmd = bnx2x_sp(bp, uc_mac_config);
	dma_addr_t config_cmd_map = bnx2x_sp_mapping(bp, uc_mac_config);

	if (netdev_uc_count(dev) > bnx2x_max_uc_list(bp))
		return -EINVAL;

	netdev_for_each_uc_addr(ha, dev) {
		/* copy mac - the FW expects each 16-bit word byte-swapped */
		config_cmd->config_table[i].msb_mac_addr =
			swab16(*(u16 *)&bnx2x_uc_addr(ha)[0]);
		config_cmd->config_table[i].middle_mac_addr =
			swab16(*(u16 *)&bnx2x_uc_addr(ha)[2]);
		config_cmd->config_table[i].lsb_mac_addr =
			swab16(*(u16 *)&bnx2x_uc_addr(ha)[4]);

		config_cmd->config_table[i].vlan_id = 0;
		config_cmd->config_table[i].pf_id = BP_FUNC(bp);
		config_cmd->config_table[i].clients_bit_vector =
			cpu_to_le32(1 << BP_L_ID(bp));

		SET_FLAG(config_cmd->config_table[i].flags,
			 MAC_CONFIGURATION_ENTRY_ACTION_TYPE,
			 T_ETH_MAC_COMMAND_SET);

		DP(NETIF_MSG_IFUP,
		   "setting UCAST[%d] (%04x:%04x:%04x)\n", i,
		   config_cmd->config_table[i].msb_mac_addr,
		   config_cmd->config_table[i].middle_mac_addr,
		   config_cmd->config_table[i].lsb_mac_addr);

		i++;

		/* Set uc MAC in NIG */
		bnx2x_set_mac_in_nig(bp, 1, bnx2x_uc_addr(ha),
				     LLH_CAM_ETH_LINE + i);
	}
	/* invalidate any trailing entries left over from a previously
	 * longer list
	 */
	old = config_cmd->hdr.length;
	if (old > i) {
		for (; i < old; i++) {
			if (CAM_IS_INVALID(config_cmd->
					   config_table[i])) {
				/* already invalidated */
				break;
			}
			/* invalidate */
			SET_FLAG(config_cmd->config_table[i].flags,
				 MAC_CONFIGURATION_ENTRY_ACTION_TYPE,
				 T_ETH_MAC_COMMAND_INVALIDATE);
		}
	}

	/* ensure the table is visible to HW before the header is updated */
	wmb();

	config_cmd->hdr.length = i;
	config_cmd->hdr.offset = offset;
	config_cmd->hdr.client_id = 0xff;
	/* Mark that this ramrod doesn't use bp->set_mac_pending for
	 * synchronization.
	 */
	config_cmd->hdr.echo = 0;

	mb();

	return bnx2x_sp_post(bp, RAMROD_CMD_ID_COMMON_SET_MAC, 0,
			U64_HI(config_cmd_map), U64_LO(config_cmd_map), 1);

}
8913
/* Invalidate every unicast CAM entry of this function (and the matching
 * NIG LLH lines) and wait for the SET_MAC ramrod to complete.
 */
void bnx2x_invalidate_uc_list(struct bnx2x *bp)
{
	int i;
	struct mac_configuration_cmd *config_cmd = bnx2x_sp(bp, uc_mac_config);
	dma_addr_t config_cmd_map = bnx2x_sp_mapping(bp, uc_mac_config);
	int ramrod_flags = WAIT_RAMROD_COMMON;
	u8 offset = bnx2x_uc_list_cam_offset(bp);
	u8 max_list_size = bnx2x_max_uc_list(bp);

	/* Mark every possible entry invalid; NIG lines start at
	 * LLH_CAM_ETH_LINE + 1, mirroring bnx2x_set_uc_list() */
	for (i = 0; i < max_list_size; i++) {
		SET_FLAG(config_cmd->config_table[i].flags,
			MAC_CONFIGURATION_ENTRY_ACTION_TYPE,
			T_ETH_MAC_COMMAND_INVALIDATE);
		bnx2x_set_mac_in_nig(bp, 0, NULL, LLH_CAM_ETH_LINE + 1 + i);
	}

	/* Table entries must be visible before the header */
	wmb();

	config_cmd->hdr.length = max_list_size;
	config_cmd->hdr.offset = offset;
	config_cmd->hdr.client_id = 0xff;
	/* We'll wait for a completion this time... */
	config_cmd->hdr.echo = 1;

	bp->set_mac_pending = 1;

	mb();

	bnx2x_sp_post(bp, RAMROD_CMD_ID_COMMON_SET_MAC, 0,
		      U64_HI(config_cmd_map), U64_LO(config_cmd_map), 1);

	/* Wait for a completion */
	bnx2x_wait_ramrod(bp, 0, 0, &bp->set_mac_pending,
				ramrod_flags);

}
8950
/* Program the multicast list using the method this chip family expects. */
static inline int bnx2x_set_mc_list(struct bnx2x *bp)
{
	/* some multicasts: E1 takes one path, E1H and newer take the other */
	return CHIP_IS_E1(bp) ? bnx2x_set_e1_mc_list(bp) :
				bnx2x_set_e1h_mc_list(bp);
}
8960
Eilon Greensteinf5372252009-02-12 08:38:30 +00008961/* called with netif_tx_lock from dev_mcast.c */
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00008962void bnx2x_set_rx_mode(struct net_device *dev)
Eilon Greenstein34f80b02008-06-23 20:33:01 -07008963{
8964 struct bnx2x *bp = netdev_priv(dev);
8965 u32 rx_mode = BNX2X_RX_MODE_NORMAL;
Eilon Greenstein34f80b02008-06-23 20:33:01 -07008966
8967 if (bp->state != BNX2X_STATE_OPEN) {
8968 DP(NETIF_MSG_IFUP, "state is %x, returning\n", bp->state);
8969 return;
8970 }
8971
8972 DP(NETIF_MSG_IFUP, "dev->flags = %x\n", dev->flags);
8973
8974 if (dev->flags & IFF_PROMISC)
8975 rx_mode = BNX2X_RX_MODE_PROMISC;
Vladislav Zolotarov6e30dd42011-02-06 11:25:41 -08008976 else if (dev->flags & IFF_ALLMULTI)
Eilon Greenstein34f80b02008-06-23 20:33:01 -07008977 rx_mode = BNX2X_RX_MODE_ALLMULTI;
Vladislav Zolotarov6e30dd42011-02-06 11:25:41 -08008978 else {
8979 /* some multicasts */
8980 if (bnx2x_set_mc_list(bp))
8981 rx_mode = BNX2X_RX_MODE_ALLMULTI;
Eilon Greenstein34f80b02008-06-23 20:33:01 -07008982
Vladislav Zolotarov6e30dd42011-02-06 11:25:41 -08008983 /* some unicasts */
8984 if (bnx2x_set_uc_list(bp))
8985 rx_mode = BNX2X_RX_MODE_PROMISC;
Eilon Greenstein34f80b02008-06-23 20:33:01 -07008986 }
8987
8988 bp->rx_mode = rx_mode;
8989 bnx2x_set_storm_rx_mode(bp);
8990}
8991
/* called with rtnl_lock */
/* mdiobus read callback for the mdio_if_info framework.
 * Returns the (non-negative) register value on success, or the non-zero
 * code from bnx2x_phy_read() on failure.
 */
static int bnx2x_mdio_read(struct net_device *netdev, int prtad,
			   int devad, u16 addr)
{
	struct bnx2x *bp = netdev_priv(netdev);
	u16 value;
	int rc;

	DP(NETIF_MSG_LINK, "mdio_read: prtad 0x%x, devad 0x%x, addr 0x%x\n",
	   prtad, devad, addr);

	/* The HW expects different devad if CL22 is used */
	devad = (devad == MDIO_DEVAD_NONE) ? DEFAULT_PHY_DEV_ADDR : devad;

	/* PHY access is serialized against other users of the MDIO bus */
	bnx2x_acquire_phy_lock(bp);
	rc = bnx2x_phy_read(&bp->link_params, prtad, devad, addr, &value);
	bnx2x_release_phy_lock(bp);
	DP(NETIF_MSG_LINK, "mdio_read_val 0x%x rc = 0x%x\n", value, rc);

	/* rc == 0 means success: return the register value instead */
	if (!rc)
		rc = value;
	return rc;
}
9015
/* called with rtnl_lock */
/* mdiobus write callback for the mdio_if_info framework.
 * Returns 0 on success or the code from bnx2x_phy_write().
 */
static int bnx2x_mdio_write(struct net_device *netdev, int prtad, int devad,
			    u16 addr, u16 value)
{
	struct bnx2x *bp = netdev_priv(netdev);
	int rc;

	DP(NETIF_MSG_LINK, "mdio_write: prtad 0x%x, devad 0x%x, addr 0x%x,"
	   " value 0x%x\n", prtad, devad, addr, value);

	/* The HW expects different devad if CL22 is used */
	devad = (devad == MDIO_DEVAD_NONE) ? DEFAULT_PHY_DEV_ADDR : devad;

	/* Serialize against other users of the MDIO bus */
	bnx2x_acquire_phy_lock(bp);
	rc = bnx2x_phy_write(&bp->link_params, prtad, devad, addr, value);
	bnx2x_release_phy_lock(bp);
	return rc;
}
9034
9035/* called with rtnl_lock */
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02009036static int bnx2x_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
9037{
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02009038 struct bnx2x *bp = netdev_priv(dev);
Eilon Greenstein01cd4522009-08-12 08:23:08 +00009039 struct mii_ioctl_data *mdio = if_mii(ifr);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02009040
Eilon Greenstein01cd4522009-08-12 08:23:08 +00009041 DP(NETIF_MSG_LINK, "ioctl: phy id 0x%x, reg 0x%x, val_in 0x%x\n",
9042 mdio->phy_id, mdio->reg_num, mdio->val_in);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02009043
Eilon Greenstein01cd4522009-08-12 08:23:08 +00009044 if (!netif_running(dev))
9045 return -EAGAIN;
Yaniv Rosnerc18487e2008-06-23 20:27:52 -07009046
Eilon Greenstein01cd4522009-08-12 08:23:08 +00009047 return mdio_mii_ioctl(&bp->mdio, mdio, cmd);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02009048}
9049
#ifdef CONFIG_NET_POLL_CONTROLLER
/* Netpoll entry point: service the device by invoking the interrupt
 * handler directly, with the device IRQ masked to avoid re-entry.
 */
static void poll_bnx2x(struct net_device *dev)
{
	struct bnx2x *bp = netdev_priv(dev);

	disable_irq(bp->pdev->irq);
	bnx2x_interrupt(bp->pdev->irq, dev);
	enable_irq(bp->pdev->irq);
}
#endif
9060
/* net_device operations exported by this driver; installed on the netdev
 * in bnx2x_init_dev().
 */
static const struct net_device_ops bnx2x_netdev_ops = {
	.ndo_open		= bnx2x_open,
	.ndo_stop		= bnx2x_close,
	.ndo_start_xmit		= bnx2x_start_xmit,
	.ndo_select_queue	= bnx2x_select_queue,
	.ndo_set_rx_mode	= bnx2x_set_rx_mode,
	.ndo_set_mac_address	= bnx2x_change_mac_addr,
	.ndo_validate_addr	= eth_validate_addr,
	.ndo_do_ioctl		= bnx2x_ioctl,
	.ndo_change_mtu		= bnx2x_change_mtu,
	.ndo_fix_features	= bnx2x_fix_features,
	.ndo_set_features	= bnx2x_set_features,
	.ndo_tx_timeout		= bnx2x_tx_timeout,
#ifdef CONFIG_NET_POLL_CONTROLLER
	.ndo_poll_controller	= poll_bnx2x,
#endif
};
9078
/* One-time PCI/netdev bring-up for a newly probed device: enable the PCI
 * function, map BAR0 (registers) and BAR2 (doorbells), configure DMA
 * masks, and wire up netdev ops, features and MDIO callbacks.
 * Returns 0 on success or a negative errno; on failure all resources
 * acquired so far are released via the goto-cleanup ladder at the end.
 */
static int __devinit bnx2x_init_dev(struct pci_dev *pdev,
				    struct net_device *dev)
{
	struct bnx2x *bp;
	int rc;

	SET_NETDEV_DEV(dev, &pdev->dev);
	bp = netdev_priv(dev);

	bp->dev = dev;
	bp->pdev = pdev;
	bp->flags = 0;
	bp->pf_num = PCI_FUNC(pdev->devfn);

	rc = pci_enable_device(pdev);
	if (rc) {
		dev_err(&bp->pdev->dev,
			"Cannot enable PCI device, aborting\n");
		goto err_out;
	}

	/* BAR0 must be a memory BAR (register space) */
	if (!(pci_resource_flags(pdev, 0) & IORESOURCE_MEM)) {
		dev_err(&bp->pdev->dev,
			"Cannot find PCI device base address, aborting\n");
		rc = -ENODEV;
		goto err_out_disable;
	}

	/* BAR2 must be a memory BAR (doorbell space) */
	if (!(pci_resource_flags(pdev, 2) & IORESOURCE_MEM)) {
		dev_err(&bp->pdev->dev, "Cannot find second PCI device"
		       " base address, aborting\n");
		rc = -ENODEV;
		goto err_out_disable;
	}

	/* Only the first enabler of this PCI function claims the regions
	 * (the function may be shared, e.g. with CNIC) */
	if (atomic_read(&pdev->enable_cnt) == 1) {
		rc = pci_request_regions(pdev, DRV_MODULE_NAME);
		if (rc) {
			dev_err(&bp->pdev->dev,
				"Cannot obtain PCI resources, aborting\n");
			goto err_out_disable;
		}

		pci_set_master(pdev);
		pci_save_state(pdev);
	}

	bp->pm_cap = pci_find_capability(pdev, PCI_CAP_ID_PM);
	if (bp->pm_cap == 0) {
		dev_err(&bp->pdev->dev,
			"Cannot find power management capability, aborting\n");
		rc = -EIO;
		goto err_out_release;
	}

	bp->pcie_cap = pci_find_capability(pdev, PCI_CAP_ID_EXP);
	if (bp->pcie_cap == 0) {
		dev_err(&bp->pdev->dev,
			"Cannot find PCI Express capability, aborting\n");
		rc = -EIO;
		goto err_out_release;
	}

	/* Prefer 64-bit DMA; fall back to 32-bit, else fail */
	if (dma_set_mask(&pdev->dev, DMA_BIT_MASK(64)) == 0) {
		bp->flags |= USING_DAC_FLAG;
		if (dma_set_coherent_mask(&pdev->dev, DMA_BIT_MASK(64)) != 0) {
			dev_err(&bp->pdev->dev, "dma_set_coherent_mask"
			       " failed, aborting\n");
			rc = -EIO;
			goto err_out_release;
		}

	} else if (dma_set_mask(&pdev->dev, DMA_BIT_MASK(32)) != 0) {
		dev_err(&bp->pdev->dev,
			"System does not support DMA, aborting\n");
		rc = -EIO;
		goto err_out_release;
	}

	dev->mem_start = pci_resource_start(pdev, 0);
	dev->base_addr = dev->mem_start;
	dev->mem_end = pci_resource_end(pdev, 0);

	dev->irq = pdev->irq;

	/* Map the register window (BAR0) */
	bp->regview = pci_ioremap_bar(pdev, 0);
	if (!bp->regview) {
		dev_err(&bp->pdev->dev,
			"Cannot map register space, aborting\n");
		rc = -ENOMEM;
		goto err_out_release;
	}

	/* Map the doorbell window (BAR2), capped at what the chip needs */
	bp->doorbells = ioremap_nocache(pci_resource_start(pdev, 2),
					min_t(u64, BNX2X_DB_SIZE(bp),
					      pci_resource_len(pdev, 2)));
	if (!bp->doorbells) {
		dev_err(&bp->pdev->dev,
			"Cannot map doorbell space, aborting\n");
		rc = -ENOMEM;
		goto err_out_unmap;
	}

	bnx2x_set_power_state(bp, PCI_D0);

	/* clean indirect addresses */
	pci_write_config_dword(bp->pdev, PCICFG_GRC_ADDRESS,
			       PCICFG_VENDOR_ID_OFFSET);
	REG_WR(bp, PXP2_REG_PGL_ADDR_88_F0 + BP_PORT(bp)*16, 0);
	REG_WR(bp, PXP2_REG_PGL_ADDR_8C_F0 + BP_PORT(bp)*16, 0);
	REG_WR(bp, PXP2_REG_PGL_ADDR_90_F0 + BP_PORT(bp)*16, 0);
	REG_WR(bp, PXP2_REG_PGL_ADDR_94_F0 + BP_PORT(bp)*16, 0);

	/* Reset the load counter */
	bnx2x_clear_load_cnt(bp);

	dev->watchdog_timeo = TX_TIMEOUT;

	dev->netdev_ops = &bnx2x_netdev_ops;
	bnx2x_set_ethtool_ops(dev);

	/* Features the user may toggle at runtime */
	dev->hw_features = NETIF_F_SG | NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM |
		NETIF_F_TSO | NETIF_F_TSO_ECN | NETIF_F_TSO6 |
		NETIF_F_RXCSUM | NETIF_F_LRO | NETIF_F_HW_VLAN_TX;

	dev->vlan_features = NETIF_F_SG | NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM |
		NETIF_F_TSO | NETIF_F_TSO_ECN | NETIF_F_TSO6 | NETIF_F_HIGHDMA;

	dev->features |= dev->hw_features | NETIF_F_HW_VLAN_RX;
	if (bp->flags & USING_DAC_FLAG)
		dev->features |= NETIF_F_HIGHDMA;

	/* Add Loopback capability to the device */
	dev->hw_features |= NETIF_F_LOOPBACK;

#ifdef BCM_DCBNL
	dev->dcbnl_ops = &bnx2x_dcbnl_ops;
#endif

	/* get_port_hwinfo() will set prtad and mmds properly */
	bp->mdio.prtad = MDIO_PRTAD_NONE;
	bp->mdio.mmds = 0;
	bp->mdio.mode_support = MDIO_SUPPORTS_C45 | MDIO_EMULATE_C22;
	bp->mdio.dev = dev;
	bp->mdio.mdio_read = bnx2x_mdio_read;
	bp->mdio.mdio_write = bnx2x_mdio_write;

	return 0;

err_out_unmap:
	if (bp->regview) {
		iounmap(bp->regview);
		bp->regview = NULL;
	}
	if (bp->doorbells) {
		iounmap(bp->doorbells);
		bp->doorbells = NULL;
	}

err_out_release:
	if (atomic_read(&pdev->enable_cnt) == 1)
		pci_release_regions(pdev);

err_out_disable:
	pci_disable_device(pdev);
	pci_set_drvdata(pdev, NULL);

err_out:
	return rc;
}
9249
/* Read the negotiated PCIe link width and speed from the link control
 * register and return them through *width / *speed.
 */
static void __devinit bnx2x_get_pcie_width_speed(struct bnx2x *bp,
						 int *width, int *speed)
{
	u32 val = REG_RD(bp, PCICFG_OFFSET + PCICFG_LINK_CONTROL);

	*width = (val & PCICFG_LINK_WIDTH) >> PCICFG_LINK_WIDTH_SHIFT;

	/* return value of 1=2.5GHz 2=5GHz */
	*speed = (val & PCICFG_LINK_SPEED) >> PCICFG_LINK_SPEED_SHIFT;
}
9260
/* Validate the firmware image in bp->firmware before it is parsed:
 * every section must lie inside the file, every init_ops offset must be
 * within the ops array, and the embedded FW version must match what
 * this driver was built against. Returns 0 if the image is usable,
 * -EINVAL otherwise.
 */
static int bnx2x_check_firmware(struct bnx2x *bp)
{
	const struct firmware *firmware = bp->firmware;
	struct bnx2x_fw_file_hdr *fw_hdr;
	struct bnx2x_fw_file_section *sections;
	u32 offset, len, num_ops;
	u16 *ops_offsets;
	int i;
	const u8 *fw_ver;

	if (firmware->size < sizeof(struct bnx2x_fw_file_hdr))
		return -EINVAL;

	/* The header is itself an array of {offset, len} sections */
	fw_hdr = (struct bnx2x_fw_file_hdr *)firmware->data;
	sections = (struct bnx2x_fw_file_section *)fw_hdr;

	/* Make sure none of the offsets and sizes make us read beyond
	 * the end of the firmware data */
	for (i = 0; i < sizeof(*fw_hdr) / sizeof(*sections); i++) {
		offset = be32_to_cpu(sections[i].offset);
		len = be32_to_cpu(sections[i].len);
		if (offset + len > firmware->size) {
			dev_err(&bp->pdev->dev,
				"Section %d length is out of bounds\n", i);
			return -EINVAL;
		}
	}

	/* Likewise for the init_ops offsets */
	offset = be32_to_cpu(fw_hdr->init_ops_offsets.offset);
	ops_offsets = (u16 *)(firmware->data + offset);
	num_ops = be32_to_cpu(fw_hdr->init_ops.len) / sizeof(struct raw_op);

	/* len/2 because the offsets section is an array of u16s */
	for (i = 0; i < be32_to_cpu(fw_hdr->init_ops_offsets.len) / 2; i++) {
		if (be16_to_cpu(ops_offsets[i]) > num_ops) {
			dev_err(&bp->pdev->dev,
				"Section offset %d is out of bounds\n", i);
			return -EINVAL;
		}
	}

	/* Check FW version */
	offset = be32_to_cpu(fw_hdr->fw_version.offset);
	fw_ver = firmware->data + offset;
	if ((fw_ver[0] != BCM_5710_FW_MAJOR_VERSION) ||
	    (fw_ver[1] != BCM_5710_FW_MINOR_VERSION) ||
	    (fw_ver[2] != BCM_5710_FW_REVISION_VERSION) ||
	    (fw_ver[3] != BCM_5710_FW_ENGINEERING_VERSION)) {
		dev_err(&bp->pdev->dev,
			"Bad FW version:%d.%d.%d.%d. Should be %d.%d.%d.%d\n",
			fw_ver[0], fw_ver[1], fw_ver[2],
			fw_ver[3], BCM_5710_FW_MAJOR_VERSION,
			BCM_5710_FW_MINOR_VERSION,
			BCM_5710_FW_REVISION_VERSION,
			BCM_5710_FW_ENGINEERING_VERSION);
		return -EINVAL;
	}

	return 0;
}
9321
Eilon Greensteinab6ad5a2009-08-12 08:24:29 +00009322static inline void be32_to_cpu_n(const u8 *_source, u8 *_target, u32 n)
Vladislav Zolotarov94a78b72009-04-27 03:27:43 -07009323{
Eilon Greensteinab6ad5a2009-08-12 08:24:29 +00009324 const __be32 *source = (const __be32 *)_source;
9325 u32 *target = (u32 *)_target;
Vladislav Zolotarov94a78b72009-04-27 03:27:43 -07009326 u32 i;
Vladislav Zolotarov94a78b72009-04-27 03:27:43 -07009327
9328 for (i = 0; i < n/4; i++)
9329 target[i] = be32_to_cpu(source[i]);
9330}
9331
/*
  Ops array is stored in the following format:
  {op(8bit), offset(24bit, big endian), data(32bit, big endian)}
 */
/* Unpack n bytes of packed big-endian init ops (two 32-bit words each)
 * into an array of struct raw_op in host byte order.
 */
static inline void bnx2x_prep_ops(const u8 *_source, u8 *_target, u32 n)
{
	const __be32 *source = (const __be32 *)_source;
	struct raw_op *target = (struct raw_op *)_target;
	u32 i, j, tmp;

	/* n/8: each op consumes two 32-bit source words (j advances by 2) */
	for (i = 0, j = 0; i < n/8; i++, j += 2) {
		tmp = be32_to_cpu(source[j]);
		/* top byte is the opcode, low 24 bits the offset */
		target[i].op = (tmp >> 24) & 0xff;
		target[i].offset = tmp & 0xffffff;
		target[i].raw_data = be32_to_cpu(source[j + 1]);
	}
}
Eilon Greensteinab6ad5a2009-08-12 08:24:29 +00009349
/**
 * IRO array is stored in the following format:
 * {base(24bit), m1(16bit), m2(16bit), m3(16bit), size(16bit) }
 */
/* Unpack n bytes of packed big-endian IRO entries (three 32-bit words
 * per entry) into an array of struct iro in host byte order.
 */
static inline void bnx2x_prep_iro(const u8 *_source, u8 *_target, u32 n)
{
	const __be32 *source = (const __be32 *)_source;
	struct iro *target = (struct iro *)_target;
	u32 i, j, tmp;

	/* j tracks the source word index: 3 words per struct iro */
	for (i = 0, j = 0; i < n/sizeof(struct iro); i++) {
		target[i].base = be32_to_cpu(source[j]);
		j++;
		/* second word packs m1 (high 16) and m2 (low 16) */
		tmp = be32_to_cpu(source[j]);
		target[i].m1 = (tmp >> 16) & 0xffff;
		target[i].m2 = tmp & 0xffff;
		j++;
		/* third word packs m3 (high 16) and size (low 16) */
		tmp = be32_to_cpu(source[j]);
		target[i].m3 = (tmp >> 16) & 0xffff;
		target[i].size = tmp & 0xffff;
		j++;
	}
}
9373
Eilon Greensteinab6ad5a2009-08-12 08:24:29 +00009374static inline void be16_to_cpu_n(const u8 *_source, u8 *_target, u32 n)
Vladislav Zolotarov94a78b72009-04-27 03:27:43 -07009375{
Eilon Greensteinab6ad5a2009-08-12 08:24:29 +00009376 const __be16 *source = (const __be16 *)_source;
9377 u16 *target = (u16 *)_target;
Vladislav Zolotarov94a78b72009-04-27 03:27:43 -07009378 u32 i;
Vladislav Zolotarov94a78b72009-04-27 03:27:43 -07009379
9380 for (i = 0; i < n/2; i++)
9381 target[i] = be16_to_cpu(source[i]);
9382}
9383
/* Allocate bp->arr sized per the firmware header and fill it by running
 * 'func' over the matching firmware section; jumps to 'lbl' on allocation
 * failure. Expects 'bp' and 'fw_hdr' in the caller's scope (used only by
 * bnx2x_init_firmware()).
 */
#define BNX2X_ALLOC_AND_SET(arr, lbl, func) \
do { \
	u32 len = be32_to_cpu(fw_hdr->arr.len); \
	bp->arr = kmalloc(len, GFP_KERNEL); \
	if (!bp->arr) { \
		pr_err("Failed to allocate %d bytes for "#arr"\n", len); \
		goto lbl; \
	} \
	func(bp->firmware->data + be32_to_cpu(fw_hdr->arr.offset), \
	     (u8 *)bp->arr, len); \
} while (0)
Vladislav Zolotarov94a78b72009-04-27 03:27:43 -07009395
/* Load and validate the chip-specific firmware file, then allocate and
 * populate the init arrays (blob, opcodes, offsets, IRO) and set up the
 * STORM data pointers. Returns 0 on success or a negative errno; on any
 * failure everything allocated so far is freed via the unwind labels.
 */
int bnx2x_init_firmware(struct bnx2x *bp)
{
	const char *fw_file_name;
	struct bnx2x_fw_file_hdr *fw_hdr;
	int rc;

	/* Pick the firmware image matching the chip generation */
	if (CHIP_IS_E1(bp))
		fw_file_name = FW_FILE_NAME_E1;
	else if (CHIP_IS_E1H(bp))
		fw_file_name = FW_FILE_NAME_E1H;
	else if (CHIP_IS_E2(bp))
		fw_file_name = FW_FILE_NAME_E2;
	else {
		BNX2X_ERR("Unsupported chip revision\n");
		return -EINVAL;
	}

	BNX2X_DEV_INFO("Loading %s\n", fw_file_name);

	rc = request_firmware(&bp->firmware, fw_file_name, &bp->pdev->dev);
	if (rc) {
		BNX2X_ERR("Can't load firmware file %s\n", fw_file_name);
		goto request_firmware_exit;
	}

	/* Bounds/version sanity checks before trusting any offset */
	rc = bnx2x_check_firmware(bp);
	if (rc) {
		BNX2X_ERR("Corrupt firmware file %s\n", fw_file_name);
		goto request_firmware_exit;
	}

	fw_hdr = (struct bnx2x_fw_file_hdr *)bp->firmware->data;

	/* Initialize the pointers to the init arrays */
	/* Blob */
	BNX2X_ALLOC_AND_SET(init_data, request_firmware_exit, be32_to_cpu_n);

	/* Opcodes */
	BNX2X_ALLOC_AND_SET(init_ops, init_ops_alloc_err, bnx2x_prep_ops);

	/* Offsets */
	BNX2X_ALLOC_AND_SET(init_ops_offsets, init_offsets_alloc_err,
			    be16_to_cpu_n);

	/* STORMs firmware: these point directly into the firmware blob */
	INIT_TSEM_INT_TABLE_DATA(bp) = bp->firmware->data +
			be32_to_cpu(fw_hdr->tsem_int_table_data.offset);
	INIT_TSEM_PRAM_DATA(bp)      = bp->firmware->data +
			be32_to_cpu(fw_hdr->tsem_pram_data.offset);
	INIT_USEM_INT_TABLE_DATA(bp) = bp->firmware->data +
			be32_to_cpu(fw_hdr->usem_int_table_data.offset);
	INIT_USEM_PRAM_DATA(bp)      = bp->firmware->data +
			be32_to_cpu(fw_hdr->usem_pram_data.offset);
	INIT_XSEM_INT_TABLE_DATA(bp) = bp->firmware->data +
			be32_to_cpu(fw_hdr->xsem_int_table_data.offset);
	INIT_XSEM_PRAM_DATA(bp)      = bp->firmware->data +
			be32_to_cpu(fw_hdr->xsem_pram_data.offset);
	INIT_CSEM_INT_TABLE_DATA(bp) = bp->firmware->data +
			be32_to_cpu(fw_hdr->csem_int_table_data.offset);
	INIT_CSEM_PRAM_DATA(bp)      = bp->firmware->data +
			be32_to_cpu(fw_hdr->csem_pram_data.offset);
	/* IRO */
	BNX2X_ALLOC_AND_SET(iro_arr, iro_alloc_err, bnx2x_prep_iro);

	return 0;

/* Unwind in reverse allocation order */
iro_alloc_err:
	kfree(bp->init_ops_offsets);
init_offsets_alloc_err:
	kfree(bp->init_ops);
init_ops_alloc_err:
	kfree(bp->init_data);
request_firmware_exit:
	release_firmware(bp->firmware);

	return rc;
}
9473
/* Compute the QM connection-ID count for l2_cid_count L2 CIDs
 * (plus the CNIC CIDs when built with BCM_CNIC), rounded up to the
 * QM granularity. 'bp' is currently unused but kept for interface
 * symmetry with the other sizing helpers.
 */
static inline int bnx2x_set_qm_cid_count(struct bnx2x *bp, int l2_cid_count)
{
	int cid_count = L2_FP_COUNT(l2_cid_count);

#ifdef BCM_CNIC
	cid_count += CNIC_CID_MAX;
#endif
	return roundup(cid_count, QM_CID_ROUND);
}
Dmitry Kravkovf85582f2010-10-06 03:34:21 +00009483
/* PCI probe entry point: size the CID space for the detected board,
 * allocate the multiqueue netdev, initialize device and driver state,
 * set up the interrupt mode and NAPI, and register the netdev.
 * Returns 0 on success or a negative errno; resources are unwound at
 * init_one_exit on late failures.
 */
static int __devinit bnx2x_init_one(struct pci_dev *pdev,
				    const struct pci_device_id *ent)
{
	struct net_device *dev = NULL;
	struct bnx2x *bp;
	int pcie_width, pcie_speed;
	int rc, cid_count;

	/* Per-board fastpath status-block count */
	switch (ent->driver_data) {
	case BCM57710:
	case BCM57711:
	case BCM57711E:
		cid_count = FP_SB_MAX_E1x;
		break;

	case BCM57712:
	case BCM57712E:
		cid_count = FP_SB_MAX_E2;
		break;

	default:
		pr_err("Unknown board_type (%ld), aborting\n",
			   ent->driver_data);
		return -ENODEV;
	}

	/* Reserve contexts for non-Ethernet clients (e.g. CNIC) */
	cid_count += NONE_ETH_CONTEXT_USE + CNIC_CONTEXT_USE;

	/* dev zeroed in init_etherdev */
	dev = alloc_etherdev_mq(sizeof(*bp), cid_count);
	if (!dev) {
		dev_err(&pdev->dev, "Cannot allocate net device\n");
		return -ENOMEM;
	}

	bp = netdev_priv(dev);
	bp->msg_enable = debug;

	pci_set_drvdata(pdev, dev);

	bp->l2_cid_count = cid_count;

	rc = bnx2x_init_dev(pdev, dev);
	if (rc < 0) {
		/* bnx2x_init_dev() cleaned up after itself */
		free_netdev(dev);
		return rc;
	}

	rc = bnx2x_init_bp(bp);
	if (rc)
		goto init_one_exit;

	/* calc qm_cid_count */
	bp->qm_cid_count = bnx2x_set_qm_cid_count(bp, cid_count);

#ifdef BCM_CNIC
	/* disable FCOE L2 queue for E1x*/
	if (CHIP_IS_E1x(bp))
		bp->flags |= NO_FCOE_FLAG;

#endif

	/* Configure interrupt mode: try to enable MSI-X/MSI if
	 * needed, set bp->num_queues appropriately.
	 */
	bnx2x_set_int_mode(bp);

	/* Add all NAPI objects */
	bnx2x_add_all_napi(bp);

	rc = register_netdev(dev);
	if (rc) {
		dev_err(&pdev->dev, "Cannot register net device\n");
		goto init_one_exit;
	}

#ifdef BCM_CNIC
	if (!NO_FCOE(bp)) {
		/* Add storage MAC address */
		rtnl_lock();
		dev_addr_add(bp->dev, bp->fip_mac, NETDEV_HW_ADDR_T_SAN);
		rtnl_unlock();
	}
#endif

	bnx2x_get_pcie_width_speed(bp, &pcie_width, &pcie_speed);

	/* NOTE(review): the speed encoding differs between E2 and older
	 * chips, hence the split condition for the "Gen2" label */
	netdev_info(dev, "%s (%c%d) PCI-E x%d %s found at mem %lx,"
	       " IRQ %d, ", board_info[ent->driver_data].name,
	       (CHIP_REV(bp) >> 12) + 'A', (CHIP_METAL(bp) >> 4),
	       pcie_width,
	       ((!CHIP_IS_E2(bp) && pcie_speed == 2) ||
		 (CHIP_IS_E2(bp) && pcie_speed == 1)) ?
	       "5GHz (Gen2)" : "2.5GHz",
	       dev->base_addr, bp->pdev->irq);
	pr_cont("node addr %pM\n", dev->dev_addr);

	return 0;

init_one_exit:
	if (bp->regview)
		iounmap(bp->regview);

	if (bp->doorbells)
		iounmap(bp->doorbells);

	free_netdev(dev);

	if (atomic_read(&pdev->enable_cnt) == 1)
		pci_release_regions(pdev);

	pci_disable_device(pdev);
	pci_set_drvdata(pdev, NULL);

	return rc;
}
9600
/* PCI remove entry point: unregister the netdev and release every
 * resource acquired during probe, in reverse order of acquisition.
 */
static void __devexit bnx2x_remove_one(struct pci_dev *pdev)
{
	struct net_device *dev = pci_get_drvdata(pdev);
	struct bnx2x *bp;

	if (!dev) {
		dev_err(&pdev->dev, "BAD net device from bnx2x_init_one\n");
		return;
	}
	bp = netdev_priv(dev);

#ifdef BCM_CNIC
	/* Delete storage MAC address */
	if (!NO_FCOE(bp)) {
		rtnl_lock();
		dev_addr_del(bp->dev, bp->fip_mac, NETDEV_HW_ADDR_T_SAN);
		rtnl_unlock();
	}
#endif

#ifdef BCM_DCBNL
	/* Delete app tlvs from dcbnl */
	bnx2x_dcbnl_update_applist(bp, true);
#endif

	unregister_netdev(dev);

	/* Delete all NAPI objects */
	bnx2x_del_all_napi(bp);

	/* Power on: we can't let PCI layer write to us while we are in D3 */
	bnx2x_set_power_state(bp, PCI_D0);

	/* Disable MSI/MSI-X */
	bnx2x_disable_msi(bp);

	/* Power off */
	bnx2x_set_power_state(bp, PCI_D3hot);

	/* Make sure RESET task is not scheduled before continuing */
	cancel_delayed_work_sync(&bp->reset_task);

	if (bp->regview)
		iounmap(bp->regview);

	if (bp->doorbells)
		iounmap(bp->doorbells);

	bnx2x_free_mem_bp(bp);

	free_netdev(dev);

	if (atomic_read(&pdev->enable_cnt) == 1)
		pci_release_regions(pdev);

	pci_disable_device(pdev);
	pci_set_drvdata(pdev, NULL);
}
9659
Yitchak Gertnerf8ef6e42008-09-09 05:07:25 -07009660static int bnx2x_eeh_nic_unload(struct bnx2x *bp)
9661{
9662 int i;
9663
9664 bp->state = BNX2X_STATE_ERROR;
9665
9666 bp->rx_mode = BNX2X_RX_MODE_NONE;
9667
9668 bnx2x_netif_stop(bp, 0);
Stanislaw Gruszkac89af1a2010-05-17 17:35:38 -07009669 netif_carrier_off(bp->dev);
Yitchak Gertnerf8ef6e42008-09-09 05:07:25 -07009670
9671 del_timer_sync(&bp->timer);
9672 bp->stats_state = STATS_STATE_DISABLED;
9673 DP(BNX2X_MSG_STATS, "stats_state - DISABLED\n");
9674
9675 /* Release IRQs */
Dmitry Kravkovd6214d72010-10-06 03:32:10 +00009676 bnx2x_free_irq(bp);
Yitchak Gertnerf8ef6e42008-09-09 05:07:25 -07009677
Yitchak Gertnerf8ef6e42008-09-09 05:07:25 -07009678 /* Free SKBs, SGEs, TPA pool and driver internals */
9679 bnx2x_free_skbs(bp);
Dmitry Kravkov523224a2010-10-06 03:23:26 +00009680
Vladislav Zolotarovec6ba942010-12-13 05:44:01 +00009681 for_each_rx_queue(bp, i)
Yitchak Gertnerf8ef6e42008-09-09 05:07:25 -07009682 bnx2x_free_rx_sge_range(bp, bp->fp + i, NUM_RX_SGE);
Dmitry Kravkovd6214d72010-10-06 03:32:10 +00009683
Yitchak Gertnerf8ef6e42008-09-09 05:07:25 -07009684 bnx2x_free_mem(bp);
9685
9686 bp->state = BNX2X_STATE_CLOSED;
9687
Yitchak Gertnerf8ef6e42008-09-09 05:07:25 -07009688 return 0;
9689}
9690
/* bnx2x_eeh_recover - re-sync with the management CPU (MCP) after a
 * PCI bus reset: re-read the shared-memory base and the firmware
 * mailbox sequence number.  Sets NO_MCP_FLAG if the MCP looks dead.
 */
static void bnx2x_eeh_recover(struct bnx2x *bp)
{
	u32 val;

	/* NOTE(review): re-initializes the PHY mutex on the recovery path;
	 * assumes no one can hold it at this point.
	 */
	mutex_init(&bp->port.phy_mutex);

	bp->common.shmem_base = REG_RD(bp, MISC_REG_SHARED_MEM_ADDR);
	bp->link_params.shmem_base = bp->common.shmem_base;
	BNX2X_DEV_INFO("shmem offset is 0x%x\n", bp->common.shmem_base);

	/* A shmem base outside [0xA0000, 0xC0000) means no active MCP */
	if (!bp->common.shmem_base ||
	    (bp->common.shmem_base < 0xA0000) ||
	    (bp->common.shmem_base >= 0xC0000)) {
		BNX2X_DEV_INFO("MCP not active\n");
		bp->flags |= NO_MCP_FLAG;
		return;
	}

	/* Both DEV_INFO and MB validity bits must be set for a sane MCP */
	val = SHMEM_RD(bp, validity_map[BP_PORT(bp)]);
	if ((val & (SHR_MEM_VALIDITY_DEV_INFO | SHR_MEM_VALIDITY_MB))
		!= (SHR_MEM_VALIDITY_DEV_INFO | SHR_MEM_VALIDITY_MB))
		BNX2X_ERR("BAD MCP validity signature\n");

	if (!BP_NOMCP(bp)) {
		/* Resume the driver<->firmware mailbox handshake where
		 * the firmware left off.
		 */
		bp->fw_seq =
		    (SHMEM_RD(bp, func_mb[BP_FW_MB_IDX(bp)].drv_mb_header) &
		     DRV_MSG_SEQ_NUMBER_MASK);
		BNX2X_DEV_INFO("fw_seq 0x%08x\n", bp->fw_seq);
	}
}
9721
Wendy Xiong493adb12008-06-23 20:36:22 -07009722/**
9723 * bnx2x_io_error_detected - called when PCI error is detected
9724 * @pdev: Pointer to PCI device
9725 * @state: The current pci connection state
9726 *
9727 * This function is called after a PCI bus error affecting
9728 * this device has been detected.
9729 */
9730static pci_ers_result_t bnx2x_io_error_detected(struct pci_dev *pdev,
9731 pci_channel_state_t state)
9732{
9733 struct net_device *dev = pci_get_drvdata(pdev);
9734 struct bnx2x *bp = netdev_priv(dev);
9735
9736 rtnl_lock();
9737
9738 netif_device_detach(dev);
9739
Dean Nelson07ce50e2009-07-31 09:13:25 +00009740 if (state == pci_channel_io_perm_failure) {
9741 rtnl_unlock();
9742 return PCI_ERS_RESULT_DISCONNECT;
9743 }
9744
Wendy Xiong493adb12008-06-23 20:36:22 -07009745 if (netif_running(dev))
Yitchak Gertnerf8ef6e42008-09-09 05:07:25 -07009746 bnx2x_eeh_nic_unload(bp);
Wendy Xiong493adb12008-06-23 20:36:22 -07009747
9748 pci_disable_device(pdev);
9749
9750 rtnl_unlock();
9751
9752 /* Request a slot reset */
9753 return PCI_ERS_RESULT_NEED_RESET;
9754}
9755
9756/**
9757 * bnx2x_io_slot_reset - called after the PCI bus has been reset
9758 * @pdev: Pointer to PCI device
9759 *
9760 * Restart the card from scratch, as if from a cold-boot.
9761 */
9762static pci_ers_result_t bnx2x_io_slot_reset(struct pci_dev *pdev)
9763{
9764 struct net_device *dev = pci_get_drvdata(pdev);
9765 struct bnx2x *bp = netdev_priv(dev);
9766
9767 rtnl_lock();
9768
9769 if (pci_enable_device(pdev)) {
9770 dev_err(&pdev->dev,
9771 "Cannot re-enable PCI device after reset\n");
9772 rtnl_unlock();
9773 return PCI_ERS_RESULT_DISCONNECT;
9774 }
9775
9776 pci_set_master(pdev);
9777 pci_restore_state(pdev);
9778
9779 if (netif_running(dev))
9780 bnx2x_set_power_state(bp, PCI_D0);
9781
9782 rtnl_unlock();
9783
9784 return PCI_ERS_RESULT_RECOVERED;
9785}
9786
9787/**
9788 * bnx2x_io_resume - called when traffic can start flowing again
9789 * @pdev: Pointer to PCI device
9790 *
9791 * This callback is called when the error recovery driver tells us that
9792 * its OK to resume normal operation.
9793 */
9794static void bnx2x_io_resume(struct pci_dev *pdev)
9795{
9796 struct net_device *dev = pci_get_drvdata(pdev);
9797 struct bnx2x *bp = netdev_priv(dev);
9798
Vladislav Zolotarov72fd0712010-04-19 01:13:12 +00009799 if (bp->recovery_state != BNX2X_RECOVERY_DONE) {
Dmitry Kravkovf2e08992010-10-06 03:28:26 +00009800 printk(KERN_ERR "Handling parity error recovery. "
9801 "Try again later\n");
Vladislav Zolotarov72fd0712010-04-19 01:13:12 +00009802 return;
9803 }
9804
Wendy Xiong493adb12008-06-23 20:36:22 -07009805 rtnl_lock();
9806
Yitchak Gertnerf8ef6e42008-09-09 05:07:25 -07009807 bnx2x_eeh_recover(bp);
9808
Wendy Xiong493adb12008-06-23 20:36:22 -07009809 if (netif_running(dev))
Yitchak Gertnerf8ef6e42008-09-09 05:07:25 -07009810 bnx2x_nic_load(bp, LOAD_NORMAL);
Wendy Xiong493adb12008-06-23 20:36:22 -07009811
9812 netif_device_attach(dev);
9813
9814 rtnl_unlock();
9815}
9816
/* PCI error-recovery callbacks (EEH / AER) */
static struct pci_error_handlers bnx2x_err_handler = {
	.error_detected = bnx2x_io_error_detected,
	.slot_reset     = bnx2x_io_slot_reset,
	.resume         = bnx2x_io_resume,
};
9822
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02009823static struct pci_driver bnx2x_pci_driver = {
Wendy Xiong493adb12008-06-23 20:36:22 -07009824 .name = DRV_MODULE_NAME,
9825 .id_table = bnx2x_pci_tbl,
9826 .probe = bnx2x_init_one,
9827 .remove = __devexit_p(bnx2x_remove_one),
9828 .suspend = bnx2x_suspend,
9829 .resume = bnx2x_resume,
9830 .err_handler = &bnx2x_err_handler,
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02009831};
9832
9833static int __init bnx2x_init(void)
9834{
Stanislaw Gruszkadd21ca62009-05-05 23:22:01 +00009835 int ret;
9836
Joe Perches7995c642010-02-17 15:01:52 +00009837 pr_info("%s", version);
Eilon Greenstein938cf542009-08-12 08:23:37 +00009838
Eilon Greenstein1cf167f2009-01-14 21:22:18 -08009839 bnx2x_wq = create_singlethread_workqueue("bnx2x");
9840 if (bnx2x_wq == NULL) {
Joe Perches7995c642010-02-17 15:01:52 +00009841 pr_err("Cannot create workqueue\n");
Eilon Greenstein1cf167f2009-01-14 21:22:18 -08009842 return -ENOMEM;
9843 }
9844
Stanislaw Gruszkadd21ca62009-05-05 23:22:01 +00009845 ret = pci_register_driver(&bnx2x_pci_driver);
9846 if (ret) {
Joe Perches7995c642010-02-17 15:01:52 +00009847 pr_err("Cannot register driver\n");
Stanislaw Gruszkadd21ca62009-05-05 23:22:01 +00009848 destroy_workqueue(bnx2x_wq);
9849 }
9850 return ret;
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02009851}
9852
9853static void __exit bnx2x_cleanup(void)
9854{
9855 pci_unregister_driver(&bnx2x_pci_driver);
Eilon Greenstein1cf167f2009-01-14 21:22:18 -08009856
9857 destroy_workqueue(bnx2x_wq);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02009858}
9859
/* Register module entry/exit points */
module_init(bnx2x_init);
module_exit(bnx2x_cleanup);
9862
Michael Chan993ac7b2009-10-10 13:46:56 +00009863#ifdef BCM_CNIC
9864
/* bnx2x_cnic_sp_post - retire @count completed CNIC slow-path entries
 * and push as many pending KWQEs from the CNIC ring onto the SPQ as the
 * per-type credits allow.  count denotes the number of new completions
 * we have seen.  Runs under spq_lock (bottom-half disabled).
 */
static void bnx2x_cnic_sp_post(struct bnx2x *bp, int count)
{
	struct eth_spe *spe;

#ifdef BNX2X_STOP_ON_ERROR
	if (unlikely(bp->panic))
		return;
#endif

	spin_lock_bh(&bp->spq_lock);
	/* Completions can never exceed what was posted */
	BUG_ON(bp->cnic_spq_pending < count);
	bp->cnic_spq_pending -= count;


	for (; bp->cnic_kwq_pending; bp->cnic_kwq_pending--) {
		/* Connection type selects which credit pool applies */
		u16 type = (le16_to_cpu(bp->cnic_kwq_cons->hdr.type)
			    & SPE_HDR_CONN_TYPE) >>
			    SPE_HDR_CONN_TYPE_SHIFT;

		/* Set validation for iSCSI L2 client before sending SETUP
		 *  ramrod
		 */
		if (type == ETH_CONNECTION_TYPE) {
			u8 cmd = (le32_to_cpu(bp->cnic_kwq_cons->
					     hdr.conn_and_cmd_data) >>
				SPE_HDR_CMD_ID_SHIFT) & 0xff;

			if (cmd == RAMROD_CMD_ID_ETH_CLIENT_SETUP)
				bnx2x_set_ctx_validation(&bp->context.
					vcxt[BNX2X_ISCSI_ETH_CID].eth,
					HW_CID(bp, BNX2X_ISCSI_ETH_CID));
		}

		/* There may be not more than 8 L2 and not more than 8 L5 SPEs
		 * We also check that the number of outstanding
		 * COMMON ramrods is not more than the EQ and SPQ can
		 * accommodate.  If the relevant credit is exhausted, stop
		 * draining the KWQ and retry on the next completion.
		 */
		if (type == ETH_CONNECTION_TYPE) {
			if (!atomic_read(&bp->cq_spq_left))
				break;
			else
				atomic_dec(&bp->cq_spq_left);
		} else if (type == NONE_CONNECTION_TYPE) {
			if (!atomic_read(&bp->eq_spq_left))
				break;
			else
				atomic_dec(&bp->eq_spq_left);
		} else if ((type == ISCSI_CONNECTION_TYPE) ||
			   (type == FCOE_CONNECTION_TYPE)) {
			if (bp->cnic_spq_pending >=
			    bp->cnic_eth_dev.max_kwqe_pending)
				break;
			else
				bp->cnic_spq_pending++;
		} else {
			BNX2X_ERR("Unknown SPE type: %d\n", type);
			bnx2x_panic();
			break;
		}

		/* Copy the KWQE into the next free SPQ slot */
		spe = bnx2x_sp_get_next(bp);
		*spe = *bp->cnic_kwq_cons;

		DP(NETIF_MSG_TIMER, "pending on SPQ %d, on KWQ %d count %d\n",
		   bp->cnic_spq_pending, bp->cnic_kwq_pending, count);

		/* Advance the KWQ consumer, wrapping at the ring end */
		if (bp->cnic_kwq_cons == bp->cnic_kwq_last)
			bp->cnic_kwq_cons = bp->cnic_kwq;
		else
			bp->cnic_kwq_cons++;
	}
	bnx2x_sp_prod_update(bp);
	spin_unlock_bh(&bp->spq_lock);
}
9941
/* bnx2x_cnic_sp_queue - CNIC callback: enqueue up to @count 16-byte
 * KWQEs onto the driver's CNIC KWQ ring, then kick the SPQ if there is
 * room.  Returns the number of KWQEs actually queued (may be less than
 * @count if the ring fills up).
 */
static int bnx2x_cnic_sp_queue(struct net_device *dev,
			       struct kwqe_16 *kwqes[], u32 count)
{
	struct bnx2x *bp = netdev_priv(dev);
	int i;

#ifdef BNX2X_STOP_ON_ERROR
	if (unlikely(bp->panic))
		return -EIO;
#endif

	spin_lock_bh(&bp->spq_lock);

	for (i = 0; i < count; i++) {
		struct eth_spe *spe = (struct eth_spe *)kwqes[i];

		/* Ring full - return the partial count to the caller */
		if (bp->cnic_kwq_pending == MAX_SP_DESC_CNT)
			break;

		*bp->cnic_kwq_prod = *spe;

		bp->cnic_kwq_pending++;

		DP(NETIF_MSG_TIMER, "L5 SPQE %x %x %x:%x pos %d\n",
		   spe->hdr.conn_and_cmd_data, spe->hdr.type,
		   spe->data.update_data_addr.hi,
		   spe->data.update_data_addr.lo,
		   bp->cnic_kwq_pending);

		/* Advance the producer, wrapping at the ring end */
		if (bp->cnic_kwq_prod == bp->cnic_kwq_last)
			bp->cnic_kwq_prod = bp->cnic_kwq;
		else
			bp->cnic_kwq_prod++;
	}

	spin_unlock_bh(&bp->spq_lock);

	/* Drain the KWQ into the SPQ if credits may be available */
	if (bp->cnic_spq_pending < bp->cnic_eth_dev.max_kwqe_pending)
		bnx2x_cnic_sp_post(bp, 0);

	return i;
}
9984
9985static int bnx2x_cnic_ctl_send(struct bnx2x *bp, struct cnic_ctl_info *ctl)
9986{
9987 struct cnic_ops *c_ops;
9988 int rc = 0;
9989
9990 mutex_lock(&bp->cnic_mutex);
Eric Dumazet13707f92011-01-26 19:28:23 +00009991 c_ops = rcu_dereference_protected(bp->cnic_ops,
9992 lockdep_is_held(&bp->cnic_mutex));
Michael Chan993ac7b2009-10-10 13:46:56 +00009993 if (c_ops)
9994 rc = c_ops->cnic_ctl(bp->cnic_data, ctl);
9995 mutex_unlock(&bp->cnic_mutex);
9996
9997 return rc;
9998}
9999
10000static int bnx2x_cnic_ctl_send_bh(struct bnx2x *bp, struct cnic_ctl_info *ctl)
10001{
10002 struct cnic_ops *c_ops;
10003 int rc = 0;
10004
10005 rcu_read_lock();
10006 c_ops = rcu_dereference(bp->cnic_ops);
10007 if (c_ops)
10008 rc = c_ops->cnic_ctl(bp->cnic_data, ctl);
10009 rcu_read_unlock();
10010
10011 return rc;
10012}
10013
10014/*
10015 * for commands that have no data
10016 */
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +000010017int bnx2x_cnic_notify(struct bnx2x *bp, int cmd)
Michael Chan993ac7b2009-10-10 13:46:56 +000010018{
10019 struct cnic_ctl_info ctl = {0};
10020
10021 ctl.cmd = cmd;
10022
10023 return bnx2x_cnic_ctl_send(bp, &ctl);
10024}
10025
10026static void bnx2x_cnic_cfc_comp(struct bnx2x *bp, int cid)
10027{
10028 struct cnic_ctl_info ctl;
10029
10030 /* first we tell CNIC and only then we count this as a completion */
10031 ctl.cmd = CNIC_CTL_COMPLETION_CMD;
10032 ctl.data.comp.cid = cid;
10033
10034 bnx2x_cnic_ctl_send_bh(bp, &ctl);
Dmitry Kravkovc2bff632010-10-06 03:33:18 +000010035 bnx2x_cnic_sp_post(bp, 0);
Michael Chan993ac7b2009-10-10 13:46:56 +000010036}
10037
/* bnx2x_drv_ctl - CNIC callback: dispatch a control request from the
 * CNIC driver.  Returns 0 on success, -EINVAL for an unknown command.
 */
static int bnx2x_drv_ctl(struct net_device *dev, struct drv_ctl_info *ctl)
{
	struct bnx2x *bp = netdev_priv(dev);
	int rc = 0;

	switch (ctl->cmd) {
	/* Write one entry of CNIC's context table into the ILT */
	case DRV_CTL_CTXTBL_WR_CMD: {
		u32 index = ctl->data.io.offset;
		dma_addr_t addr = ctl->data.io.dma_addr;

		bnx2x_ilt_wr(bp, index, addr);
		break;
	}

	/* CNIC returns L5 SPQ credits; use them to drain the KWQ */
	case DRV_CTL_RET_L5_SPQ_CREDIT_CMD: {
		int count = ctl->data.credit.credit_count;

		bnx2x_cnic_sp_post(bp, count);
		break;
	}

	/* rtnl_lock is held. */
	case DRV_CTL_START_L2_CMD: {
		u32 cli = ctl->data.ring.client_id;

		/* Clear FCoE FIP and ALL ENODE MACs addresses first */
		bnx2x_del_fcoe_eth_macs(bp);

		/* Set iSCSI MAC address */
		bnx2x_set_iscsi_eth_mac_addr(bp, 1);

		/* Make the MAC write visible before enabling filters */
		mmiowb();
		barrier();

		/* Start accepting on iSCSI L2 ring. Accept all multicasts
		 * because it's the only way for UIO Client to accept
		 * multicasts (in non-promiscuous mode only one Client per
		 * function will receive multicast packets (leading in our
		 * case).
		 */
		bnx2x_rxq_set_mac_filters(bp, cli,
			BNX2X_ACCEPT_UNICAST |
			BNX2X_ACCEPT_BROADCAST |
			BNX2X_ACCEPT_ALL_MULTICAST);
		storm_memset_mac_filters(bp, &bp->mac_filters, BP_FUNC(bp));

		break;
	}

	/* rtnl_lock is held. */
	case DRV_CTL_STOP_L2_CMD: {
		u32 cli = ctl->data.ring.client_id;

		/* Stop accepting on iSCSI L2 ring */
		bnx2x_rxq_set_mac_filters(bp, cli, BNX2X_ACCEPT_NONE);
		storm_memset_mac_filters(bp, &bp->mac_filters, BP_FUNC(bp));

		/* Make the filter write visible before removing the MAC */
		mmiowb();
		barrier();

		/* Unset iSCSI L2 MAC */
		bnx2x_set_iscsi_eth_mac_addr(bp, 0);
		break;
	}
	/* CNIC returns L2 SPQ credits to the CQ credit pool */
	case DRV_CTL_RET_L2_SPQ_CREDIT_CMD: {
		int count = ctl->data.credit.credit_count;

		smp_mb__before_atomic_inc();
		atomic_add(count, &bp->cq_spq_left);
		smp_mb__after_atomic_inc();
		break;
	}

	/* iSCSI stopped - let DCBX state machine proceed */
	case DRV_CTL_ISCSI_STOPPED_CMD: {
		bnx2x_dcbx_set_params(bp, BNX2X_DCBX_STATE_ISCSI_STOPPED);
		break;
	}

	default:
		BNX2X_ERR("unknown command %x\n", ctl->cmd);
		rc = -EINVAL;
	}

	return rc;
}
10123
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +000010124void bnx2x_setup_cnic_irq_info(struct bnx2x *bp)
Michael Chan993ac7b2009-10-10 13:46:56 +000010125{
10126 struct cnic_eth_dev *cp = &bp->cnic_eth_dev;
10127
10128 if (bp->flags & USING_MSIX_FLAG) {
10129 cp->drv_state |= CNIC_DRV_STATE_USING_MSIX;
10130 cp->irq_arr[0].irq_flags |= CNIC_IRQ_FL_MSIX;
10131 cp->irq_arr[0].vector = bp->msix_table[1].vector;
10132 } else {
10133 cp->drv_state &= ~CNIC_DRV_STATE_USING_MSIX;
10134 cp->irq_arr[0].irq_flags &= ~CNIC_IRQ_FL_MSIX;
10135 }
Dmitry Kravkovf2e08992010-10-06 03:28:26 +000010136 if (CHIP_IS_E2(bp))
10137 cp->irq_arr[0].status_blk = (void *)bp->cnic_sb.e2_sb;
10138 else
10139 cp->irq_arr[0].status_blk = (void *)bp->cnic_sb.e1x_sb;
10140
Michael Chan993ac7b2009-10-10 13:46:56 +000010141 cp->irq_arr[0].status_blk_num = CNIC_SB_ID(bp);
Dmitry Kravkov523224a2010-10-06 03:23:26 +000010142 cp->irq_arr[0].status_blk_num2 = CNIC_IGU_SB_ID(bp);
Michael Chan993ac7b2009-10-10 13:46:56 +000010143 cp->irq_arr[1].status_blk = bp->def_status_blk;
10144 cp->irq_arr[1].status_blk_num = DEF_SB_ID;
Dmitry Kravkov523224a2010-10-06 03:23:26 +000010145 cp->irq_arr[1].status_blk_num2 = DEF_SB_IGU_ID;
Michael Chan993ac7b2009-10-10 13:46:56 +000010146
10147 cp->num_irq = 2;
10148}
10149
/* bnx2x_register_cnic - CNIC callback: register a CNIC driver with this
 * device.  Allocates the KWQ ring and publishes @ops last (via RCU) so
 * readers never see a half-initialized state.
 * Returns 0, -EINVAL for a NULL @ops, or -ENOMEM.
 */
static int bnx2x_register_cnic(struct net_device *dev, struct cnic_ops *ops,
			       void *data)
{
	struct bnx2x *bp = netdev_priv(dev);
	struct cnic_eth_dev *cp = &bp->cnic_eth_dev;

	if (ops == NULL)
		return -EINVAL;

	/* One page holds the CNIC KWQ ring */
	bp->cnic_kwq = kzalloc(PAGE_SIZE, GFP_KERNEL);
	if (!bp->cnic_kwq)
		return -ENOMEM;

	bp->cnic_kwq_cons = bp->cnic_kwq;
	bp->cnic_kwq_prod = bp->cnic_kwq;
	bp->cnic_kwq_last = bp->cnic_kwq + MAX_SP_DESC_CNT;

	bp->cnic_spq_pending = 0;
	bp->cnic_kwq_pending = 0;

	bp->cnic_data = data;

	cp->num_irq = 0;
	cp->drv_state = CNIC_DRV_STATE_REGD;
	cp->iro_arr = bp->iro_arr;

	bnx2x_setup_cnic_irq_info(bp);

	/* Publication must come last: readers use rcu_dereference() */
	rcu_assign_pointer(bp->cnic_ops, ops);

	return 0;
}
10182
/* bnx2x_unregister_cnic - CNIC callback: detach the CNIC driver.
 * Clears cnic_ops under the mutex, then waits for all RCU readers to
 * finish before freeing the KWQ ring.  Always returns 0.
 */
static int bnx2x_unregister_cnic(struct net_device *dev)
{
	struct bnx2x *bp = netdev_priv(dev);
	struct cnic_eth_dev *cp = &bp->cnic_eth_dev;

	mutex_lock(&bp->cnic_mutex);
	cp->drv_state = 0;
	rcu_assign_pointer(bp->cnic_ops, NULL);
	mutex_unlock(&bp->cnic_mutex);
	/* No reader may still hold the old ops pointer after this */
	synchronize_rcu();
	kfree(bp->cnic_kwq);
	bp->cnic_kwq = NULL;

	return 0;
}
10198
10199struct cnic_eth_dev *bnx2x_cnic_probe(struct net_device *dev)
10200{
10201 struct bnx2x *bp = netdev_priv(dev);
10202 struct cnic_eth_dev *cp = &bp->cnic_eth_dev;
10203
Vladislav Zolotarov2ba45142011-01-31 14:39:17 +000010204 /* If both iSCSI and FCoE are disabled - return NULL in
10205 * order to indicate CNIC that it should not try to work
10206 * with this device.
10207 */
10208 if (NO_ISCSI(bp) && NO_FCOE(bp))
10209 return NULL;
10210
Michael Chan993ac7b2009-10-10 13:46:56 +000010211 cp->drv_owner = THIS_MODULE;
10212 cp->chip_id = CHIP_ID(bp);
10213 cp->pdev = bp->pdev;
10214 cp->io_base = bp->regview;
10215 cp->io_base2 = bp->doorbells;
10216 cp->max_kwqe_pending = 8;
Dmitry Kravkov523224a2010-10-06 03:23:26 +000010217 cp->ctx_blk_size = CDU_ILT_PAGE_SZ;
Dmitry Kravkovc2bff632010-10-06 03:33:18 +000010218 cp->ctx_tbl_offset = FUNC_ILT_BASE(BP_FUNC(bp)) +
10219 bnx2x_cid_ilt_lines(bp);
Michael Chan993ac7b2009-10-10 13:46:56 +000010220 cp->ctx_tbl_len = CNIC_ILT_LINES;
Dmitry Kravkovc2bff632010-10-06 03:33:18 +000010221 cp->starting_cid = bnx2x_cid_ilt_lines(bp) * ILT_PAGE_CIDS;
Michael Chan993ac7b2009-10-10 13:46:56 +000010222 cp->drv_submit_kwqes_16 = bnx2x_cnic_sp_queue;
10223 cp->drv_ctl = bnx2x_drv_ctl;
10224 cp->drv_register_cnic = bnx2x_register_cnic;
10225 cp->drv_unregister_cnic = bnx2x_unregister_cnic;
Vladislav Zolotarovec6ba942010-12-13 05:44:01 +000010226 cp->fcoe_init_cid = BNX2X_FCOE_ETH_CID;
10227 cp->iscsi_l2_client_id = BNX2X_ISCSI_ETH_CL_ID +
10228 BP_E1HVN(bp) * NONE_ETH_CONTEXT_USE;
Dmitry Kravkovc2bff632010-10-06 03:33:18 +000010229 cp->iscsi_l2_cid = BNX2X_ISCSI_ETH_CID;
Michael Chan993ac7b2009-10-10 13:46:56 +000010230
Vladislav Zolotarov2ba45142011-01-31 14:39:17 +000010231 if (NO_ISCSI_OOO(bp))
10232 cp->drv_state |= CNIC_DRV_STATE_NO_ISCSI_OOO;
10233
10234 if (NO_ISCSI(bp))
10235 cp->drv_state |= CNIC_DRV_STATE_NO_ISCSI;
10236
10237 if (NO_FCOE(bp))
10238 cp->drv_state |= CNIC_DRV_STATE_NO_FCOE;
10239
Dmitry Kravkovc2bff632010-10-06 03:33:18 +000010240 DP(BNX2X_MSG_SP, "page_size %d, tbl_offset %d, tbl_lines %d, "
10241 "starting cid %d\n",
10242 cp->ctx_blk_size,
10243 cp->ctx_tbl_offset,
10244 cp->ctx_tbl_len,
10245 cp->starting_cid);
Michael Chan993ac7b2009-10-10 13:46:56 +000010246 return cp;
10247}
10248EXPORT_SYMBOL(bnx2x_cnic_probe);
10249
10250#endif /* BCM_CNIC */
Vladislav Zolotarov94a78b72009-04-27 03:27:43 -070010251