blob: 203e9bf658754fe30150e8e9f099212f22744fa7 [file] [log] [blame]
Eilon Greenstein34f80b02008-06-23 20:33:01 -07001/* bnx2x_main.c: Broadcom Everest network driver.
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02002 *
Vladislav Zolotarov3359fce2010-02-17 13:35:01 -08003 * Copyright (c) 2007-2010 Broadcom Corporation
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02004 *
5 * This program is free software; you can redistribute it and/or modify
6 * it under the terms of the GNU General Public License as published by
7 * the Free Software Foundation.
8 *
Eilon Greenstein24e3fce2008-06-12 14:30:28 -07009 * Maintained by: Eilon Greenstein <eilong@broadcom.com>
10 * Written by: Eliezer Tamir
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +020011 * Based on code from Michael Chan's bnx2 driver
12 * UDP CSUM errata workaround by Arik Gendelman
Eilon Greensteinca003922009-08-12 22:53:28 -070013 * Slowpath and fastpath rework by Vladislav Zolotarov
Eliezer Tamirc14423f2008-02-28 11:49:42 -080014 * Statistics and Link management by Yitchak Gertner
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +020015 *
16 */
17
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +020018#include <linux/module.h>
19#include <linux/moduleparam.h>
20#include <linux/kernel.h>
21#include <linux/device.h> /* for dev_info() */
22#include <linux/timer.h>
23#include <linux/errno.h>
24#include <linux/ioport.h>
25#include <linux/slab.h>
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +020026#include <linux/interrupt.h>
27#include <linux/pci.h>
28#include <linux/init.h>
29#include <linux/netdevice.h>
30#include <linux/etherdevice.h>
31#include <linux/skbuff.h>
32#include <linux/dma-mapping.h>
33#include <linux/bitops.h>
34#include <linux/irq.h>
35#include <linux/delay.h>
36#include <asm/byteorder.h>
37#include <linux/time.h>
38#include <linux/ethtool.h>
39#include <linux/mii.h>
Eilon Greenstein0c6671b2009-01-14 21:26:51 -080040#include <linux/if_vlan.h>
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +020041#include <net/ip.h>
42#include <net/tcp.h>
43#include <net/checksum.h>
Eilon Greenstein34f80b02008-06-23 20:33:01 -070044#include <net/ip6_checksum.h>
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +020045#include <linux/workqueue.h>
46#include <linux/crc32.h>
Eilon Greenstein34f80b02008-06-23 20:33:01 -070047#include <linux/crc32c.h>
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +020048#include <linux/prefetch.h>
49#include <linux/zlib.h>
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +020050#include <linux/io.h>
Ben Hutchings45229b42009-11-07 11:53:39 +000051#include <linux/stringify.h>
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +020052
Dmitry Kravkovb0efbb92010-07-27 12:33:43 +000053#define BNX2X_MAIN
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +020054#include "bnx2x.h"
55#include "bnx2x_init.h"
Vladislav Zolotarov94a78b72009-04-27 03:27:43 -070056#include "bnx2x_init_ops.h"
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +000057#include "bnx2x_cmn.h"
Vladislav Zolotarove4901dd2010-12-13 05:44:18 +000058#include "bnx2x_dcb.h"
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +020059
Vladislav Zolotarov94a78b72009-04-27 03:27:43 -070060#include <linux/firmware.h>
61#include "bnx2x_fw_file_hdr.h"
62/* FW files */
Ben Hutchings45229b42009-11-07 11:53:39 +000063#define FW_FILE_VERSION \
64 __stringify(BCM_5710_FW_MAJOR_VERSION) "." \
65 __stringify(BCM_5710_FW_MINOR_VERSION) "." \
66 __stringify(BCM_5710_FW_REVISION_VERSION) "." \
67 __stringify(BCM_5710_FW_ENGINEERING_VERSION)
Dmitry Kravkov560131f2010-10-06 03:18:47 +000068#define FW_FILE_NAME_E1 "bnx2x/bnx2x-e1-" FW_FILE_VERSION ".fw"
69#define FW_FILE_NAME_E1H "bnx2x/bnx2x-e1h-" FW_FILE_VERSION ".fw"
Dmitry Kravkovf2e08992010-10-06 03:28:26 +000070#define FW_FILE_NAME_E2 "bnx2x/bnx2x-e2-" FW_FILE_VERSION ".fw"
Vladislav Zolotarov94a78b72009-04-27 03:27:43 -070071
Eilon Greenstein34f80b02008-06-23 20:33:01 -070072/* Time in jiffies before concluding the transmitter is hung */
73#define TX_TIMEOUT (5*HZ)
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +020074
Andrew Morton53a10562008-02-09 23:16:41 -080075static char version[] __devinitdata =
Eilon Greenstein34f80b02008-06-23 20:33:01 -070076 "Broadcom NetXtreme II 5771x 10Gigabit Ethernet Driver "
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +020077 DRV_MODULE_NAME " " DRV_MODULE_VERSION " (" DRV_MODULE_RELDATE ")\n";
78
Eilon Greenstein24e3fce2008-06-12 14:30:28 -070079MODULE_AUTHOR("Eliezer Tamir");
Dmitry Kravkovf2e08992010-10-06 03:28:26 +000080MODULE_DESCRIPTION("Broadcom NetXtreme II "
81 "BCM57710/57711/57711E/57712/57712E Driver");
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +020082MODULE_LICENSE("GPL");
83MODULE_VERSION(DRV_MODULE_VERSION);
Ben Hutchings45229b42009-11-07 11:53:39 +000084MODULE_FIRMWARE(FW_FILE_NAME_E1);
85MODULE_FIRMWARE(FW_FILE_NAME_E1H);
Dmitry Kravkovf2e08992010-10-06 03:28:26 +000086MODULE_FIRMWARE(FW_FILE_NAME_E2);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +020087
Eilon Greenstein555f6c72009-02-12 08:36:11 +000088static int multi_mode = 1;
89module_param(multi_mode, int, 0);
Eilon Greensteinca003922009-08-12 22:53:28 -070090MODULE_PARM_DESC(multi_mode, " Multi queue mode "
91 "(0 Disable; 1 Enable (default))");
92
Dmitry Kravkovd6214d72010-10-06 03:32:10 +000093int num_queues;
Vladislav Zolotarov54b9dda2009-11-16 06:05:58 +000094module_param(num_queues, int, 0);
95MODULE_PARM_DESC(num_queues, " Number of queues for multi_mode=1"
96 " (default is as a number of CPUs)");
Eilon Greenstein555f6c72009-02-12 08:36:11 +000097
Eilon Greenstein19680c42008-08-13 15:47:33 -070098static int disable_tpa;
Eilon Greenstein19680c42008-08-13 15:47:33 -070099module_param(disable_tpa, int, 0);
Eilon Greenstein9898f862009-02-12 08:38:27 +0000100MODULE_PARM_DESC(disable_tpa, " Disable the TPA (LRO) feature");
Eilon Greenstein8badd272009-02-12 08:36:15 +0000101
102static int int_mode;
103module_param(int_mode, int, 0);
Vladislav Zolotarovcdaa7cb2010-04-19 01:13:57 +0000104MODULE_PARM_DESC(int_mode, " Force interrupt mode other then MSI-X "
105 "(1 INT#x; 2 MSI)");
Eilon Greenstein8badd272009-02-12 08:36:15 +0000106
Eilon Greensteina18f5122009-08-12 08:23:26 +0000107static int dropless_fc;
108module_param(dropless_fc, int, 0);
109MODULE_PARM_DESC(dropless_fc, " Pause on exhausted host ring");
110
Eilon Greenstein9898f862009-02-12 08:38:27 +0000111static int poll;
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +0200112module_param(poll, int, 0);
Eilon Greenstein9898f862009-02-12 08:38:27 +0000113MODULE_PARM_DESC(poll, " Use polling (for debug)");
Eilon Greenstein8d5726c2009-02-12 08:37:19 +0000114
115static int mrrs = -1;
116module_param(mrrs, int, 0);
117MODULE_PARM_DESC(mrrs, " Force Max Read Req Size (0..3) (for debug)");
118
Eilon Greenstein9898f862009-02-12 08:38:27 +0000119static int debug;
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +0200120module_param(debug, int, 0);
Eilon Greenstein9898f862009-02-12 08:38:27 +0000121MODULE_PARM_DESC(debug, " Default debug msglevel");
122
Eilon Greenstein1cf167f2009-01-14 21:22:18 -0800123static struct workqueue_struct *bnx2x_wq;
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +0200124
Vladislav Zolotarovec6ba942010-12-13 05:44:01 +0000125#ifdef BCM_CNIC
126static u8 ALL_ENODE_MACS[] = {0x01, 0x10, 0x18, 0x01, 0x00, 0x01};
127#endif
128
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +0200129enum bnx2x_board_type {
130 BCM57710 = 0,
Eilon Greenstein34f80b02008-06-23 20:33:01 -0700131 BCM57711 = 1,
132 BCM57711E = 2,
Dmitry Kravkovf2e08992010-10-06 03:28:26 +0000133 BCM57712 = 3,
134 BCM57712E = 4
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +0200135};
136
Eilon Greenstein34f80b02008-06-23 20:33:01 -0700137/* indexed by board_type, above */
Andrew Morton53a10562008-02-09 23:16:41 -0800138static struct {
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +0200139 char *name;
140} board_info[] __devinitdata = {
Eilon Greenstein34f80b02008-06-23 20:33:01 -0700141 { "Broadcom NetXtreme II BCM57710 XGb" },
142 { "Broadcom NetXtreme II BCM57711 XGb" },
Dmitry Kravkovf2e08992010-10-06 03:28:26 +0000143 { "Broadcom NetXtreme II BCM57711E XGb" },
144 { "Broadcom NetXtreme II BCM57712 XGb" },
145 { "Broadcom NetXtreme II BCM57712E XGb" }
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +0200146};
147
Dmitry Kravkovf2e08992010-10-06 03:28:26 +0000148#ifndef PCI_DEVICE_ID_NX2_57712
149#define PCI_DEVICE_ID_NX2_57712 0x1662
150#endif
151#ifndef PCI_DEVICE_ID_NX2_57712E
152#define PCI_DEVICE_ID_NX2_57712E 0x1663
153#endif
Eilon Greenstein34f80b02008-06-23 20:33:01 -0700154
Alexey Dobriyana3aa1882010-01-07 11:58:11 +0000155static DEFINE_PCI_DEVICE_TABLE(bnx2x_pci_tbl) = {
Eilon Greensteine4ed7112009-08-12 08:24:10 +0000156 { PCI_VDEVICE(BROADCOM, PCI_DEVICE_ID_NX2_57710), BCM57710 },
157 { PCI_VDEVICE(BROADCOM, PCI_DEVICE_ID_NX2_57711), BCM57711 },
158 { PCI_VDEVICE(BROADCOM, PCI_DEVICE_ID_NX2_57711E), BCM57711E },
Dmitry Kravkovf2e08992010-10-06 03:28:26 +0000159 { PCI_VDEVICE(BROADCOM, PCI_DEVICE_ID_NX2_57712), BCM57712 },
160 { PCI_VDEVICE(BROADCOM, PCI_DEVICE_ID_NX2_57712E), BCM57712E },
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +0200161 { 0 }
162};
163
164MODULE_DEVICE_TABLE(pci, bnx2x_pci_tbl);
165
166/****************************************************************************
167* General service functions
168****************************************************************************/
169
Dmitry Kravkov523224a2010-10-06 03:23:26 +0000170static inline void __storm_memset_dma_mapping(struct bnx2x *bp,
171 u32 addr, dma_addr_t mapping)
172{
173 REG_WR(bp, addr, U64_LO(mapping));
174 REG_WR(bp, addr + 4, U64_HI(mapping));
175}
176
177static inline void __storm_memset_fill(struct bnx2x *bp,
178 u32 addr, size_t size, u32 val)
179{
180 int i;
181 for (i = 0; i < size/4; i++)
182 REG_WR(bp, addr + (i * 4), val);
183}
184
185static inline void storm_memset_ustats_zero(struct bnx2x *bp,
186 u8 port, u16 stat_id)
187{
188 size_t size = sizeof(struct ustorm_per_client_stats);
189
190 u32 addr = BAR_USTRORM_INTMEM +
191 USTORM_PER_COUNTER_ID_STATS_OFFSET(port, stat_id);
192
193 __storm_memset_fill(bp, addr, size, 0);
194}
195
196static inline void storm_memset_tstats_zero(struct bnx2x *bp,
197 u8 port, u16 stat_id)
198{
199 size_t size = sizeof(struct tstorm_per_client_stats);
200
201 u32 addr = BAR_TSTRORM_INTMEM +
202 TSTORM_PER_COUNTER_ID_STATS_OFFSET(port, stat_id);
203
204 __storm_memset_fill(bp, addr, size, 0);
205}
206
207static inline void storm_memset_xstats_zero(struct bnx2x *bp,
208 u8 port, u16 stat_id)
209{
210 size_t size = sizeof(struct xstorm_per_client_stats);
211
212 u32 addr = BAR_XSTRORM_INTMEM +
213 XSTORM_PER_COUNTER_ID_STATS_OFFSET(port, stat_id);
214
215 __storm_memset_fill(bp, addr, size, 0);
216}
217
218
219static inline void storm_memset_spq_addr(struct bnx2x *bp,
220 dma_addr_t mapping, u16 abs_fid)
221{
222 u32 addr = XSEM_REG_FAST_MEMORY +
223 XSTORM_SPQ_PAGE_BASE_OFFSET(abs_fid);
224
225 __storm_memset_dma_mapping(bp, addr, mapping);
226}
227
228static inline void storm_memset_ov(struct bnx2x *bp, u16 ov, u16 abs_fid)
229{
230 REG_WR16(bp, BAR_XSTRORM_INTMEM + XSTORM_E1HOV_OFFSET(abs_fid), ov);
231}
232
233static inline void storm_memset_func_cfg(struct bnx2x *bp,
234 struct tstorm_eth_function_common_config *tcfg,
235 u16 abs_fid)
236{
237 size_t size = sizeof(struct tstorm_eth_function_common_config);
238
239 u32 addr = BAR_TSTRORM_INTMEM +
240 TSTORM_FUNCTION_COMMON_CONFIG_OFFSET(abs_fid);
241
242 __storm_memset_struct(bp, addr, size, (u32 *)tcfg);
243}
244
245static inline void storm_memset_xstats_flags(struct bnx2x *bp,
246 struct stats_indication_flags *flags,
247 u16 abs_fid)
248{
249 size_t size = sizeof(struct stats_indication_flags);
250
251 u32 addr = BAR_XSTRORM_INTMEM + XSTORM_STATS_FLAGS_OFFSET(abs_fid);
252
253 __storm_memset_struct(bp, addr, size, (u32 *)flags);
254}
255
256static inline void storm_memset_tstats_flags(struct bnx2x *bp,
257 struct stats_indication_flags *flags,
258 u16 abs_fid)
259{
260 size_t size = sizeof(struct stats_indication_flags);
261
262 u32 addr = BAR_TSTRORM_INTMEM + TSTORM_STATS_FLAGS_OFFSET(abs_fid);
263
264 __storm_memset_struct(bp, addr, size, (u32 *)flags);
265}
266
267static inline void storm_memset_ustats_flags(struct bnx2x *bp,
268 struct stats_indication_flags *flags,
269 u16 abs_fid)
270{
271 size_t size = sizeof(struct stats_indication_flags);
272
273 u32 addr = BAR_USTRORM_INTMEM + USTORM_STATS_FLAGS_OFFSET(abs_fid);
274
275 __storm_memset_struct(bp, addr, size, (u32 *)flags);
276}
277
278static inline void storm_memset_cstats_flags(struct bnx2x *bp,
279 struct stats_indication_flags *flags,
280 u16 abs_fid)
281{
282 size_t size = sizeof(struct stats_indication_flags);
283
284 u32 addr = BAR_CSTRORM_INTMEM + CSTORM_STATS_FLAGS_OFFSET(abs_fid);
285
286 __storm_memset_struct(bp, addr, size, (u32 *)flags);
287}
288
289static inline void storm_memset_xstats_addr(struct bnx2x *bp,
290 dma_addr_t mapping, u16 abs_fid)
291{
292 u32 addr = BAR_XSTRORM_INTMEM +
293 XSTORM_ETH_STATS_QUERY_ADDR_OFFSET(abs_fid);
294
295 __storm_memset_dma_mapping(bp, addr, mapping);
296}
297
298static inline void storm_memset_tstats_addr(struct bnx2x *bp,
299 dma_addr_t mapping, u16 abs_fid)
300{
301 u32 addr = BAR_TSTRORM_INTMEM +
302 TSTORM_ETH_STATS_QUERY_ADDR_OFFSET(abs_fid);
303
304 __storm_memset_dma_mapping(bp, addr, mapping);
305}
306
307static inline void storm_memset_ustats_addr(struct bnx2x *bp,
308 dma_addr_t mapping, u16 abs_fid)
309{
310 u32 addr = BAR_USTRORM_INTMEM +
311 USTORM_ETH_STATS_QUERY_ADDR_OFFSET(abs_fid);
312
313 __storm_memset_dma_mapping(bp, addr, mapping);
314}
315
316static inline void storm_memset_cstats_addr(struct bnx2x *bp,
317 dma_addr_t mapping, u16 abs_fid)
318{
319 u32 addr = BAR_CSTRORM_INTMEM +
320 CSTORM_ETH_STATS_QUERY_ADDR_OFFSET(abs_fid);
321
322 __storm_memset_dma_mapping(bp, addr, mapping);
323}
324
325static inline void storm_memset_vf_to_pf(struct bnx2x *bp, u16 abs_fid,
326 u16 pf_id)
327{
328 REG_WR8(bp, BAR_XSTRORM_INTMEM + XSTORM_VF_TO_PF_OFFSET(abs_fid),
329 pf_id);
330 REG_WR8(bp, BAR_CSTRORM_INTMEM + CSTORM_VF_TO_PF_OFFSET(abs_fid),
331 pf_id);
332 REG_WR8(bp, BAR_TSTRORM_INTMEM + TSTORM_VF_TO_PF_OFFSET(abs_fid),
333 pf_id);
334 REG_WR8(bp, BAR_USTRORM_INTMEM + USTORM_VF_TO_PF_OFFSET(abs_fid),
335 pf_id);
336}
337
338static inline void storm_memset_func_en(struct bnx2x *bp, u16 abs_fid,
339 u8 enable)
340{
341 REG_WR8(bp, BAR_XSTRORM_INTMEM + XSTORM_FUNC_EN_OFFSET(abs_fid),
342 enable);
343 REG_WR8(bp, BAR_CSTRORM_INTMEM + CSTORM_FUNC_EN_OFFSET(abs_fid),
344 enable);
345 REG_WR8(bp, BAR_TSTRORM_INTMEM + TSTORM_FUNC_EN_OFFSET(abs_fid),
346 enable);
347 REG_WR8(bp, BAR_USTRORM_INTMEM + USTORM_FUNC_EN_OFFSET(abs_fid),
348 enable);
349}
350
351static inline void storm_memset_eq_data(struct bnx2x *bp,
352 struct event_ring_data *eq_data,
353 u16 pfid)
354{
355 size_t size = sizeof(struct event_ring_data);
356
357 u32 addr = BAR_CSTRORM_INTMEM + CSTORM_EVENT_RING_DATA_OFFSET(pfid);
358
359 __storm_memset_struct(bp, addr, size, (u32 *)eq_data);
360}
361
362static inline void storm_memset_eq_prod(struct bnx2x *bp, u16 eq_prod,
363 u16 pfid)
364{
365 u32 addr = BAR_CSTRORM_INTMEM + CSTORM_EVENT_RING_PROD_OFFSET(pfid);
366 REG_WR16(bp, addr, eq_prod);
367}
368
369static inline void storm_memset_hc_timeout(struct bnx2x *bp, u8 port,
370 u16 fw_sb_id, u8 sb_index,
371 u8 ticks)
372{
373
Dmitry Kravkovf2e08992010-10-06 03:28:26 +0000374 int index_offset = CHIP_IS_E2(bp) ?
375 offsetof(struct hc_status_block_data_e2, index_data) :
Dmitry Kravkov523224a2010-10-06 03:23:26 +0000376 offsetof(struct hc_status_block_data_e1x, index_data);
377 u32 addr = BAR_CSTRORM_INTMEM +
378 CSTORM_STATUS_BLOCK_DATA_OFFSET(fw_sb_id) +
379 index_offset +
380 sizeof(struct hc_index_data)*sb_index +
381 offsetof(struct hc_index_data, timeout);
382 REG_WR8(bp, addr, ticks);
383 DP(NETIF_MSG_HW, "port %x fw_sb_id %d sb_index %d ticks %d\n",
384 port, fw_sb_id, sb_index, ticks);
385}
386static inline void storm_memset_hc_disable(struct bnx2x *bp, u8 port,
387 u16 fw_sb_id, u8 sb_index,
388 u8 disable)
389{
390 u32 enable_flag = disable ? 0 : (1 << HC_INDEX_DATA_HC_ENABLED_SHIFT);
Dmitry Kravkovf2e08992010-10-06 03:28:26 +0000391 int index_offset = CHIP_IS_E2(bp) ?
392 offsetof(struct hc_status_block_data_e2, index_data) :
Dmitry Kravkov523224a2010-10-06 03:23:26 +0000393 offsetof(struct hc_status_block_data_e1x, index_data);
394 u32 addr = BAR_CSTRORM_INTMEM +
395 CSTORM_STATUS_BLOCK_DATA_OFFSET(fw_sb_id) +
396 index_offset +
397 sizeof(struct hc_index_data)*sb_index +
398 offsetof(struct hc_index_data, flags);
399 u16 flags = REG_RD16(bp, addr);
400 /* clear and set */
401 flags &= ~HC_INDEX_DATA_HC_ENABLED;
402 flags |= enable_flag;
403 REG_WR16(bp, addr, flags);
404 DP(NETIF_MSG_HW, "port %x fw_sb_id %d sb_index %d disable %d\n",
405 port, fw_sb_id, sb_index, disable);
406}
407
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +0200408/* used only at init
409 * locking is done by mcp
410 */
stephen hemminger8d962862010-10-21 07:50:56 +0000411static void bnx2x_reg_wr_ind(struct bnx2x *bp, u32 addr, u32 val)
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +0200412{
413 pci_write_config_dword(bp->pdev, PCICFG_GRC_ADDRESS, addr);
414 pci_write_config_dword(bp->pdev, PCICFG_GRC_DATA, val);
415 pci_write_config_dword(bp->pdev, PCICFG_GRC_ADDRESS,
416 PCICFG_VENDOR_ID_OFFSET);
417}
418
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +0200419static u32 bnx2x_reg_rd_ind(struct bnx2x *bp, u32 addr)
420{
421 u32 val;
422
423 pci_write_config_dword(bp->pdev, PCICFG_GRC_ADDRESS, addr);
424 pci_read_config_dword(bp->pdev, PCICFG_GRC_DATA, &val);
425 pci_write_config_dword(bp->pdev, PCICFG_GRC_ADDRESS,
426 PCICFG_VENDOR_ID_OFFSET);
427
428 return val;
429}
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +0200430
Dmitry Kravkovf2e08992010-10-06 03:28:26 +0000431#define DMAE_DP_SRC_GRC "grc src_addr [%08x]"
432#define DMAE_DP_SRC_PCI "pci src_addr [%x:%08x]"
433#define DMAE_DP_DST_GRC "grc dst_addr [%08x]"
434#define DMAE_DP_DST_PCI "pci dst_addr [%x:%08x]"
435#define DMAE_DP_DST_NONE "dst_addr [none]"
436
/* Pretty-print a DMAE command at debug level @msglvl.
 *
 * The message layout depends on the source/destination types encoded in
 * the opcode: PCI addresses are 64-bit (hi:lo pairs), GRC addresses are
 * dword-aligned 32-bit values (hence the ">> 2" when printing them).
 */
static void bnx2x_dp_dmae(struct bnx2x *bp, struct dmae_command *dmae,
			  int msglvl)
{
	u32 src_type = dmae->opcode & DMAE_COMMAND_SRC;

	switch (dmae->opcode & DMAE_COMMAND_DST) {
	case DMAE_CMD_DST_PCI:
		/* destination is a 64-bit PCI address */
		if (src_type == DMAE_CMD_SRC_PCI)
			DP(msglvl, "DMAE: opcode 0x%08x\n"
			   "src [%x:%08x], len [%d*4], dst [%x:%08x]\n"
			   "comp_addr [%x:%08x], comp_val 0x%08x\n",
			   dmae->opcode, dmae->src_addr_hi, dmae->src_addr_lo,
			   dmae->len, dmae->dst_addr_hi, dmae->dst_addr_lo,
			   dmae->comp_addr_hi, dmae->comp_addr_lo,
			   dmae->comp_val);
		else
			DP(msglvl, "DMAE: opcode 0x%08x\n"
			   "src [%08x], len [%d*4], dst [%x:%08x]\n"
			   "comp_addr [%x:%08x], comp_val 0x%08x\n",
			   dmae->opcode, dmae->src_addr_lo >> 2,
			   dmae->len, dmae->dst_addr_hi, dmae->dst_addr_lo,
			   dmae->comp_addr_hi, dmae->comp_addr_lo,
			   dmae->comp_val);
		break;
	case DMAE_CMD_DST_GRC:
		/* destination is a 32-bit GRC (register space) address */
		if (src_type == DMAE_CMD_SRC_PCI)
			DP(msglvl, "DMAE: opcode 0x%08x\n"
			   "src [%x:%08x], len [%d*4], dst_addr [%08x]\n"
			   "comp_addr [%x:%08x], comp_val 0x%08x\n",
			   dmae->opcode, dmae->src_addr_hi, dmae->src_addr_lo,
			   dmae->len, dmae->dst_addr_lo >> 2,
			   dmae->comp_addr_hi, dmae->comp_addr_lo,
			   dmae->comp_val);
		else
			DP(msglvl, "DMAE: opcode 0x%08x\n"
			   "src [%08x], len [%d*4], dst [%08x]\n"
			   "comp_addr [%x:%08x], comp_val 0x%08x\n",
			   dmae->opcode, dmae->src_addr_lo >> 2,
			   dmae->len, dmae->dst_addr_lo >> 2,
			   dmae->comp_addr_hi, dmae->comp_addr_lo,
			   dmae->comp_val);
		break;
	default:
		/* no destination (e.g. completion-only commands) */
		if (src_type == DMAE_CMD_SRC_PCI)
			DP(msglvl, "DMAE: opcode 0x%08x\n"
			   DP_LEVEL "src_addr [%x:%08x] len [%d * 4] "
				    "dst_addr [none]\n"
			   DP_LEVEL "comp_addr [%x:%08x] comp_val 0x%08x\n",
			   dmae->opcode, dmae->src_addr_hi, dmae->src_addr_lo,
			   dmae->len, dmae->comp_addr_hi, dmae->comp_addr_lo,
			   dmae->comp_val);
		else
			DP(msglvl, "DMAE: opcode 0x%08x\n"
			   DP_LEVEL "src_addr [%08x] len [%d * 4] "
				    "dst_addr [none]\n"
			   DP_LEVEL "comp_addr [%x:%08x] comp_val 0x%08x\n",
			   dmae->opcode, dmae->src_addr_lo >> 2,
			   dmae->len, dmae->comp_addr_hi, dmae->comp_addr_lo,
			   dmae->comp_val);
		break;
	}

}
500
/* DMAE "command go" registers, indexed by DMAE channel number (0..15).
 * Writing 1 to an entry kicks the command posted in that channel.
 */
const u32 dmae_reg_go_c[] = {
	DMAE_REG_GO_C0, DMAE_REG_GO_C1, DMAE_REG_GO_C2, DMAE_REG_GO_C3,
	DMAE_REG_GO_C4, DMAE_REG_GO_C5, DMAE_REG_GO_C6, DMAE_REG_GO_C7,
	DMAE_REG_GO_C8, DMAE_REG_GO_C9, DMAE_REG_GO_C10, DMAE_REG_GO_C11,
	DMAE_REG_GO_C12, DMAE_REG_GO_C13, DMAE_REG_GO_C14, DMAE_REG_GO_C15
};
507
508/* copy command into DMAE command memory and set DMAE command go */
Dmitry Kravkov6c719d02010-07-27 12:36:15 +0000509void bnx2x_post_dmae(struct bnx2x *bp, struct dmae_command *dmae, int idx)
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +0200510{
511 u32 cmd_offset;
512 int i;
513
514 cmd_offset = (DMAE_REG_CMD_MEM + sizeof(struct dmae_command) * idx);
515 for (i = 0; i < (sizeof(struct dmae_command)/4); i++) {
516 REG_WR(bp, cmd_offset + i*4, *(((u32 *)dmae) + i));
517
Eilon Greensteinad8d3942008-06-23 20:29:02 -0700518 DP(BNX2X_MSG_OFF, "DMAE cmd[%d].%d (0x%08x) : 0x%08x\n",
519 idx, i, cmd_offset + i*4, *(((u32 *)dmae) + i));
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +0200520 }
521 REG_WR(bp, dmae_reg_go_c[idx], 1);
522}
523
Dmitry Kravkovf2e08992010-10-06 03:28:26 +0000524u32 bnx2x_dmae_opcode_add_comp(u32 opcode, u8 comp_type)
525{
526 return opcode | ((comp_type << DMAE_COMMAND_C_DST_SHIFT) |
527 DMAE_CMD_C_ENABLE);
528}
529
530u32 bnx2x_dmae_opcode_clr_src_reset(u32 opcode)
531{
532 return opcode & ~DMAE_CMD_SRC_RESET;
533}
534
535u32 bnx2x_dmae_opcode(struct bnx2x *bp, u8 src_type, u8 dst_type,
536 bool with_comp, u8 comp_type)
537{
538 u32 opcode = 0;
539
540 opcode |= ((src_type << DMAE_COMMAND_SRC_SHIFT) |
541 (dst_type << DMAE_COMMAND_DST_SHIFT));
542
543 opcode |= (DMAE_CMD_SRC_RESET | DMAE_CMD_DST_RESET);
544
545 opcode |= (BP_PORT(bp) ? DMAE_CMD_PORT_1 : DMAE_CMD_PORT_0);
546 opcode |= ((BP_E1HVN(bp) << DMAE_CMD_E1HVN_SHIFT) |
547 (BP_E1HVN(bp) << DMAE_COMMAND_DST_VN_SHIFT));
548 opcode |= (DMAE_COM_SET_ERR << DMAE_COMMAND_ERR_POLICY_SHIFT);
549
550#ifdef __BIG_ENDIAN
551 opcode |= DMAE_CMD_ENDIANITY_B_DW_SWAP;
552#else
553 opcode |= DMAE_CMD_ENDIANITY_DW_SWAP;
554#endif
555 if (with_comp)
556 opcode = bnx2x_dmae_opcode_add_comp(opcode, comp_type);
557 return opcode;
558}
559
stephen hemminger8d962862010-10-21 07:50:56 +0000560static void bnx2x_prep_dmae_with_comp(struct bnx2x *bp,
561 struct dmae_command *dmae,
562 u8 src_type, u8 dst_type)
Dmitry Kravkovf2e08992010-10-06 03:28:26 +0000563{
564 memset(dmae, 0, sizeof(struct dmae_command));
565
566 /* set the opcode */
567 dmae->opcode = bnx2x_dmae_opcode(bp, src_type, dst_type,
568 true, DMAE_COMP_PCI);
569
570 /* fill in the completion parameters */
571 dmae->comp_addr_lo = U64_LO(bnx2x_sp_mapping(bp, wb_comp));
572 dmae->comp_addr_hi = U64_HI(bnx2x_sp_mapping(bp, wb_comp));
573 dmae->comp_val = DMAE_COMP_VAL;
574}
575
/* Issue a DMAE command over the init-channel and wait for completion.
 *
 * The command must have been prepared with bnx2x_prep_dmae_with_comp()
 * so its completion value lands in the slowpath wb_comp word.  Polls
 * that word (longer on slow/emulation chips) under bp->dmae_mutex.
 *
 * Returns 0 on success, DMAE_TIMEOUT if the completion never arrives,
 * or DMAE_PCI_ERROR if the engine reported a PCI error.
 */
static int bnx2x_issue_dmae_with_comp(struct bnx2x *bp,
				      struct dmae_command *dmae)
{
	u32 *wb_comp = bnx2x_sp(bp, wb_comp);
	int cnt = CHIP_REV_IS_SLOW(bp) ? (400000) : 40;
	int rc = 0;

	DP(BNX2X_MSG_OFF, "data before [0x%08x 0x%08x 0x%08x 0x%08x]\n",
	   bp->slowpath->wb_data[0], bp->slowpath->wb_data[1],
	   bp->slowpath->wb_data[2], bp->slowpath->wb_data[3]);

	/* lock the dmae channel */
	mutex_lock(&bp->dmae_mutex);

	/* reset completion */
	*wb_comp = 0;

	/* post the command on the channel used for initializations */
	bnx2x_post_dmae(bp, dmae, INIT_DMAE_C(bp));

	/* wait for completion; PCI-error flag is masked out while polling */
	udelay(5);
	while ((*wb_comp & ~DMAE_PCI_ERR_FLAG) != DMAE_COMP_VAL) {
		DP(BNX2X_MSG_OFF, "wb_comp 0x%08x\n", *wb_comp);

		if (!cnt) {
			BNX2X_ERR("DMAE timeout!\n");
			rc = DMAE_TIMEOUT;
			goto unlock;
		}
		cnt--;
		udelay(50);
	}
	/* completed - but check whether the engine flagged a PCI error */
	if (*wb_comp & DMAE_PCI_ERR_FLAG) {
		BNX2X_ERR("DMAE PCI error!\n");
		rc = DMAE_PCI_ERROR;
	}

	DP(BNX2X_MSG_OFF, "data after [0x%08x 0x%08x 0x%08x 0x%08x]\n",
	   bp->slowpath->wb_data[0], bp->slowpath->wb_data[1],
	   bp->slowpath->wb_data[2], bp->slowpath->wb_data[3]);

unlock:
	mutex_unlock(&bp->dmae_mutex);
	return rc;
}
623
Eilon Greensteinad8d3942008-06-23 20:29:02 -0700624void bnx2x_write_dmae(struct bnx2x *bp, dma_addr_t dma_addr, u32 dst_addr,
625 u32 len32)
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +0200626{
Eilon Greenstein5ff7b6d2009-08-12 08:23:44 +0000627 struct dmae_command dmae;
Eilon Greensteinad8d3942008-06-23 20:29:02 -0700628
629 if (!bp->dmae_ready) {
630 u32 *data = bnx2x_sp(bp, wb_data[0]);
631
632 DP(BNX2X_MSG_OFF, "DMAE is not ready (dst_addr %08x len32 %d)"
633 " using indirect\n", dst_addr, len32);
634 bnx2x_init_ind_wr(bp, dst_addr, data, len32);
635 return;
636 }
637
Dmitry Kravkovf2e08992010-10-06 03:28:26 +0000638 /* set opcode and fixed command fields */
639 bnx2x_prep_dmae_with_comp(bp, &dmae, DMAE_SRC_PCI, DMAE_DST_GRC);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +0200640
Dmitry Kravkovf2e08992010-10-06 03:28:26 +0000641 /* fill in addresses and len */
Eilon Greenstein5ff7b6d2009-08-12 08:23:44 +0000642 dmae.src_addr_lo = U64_LO(dma_addr);
643 dmae.src_addr_hi = U64_HI(dma_addr);
644 dmae.dst_addr_lo = dst_addr >> 2;
645 dmae.dst_addr_hi = 0;
646 dmae.len = len32;
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +0200647
Dmitry Kravkovf2e08992010-10-06 03:28:26 +0000648 bnx2x_dp_dmae(bp, &dmae, BNX2X_MSG_OFF);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +0200649
Dmitry Kravkovf2e08992010-10-06 03:28:26 +0000650 /* issue the command and wait for completion */
651 bnx2x_issue_dmae_with_comp(bp, &dmae);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +0200652}
653
Yaniv Rosnerc18487e2008-06-23 20:27:52 -0700654void bnx2x_read_dmae(struct bnx2x *bp, u32 src_addr, u32 len32)
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +0200655{
Eilon Greenstein5ff7b6d2009-08-12 08:23:44 +0000656 struct dmae_command dmae;
Eilon Greensteinad8d3942008-06-23 20:29:02 -0700657
658 if (!bp->dmae_ready) {
659 u32 *data = bnx2x_sp(bp, wb_data[0]);
660 int i;
661
662 DP(BNX2X_MSG_OFF, "DMAE is not ready (src_addr %08x len32 %d)"
663 " using indirect\n", src_addr, len32);
664 for (i = 0; i < len32; i++)
665 data[i] = bnx2x_reg_rd_ind(bp, src_addr + i*4);
666 return;
667 }
668
Dmitry Kravkovf2e08992010-10-06 03:28:26 +0000669 /* set opcode and fixed command fields */
670 bnx2x_prep_dmae_with_comp(bp, &dmae, DMAE_SRC_GRC, DMAE_DST_PCI);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +0200671
Dmitry Kravkovf2e08992010-10-06 03:28:26 +0000672 /* fill in addresses and len */
Eilon Greenstein5ff7b6d2009-08-12 08:23:44 +0000673 dmae.src_addr_lo = src_addr >> 2;
674 dmae.src_addr_hi = 0;
675 dmae.dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, wb_data));
676 dmae.dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, wb_data));
677 dmae.len = len32;
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +0200678
Dmitry Kravkovf2e08992010-10-06 03:28:26 +0000679 bnx2x_dp_dmae(bp, &dmae, BNX2X_MSG_OFF);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +0200680
Dmitry Kravkovf2e08992010-10-06 03:28:26 +0000681 /* issue the command and wait for completion */
682 bnx2x_issue_dmae_with_comp(bp, &dmae);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +0200683}
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +0200684
stephen hemminger8d962862010-10-21 07:50:56 +0000685static void bnx2x_write_dmae_phys_len(struct bnx2x *bp, dma_addr_t phys_addr,
686 u32 addr, u32 len)
Eilon Greenstein573f2032009-08-12 08:24:14 +0000687{
Vladislav Zolotarov02e3c6c2010-04-19 01:13:33 +0000688 int dmae_wr_max = DMAE_LEN32_WR_MAX(bp);
Eilon Greenstein573f2032009-08-12 08:24:14 +0000689 int offset = 0;
690
Vladislav Zolotarov02e3c6c2010-04-19 01:13:33 +0000691 while (len > dmae_wr_max) {
Eilon Greenstein573f2032009-08-12 08:24:14 +0000692 bnx2x_write_dmae(bp, phys_addr + offset,
Vladislav Zolotarov02e3c6c2010-04-19 01:13:33 +0000693 addr + offset, dmae_wr_max);
694 offset += dmae_wr_max * 4;
695 len -= dmae_wr_max;
Eilon Greenstein573f2032009-08-12 08:24:14 +0000696 }
697
698 bnx2x_write_dmae(bp, phys_addr + offset, addr + offset, len);
699}
700
Eilon Greensteinad8d3942008-06-23 20:29:02 -0700701/* used only for slowpath so not inlined */
702static void bnx2x_wb_wr(struct bnx2x *bp, int reg, u32 val_hi, u32 val_lo)
703{
704 u32 wb_write[2];
705
706 wb_write[0] = val_hi;
707 wb_write[1] = val_lo;
708 REG_WR_DMAE(bp, reg, wb_write, 2);
709}
710
#ifdef USE_WB_RD
/* Read a 64-bit wide-bus register pair at @reg via DMAE. */
static u64 bnx2x_wb_rd(struct bnx2x *bp, int reg)
{
	u32 data[2];

	REG_RD_DMAE(bp, reg, data, 2);

	return HILO_U64(data[0], data[1]);
}
#endif
721
/* Scan the firmware assert lists of all four STORM processors (X, T, C
 * and U) and print any asserts found.  Each list entry is four dwords;
 * scanning stops at the first entry whose opcode is the "invalid assert"
 * marker (end of list).
 *
 * Returns the total number of asserts printed (0 if none).
 */
static int bnx2x_mc_assert(struct bnx2x *bp)
{
	char last_idx;
	int i, rc = 0;
	u32 row0, row1, row2, row3;

	/* XSTORM */
	last_idx = REG_RD8(bp, BAR_XSTRORM_INTMEM +
			   XSTORM_ASSERT_LIST_INDEX_OFFSET);
	if (last_idx)
		BNX2X_ERR("XSTORM_ASSERT_LIST_INDEX 0x%x\n", last_idx);

	/* print the asserts */
	for (i = 0; i < STROM_ASSERT_ARRAY_SIZE; i++) {

		row0 = REG_RD(bp, BAR_XSTRORM_INTMEM +
			      XSTORM_ASSERT_LIST_OFFSET(i));
		row1 = REG_RD(bp, BAR_XSTRORM_INTMEM +
			      XSTORM_ASSERT_LIST_OFFSET(i) + 4);
		row2 = REG_RD(bp, BAR_XSTRORM_INTMEM +
			      XSTORM_ASSERT_LIST_OFFSET(i) + 8);
		row3 = REG_RD(bp, BAR_XSTRORM_INTMEM +
			      XSTORM_ASSERT_LIST_OFFSET(i) + 12);

		if (row0 != COMMON_ASM_INVALID_ASSERT_OPCODE) {
			BNX2X_ERR("XSTORM_ASSERT_INDEX 0x%x = 0x%08x"
				  " 0x%08x 0x%08x 0x%08x\n",
				  i, row3, row2, row1, row0);
			rc++;
		} else {
			break;
		}
	}

	/* TSTORM */
	last_idx = REG_RD8(bp, BAR_TSTRORM_INTMEM +
			   TSTORM_ASSERT_LIST_INDEX_OFFSET);
	if (last_idx)
		BNX2X_ERR("TSTORM_ASSERT_LIST_INDEX 0x%x\n", last_idx);

	/* print the asserts */
	for (i = 0; i < STROM_ASSERT_ARRAY_SIZE; i++) {

		row0 = REG_RD(bp, BAR_TSTRORM_INTMEM +
			      TSTORM_ASSERT_LIST_OFFSET(i));
		row1 = REG_RD(bp, BAR_TSTRORM_INTMEM +
			      TSTORM_ASSERT_LIST_OFFSET(i) + 4);
		row2 = REG_RD(bp, BAR_TSTRORM_INTMEM +
			      TSTORM_ASSERT_LIST_OFFSET(i) + 8);
		row3 = REG_RD(bp, BAR_TSTRORM_INTMEM +
			      TSTORM_ASSERT_LIST_OFFSET(i) + 12);

		if (row0 != COMMON_ASM_INVALID_ASSERT_OPCODE) {
			BNX2X_ERR("TSTORM_ASSERT_INDEX 0x%x = 0x%08x"
				  " 0x%08x 0x%08x 0x%08x\n",
				  i, row3, row2, row1, row0);
			rc++;
		} else {
			break;
		}
	}

	/* CSTORM */
	last_idx = REG_RD8(bp, BAR_CSTRORM_INTMEM +
			   CSTORM_ASSERT_LIST_INDEX_OFFSET);
	if (last_idx)
		BNX2X_ERR("CSTORM_ASSERT_LIST_INDEX 0x%x\n", last_idx);

	/* print the asserts */
	for (i = 0; i < STROM_ASSERT_ARRAY_SIZE; i++) {

		row0 = REG_RD(bp, BAR_CSTRORM_INTMEM +
			      CSTORM_ASSERT_LIST_OFFSET(i));
		row1 = REG_RD(bp, BAR_CSTRORM_INTMEM +
			      CSTORM_ASSERT_LIST_OFFSET(i) + 4);
		row2 = REG_RD(bp, BAR_CSTRORM_INTMEM +
			      CSTORM_ASSERT_LIST_OFFSET(i) + 8);
		row3 = REG_RD(bp, BAR_CSTRORM_INTMEM +
			      CSTORM_ASSERT_LIST_OFFSET(i) + 12);

		if (row0 != COMMON_ASM_INVALID_ASSERT_OPCODE) {
			BNX2X_ERR("CSTORM_ASSERT_INDEX 0x%x = 0x%08x"
				  " 0x%08x 0x%08x 0x%08x\n",
				  i, row3, row2, row1, row0);
			rc++;
		} else {
			break;
		}
	}

	/* USTORM */
	last_idx = REG_RD8(bp, BAR_USTRORM_INTMEM +
			   USTORM_ASSERT_LIST_INDEX_OFFSET);
	if (last_idx)
		BNX2X_ERR("USTORM_ASSERT_LIST_INDEX 0x%x\n", last_idx);

	/* print the asserts */
	for (i = 0; i < STROM_ASSERT_ARRAY_SIZE; i++) {

		row0 = REG_RD(bp, BAR_USTRORM_INTMEM +
			      USTORM_ASSERT_LIST_OFFSET(i));
		row1 = REG_RD(bp, BAR_USTRORM_INTMEM +
			      USTORM_ASSERT_LIST_OFFSET(i) + 4);
		row2 = REG_RD(bp, BAR_USTRORM_INTMEM +
			      USTORM_ASSERT_LIST_OFFSET(i) + 8);
		row3 = REG_RD(bp, BAR_USTRORM_INTMEM +
			      USTORM_ASSERT_LIST_OFFSET(i) + 12);

		if (row0 != COMMON_ASM_INVALID_ASSERT_OPCODE) {
			BNX2X_ERR("USTORM_ASSERT_INDEX 0x%x = 0x%08x"
				  " 0x%08x 0x%08x 0x%08x\n",
				  i, row3, row2, row1, row0);
			rc++;
		} else {
			break;
		}
	}

	return rc;
}
Eliezer Tamirc14423f2008-02-28 11:49:42 -0800842
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +0200843static void bnx2x_fw_dump(struct bnx2x *bp)
844{
Vladislav Zolotarovcdaa7cb2010-04-19 01:13:57 +0000845 u32 addr;
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +0200846 u32 mark, offset;
Eilon Greenstein4781bfa2009-02-12 08:38:17 +0000847 __be32 data[9];
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +0200848 int word;
Dmitry Kravkovf2e08992010-10-06 03:28:26 +0000849 u32 trace_shmem_base;
Vladislav Zolotarov2145a922010-04-19 01:13:49 +0000850 if (BP_NOMCP(bp)) {
851 BNX2X_ERR("NO MCP - can not dump\n");
852 return;
853 }
Vladislav Zolotarovcdaa7cb2010-04-19 01:13:57 +0000854
Dmitry Kravkovf2e08992010-10-06 03:28:26 +0000855 if (BP_PATH(bp) == 0)
856 trace_shmem_base = bp->common.shmem_base;
857 else
858 trace_shmem_base = SHMEM2_RD(bp, other_shmem_base_addr);
859 addr = trace_shmem_base - 0x0800 + 4;
Vladislav Zolotarovcdaa7cb2010-04-19 01:13:57 +0000860 mark = REG_RD(bp, addr);
Dmitry Kravkovf2e08992010-10-06 03:28:26 +0000861 mark = (CHIP_IS_E1x(bp) ? MCP_REG_MCPR_SCRATCH : MCP_A_REG_MCPR_SCRATCH)
862 + ((mark + 0x3) & ~0x3) - 0x08000000;
Joe Perches7995c642010-02-17 15:01:52 +0000863 pr_err("begin fw dump (mark 0x%x)\n", mark);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +0200864
Joe Perches7995c642010-02-17 15:01:52 +0000865 pr_err("");
Dmitry Kravkovf2e08992010-10-06 03:28:26 +0000866 for (offset = mark; offset <= trace_shmem_base; offset += 0x8*4) {
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +0200867 for (word = 0; word < 8; word++)
Vladislav Zolotarovcdaa7cb2010-04-19 01:13:57 +0000868 data[word] = htonl(REG_RD(bp, offset + 4*word));
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +0200869 data[8] = 0x0;
Joe Perches7995c642010-02-17 15:01:52 +0000870 pr_cont("%s", (char *)data);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +0200871 }
Vladislav Zolotarovcdaa7cb2010-04-19 01:13:57 +0000872 for (offset = addr + 4; offset <= mark; offset += 0x8*4) {
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +0200873 for (word = 0; word < 8; word++)
Vladislav Zolotarovcdaa7cb2010-04-19 01:13:57 +0000874 data[word] = htonl(REG_RD(bp, offset + 4*word));
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +0200875 data[8] = 0x0;
Joe Perches7995c642010-02-17 15:01:52 +0000876 pr_cont("%s", (char *)data);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +0200877 }
Joe Perches7995c642010-02-17 15:01:52 +0000878 pr_err("end of fw dump\n");
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +0200879}
880
Dmitry Kravkov6c719d02010-07-27 12:36:15 +0000881void bnx2x_panic_dump(struct bnx2x *bp)
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +0200882{
883 int i;
Dmitry Kravkov523224a2010-10-06 03:23:26 +0000884 u16 j;
885 struct hc_sp_status_block_data sp_sb_data;
886 int func = BP_FUNC(bp);
887#ifdef BNX2X_STOP_ON_ERROR
888 u16 start = 0, end = 0;
889#endif
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +0200890
Yitchak Gertner66e855f2008-08-13 15:49:05 -0700891 bp->stats_state = STATS_STATE_DISABLED;
892 DP(BNX2X_MSG_STATS, "stats_state - DISABLED\n");
893
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +0200894 BNX2X_ERR("begin crash dump -----------------\n");
895
Eilon Greenstein8440d2b2009-02-12 08:38:22 +0000896 /* Indices */
897 /* Common */
Dmitry Kravkov523224a2010-10-06 03:23:26 +0000898 BNX2X_ERR("def_idx(0x%x) def_att_idx(0x%x) attn_state(0x%x)"
Vladislav Zolotarovcdaa7cb2010-04-19 01:13:57 +0000899 " spq_prod_idx(0x%x)\n",
Dmitry Kravkov523224a2010-10-06 03:23:26 +0000900 bp->def_idx, bp->def_att_idx,
901 bp->attn_state, bp->spq_prod_idx);
902 BNX2X_ERR("DSB: attn bits(0x%x) ack(0x%x) id(0x%x) idx(0x%x)\n",
903 bp->def_status_blk->atten_status_block.attn_bits,
904 bp->def_status_blk->atten_status_block.attn_bits_ack,
905 bp->def_status_blk->atten_status_block.status_block_id,
906 bp->def_status_blk->atten_status_block.attn_bits_index);
907 BNX2X_ERR(" def (");
908 for (i = 0; i < HC_SP_SB_MAX_INDICES; i++)
909 pr_cont("0x%x%s",
910 bp->def_status_blk->sp_sb.index_values[i],
911 (i == HC_SP_SB_MAX_INDICES - 1) ? ") " : " ");
Eilon Greenstein8440d2b2009-02-12 08:38:22 +0000912
Dmitry Kravkov523224a2010-10-06 03:23:26 +0000913 for (i = 0; i < sizeof(struct hc_sp_status_block_data)/sizeof(u32); i++)
914 *((u32 *)&sp_sb_data + i) = REG_RD(bp, BAR_CSTRORM_INTMEM +
915 CSTORM_SP_STATUS_BLOCK_DATA_OFFSET(func) +
916 i*sizeof(u32));
917
918 pr_cont("igu_sb_id(0x%x) igu_seg_id (0x%x) "
919 "pf_id(0x%x) vnic_id(0x%x) "
920 "vf_id(0x%x) vf_valid (0x%x)\n",
921 sp_sb_data.igu_sb_id,
922 sp_sb_data.igu_seg_id,
923 sp_sb_data.p_func.pf_id,
924 sp_sb_data.p_func.vnic_id,
925 sp_sb_data.p_func.vf_id,
926 sp_sb_data.p_func.vf_valid);
927
928
Vladislav Zolotarovec6ba942010-12-13 05:44:01 +0000929 for_each_eth_queue(bp, i) {
Eilon Greenstein8440d2b2009-02-12 08:38:22 +0000930 struct bnx2x_fastpath *fp = &bp->fp[i];
Dmitry Kravkov523224a2010-10-06 03:23:26 +0000931 int loop;
Dmitry Kravkovf2e08992010-10-06 03:28:26 +0000932 struct hc_status_block_data_e2 sb_data_e2;
Dmitry Kravkov523224a2010-10-06 03:23:26 +0000933 struct hc_status_block_data_e1x sb_data_e1x;
934 struct hc_status_block_sm *hc_sm_p =
Dmitry Kravkovf2e08992010-10-06 03:28:26 +0000935 CHIP_IS_E2(bp) ?
936 sb_data_e2.common.state_machine :
Dmitry Kravkov523224a2010-10-06 03:23:26 +0000937 sb_data_e1x.common.state_machine;
938 struct hc_index_data *hc_index_p =
Dmitry Kravkovf2e08992010-10-06 03:28:26 +0000939 CHIP_IS_E2(bp) ?
940 sb_data_e2.index_data :
Dmitry Kravkov523224a2010-10-06 03:23:26 +0000941 sb_data_e1x.index_data;
942 int data_size;
943 u32 *sb_data_p;
Eilon Greenstein8440d2b2009-02-12 08:38:22 +0000944
Dmitry Kravkov523224a2010-10-06 03:23:26 +0000945 /* Rx */
Vladislav Zolotarovcdaa7cb2010-04-19 01:13:57 +0000946 BNX2X_ERR("fp%d: rx_bd_prod(0x%x) rx_bd_cons(0x%x)"
Dmitry Kravkov523224a2010-10-06 03:23:26 +0000947 " rx_comp_prod(0x%x)"
Vladislav Zolotarovcdaa7cb2010-04-19 01:13:57 +0000948 " rx_comp_cons(0x%x) *rx_cons_sb(0x%x)\n",
Eilon Greenstein8440d2b2009-02-12 08:38:22 +0000949 i, fp->rx_bd_prod, fp->rx_bd_cons,
Dmitry Kravkov523224a2010-10-06 03:23:26 +0000950 fp->rx_comp_prod,
Eilon Greenstein8440d2b2009-02-12 08:38:22 +0000951 fp->rx_comp_cons, le16_to_cpu(*fp->rx_cons_sb));
Vladislav Zolotarovcdaa7cb2010-04-19 01:13:57 +0000952 BNX2X_ERR(" rx_sge_prod(0x%x) last_max_sge(0x%x)"
Dmitry Kravkov523224a2010-10-06 03:23:26 +0000953 " fp_hc_idx(0x%x)\n",
Eilon Greenstein8440d2b2009-02-12 08:38:22 +0000954 fp->rx_sge_prod, fp->last_max_sge,
Dmitry Kravkov523224a2010-10-06 03:23:26 +0000955 le16_to_cpu(fp->fp_hc_idx));
Eilon Greenstein8440d2b2009-02-12 08:38:22 +0000956
Dmitry Kravkov523224a2010-10-06 03:23:26 +0000957 /* Tx */
Vladislav Zolotarovcdaa7cb2010-04-19 01:13:57 +0000958 BNX2X_ERR("fp%d: tx_pkt_prod(0x%x) tx_pkt_cons(0x%x)"
959 " tx_bd_prod(0x%x) tx_bd_cons(0x%x)"
960 " *tx_cons_sb(0x%x)\n",
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +0200961 i, fp->tx_pkt_prod, fp->tx_pkt_cons, fp->tx_bd_prod,
Eilon Greenstein34f80b02008-06-23 20:33:01 -0700962 fp->tx_bd_cons, le16_to_cpu(*fp->tx_cons_sb));
Dmitry Kravkov523224a2010-10-06 03:23:26 +0000963
Dmitry Kravkovf2e08992010-10-06 03:28:26 +0000964 loop = CHIP_IS_E2(bp) ?
965 HC_SB_MAX_INDICES_E2 : HC_SB_MAX_INDICES_E1X;
Dmitry Kravkov523224a2010-10-06 03:23:26 +0000966
967 /* host sb data */
968
Vladislav Zolotarovec6ba942010-12-13 05:44:01 +0000969#ifdef BCM_CNIC
970 if (IS_FCOE_FP(fp))
971 continue;
972#endif
Dmitry Kravkov523224a2010-10-06 03:23:26 +0000973 BNX2X_ERR(" run indexes (");
974 for (j = 0; j < HC_SB_MAX_SM; j++)
975 pr_cont("0x%x%s",
976 fp->sb_running_index[j],
977 (j == HC_SB_MAX_SM - 1) ? ")" : " ");
978
979 BNX2X_ERR(" indexes (");
980 for (j = 0; j < loop; j++)
981 pr_cont("0x%x%s",
982 fp->sb_index_values[j],
983 (j == loop - 1) ? ")" : " ");
984 /* fw sb data */
Dmitry Kravkovf2e08992010-10-06 03:28:26 +0000985 data_size = CHIP_IS_E2(bp) ?
986 sizeof(struct hc_status_block_data_e2) :
Dmitry Kravkov523224a2010-10-06 03:23:26 +0000987 sizeof(struct hc_status_block_data_e1x);
988 data_size /= sizeof(u32);
Dmitry Kravkovf2e08992010-10-06 03:28:26 +0000989 sb_data_p = CHIP_IS_E2(bp) ?
990 (u32 *)&sb_data_e2 :
991 (u32 *)&sb_data_e1x;
Dmitry Kravkov523224a2010-10-06 03:23:26 +0000992 /* copy sb data in here */
993 for (j = 0; j < data_size; j++)
994 *(sb_data_p + j) = REG_RD(bp, BAR_CSTRORM_INTMEM +
995 CSTORM_STATUS_BLOCK_DATA_OFFSET(fp->fw_sb_id) +
996 j * sizeof(u32));
997
Dmitry Kravkovf2e08992010-10-06 03:28:26 +0000998 if (CHIP_IS_E2(bp)) {
999 pr_cont("pf_id(0x%x) vf_id (0x%x) vf_valid(0x%x) "
1000 "vnic_id(0x%x) same_igu_sb_1b(0x%x)\n",
1001 sb_data_e2.common.p_func.pf_id,
1002 sb_data_e2.common.p_func.vf_id,
1003 sb_data_e2.common.p_func.vf_valid,
1004 sb_data_e2.common.p_func.vnic_id,
1005 sb_data_e2.common.same_igu_sb_1b);
1006 } else {
1007 pr_cont("pf_id(0x%x) vf_id (0x%x) vf_valid(0x%x) "
1008 "vnic_id(0x%x) same_igu_sb_1b(0x%x)\n",
1009 sb_data_e1x.common.p_func.pf_id,
1010 sb_data_e1x.common.p_func.vf_id,
1011 sb_data_e1x.common.p_func.vf_valid,
1012 sb_data_e1x.common.p_func.vnic_id,
1013 sb_data_e1x.common.same_igu_sb_1b);
1014 }
Dmitry Kravkov523224a2010-10-06 03:23:26 +00001015
1016 /* SB_SMs data */
1017 for (j = 0; j < HC_SB_MAX_SM; j++) {
1018 pr_cont("SM[%d] __flags (0x%x) "
1019 "igu_sb_id (0x%x) igu_seg_id(0x%x) "
1020 "time_to_expire (0x%x) "
1021 "timer_value(0x%x)\n", j,
1022 hc_sm_p[j].__flags,
1023 hc_sm_p[j].igu_sb_id,
1024 hc_sm_p[j].igu_seg_id,
1025 hc_sm_p[j].time_to_expire,
1026 hc_sm_p[j].timer_value);
1027 }
1028
1029 /* Indecies data */
1030 for (j = 0; j < loop; j++) {
1031 pr_cont("INDEX[%d] flags (0x%x) "
1032 "timeout (0x%x)\n", j,
1033 hc_index_p[j].flags,
1034 hc_index_p[j].timeout);
1035 }
Eilon Greenstein8440d2b2009-02-12 08:38:22 +00001036 }
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02001037
Dmitry Kravkov523224a2010-10-06 03:23:26 +00001038#ifdef BNX2X_STOP_ON_ERROR
Eilon Greenstein8440d2b2009-02-12 08:38:22 +00001039 /* Rings */
1040 /* Rx */
Vladislav Zolotarovec6ba942010-12-13 05:44:01 +00001041 for_each_rx_queue(bp, i) {
Eilon Greenstein8440d2b2009-02-12 08:38:22 +00001042 struct bnx2x_fastpath *fp = &bp->fp[i];
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02001043
1044 start = RX_BD(le16_to_cpu(*fp->rx_cons_sb) - 10);
1045 end = RX_BD(le16_to_cpu(*fp->rx_cons_sb) + 503);
Eilon Greenstein8440d2b2009-02-12 08:38:22 +00001046 for (j = start; j != end; j = RX_BD(j + 1)) {
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02001047 u32 *rx_bd = (u32 *)&fp->rx_desc_ring[j];
1048 struct sw_rx_bd *sw_bd = &fp->rx_buf_ring[j];
1049
Eilon Greensteinc3eefaf2009-03-02 08:01:09 +00001050 BNX2X_ERR("fp%d: rx_bd[%x]=[%x:%x] sw_bd=[%p]\n",
1051 i, j, rx_bd[1], rx_bd[0], sw_bd->skb);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02001052 }
1053
Eilon Greenstein3196a882008-08-13 15:58:49 -07001054 start = RX_SGE(fp->rx_sge_prod);
1055 end = RX_SGE(fp->last_max_sge);
Eilon Greenstein8440d2b2009-02-12 08:38:22 +00001056 for (j = start; j != end; j = RX_SGE(j + 1)) {
Vladislav Zolotarov7a9b2552008-06-23 20:34:36 -07001057 u32 *rx_sge = (u32 *)&fp->rx_sge_ring[j];
1058 struct sw_rx_page *sw_page = &fp->rx_page_ring[j];
1059
Eilon Greensteinc3eefaf2009-03-02 08:01:09 +00001060 BNX2X_ERR("fp%d: rx_sge[%x]=[%x:%x] sw_page=[%p]\n",
1061 i, j, rx_sge[1], rx_sge[0], sw_page->page);
Vladislav Zolotarov7a9b2552008-06-23 20:34:36 -07001062 }
1063
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02001064 start = RCQ_BD(fp->rx_comp_cons - 10);
1065 end = RCQ_BD(fp->rx_comp_cons + 503);
Eilon Greenstein8440d2b2009-02-12 08:38:22 +00001066 for (j = start; j != end; j = RCQ_BD(j + 1)) {
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02001067 u32 *cqe = (u32 *)&fp->rx_comp_ring[j];
1068
Eilon Greensteinc3eefaf2009-03-02 08:01:09 +00001069 BNX2X_ERR("fp%d: cqe[%x]=[%x:%x:%x:%x]\n",
1070 i, j, cqe[0], cqe[1], cqe[2], cqe[3]);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02001071 }
1072 }
1073
Eilon Greenstein8440d2b2009-02-12 08:38:22 +00001074 /* Tx */
Vladislav Zolotarovec6ba942010-12-13 05:44:01 +00001075 for_each_tx_queue(bp, i) {
Eilon Greenstein8440d2b2009-02-12 08:38:22 +00001076 struct bnx2x_fastpath *fp = &bp->fp[i];
1077
1078 start = TX_BD(le16_to_cpu(*fp->tx_cons_sb) - 10);
1079 end = TX_BD(le16_to_cpu(*fp->tx_cons_sb) + 245);
1080 for (j = start; j != end; j = TX_BD(j + 1)) {
1081 struct sw_tx_bd *sw_bd = &fp->tx_buf_ring[j];
1082
Eilon Greensteinc3eefaf2009-03-02 08:01:09 +00001083 BNX2X_ERR("fp%d: packet[%x]=[%p,%x]\n",
1084 i, j, sw_bd->skb, sw_bd->first_bd);
Eilon Greenstein8440d2b2009-02-12 08:38:22 +00001085 }
1086
1087 start = TX_BD(fp->tx_bd_cons - 10);
1088 end = TX_BD(fp->tx_bd_cons + 254);
1089 for (j = start; j != end; j = TX_BD(j + 1)) {
1090 u32 *tx_bd = (u32 *)&fp->tx_desc_ring[j];
1091
Eilon Greensteinc3eefaf2009-03-02 08:01:09 +00001092 BNX2X_ERR("fp%d: tx_bd[%x]=[%x:%x:%x:%x]\n",
1093 i, j, tx_bd[0], tx_bd[1], tx_bd[2], tx_bd[3]);
Eilon Greenstein8440d2b2009-02-12 08:38:22 +00001094 }
1095 }
Dmitry Kravkov523224a2010-10-06 03:23:26 +00001096#endif
Eilon Greenstein34f80b02008-06-23 20:33:01 -07001097 bnx2x_fw_dump(bp);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02001098 bnx2x_mc_assert(bp);
1099 BNX2X_ERR("end crash dump -----------------\n");
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02001100}
1101
Dmitry Kravkovf2e08992010-10-06 03:28:26 +00001102static void bnx2x_hc_int_enable(struct bnx2x *bp)
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02001103{
Eilon Greenstein34f80b02008-06-23 20:33:01 -07001104 int port = BP_PORT(bp);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02001105 u32 addr = port ? HC_REG_CONFIG_1 : HC_REG_CONFIG_0;
1106 u32 val = REG_RD(bp, addr);
1107 int msix = (bp->flags & USING_MSIX_FLAG) ? 1 : 0;
Eilon Greenstein8badd272009-02-12 08:36:15 +00001108 int msi = (bp->flags & USING_MSI_FLAG) ? 1 : 0;
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02001109
1110 if (msix) {
Eilon Greenstein8badd272009-02-12 08:36:15 +00001111 val &= ~(HC_CONFIG_0_REG_SINGLE_ISR_EN_0 |
1112 HC_CONFIG_0_REG_INT_LINE_EN_0);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02001113 val |= (HC_CONFIG_0_REG_MSI_MSIX_INT_EN_0 |
1114 HC_CONFIG_0_REG_ATTN_BIT_EN_0);
Eilon Greenstein8badd272009-02-12 08:36:15 +00001115 } else if (msi) {
1116 val &= ~HC_CONFIG_0_REG_INT_LINE_EN_0;
1117 val |= (HC_CONFIG_0_REG_SINGLE_ISR_EN_0 |
1118 HC_CONFIG_0_REG_MSI_MSIX_INT_EN_0 |
1119 HC_CONFIG_0_REG_ATTN_BIT_EN_0);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02001120 } else {
1121 val |= (HC_CONFIG_0_REG_SINGLE_ISR_EN_0 |
Eliezer Tamir615f8fd2008-02-28 11:54:54 -08001122 HC_CONFIG_0_REG_MSI_MSIX_INT_EN_0 |
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02001123 HC_CONFIG_0_REG_INT_LINE_EN_0 |
1124 HC_CONFIG_0_REG_ATTN_BIT_EN_0);
Eliezer Tamir615f8fd2008-02-28 11:54:54 -08001125
Dmitry Kravkova0fd0652010-10-19 05:13:05 +00001126 if (!CHIP_IS_E1(bp)) {
1127 DP(NETIF_MSG_INTR, "write %x to HC %d (addr 0x%x)\n",
1128 val, port, addr);
Eliezer Tamir615f8fd2008-02-28 11:54:54 -08001129
Dmitry Kravkova0fd0652010-10-19 05:13:05 +00001130 REG_WR(bp, addr, val);
Eliezer Tamir615f8fd2008-02-28 11:54:54 -08001131
Dmitry Kravkova0fd0652010-10-19 05:13:05 +00001132 val &= ~HC_CONFIG_0_REG_MSI_MSIX_INT_EN_0;
1133 }
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02001134 }
1135
Dmitry Kravkova0fd0652010-10-19 05:13:05 +00001136 if (CHIP_IS_E1(bp))
1137 REG_WR(bp, HC_REG_INT_MASK + port*4, 0x1FFFF);
1138
Eilon Greenstein8badd272009-02-12 08:36:15 +00001139 DP(NETIF_MSG_INTR, "write %x to HC %d (addr 0x%x) mode %s\n",
1140 val, port, addr, (msix ? "MSI-X" : (msi ? "MSI" : "INTx")));
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02001141
1142 REG_WR(bp, addr, val);
Eilon Greenstein37dbbf32009-07-21 05:47:33 +00001143 /*
1144 * Ensure that HC_CONFIG is written before leading/trailing edge config
1145 */
1146 mmiowb();
1147 barrier();
Eilon Greenstein34f80b02008-06-23 20:33:01 -07001148
Dmitry Kravkovf2e08992010-10-06 03:28:26 +00001149 if (!CHIP_IS_E1(bp)) {
Eilon Greenstein34f80b02008-06-23 20:33:01 -07001150 /* init leading/trailing edge */
Dmitry Kravkovfb3bff12010-10-06 03:26:40 +00001151 if (IS_MF(bp)) {
Eilon Greenstein8badd272009-02-12 08:36:15 +00001152 val = (0xee0f | (1 << (BP_E1HVN(bp) + 4)));
Eilon Greenstein34f80b02008-06-23 20:33:01 -07001153 if (bp->port.pmf)
Eilon Greenstein4acac6a2009-02-12 08:36:52 +00001154 /* enable nig and gpio3 attention */
1155 val |= 0x1100;
Eilon Greenstein34f80b02008-06-23 20:33:01 -07001156 } else
1157 val = 0xffff;
1158
1159 REG_WR(bp, HC_REG_TRAILING_EDGE_0 + port*8, val);
1160 REG_WR(bp, HC_REG_LEADING_EDGE_0 + port*8, val);
1161 }
Eilon Greenstein37dbbf32009-07-21 05:47:33 +00001162
1163 /* Make sure that interrupts are indeed enabled from here on */
1164 mmiowb();
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02001165}
1166
Dmitry Kravkovf2e08992010-10-06 03:28:26 +00001167static void bnx2x_igu_int_enable(struct bnx2x *bp)
1168{
1169 u32 val;
1170 int msix = (bp->flags & USING_MSIX_FLAG) ? 1 : 0;
1171 int msi = (bp->flags & USING_MSI_FLAG) ? 1 : 0;
1172
1173 val = REG_RD(bp, IGU_REG_PF_CONFIGURATION);
1174
1175 if (msix) {
1176 val &= ~(IGU_PF_CONF_INT_LINE_EN |
1177 IGU_PF_CONF_SINGLE_ISR_EN);
1178 val |= (IGU_PF_CONF_FUNC_EN |
1179 IGU_PF_CONF_MSI_MSIX_EN |
1180 IGU_PF_CONF_ATTN_BIT_EN);
1181 } else if (msi) {
1182 val &= ~IGU_PF_CONF_INT_LINE_EN;
1183 val |= (IGU_PF_CONF_FUNC_EN |
1184 IGU_PF_CONF_MSI_MSIX_EN |
1185 IGU_PF_CONF_ATTN_BIT_EN |
1186 IGU_PF_CONF_SINGLE_ISR_EN);
1187 } else {
1188 val &= ~IGU_PF_CONF_MSI_MSIX_EN;
1189 val |= (IGU_PF_CONF_FUNC_EN |
1190 IGU_PF_CONF_INT_LINE_EN |
1191 IGU_PF_CONF_ATTN_BIT_EN |
1192 IGU_PF_CONF_SINGLE_ISR_EN);
1193 }
1194
1195 DP(NETIF_MSG_INTR, "write 0x%x to IGU mode %s\n",
1196 val, (msix ? "MSI-X" : (msi ? "MSI" : "INTx")));
1197
1198 REG_WR(bp, IGU_REG_PF_CONFIGURATION, val);
1199
1200 barrier();
1201
1202 /* init leading/trailing edge */
1203 if (IS_MF(bp)) {
1204 val = (0xee0f | (1 << (BP_E1HVN(bp) + 4)));
1205 if (bp->port.pmf)
1206 /* enable nig and gpio3 attention */
1207 val |= 0x1100;
1208 } else
1209 val = 0xffff;
1210
1211 REG_WR(bp, IGU_REG_TRAILING_EDGE_LATCH, val);
1212 REG_WR(bp, IGU_REG_LEADING_EDGE_LATCH, val);
1213
1214 /* Make sure that interrupts are indeed enabled from here on */
1215 mmiowb();
1216}
1217
1218void bnx2x_int_enable(struct bnx2x *bp)
1219{
1220 if (bp->common.int_block == INT_BLOCK_HC)
1221 bnx2x_hc_int_enable(bp);
1222 else
1223 bnx2x_igu_int_enable(bp);
1224}
1225
/*
 * bnx2x_hc_int_disable - disable interrupts through the HC block.
 *
 * Clears the interrupt enable bits in the per-port HC_REG_CONFIG
 * register (keeping MSI/MSI-X enabled on E1, see below), flushes the
 * write and verifies the new value by reading it back.
 */
static void bnx2x_hc_int_disable(struct bnx2x *bp)
{
	int port = BP_PORT(bp);
	u32 addr = port ? HC_REG_CONFIG_1 : HC_REG_CONFIG_0;
	u32 val = REG_RD(bp, addr);

	/*
	 * in E1 we must use only PCI configuration space to disable
	 * MSI/MSIX capability
	 * It's forbidden to disable IGU_PF_CONF_MSI_MSIX_EN in HC block
	 */
	if (CHIP_IS_E1(bp)) {
		/* Since IGU_PF_CONF_MSI_MSIX_EN still always on
		 * Use mask register to prevent from HC sending interrupts
		 * after we exit the function
		 */
		REG_WR(bp, HC_REG_INT_MASK + port*4, 0);

		val &= ~(HC_CONFIG_0_REG_SINGLE_ISR_EN_0 |
			 HC_CONFIG_0_REG_INT_LINE_EN_0 |
			 HC_CONFIG_0_REG_ATTN_BIT_EN_0);
	} else
		val &= ~(HC_CONFIG_0_REG_SINGLE_ISR_EN_0 |
			 HC_CONFIG_0_REG_MSI_MSIX_INT_EN_0 |
			 HC_CONFIG_0_REG_INT_LINE_EN_0 |
			 HC_CONFIG_0_REG_ATTN_BIT_EN_0);

	DP(NETIF_MSG_INTR, "write %x to HC %d (addr 0x%x)\n",
	   val, port, addr);

	/* flush all outstanding writes */
	mmiowb();

	REG_WR(bp, addr, val);
	/* read back to verify the disable took effect */
	if (REG_RD(bp, addr) != val)
		BNX2X_ERR("BUG! proper val not read from IGU!\n");
}
1263
Dmitry Kravkovf2e08992010-10-06 03:28:26 +00001264static void bnx2x_igu_int_disable(struct bnx2x *bp)
1265{
1266 u32 val = REG_RD(bp, IGU_REG_PF_CONFIGURATION);
1267
1268 val &= ~(IGU_PF_CONF_MSI_MSIX_EN |
1269 IGU_PF_CONF_INT_LINE_EN |
1270 IGU_PF_CONF_ATTN_BIT_EN);
1271
1272 DP(NETIF_MSG_INTR, "write %x to IGU\n", val);
1273
1274 /* flush all outstanding writes */
1275 mmiowb();
1276
1277 REG_WR(bp, IGU_REG_PF_CONFIGURATION, val);
1278 if (REG_RD(bp, IGU_REG_PF_CONFIGURATION) != val)
1279 BNX2X_ERR("BUG! proper val not read from IGU!\n");
1280}
1281
stephen hemminger8d962862010-10-21 07:50:56 +00001282static void bnx2x_int_disable(struct bnx2x *bp)
Dmitry Kravkovf2e08992010-10-06 03:28:26 +00001283{
1284 if (bp->common.int_block == INT_BLOCK_HC)
1285 bnx2x_hc_int_disable(bp);
1286 else
1287 bnx2x_igu_int_disable(bp);
1288}
1289
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00001290void bnx2x_int_disable_sync(struct bnx2x *bp, int disable_hw)
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02001291{
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02001292 int msix = (bp->flags & USING_MSIX_FLAG) ? 1 : 0;
Eilon Greenstein8badd272009-02-12 08:36:15 +00001293 int i, offset;
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02001294
Eilon Greenstein34f80b02008-06-23 20:33:01 -07001295 /* disable interrupt handling */
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02001296 atomic_inc(&bp->intr_sem);
Eilon Greensteine1510702009-07-21 05:47:41 +00001297 smp_wmb(); /* Ensure that bp->intr_sem update is SMP-safe */
1298
Yitchak Gertnerf8ef6e42008-09-09 05:07:25 -07001299 if (disable_hw)
1300 /* prevent the HW from sending interrupts */
1301 bnx2x_int_disable(bp);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02001302
1303 /* make sure all ISRs are done */
1304 if (msix) {
Eilon Greenstein8badd272009-02-12 08:36:15 +00001305 synchronize_irq(bp->msix_table[0].vector);
1306 offset = 1;
Michael Chan37b091b2009-10-10 13:46:55 +00001307#ifdef BCM_CNIC
1308 offset++;
1309#endif
Vladislav Zolotarovec6ba942010-12-13 05:44:01 +00001310 for_each_eth_queue(bp, i)
Eilon Greenstein8badd272009-02-12 08:36:15 +00001311 synchronize_irq(bp->msix_table[i + offset].vector);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02001312 } else
1313 synchronize_irq(bp->pdev->irq);
1314
1315 /* make sure sp_task is not running */
Eilon Greenstein1cf167f2009-01-14 21:22:18 -08001316 cancel_delayed_work(&bp->sp_task);
1317 flush_workqueue(bnx2x_wq);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02001318}
1319
Eilon Greenstein34f80b02008-06-23 20:33:01 -07001320/* fast path */
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02001321
1322/*
Eilon Greenstein34f80b02008-06-23 20:33:01 -07001323 * General service functions
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02001324 */
1325
Vladislav Zolotarov72fd0712010-04-19 01:13:12 +00001326/* Return true if succeeded to acquire the lock */
1327static bool bnx2x_trylock_hw_lock(struct bnx2x *bp, u32 resource)
1328{
1329 u32 lock_status;
1330 u32 resource_bit = (1 << resource);
1331 int func = BP_FUNC(bp);
1332 u32 hw_lock_control_reg;
1333
1334 DP(NETIF_MSG_HW, "Trying to take a lock on resource %d\n", resource);
1335
1336 /* Validating that the resource is within range */
1337 if (resource > HW_LOCK_MAX_RESOURCE_VALUE) {
1338 DP(NETIF_MSG_HW,
1339 "resource(0x%x) > HW_LOCK_MAX_RESOURCE_VALUE(0x%x)\n",
1340 resource, HW_LOCK_MAX_RESOURCE_VALUE);
Eric Dumazet0fdf4d02010-08-26 22:03:53 -07001341 return false;
Vladislav Zolotarov72fd0712010-04-19 01:13:12 +00001342 }
1343
1344 if (func <= 5)
1345 hw_lock_control_reg = (MISC_REG_DRIVER_CONTROL_1 + func*8);
1346 else
1347 hw_lock_control_reg =
1348 (MISC_REG_DRIVER_CONTROL_7 + (func - 6)*8);
1349
1350 /* Try to acquire the lock */
1351 REG_WR(bp, hw_lock_control_reg + 4, resource_bit);
1352 lock_status = REG_RD(bp, hw_lock_control_reg);
1353 if (lock_status & resource_bit)
1354 return true;
1355
1356 DP(NETIF_MSG_HW, "Failed to get a lock on resource %d\n", resource);
1357 return false;
1358}
1359
Michael Chan993ac7b2009-10-10 13:46:56 +00001360#ifdef BCM_CNIC
1361static void bnx2x_cnic_cfc_comp(struct bnx2x *bp, int cid);
1362#endif
Eilon Greenstein3196a882008-08-13 15:58:49 -07001363
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00001364void bnx2x_sp_event(struct bnx2x_fastpath *fp,
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02001365 union eth_rx_cqe *rr_cqe)
1366{
1367 struct bnx2x *bp = fp->bp;
1368 int cid = SW_CID(rr_cqe->ramrod_cqe.conn_and_cmd_data);
1369 int command = CQE_CMD(rr_cqe->ramrod_cqe.conn_and_cmd_data);
1370
Eilon Greenstein34f80b02008-06-23 20:33:01 -07001371 DP(BNX2X_MSG_SP,
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02001372 "fp %d cid %d got ramrod #%d state is %x type is %d\n",
Eilon Greenstein0626b892009-02-12 08:38:14 +00001373 fp->index, cid, command, bp->state,
Eilon Greenstein34f80b02008-06-23 20:33:01 -07001374 rr_cqe->ramrod_cqe.ramrod_type);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02001375
Dmitry Kravkov523224a2010-10-06 03:23:26 +00001376 switch (command | fp->state) {
1377 case (RAMROD_CMD_ID_ETH_CLIENT_SETUP | BNX2X_FP_STATE_OPENING):
1378 DP(NETIF_MSG_IFUP, "got MULTI[%d] setup ramrod\n", cid);
1379 fp->state = BNX2X_FP_STATE_OPEN;
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02001380 break;
1381
Dmitry Kravkov523224a2010-10-06 03:23:26 +00001382 case (RAMROD_CMD_ID_ETH_HALT | BNX2X_FP_STATE_HALTING):
1383 DP(NETIF_MSG_IFDOWN, "got MULTI[%d] halt ramrod\n", cid);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02001384 fp->state = BNX2X_FP_STATE_HALTED;
1385 break;
1386
Dmitry Kravkov523224a2010-10-06 03:23:26 +00001387 case (RAMROD_CMD_ID_ETH_TERMINATE | BNX2X_FP_STATE_TERMINATING):
1388 DP(NETIF_MSG_IFDOWN, "got MULTI[%d] teminate ramrod\n", cid);
1389 fp->state = BNX2X_FP_STATE_TERMINATED;
Eliezer Tamir49d66772008-02-28 11:53:13 -08001390 break;
1391
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02001392 default:
Dmitry Kravkov523224a2010-10-06 03:23:26 +00001393 BNX2X_ERR("unexpected MC reply (%d) "
1394 "fp[%d] state is %x\n",
1395 command, fp->index, fp->state);
Eilon Greenstein34f80b02008-06-23 20:33:01 -07001396 break;
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02001397 }
Dmitry Kravkov523224a2010-10-06 03:23:26 +00001398
Dmitry Kravkov8fe23fb2010-10-06 03:27:41 +00001399 smp_mb__before_atomic_inc();
1400 atomic_inc(&bp->spq_left);
Dmitry Kravkov523224a2010-10-06 03:23:26 +00001401 /* push the change in fp->state and towards the memory */
1402 smp_wmb();
1403
1404 return;
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02001405}
1406
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00001407irqreturn_t bnx2x_interrupt(int irq, void *dev_instance)
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02001408{
Eilon Greenstein555f6c72009-02-12 08:36:11 +00001409 struct bnx2x *bp = netdev_priv(dev_instance);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02001410 u16 status = bnx2x_ack_int(bp);
Eilon Greenstein34f80b02008-06-23 20:33:01 -07001411 u16 mask;
Eilon Greensteinca003922009-08-12 22:53:28 -07001412 int i;
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02001413
Eilon Greenstein34f80b02008-06-23 20:33:01 -07001414 /* Return here if interrupt is shared and it's not for us */
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02001415 if (unlikely(status == 0)) {
1416 DP(NETIF_MSG_INTR, "not our interrupt!\n");
1417 return IRQ_NONE;
1418 }
Eilon Greensteinf5372252009-02-12 08:38:30 +00001419 DP(NETIF_MSG_INTR, "got an interrupt status 0x%x\n", status);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02001420
Eilon Greenstein34f80b02008-06-23 20:33:01 -07001421 /* Return here if interrupt is disabled */
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02001422 if (unlikely(atomic_read(&bp->intr_sem) != 0)) {
1423 DP(NETIF_MSG_INTR, "called but intr_sem not 0, returning\n");
1424 return IRQ_HANDLED;
1425 }
1426
Eilon Greenstein3196a882008-08-13 15:58:49 -07001427#ifdef BNX2X_STOP_ON_ERROR
1428 if (unlikely(bp->panic))
1429 return IRQ_HANDLED;
1430#endif
1431
Vladislav Zolotarovec6ba942010-12-13 05:44:01 +00001432 for_each_eth_queue(bp, i) {
Eilon Greensteinca003922009-08-12 22:53:28 -07001433 struct bnx2x_fastpath *fp = &bp->fp[i];
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02001434
Dmitry Kravkov523224a2010-10-06 03:23:26 +00001435 mask = 0x2 << (fp->index + CNIC_CONTEXT_USE);
Eilon Greensteinca003922009-08-12 22:53:28 -07001436 if (status & mask) {
Vladislav Zolotarov54b9dda2009-11-16 06:05:58 +00001437 /* Handle Rx and Tx according to SB id */
1438 prefetch(fp->rx_cons_sb);
Vladislav Zolotarov54b9dda2009-11-16 06:05:58 +00001439 prefetch(fp->tx_cons_sb);
Dmitry Kravkov523224a2010-10-06 03:23:26 +00001440 prefetch(&fp->sb_running_index[SM_RX_ID]);
Vladislav Zolotarov54b9dda2009-11-16 06:05:58 +00001441 napi_schedule(&bnx2x_fp(bp, fp->index, napi));
Eilon Greensteinca003922009-08-12 22:53:28 -07001442 status &= ~mask;
1443 }
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02001444 }
1445
Michael Chan993ac7b2009-10-10 13:46:56 +00001446#ifdef BCM_CNIC
Dmitry Kravkov523224a2010-10-06 03:23:26 +00001447 mask = 0x2;
Michael Chan993ac7b2009-10-10 13:46:56 +00001448 if (status & (mask | 0x1)) {
1449 struct cnic_ops *c_ops = NULL;
1450
1451 rcu_read_lock();
1452 c_ops = rcu_dereference(bp->cnic_ops);
1453 if (c_ops)
1454 c_ops->cnic_handler(bp->cnic_data, NULL);
1455 rcu_read_unlock();
1456
1457 status &= ~mask;
1458 }
1459#endif
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02001460
Eilon Greenstein34f80b02008-06-23 20:33:01 -07001461 if (unlikely(status & 0x1)) {
Eilon Greenstein1cf167f2009-01-14 21:22:18 -08001462 queue_delayed_work(bnx2x_wq, &bp->sp_task, 0);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02001463
1464 status &= ~0x1;
1465 if (!status)
1466 return IRQ_HANDLED;
1467 }
1468
Vladislav Zolotarovcdaa7cb2010-04-19 01:13:57 +00001469 if (unlikely(status))
1470 DP(NETIF_MSG_INTR, "got an unknown interrupt! (status 0x%x)\n",
Eilon Greenstein34f80b02008-06-23 20:33:01 -07001471 status);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02001472
1473 return IRQ_HANDLED;
1474}
1475
1476/* end of fast path */
1477
Yaniv Rosnerc18487e2008-06-23 20:27:52 -07001478
1479/* Link */
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02001480
1481/*
1482 * General service functions
1483 */
1484
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00001485int bnx2x_acquire_hw_lock(struct bnx2x *bp, u32 resource)
Eliezer Tamirf1410642008-02-28 11:51:50 -08001486{
Eliezer Tamirf1410642008-02-28 11:51:50 -08001487 u32 lock_status;
1488 u32 resource_bit = (1 << resource);
Yitchak Gertner4a37fb62008-08-13 15:50:23 -07001489 int func = BP_FUNC(bp);
1490 u32 hw_lock_control_reg;
Yaniv Rosnerc18487e2008-06-23 20:27:52 -07001491 int cnt;
Eliezer Tamirf1410642008-02-28 11:51:50 -08001492
1493 /* Validating that the resource is within range */
1494 if (resource > HW_LOCK_MAX_RESOURCE_VALUE) {
1495 DP(NETIF_MSG_HW,
1496 "resource(0x%x) > HW_LOCK_MAX_RESOURCE_VALUE(0x%x)\n",
1497 resource, HW_LOCK_MAX_RESOURCE_VALUE);
1498 return -EINVAL;
1499 }
1500
Yitchak Gertner4a37fb62008-08-13 15:50:23 -07001501 if (func <= 5) {
1502 hw_lock_control_reg = (MISC_REG_DRIVER_CONTROL_1 + func*8);
1503 } else {
1504 hw_lock_control_reg =
1505 (MISC_REG_DRIVER_CONTROL_7 + (func - 6)*8);
1506 }
1507
Eliezer Tamirf1410642008-02-28 11:51:50 -08001508 /* Validating that the resource is not already taken */
Yitchak Gertner4a37fb62008-08-13 15:50:23 -07001509 lock_status = REG_RD(bp, hw_lock_control_reg);
Eliezer Tamirf1410642008-02-28 11:51:50 -08001510 if (lock_status & resource_bit) {
1511 DP(NETIF_MSG_HW, "lock_status 0x%x resource_bit 0x%x\n",
1512 lock_status, resource_bit);
1513 return -EEXIST;
1514 }
1515
Eilon Greenstein46230476b2008-08-25 15:23:30 -07001516 /* Try for 5 second every 5ms */
1517 for (cnt = 0; cnt < 1000; cnt++) {
Eliezer Tamirf1410642008-02-28 11:51:50 -08001518 /* Try to acquire the lock */
Yitchak Gertner4a37fb62008-08-13 15:50:23 -07001519 REG_WR(bp, hw_lock_control_reg + 4, resource_bit);
1520 lock_status = REG_RD(bp, hw_lock_control_reg);
Eliezer Tamirf1410642008-02-28 11:51:50 -08001521 if (lock_status & resource_bit)
1522 return 0;
1523
1524 msleep(5);
1525 }
1526 DP(NETIF_MSG_HW, "Timeout\n");
1527 return -EAGAIN;
1528}
1529
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00001530int bnx2x_release_hw_lock(struct bnx2x *bp, u32 resource)
Eliezer Tamirf1410642008-02-28 11:51:50 -08001531{
1532 u32 lock_status;
1533 u32 resource_bit = (1 << resource);
Yitchak Gertner4a37fb62008-08-13 15:50:23 -07001534 int func = BP_FUNC(bp);
1535 u32 hw_lock_control_reg;
Eliezer Tamirf1410642008-02-28 11:51:50 -08001536
Vladislav Zolotarov72fd0712010-04-19 01:13:12 +00001537 DP(NETIF_MSG_HW, "Releasing a lock on resource %d\n", resource);
1538
Eliezer Tamirf1410642008-02-28 11:51:50 -08001539 /* Validating that the resource is within range */
1540 if (resource > HW_LOCK_MAX_RESOURCE_VALUE) {
1541 DP(NETIF_MSG_HW,
1542 "resource(0x%x) > HW_LOCK_MAX_RESOURCE_VALUE(0x%x)\n",
1543 resource, HW_LOCK_MAX_RESOURCE_VALUE);
1544 return -EINVAL;
1545 }
1546
Yitchak Gertner4a37fb62008-08-13 15:50:23 -07001547 if (func <= 5) {
1548 hw_lock_control_reg = (MISC_REG_DRIVER_CONTROL_1 + func*8);
1549 } else {
1550 hw_lock_control_reg =
1551 (MISC_REG_DRIVER_CONTROL_7 + (func - 6)*8);
1552 }
1553
Eliezer Tamirf1410642008-02-28 11:51:50 -08001554 /* Validating that the resource is currently taken */
Yitchak Gertner4a37fb62008-08-13 15:50:23 -07001555 lock_status = REG_RD(bp, hw_lock_control_reg);
Eliezer Tamirf1410642008-02-28 11:51:50 -08001556 if (!(lock_status & resource_bit)) {
1557 DP(NETIF_MSG_HW, "lock_status 0x%x resource_bit 0x%x\n",
1558 lock_status, resource_bit);
1559 return -EFAULT;
1560 }
1561
Yitchak Gertner4a37fb62008-08-13 15:50:23 -07001562 REG_WR(bp, hw_lock_control_reg, resource_bit);
Eliezer Tamirf1410642008-02-28 11:51:50 -08001563 return 0;
1564}
1565
Yaniv Rosnerc18487e2008-06-23 20:27:52 -07001566
Eilon Greenstein4acac6a2009-02-12 08:36:52 +00001567int bnx2x_get_gpio(struct bnx2x *bp, int gpio_num, u8 port)
1568{
1569 /* The GPIO should be swapped if swap register is set and active */
1570 int gpio_port = (REG_RD(bp, NIG_REG_PORT_SWAP) &&
1571 REG_RD(bp, NIG_REG_STRAP_OVERRIDE)) ^ port;
1572 int gpio_shift = gpio_num +
1573 (gpio_port ? MISC_REGISTERS_GPIO_PORT_SHIFT : 0);
1574 u32 gpio_mask = (1 << gpio_shift);
1575 u32 gpio_reg;
1576 int value;
1577
1578 if (gpio_num > MISC_REGISTERS_GPIO_3) {
1579 BNX2X_ERR("Invalid GPIO %d\n", gpio_num);
1580 return -EINVAL;
1581 }
1582
1583 /* read GPIO value */
1584 gpio_reg = REG_RD(bp, MISC_REG_GPIO);
1585
1586 /* get the requested pin value */
1587 if ((gpio_reg & gpio_mask) == gpio_mask)
1588 value = 1;
1589 else
1590 value = 0;
1591
1592 DP(NETIF_MSG_LINK, "pin %d value 0x%x\n", gpio_num, value);
1593
1594 return value;
1595}
1596
/*
 * Drive a GPIO pin low/high or float it (input/hi-Z) on the given port.
 *
 * @gpio_num: GPIO index (0..MISC_REGISTERS_GPIO_3)
 * @mode:     MISC_REGISTERS_GPIO_OUTPUT_LOW/OUTPUT_HIGH/INPUT_HI_Z
 * @port:     port the pin belongs to (before possible port swap)
 *
 * Returns 0 on success, -EINVAL for an invalid GPIO number.
 * Serialized against other agents via the GPIO HW lock.
 */
int bnx2x_set_gpio(struct bnx2x *bp, int gpio_num, u32 mode, u8 port)
{
	/* The GPIO should be swapped if swap register is set and active */
	int gpio_port = (REG_RD(bp, NIG_REG_PORT_SWAP) &&
			 REG_RD(bp, NIG_REG_STRAP_OVERRIDE)) ^ port;
	int gpio_shift = gpio_num +
			(gpio_port ? MISC_REGISTERS_GPIO_PORT_SHIFT : 0);
	u32 gpio_mask = (1 << gpio_shift);
	u32 gpio_reg;

	if (gpio_num > MISC_REGISTERS_GPIO_3) {
		BNX2X_ERR("Invalid GPIO %d\n", gpio_num);
		return -EINVAL;
	}

	bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_GPIO);
	/* read GPIO and mask except the float bits */
	gpio_reg = (REG_RD(bp, MISC_REG_GPIO) & MISC_REGISTERS_GPIO_FLOAT);

	switch (mode) {
	case MISC_REGISTERS_GPIO_OUTPUT_LOW:
		DP(NETIF_MSG_LINK, "Set GPIO %d (shift %d) -> output low\n",
		   gpio_num, gpio_shift);
		/* clear FLOAT and set CLR */
		gpio_reg &= ~(gpio_mask << MISC_REGISTERS_GPIO_FLOAT_POS);
		gpio_reg |= (gpio_mask << MISC_REGISTERS_GPIO_CLR_POS);
		break;

	case MISC_REGISTERS_GPIO_OUTPUT_HIGH:
		DP(NETIF_MSG_LINK, "Set GPIO %d (shift %d) -> output high\n",
		   gpio_num, gpio_shift);
		/* clear FLOAT and set SET */
		gpio_reg &= ~(gpio_mask << MISC_REGISTERS_GPIO_FLOAT_POS);
		gpio_reg |= (gpio_mask << MISC_REGISTERS_GPIO_SET_POS);
		break;

	case MISC_REGISTERS_GPIO_INPUT_HI_Z:
		DP(NETIF_MSG_LINK, "Set GPIO %d (shift %d) -> input\n",
		   gpio_num, gpio_shift);
		/* set FLOAT */
		gpio_reg |= (gpio_mask << MISC_REGISTERS_GPIO_FLOAT_POS);
		break;

	default:
		/* unknown mode: write back the read value unchanged */
		break;
	}

	REG_WR(bp, MISC_REG_GPIO, gpio_reg);
	bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_GPIO);

	return 0;
}
1649
/*
 * Set or clear the interrupt state of a GPIO pin on the given port.
 *
 * @mode: MISC_REGISTERS_GPIO_INT_OUTPUT_CLR or _OUTPUT_SET
 *
 * Returns 0 on success, -EINVAL for an invalid GPIO number.
 * Serialized against other agents via the GPIO HW lock.
 */
int bnx2x_set_gpio_int(struct bnx2x *bp, int gpio_num, u32 mode, u8 port)
{
	/* The GPIO should be swapped if swap register is set and active */
	int gpio_port = (REG_RD(bp, NIG_REG_PORT_SWAP) &&
			 REG_RD(bp, NIG_REG_STRAP_OVERRIDE)) ^ port;
	int gpio_shift = gpio_num +
			(gpio_port ? MISC_REGISTERS_GPIO_PORT_SHIFT : 0);
	u32 gpio_mask = (1 << gpio_shift);
	u32 gpio_reg;

	if (gpio_num > MISC_REGISTERS_GPIO_3) {
		BNX2X_ERR("Invalid GPIO %d\n", gpio_num);
		return -EINVAL;
	}

	bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_GPIO);
	/* read GPIO int */
	gpio_reg = REG_RD(bp, MISC_REG_GPIO_INT);

	switch (mode) {
	case MISC_REGISTERS_GPIO_INT_OUTPUT_CLR:
		DP(NETIF_MSG_LINK, "Clear GPIO INT %d (shift %d) -> "
				   "output low\n", gpio_num, gpio_shift);
		/* clear SET and set CLR */
		gpio_reg &= ~(gpio_mask << MISC_REGISTERS_GPIO_INT_SET_POS);
		gpio_reg |= (gpio_mask << MISC_REGISTERS_GPIO_INT_CLR_POS);
		break;

	case MISC_REGISTERS_GPIO_INT_OUTPUT_SET:
		DP(NETIF_MSG_LINK, "Set GPIO INT %d (shift %d) -> "
				   "output high\n", gpio_num, gpio_shift);
		/* clear CLR and set SET */
		gpio_reg &= ~(gpio_mask << MISC_REGISTERS_GPIO_INT_CLR_POS);
		gpio_reg |= (gpio_mask << MISC_REGISTERS_GPIO_INT_SET_POS);
		break;

	default:
		/* unknown mode: write back the read value unchanged */
		break;
	}

	REG_WR(bp, MISC_REG_GPIO_INT, gpio_reg);
	bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_GPIO);

	return 0;
}
1695
/*
 * Drive an SPIO pin low/high or float it (input/hi-Z).
 *
 * Only SPIO_4..SPIO_7 are valid; unlike GPIOs, SPIOs are shared by
 * both ports so there is no port-swap handling here.
 *
 * Returns 0 on success, -EINVAL for an invalid SPIO number.
 * Serialized against other agents via the SPIO HW lock.
 */
static int bnx2x_set_spio(struct bnx2x *bp, int spio_num, u32 mode)
{
	u32 spio_mask = (1 << spio_num);
	u32 spio_reg;

	if ((spio_num < MISC_REGISTERS_SPIO_4) ||
	    (spio_num > MISC_REGISTERS_SPIO_7)) {
		BNX2X_ERR("Invalid SPIO %d\n", spio_num);
		return -EINVAL;
	}

	bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_SPIO);
	/* read SPIO and mask except the float bits */
	spio_reg = (REG_RD(bp, MISC_REG_SPIO) & MISC_REGISTERS_SPIO_FLOAT);

	switch (mode) {
	case MISC_REGISTERS_SPIO_OUTPUT_LOW:
		DP(NETIF_MSG_LINK, "Set SPIO %d -> output low\n", spio_num);
		/* clear FLOAT and set CLR */
		spio_reg &= ~(spio_mask << MISC_REGISTERS_SPIO_FLOAT_POS);
		spio_reg |= (spio_mask << MISC_REGISTERS_SPIO_CLR_POS);
		break;

	case MISC_REGISTERS_SPIO_OUTPUT_HIGH:
		DP(NETIF_MSG_LINK, "Set SPIO %d -> output high\n", spio_num);
		/* clear FLOAT and set SET */
		spio_reg &= ~(spio_mask << MISC_REGISTERS_SPIO_FLOAT_POS);
		spio_reg |= (spio_mask << MISC_REGISTERS_SPIO_SET_POS);
		break;

	case MISC_REGISTERS_SPIO_INPUT_HI_Z:
		DP(NETIF_MSG_LINK, "Set SPIO %d -> input\n", spio_num);
		/* set FLOAT */
		spio_reg |= (spio_mask << MISC_REGISTERS_SPIO_FLOAT_POS);
		break;

	default:
		/* unknown mode: write back the read value unchanged */
		break;
	}

	REG_WR(bp, MISC_REG_SPIO, spio_reg);
	bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_SPIO);

	return 0;
}
1741
/*
 * Return the link-configuration index (LINK_CONFIG_IDX) of the
 * currently selected external PHY, accounting for PHY swapping.
 */
int bnx2x_get_link_cfg_idx(struct bnx2x *bp)
{
	u32 sel_phy_idx = 0;
	if (bp->link_vars.link_up) {
		sel_phy_idx = EXT_PHY1;
		/* In case link is SERDES, check if the EXT_PHY2 is the one */
		if ((bp->link_vars.link_status & LINK_STATUS_SERDES_LINK) &&
		    (bp->link_params.phy[EXT_PHY2].supported & SUPPORTED_FIBRE))
			sel_phy_idx = EXT_PHY2;
	} else {
		/* No link - fall back on the configured PHY priority */
		switch (bnx2x_phy_selection(&bp->link_params)) {
		case PORT_HW_CFG_PHY_SELECTION_HARDWARE_DEFAULT:
		case PORT_HW_CFG_PHY_SELECTION_FIRST_PHY:
		case PORT_HW_CFG_PHY_SELECTION_FIRST_PHY_PRIORITY:
			sel_phy_idx = EXT_PHY1;
			break;
		case PORT_HW_CFG_PHY_SELECTION_SECOND_PHY:
		case PORT_HW_CFG_PHY_SELECTION_SECOND_PHY_PRIORITY:
			sel_phy_idx = EXT_PHY2;
			break;
		}
	}
	/*
	 * The selected active PHY is always after swapping (in case PHY
	 * swapping is enabled). So when swapping is enabled, we need to reverse
	 * the configuration
	 */

	if (bp->link_params.multi_phy_config &
	    PORT_HW_CFG_PHY_SWAPPED_ENABLED) {
		if (sel_phy_idx == EXT_PHY1)
			sel_phy_idx = EXT_PHY2;
		else if (sel_phy_idx == EXT_PHY2)
			sel_phy_idx = EXT_PHY1;
	}
	return LINK_CONFIG_IDX(sel_phy_idx);
}
1780
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00001781void bnx2x_calc_fc_adv(struct bnx2x *bp)
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02001782{
Yaniv Rosnera22f0782010-09-07 11:41:20 +00001783 u8 cfg_idx = bnx2x_get_link_cfg_idx(bp);
Eilon Greensteinad33ea32009-01-14 21:24:57 -08001784 switch (bp->link_vars.ieee_fc &
1785 MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_MASK) {
Yaniv Rosnerc18487e2008-06-23 20:27:52 -07001786 case MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_NONE:
Yaniv Rosnera22f0782010-09-07 11:41:20 +00001787 bp->port.advertising[cfg_idx] &= ~(ADVERTISED_Asym_Pause |
Dmitry Kravkovf85582f2010-10-06 03:34:21 +00001788 ADVERTISED_Pause);
Eliezer Tamirf1410642008-02-28 11:51:50 -08001789 break;
Eilon Greenstein356e2382009-02-12 08:38:32 +00001790
Yaniv Rosnerc18487e2008-06-23 20:27:52 -07001791 case MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_BOTH:
Yaniv Rosnera22f0782010-09-07 11:41:20 +00001792 bp->port.advertising[cfg_idx] |= (ADVERTISED_Asym_Pause |
Dmitry Kravkovf85582f2010-10-06 03:34:21 +00001793 ADVERTISED_Pause);
Eliezer Tamirf1410642008-02-28 11:51:50 -08001794 break;
Eilon Greenstein356e2382009-02-12 08:38:32 +00001795
Yaniv Rosnerc18487e2008-06-23 20:27:52 -07001796 case MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_ASYMMETRIC:
Yaniv Rosnera22f0782010-09-07 11:41:20 +00001797 bp->port.advertising[cfg_idx] |= ADVERTISED_Asym_Pause;
Eliezer Tamirf1410642008-02-28 11:51:50 -08001798 break;
Eilon Greenstein356e2382009-02-12 08:38:32 +00001799
Eliezer Tamirf1410642008-02-28 11:51:50 -08001800 default:
Yaniv Rosnera22f0782010-09-07 11:41:20 +00001801 bp->port.advertising[cfg_idx] &= ~(ADVERTISED_Asym_Pause |
Dmitry Kravkovf85582f2010-10-06 03:34:21 +00001802 ADVERTISED_Pause);
Eliezer Tamirf1410642008-02-28 11:51:50 -08001803 break;
1804 }
1805}
1806
/*
 * First-time link bring-up after driver load.
 *
 * Requests flow-control mode based on MTU, optionally forces XGXS
 * loopback at 10G for diagnostics (LOAD_DIAG), then initializes the
 * PHY under the PHY lock and recomputes pause advertisement.
 *
 * Returns the bnx2x_phy_init() result, or -EINVAL when there is no
 * bootcode.  NOTE(review): the return type is u8, so -EINVAL is
 * truncated to a positive value — callers must treat any non-zero
 * value as failure; confirm against callers before changing.
 */
u8 bnx2x_initial_phy_init(struct bnx2x *bp, int load_mode)
{
	if (!BP_NOMCP(bp)) {
		u8 rc;
		int cfx_idx = bnx2x_get_link_cfg_idx(bp);
		/* saved so the LOAD_DIAG override below can be undone */
		u16 req_line_speed = bp->link_params.req_line_speed[cfx_idx];
		/* Initialize link parameters structure variables */
		/* It is recommended to turn off RX FC for jumbo frames
		   for better performance */
		if ((CHIP_IS_E1x(bp)) && (bp->dev->mtu > 5000))
			bp->link_params.req_fc_auto_adv = BNX2X_FLOW_CTRL_TX;
		else
			bp->link_params.req_fc_auto_adv = BNX2X_FLOW_CTRL_BOTH;

		bnx2x_acquire_phy_lock(bp);

		if (load_mode == LOAD_DIAG) {
			bp->link_params.loopback_mode = LOOPBACK_XGXS;
			bp->link_params.req_line_speed[cfx_idx] = SPEED_10000;
		}

		rc = bnx2x_phy_init(&bp->link_params, &bp->link_vars);

		bnx2x_release_phy_lock(bp);

		bnx2x_calc_fc_adv(bp);

		/* on emulation/FPGA report the link state immediately */
		if (CHIP_REV_IS_SLOW(bp) && bp->link_vars.link_up) {
			bnx2x_stats_handle(bp, STATS_EVENT_LINK_UP);
			bnx2x_link_report(bp);
		}
		/* restore the requested speed clobbered by LOAD_DIAG */
		bp->link_params.req_line_speed[cfx_idx] = req_line_speed;
		return rc;
	}
	BNX2X_ERR("Bootcode is missing - can not initialize link\n");
	return -EINVAL;
}
1844
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00001845void bnx2x_link_set(struct bnx2x *bp)
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02001846{
Eilon Greenstein19680c42008-08-13 15:47:33 -07001847 if (!BP_NOMCP(bp)) {
Yitchak Gertner4a37fb62008-08-13 15:50:23 -07001848 bnx2x_acquire_phy_lock(bp);
Yaniv Rosner54c2fb72010-09-01 09:51:23 +00001849 bnx2x_link_reset(&bp->link_params, &bp->link_vars, 1);
Eilon Greenstein19680c42008-08-13 15:47:33 -07001850 bnx2x_phy_init(&bp->link_params, &bp->link_vars);
Yitchak Gertner4a37fb62008-08-13 15:50:23 -07001851 bnx2x_release_phy_lock(bp);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02001852
Eilon Greenstein19680c42008-08-13 15:47:33 -07001853 bnx2x_calc_fc_adv(bp);
1854 } else
Eilon Greensteinf5372252009-02-12 08:38:30 +00001855 BNX2X_ERR("Bootcode is missing - can not set link\n");
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02001856}
1857
Yaniv Rosnerc18487e2008-06-23 20:27:52 -07001858static void bnx2x__link_reset(struct bnx2x *bp)
1859{
Eilon Greenstein19680c42008-08-13 15:47:33 -07001860 if (!BP_NOMCP(bp)) {
Yitchak Gertner4a37fb62008-08-13 15:50:23 -07001861 bnx2x_acquire_phy_lock(bp);
Eilon Greenstein589abe32009-02-12 08:36:55 +00001862 bnx2x_link_reset(&bp->link_params, &bp->link_vars, 1);
Yitchak Gertner4a37fb62008-08-13 15:50:23 -07001863 bnx2x_release_phy_lock(bp);
Eilon Greenstein19680c42008-08-13 15:47:33 -07001864 } else
Eilon Greensteinf5372252009-02-12 08:38:30 +00001865 BNX2X_ERR("Bootcode is missing - can not reset link\n");
Yaniv Rosnerc18487e2008-06-23 20:27:52 -07001866}
1867
Yaniv Rosnera22f0782010-09-07 11:41:20 +00001868u8 bnx2x_link_test(struct bnx2x *bp, u8 is_serdes)
Yaniv Rosnerc18487e2008-06-23 20:27:52 -07001869{
Vladislav Zolotarov2145a922010-04-19 01:13:49 +00001870 u8 rc = 0;
Yaniv Rosnerc18487e2008-06-23 20:27:52 -07001871
Vladislav Zolotarov2145a922010-04-19 01:13:49 +00001872 if (!BP_NOMCP(bp)) {
1873 bnx2x_acquire_phy_lock(bp);
Yaniv Rosnera22f0782010-09-07 11:41:20 +00001874 rc = bnx2x_test_link(&bp->link_params, &bp->link_vars,
1875 is_serdes);
Vladislav Zolotarov2145a922010-04-19 01:13:49 +00001876 bnx2x_release_phy_lock(bp);
1877 } else
1878 BNX2X_ERR("Bootcode is missing - can not test link\n");
Yaniv Rosnerc18487e2008-06-23 20:27:52 -07001879
1880 return rc;
1881}
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02001882
/*
 * Initialize the per-port rate-shaping and fairness parameters in
 * bp->cmng from the current line speed.  Caller is expected to hold a
 * valid bp->link_vars.line_speed (non-zero).
 */
static void bnx2x_init_port_minmax(struct bnx2x *bp)
{
	u32 r_param = bp->link_vars.line_speed / 8;	/* bytes per usec */
	u32 fair_periodic_timeout_usec;
	u32 t_fair;

	memset(&(bp->cmng.rs_vars), 0,
	       sizeof(struct rate_shaping_vars_per_port));
	memset(&(bp->cmng.fair_vars), 0, sizeof(struct fairness_vars_per_port));

	/* 100 usec in SDM ticks = 25 since each tick is 4 usec */
	bp->cmng.rs_vars.rs_periodic_timeout = RS_PERIODIC_TIMEOUT_USEC / 4;

	/* this is the threshold below which no timer arming will occur
	   1.25 coefficient is for the threshold to be a little bigger
	   than the real time, to compensate for timer in-accuracy */
	bp->cmng.rs_vars.rs_threshold =
				(RS_PERIODIC_TIMEOUT_USEC * r_param * 5) / 4;

	/* resolution of fairness timer */
	fair_periodic_timeout_usec = QM_ARB_BYTES / r_param;
	/* for 10G it is 1000usec. for 1G it is 10000usec. */
	t_fair = T_FAIR_COEF / bp->link_vars.line_speed;

	/* this is the threshold below which we won't arm the timer anymore */
	bp->cmng.fair_vars.fair_threshold = QM_ARB_BYTES;

	/* we multiply by 1e3/8 to get bytes/msec.
	   We don't want the credits to pass a credit
	   of the t_fair*FAIR_MEM (algorithm resolution) */
	bp->cmng.fair_vars.upper_bound = r_param * t_fair * FAIR_MEM;
	/* since each tick is 4 usec */
	bp->cmng.fair_vars.fairness_timeout = fair_periodic_timeout_usec / 4;
}
1917
/* Calculates the sum of vn_min_rates.
   It's needed for further normalizing of the min_rates.
   Returns:
     sum of vn_min_rates.
       or
     0 - if all the min_rates are 0.
   In the latter case fairness algorithm should be deactivated.
   If not all min_rates are zero then those that are zeroes will be set to 1.
 */
static void bnx2x_calc_vn_weight_sum(struct bnx2x *bp)
{
	int all_zero = 1;
	int vn;

	bp->vn_weight_sum = 0;
	/* accumulate the configured MIN bandwidth of every visible vn */
	for (vn = VN_0; vn < E1HVN_MAX; vn++) {
		u32 vn_cfg = bp->mf_config[vn];
		u32 vn_min_rate = ((vn_cfg & FUNC_MF_CFG_MIN_BW_MASK) >>
				   FUNC_MF_CFG_MIN_BW_SHIFT) * 100;

		/* Skip hidden vns */
		if (vn_cfg & FUNC_MF_CFG_FUNC_HIDE)
			continue;

		/* If min rate is zero - set it to 1 */
		if (!vn_min_rate)
			vn_min_rate = DEF_MIN_RATE;
		else
			all_zero = 0;

		bp->vn_weight_sum += vn_min_rate;
	}

	/* ... only if all min rates are zeros - disable fairness */
	if (all_zero) {
		bp->cmng.flags.cmng_enables &=
					~CMNG_FLAGS_PER_PORT_FAIRNESS_VN;
		DP(NETIF_MSG_IFUP, "All MIN values are zeroes"
		   " fairness will be disabled\n");
	} else
		bp->cmng.flags.cmng_enables |=
					CMNG_FLAGS_PER_PORT_FAIRNESS_VN;
}
1961
/*
 * Compute and program the per-vn rate-shaping and fairness variables
 * into XSTORM internal memory for virtual network @vn.
 *
 * Rates are in units of Mbps (MIN_BW field is in percents * 100;
 * MAX_BW is either percent-of-linkspeed or absolute 100Mb units
 * depending on the MF mode).  Assumes bnx2x_calc_vn_weight_sum() and
 * bnx2x_init_port_minmax() have already run so bp->vn_weight_sum and
 * bp->cmng are valid.
 */
static void bnx2x_init_vn_minmax(struct bnx2x *bp, int vn)
{
	struct rate_shaping_vars_per_vn m_rs_vn;
	struct fairness_vars_per_vn m_fair_vn;
	u32 vn_cfg = bp->mf_config[vn];
	int func = 2*vn + BP_PORT(bp);
	u16 vn_min_rate, vn_max_rate;
	int i;

	/* If function is hidden - set min and max to zeroes */
	if (vn_cfg & FUNC_MF_CFG_FUNC_HIDE) {
		vn_min_rate = 0;
		vn_max_rate = 0;

	} else {
		u32 maxCfg = bnx2x_extract_max_cfg(bp, vn_cfg);

		vn_min_rate = ((vn_cfg & FUNC_MF_CFG_MIN_BW_MASK) >>
				FUNC_MF_CFG_MIN_BW_SHIFT) * 100;
		/* If fairness is enabled (not all min rates are zeroes) and
		   if current min rate is zero - set it to 1.
		   This is a requirement of the algorithm. */
		if (bp->vn_weight_sum && (vn_min_rate == 0))
			vn_min_rate = DEF_MIN_RATE;

		if (IS_MF_SI(bp))
			/* maxCfg in percents of linkspeed */
			vn_max_rate = (bp->link_vars.line_speed * maxCfg) / 100;
		else
			/* maxCfg is absolute in 100Mb units */
			vn_max_rate = maxCfg * 100;
	}

	DP(NETIF_MSG_IFUP,
	   "func %d: vn_min_rate %d vn_max_rate %d vn_weight_sum %d\n",
	   func, vn_min_rate, vn_max_rate, bp->vn_weight_sum);

	memset(&m_rs_vn, 0, sizeof(struct rate_shaping_vars_per_vn));
	memset(&m_fair_vn, 0, sizeof(struct fairness_vars_per_vn));

	/* global vn counter - maximal Mbps for this vn */
	m_rs_vn.vn_counter.rate = vn_max_rate;

	/* quota - number of bytes transmitted in this period */
	m_rs_vn.vn_counter.quota =
				(vn_max_rate * RS_PERIODIC_TIMEOUT_USEC) / 8;

	if (bp->vn_weight_sum) {
		/* credit for each period of the fairness algorithm:
		   number of bytes in T_FAIR (the vn share the port rate).
		   vn_weight_sum should not be larger than 10000, thus
		   T_FAIR_COEF / (8 * vn_weight_sum) will always be greater
		   than zero */
		m_fair_vn.vn_credit_delta =
			max_t(u32, (vn_min_rate * (T_FAIR_COEF /
						   (8 * bp->vn_weight_sum))),
			      (bp->cmng.fair_vars.fair_threshold * 2));
		DP(NETIF_MSG_IFUP, "m_fair_vn.vn_credit_delta %d\n",
		   m_fair_vn.vn_credit_delta);
	}

	/* Store it to internal memory - word by word */
	for (i = 0; i < sizeof(struct rate_shaping_vars_per_vn)/4; i++)
		REG_WR(bp, BAR_XSTRORM_INTMEM +
		       XSTORM_RATE_SHAPING_PER_VN_VARS_OFFSET(func) + i * 4,
		       ((u32 *)(&m_rs_vn))[i]);

	for (i = 0; i < sizeof(struct fairness_vars_per_vn)/4; i++)
		REG_WR(bp, BAR_XSTRORM_INTMEM +
		       XSTORM_FAIRNESS_PER_VN_VARS_OFFSET(func) + i * 4,
		       ((u32 *)(&m_fair_vn))[i]);
}
Dmitry Kravkovf85582f2010-10-06 03:34:21 +00002034
Dmitry Kravkov523224a2010-10-06 03:23:26 +00002035static int bnx2x_get_cmng_fns_mode(struct bnx2x *bp)
2036{
2037 if (CHIP_REV_IS_SLOW(bp))
2038 return CMNG_FNS_NONE;
Dmitry Kravkovfb3bff12010-10-06 03:26:40 +00002039 if (IS_MF(bp))
Dmitry Kravkov523224a2010-10-06 03:23:26 +00002040 return CMNG_FNS_MINMAX;
Eilon Greenstein34f80b02008-06-23 20:33:01 -07002041
Dmitry Kravkov523224a2010-10-06 03:23:26 +00002042 return CMNG_FNS_NONE;
2043}
2044
/*
 * Refresh the cached per-vn multi-function configuration
 * (bp->mf_config[]) from shared memory.
 */
static void bnx2x_read_mf_cfg(struct bnx2x *bp)
{
	int vn, n = (CHIP_MODE_IS_4_PORT(bp) ? 2 : 1);

	if (BP_NOMCP(bp))
		return; /* what should be the default value in this case */

	/* For 2 port configuration the absolute function number formula
	 * is:
	 *      abs_func = 2 * vn + BP_PORT + BP_PATH
	 *
	 *      and there are 4 functions per port
	 *
	 * For 4 port configuration it is
	 *      abs_func = 4 * vn + 2 * BP_PORT + BP_PATH
	 *
	 *      and there are 2 functions per port
	 */
	for (vn = VN_0; vn < E1HVN_MAX; vn++) {
		int /*abs*/func = n * (2 * vn + BP_PORT(bp)) + BP_PATH(bp);

		if (func >= E1H_FUNC_MAX)
			break;

		bp->mf_config[vn] =
			MF_CFG_RD(bp, func_mf_config[func].config);
	}
}
2073
2074static void bnx2x_cmng_fns_init(struct bnx2x *bp, u8 read_cfg, u8 cmng_type)
2075{
2076
2077 if (cmng_type == CMNG_FNS_MINMAX) {
2078 int vn;
2079
2080 /* clear cmng_enables */
2081 bp->cmng.flags.cmng_enables = 0;
2082
2083 /* read mf conf from shmem */
2084 if (read_cfg)
2085 bnx2x_read_mf_cfg(bp);
2086
2087 /* Init rate shaping and fairness contexts */
2088 bnx2x_init_port_minmax(bp);
2089
2090 /* vn_weight_sum and enable fairness if not 0 */
2091 bnx2x_calc_vn_weight_sum(bp);
2092
2093 /* calculate and set min-max rate for each vn */
2094 for (vn = VN_0; vn < E1HVN_MAX; vn++)
2095 bnx2x_init_vn_minmax(bp, vn);
2096
2097 /* always enable rate shaping and fairness */
2098 bp->cmng.flags.cmng_enables |=
2099 CMNG_FLAGS_PER_PORT_RATE_SHAPING_VN;
2100 if (!bp->vn_weight_sum)
2101 DP(NETIF_MSG_IFUP, "All MIN values are zeroes"
2102 " fairness will be disabled\n");
2103 return;
2104 }
2105
2106 /* rate shaping and fairness are disabled */
2107 DP(NETIF_MSG_IFUP,
2108 "rate shaping and fairness are disabled\n");
2109}
2110
2111static inline void bnx2x_link_sync_notify(struct bnx2x *bp)
2112{
2113 int port = BP_PORT(bp);
2114 int func;
2115 int vn;
2116
2117 /* Set the attention towards other drivers on the same port */
2118 for (vn = VN_0; vn < E1HVN_MAX; vn++) {
2119 if (vn == BP_E1HVN(bp))
2120 continue;
2121
2122 func = ((vn << 1) | port);
2123 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_0 +
2124 (LINK_SYNC_ATTENTION_BIT_FUNC_0 + func)*4, 1);
2125 }
2126}
Eilon Greenstein8a1c38d2009-02-12 08:36:40 +00002127
/* This function is called upon link interrupt */
static void bnx2x_link_attn(struct bnx2x *bp)
{
	/* saved so we only report when the status actually changed */
	u32 prev_link_status = bp->link_vars.link_status;
	/* Make sure that we are synced with the current statistics */
	bnx2x_stats_handle(bp, STATS_EVENT_STOP);

	/* re-read the PHY/link state into bp->link_vars */
	bnx2x_link_update(&bp->link_params, &bp->link_vars);

	if (bp->link_vars.link_up) {

		/* dropless flow control */
		if (!CHIP_IS_E1(bp) && bp->dropless_fc) {
			int port = BP_PORT(bp);
			u32 pause_enabled = 0;

			if (bp->link_vars.flow_ctrl & BNX2X_FLOW_CTRL_TX)
				pause_enabled = 1;

			/* tell the USTORM firmware whether TX pause is on */
			REG_WR(bp, BAR_USTRORM_INTMEM +
			       USTORM_ETH_PAUSE_ENABLED_OFFSET(port),
			       pause_enabled);
		}

		if (bp->link_vars.mac_type == MAC_TYPE_BMAC) {
			struct host_port_stats *pstats;

			pstats = bnx2x_sp(bp, port_stats);
			/* reset old bmac stats */
			memset(&(pstats->mac_stx[0]), 0,
			       sizeof(struct mac_stx));
		}
		if (bp->state == BNX2X_STATE_OPEN)
			bnx2x_stats_handle(bp, STATS_EVENT_LINK_UP);
	}

	/* indicate link status only if link status actually changed */
	if (prev_link_status != bp->link_vars.link_status)
		bnx2x_link_report(bp);

	/* in multi-function mode, wake the other functions on this port */
	if (IS_MF(bp))
		bnx2x_link_sync_notify(bp);

	/* re-program rate shaping/fairness for the new line speed */
	if (bp->link_vars.link_up && bp->link_vars.line_speed) {
		int cmng_fns = bnx2x_get_cmng_fns_mode(bp);

		if (cmng_fns != CMNG_FNS_NONE) {
			bnx2x_cmng_fns_init(bp, false, cmng_fns);
			storm_memset_cmng(bp, &bp->cmng, BP_PORT(bp));
		} else
			/* rate shaping and fairness are disabled */
			DP(NETIF_MSG_IFUP,
			   "single function mode without fairness\n");
	}
}
2183
/*
 * Poll-style link status refresh (called outside the link interrupt
 * path).  Does nothing unless the device is open and the function is
 * not disabled by MF configuration.
 */
void bnx2x__link_status_update(struct bnx2x *bp)
{
	if ((bp->state != BNX2X_STATE_OPEN) || (bp->flags & MF_FUNC_DIS))
		return;

	bnx2x_link_status_update(&bp->link_params, &bp->link_vars);

	if (bp->link_vars.link_up)
		bnx2x_stats_handle(bp, STATS_EVENT_LINK_UP);
	else
		bnx2x_stats_handle(bp, STATS_EVENT_STOP);

	/* the link status update could be the result of a DCC event
	   hence re-read the shmem mf configuration */
	bnx2x_read_mf_cfg(bp);

	/* indicate link status */
	bnx2x_link_report(bp);
}
2203
/*
 * Take over the Port Management Function (PMF) role for this port:
 * mark ourselves PMF, enable NIG attention routing for our vn, and
 * kick the statistics state machine.
 */
static void bnx2x_pmf_update(struct bnx2x *bp)
{
	int port = BP_PORT(bp);
	u32 val;

	bp->port.pmf = 1;
	DP(NETIF_MSG_LINK, "pmf %d\n", bp->port.pmf);

	/* enable nig attention */
	val = (0xff0f | (1 << (BP_E1HVN(bp) + 4)));
	/* edge-latch registers differ between HC and IGU interrupt blocks */
	if (bp->common.int_block == INT_BLOCK_HC) {
		REG_WR(bp, HC_REG_TRAILING_EDGE_0 + port*8, val);
		REG_WR(bp, HC_REG_LEADING_EDGE_0 + port*8, val);
	} else if (CHIP_IS_E2(bp)) {
		REG_WR(bp, IGU_REG_TRAILING_EDGE_LATCH, val);
		REG_WR(bp, IGU_REG_LEADING_EDGE_LATCH, val);
	}

	bnx2x_stats_handle(bp, STATS_EVENT_PMF);
}
2224
Yaniv Rosnerc18487e2008-06-23 20:27:52 -07002225/* end of Link */
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02002226
2227/* slow path */
2228
2229/*
2230 * General service functions
2231 */
2232
/* send the MCP a request, block until there is a reply */
u32 bnx2x_fw_command(struct bnx2x *bp, u32 command, u32 param)
{
	int mb_idx = BP_FW_MB_IDX(bp);
	u32 seq = ++bp->fw_seq;
	u32 rc = 0;
	u32 cnt = 1;
	/* emulation/FPGA (slow) chips get a 10x longer polling period */
	u8 delay = CHIP_REV_IS_SLOW(bp) ? 100 : 10;

	/* serialize all accesses to the FW mailbox */
	mutex_lock(&bp->fw_mb_mutex);
	/* write the parameter first; the header write (command | seq)
	 * is what the FW reacts to */
	SHMEM_WR(bp, func_mb[mb_idx].drv_mb_param, param);
	SHMEM_WR(bp, func_mb[mb_idx].drv_mb_header, (command | seq));

	DP(BNX2X_MSG_MCP, "wrote command (%x) to FW MB\n", (command | seq));

	/* poll until the FW echoes our sequence number back */
	do {
		/* let the FW do it's magic ... */
		msleep(delay);

		rc = SHMEM_RD(bp, func_mb[mb_idx].fw_mb_header);

		/* Give the FW up to 5 second (500*10ms) */
	} while ((seq != (rc & FW_MSG_SEQ_NUMBER_MASK)) && (cnt++ < 500));

	DP(BNX2X_MSG_MCP, "[after %d ms] read (%x) seq is (%x) from FW MB\n",
	   cnt*delay, rc, seq);

	/* is this a reply to our command? */
	if (seq == (rc & FW_MSG_SEQ_NUMBER_MASK))
		rc &= FW_MSG_CODE_MASK;
	else {
		/* FW BUG! */
		BNX2X_ERR("FW failed to respond!\n");
		bnx2x_fw_dump(bp);
		rc = 0; /* no valid response code */
	}
	mutex_unlock(&bp->fw_mb_mutex);

	return rc;
}
2273
Vladislav Zolotarovec6ba942010-12-13 05:44:01 +00002274static u8 stat_counter_valid(struct bnx2x *bp, struct bnx2x_fastpath *fp)
2275{
2276#ifdef BCM_CNIC
2277 if (IS_FCOE_FP(fp) && IS_MF(bp))
2278 return false;
2279#endif
2280 return true;
2281}
2282
Dmitry Kravkov523224a2010-10-06 03:23:26 +00002283/* must be called under rtnl_lock */
stephen hemminger8d962862010-10-21 07:50:56 +00002284static void bnx2x_rxq_set_mac_filters(struct bnx2x *bp, u16 cl_id, u32 filters)
Dmitry Kravkov523224a2010-10-06 03:23:26 +00002285{
2286 u32 mask = (1 << cl_id);
2287
2288 /* initial seeting is BNX2X_ACCEPT_NONE */
2289 u8 drop_all_ucast = 1, drop_all_bcast = 1, drop_all_mcast = 1;
2290 u8 accp_all_ucast = 0, accp_all_bcast = 0, accp_all_mcast = 0;
2291 u8 unmatched_unicast = 0;
2292
Dmitry Kravkov0793f83f2010-12-01 12:39:28 -08002293 if (filters & BNX2X_ACCEPT_UNMATCHED_UCAST)
2294 unmatched_unicast = 1;
2295
Dmitry Kravkov523224a2010-10-06 03:23:26 +00002296 if (filters & BNX2X_PROMISCUOUS_MODE) {
2297 /* promiscious - accept all, drop none */
2298 drop_all_ucast = drop_all_bcast = drop_all_mcast = 0;
2299 accp_all_ucast = accp_all_bcast = accp_all_mcast = 1;
Dmitry Kravkov0793f83f2010-12-01 12:39:28 -08002300 if (IS_MF_SI(bp)) {
2301 /*
2302 * SI mode defines to accept in promiscuos mode
2303 * only unmatched packets
2304 */
2305 unmatched_unicast = 1;
2306 accp_all_ucast = 0;
2307 }
Dmitry Kravkov523224a2010-10-06 03:23:26 +00002308 }
2309 if (filters & BNX2X_ACCEPT_UNICAST) {
2310 /* accept matched ucast */
2311 drop_all_ucast = 0;
2312 }
Vladislav Zolotarovd9c8f492011-02-01 14:05:30 -08002313 if (filters & BNX2X_ACCEPT_MULTICAST)
Dmitry Kravkov523224a2010-10-06 03:23:26 +00002314 /* accept matched mcast */
2315 drop_all_mcast = 0;
Vladislav Zolotarovd9c8f492011-02-01 14:05:30 -08002316
Dmitry Kravkov523224a2010-10-06 03:23:26 +00002317 if (filters & BNX2X_ACCEPT_ALL_UNICAST) {
2318 /* accept all mcast */
2319 drop_all_ucast = 0;
2320 accp_all_ucast = 1;
2321 }
2322 if (filters & BNX2X_ACCEPT_ALL_MULTICAST) {
2323 /* accept all mcast */
2324 drop_all_mcast = 0;
2325 accp_all_mcast = 1;
2326 }
2327 if (filters & BNX2X_ACCEPT_BROADCAST) {
2328 /* accept (all) bcast */
2329 drop_all_bcast = 0;
2330 accp_all_bcast = 1;
2331 }
2332
2333 bp->mac_filters.ucast_drop_all = drop_all_ucast ?
2334 bp->mac_filters.ucast_drop_all | mask :
2335 bp->mac_filters.ucast_drop_all & ~mask;
2336
2337 bp->mac_filters.mcast_drop_all = drop_all_mcast ?
2338 bp->mac_filters.mcast_drop_all | mask :
2339 bp->mac_filters.mcast_drop_all & ~mask;
2340
2341 bp->mac_filters.bcast_drop_all = drop_all_bcast ?
2342 bp->mac_filters.bcast_drop_all | mask :
2343 bp->mac_filters.bcast_drop_all & ~mask;
2344
2345 bp->mac_filters.ucast_accept_all = accp_all_ucast ?
2346 bp->mac_filters.ucast_accept_all | mask :
2347 bp->mac_filters.ucast_accept_all & ~mask;
2348
2349 bp->mac_filters.mcast_accept_all = accp_all_mcast ?
2350 bp->mac_filters.mcast_accept_all | mask :
2351 bp->mac_filters.mcast_accept_all & ~mask;
2352
2353 bp->mac_filters.bcast_accept_all = accp_all_bcast ?
2354 bp->mac_filters.bcast_accept_all | mask :
2355 bp->mac_filters.bcast_accept_all & ~mask;
2356
2357 bp->mac_filters.unmatched_unicast = unmatched_unicast ?
2358 bp->mac_filters.unmatched_unicast | mask :
2359 bp->mac_filters.unmatched_unicast & ~mask;
2360}
2361
/* Program the per-function configuration into the STORM memories:
 * TPA/RSS settings, function enable, statistics collection and the
 * slow-path queue (SPQ) address/producer, per the flags in @p.
 */
static void bnx2x_func_init(struct bnx2x *bp, struct bnx2x_func_init_params *p)
{
	struct tstorm_eth_function_common_config tcfg = {0};
	u16 rss_flgs;

	/* tpa */
	if (p->func_flgs & FUNC_FLG_TPA)
		tcfg.config_flags |=
			TSTORM_ETH_FUNCTION_COMMON_CONFIG_ENABLE_TPA;

	/* set rss flags */
	rss_flgs = (p->rss->mode <<
		TSTORM_ETH_FUNCTION_COMMON_CONFIG_RSS_MODE_SHIFT);

	if (p->rss->cap & RSS_IPV4_CAP)
		rss_flgs |= RSS_IPV4_CAP_MASK;
	if (p->rss->cap & RSS_IPV4_TCP_CAP)
		rss_flgs |= RSS_IPV4_TCP_CAP_MASK;
	if (p->rss->cap & RSS_IPV6_CAP)
		rss_flgs |= RSS_IPV6_CAP_MASK;
	if (p->rss->cap & RSS_IPV6_TCP_CAP)
		rss_flgs |= RSS_IPV6_TCP_CAP_MASK;

	tcfg.config_flags |= rss_flgs;
	tcfg.rss_result_mask = p->rss->result_mask;

	storm_memset_func_cfg(bp, &tcfg, p->func_id);

	/* Enable the function in the FW */
	storm_memset_vf_to_pf(bp, p->func_id, p->pf_id);
	storm_memset_func_en(bp, p->func_id, 1);

	/* statistics: the same flags and stats buffer are programmed into
	 * all four STORM processors (X, T, U, C) */
	if (p->func_flgs & FUNC_FLG_STATS) {
		struct stats_indication_flags stats_flags = {0};
		stats_flags.collect_eth = 1;

		storm_memset_xstats_flags(bp, &stats_flags, p->func_id);
		storm_memset_xstats_addr(bp, p->fw_stat_map, p->func_id);

		storm_memset_tstats_flags(bp, &stats_flags, p->func_id);
		storm_memset_tstats_addr(bp, p->fw_stat_map, p->func_id);

		storm_memset_ustats_flags(bp, &stats_flags, p->func_id);
		storm_memset_ustats_addr(bp, p->fw_stat_map, p->func_id);

		storm_memset_cstats_flags(bp, &stats_flags, p->func_id);
		storm_memset_cstats_addr(bp, p->fw_stat_map, p->func_id);
	}

	/* spq: program the ring address, then publish the initial producer */
	if (p->func_flgs & FUNC_FLG_SPQ) {
		storm_memset_spq_addr(bp, p->spq_map, p->func_id);
		REG_WR(bp, XSEM_REG_FAST_MEMORY +
		       XSTORM_SPQ_PROD_OFFSET(p->func_id), p->spq_prod);
	}
}
2419
2420static inline u16 bnx2x_get_cl_flags(struct bnx2x *bp,
2421 struct bnx2x_fastpath *fp)
2422{
2423 u16 flags = 0;
2424
2425 /* calculate queue flags */
2426 flags |= QUEUE_FLG_CACHE_ALIGN;
2427 flags |= QUEUE_FLG_HC;
Dmitry Kravkov0793f83f2010-12-01 12:39:28 -08002428 flags |= IS_MF_SD(bp) ? QUEUE_FLG_OV : 0;
Dmitry Kravkov523224a2010-10-06 03:23:26 +00002429
Dmitry Kravkov523224a2010-10-06 03:23:26 +00002430 flags |= QUEUE_FLG_VLAN;
2431 DP(NETIF_MSG_IFUP, "vlan removal enabled\n");
Dmitry Kravkov523224a2010-10-06 03:23:26 +00002432
2433 if (!fp->disable_tpa)
2434 flags |= QUEUE_FLG_TPA;
2435
Vladislav Zolotarovec6ba942010-12-13 05:44:01 +00002436 flags = stat_counter_valid(bp, fp) ?
2437 (flags | QUEUE_FLG_STATS) : (flags & ~QUEUE_FLG_STATS);
Dmitry Kravkov523224a2010-10-06 03:23:26 +00002438
2439 return flags;
2440}
2441
/* Fill the Rx queue init parameters and Rx pause thresholds for a PF
 * client queue. When TPA is enabled for the queue, also computes the
 * SGE aggregation parameters (pages per packet, buffer and agg sizes).
 */
static void bnx2x_pf_rx_cl_prep(struct bnx2x *bp,
	struct bnx2x_fastpath *fp, struct rxq_pause_params *pause,
	struct bnx2x_rxq_init_params *rxq_init)
{
	u16 max_sge = 0;
	u16 sge_sz = 0;
	u16 tpa_agg_size = 0;

	/* calculate queue flags */
	u16 flags = bnx2x_get_cl_flags(bp, fp);

	if (!fp->disable_tpa) {
		pause->sge_th_hi = 250;
		pause->sge_th_lo = 150;
		/* aggregation size: at most 8 SGE pages, capped at 0xffff */
		tpa_agg_size = min_t(u32,
			(min_t(u32, 8, MAX_SKB_FRAGS) *
			SGE_PAGE_SIZE * PAGES_PER_SGE), 0xffff);
		/* SGE pages needed for an MTU-sized frame, rounded up to a
		 * whole number of SGE entries */
		max_sge = SGE_PAGE_ALIGN(bp->dev->mtu) >>
			SGE_PAGE_SHIFT;
		max_sge = ((max_sge + PAGES_PER_SGE - 1) &
			  (~(PAGES_PER_SGE-1))) >> PAGES_PER_SGE_SHIFT;
		sge_sz = (u16)min_t(u32, SGE_PAGE_SIZE * PAGES_PER_SGE,
				    0xffff);
	}

	/* pause - not for e1 */
	if (!CHIP_IS_E1(bp)) {
		/* NOTE(review): threshold values appear empirically tuned —
		 * confirm against HW documentation before changing. Also note
		 * sge_th_hi/lo set above (TPA case) are overwritten to 0
		 * here — verify this is intended. */
		pause->bd_th_hi = 350;
		pause->bd_th_lo = 250;
		pause->rcq_th_hi = 350;
		pause->rcq_th_lo = 250;
		pause->sge_th_hi = 0;
		pause->sge_th_lo = 0;
		pause->pri_map = 1;
	}

	/* rxq setup */
	rxq_init->flags = flags;
	rxq_init->cxt = &bp->context.vcxt[fp->cid].eth;
	rxq_init->dscr_map = fp->rx_desc_mapping;
	rxq_init->sge_map = fp->rx_sge_mapping;
	rxq_init->rcq_map = fp->rx_comp_mapping;
	/* next-page pointer of the RCQ lives one page into the ring */
	rxq_init->rcq_np_map = fp->rx_comp_mapping + BCM_PAGE_SIZE;
	rxq_init->mtu = bp->dev->mtu;
	rxq_init->buf_sz = bp->rx_buf_size;
	rxq_init->cl_qzone_id = fp->cl_qzone_id;
	rxq_init->cl_id = fp->cl_id;
	rxq_init->spcl_id = fp->cl_id;
	rxq_init->stat_id = fp->cl_id;
	rxq_init->tpa_agg_sz = tpa_agg_size;
	rxq_init->sge_buf_sz = sge_sz;
	rxq_init->max_sges_pkt = max_sge;
	rxq_init->cache_line_log = BNX2X_RX_ALIGN_SHIFT;
	rxq_init->fw_sb_id = fp->fw_sb_id;

	/* the FCoE L2 queue uses a dedicated CQ consumer index */
	if (IS_FCOE_FP(fp))
		rxq_init->sb_cq_index = HC_SP_INDEX_ETH_FCOE_RX_CQ_CONS;
	else
		rxq_init->sb_cq_index = U_SB_ETH_RX_CQ_INDEX;

	rxq_init->cid = HW_CID(bp, fp->cid);

	/* interrupt coalescing: ticks (usec) -> events/sec, 0 disables */
	rxq_init->hc_rate = bp->rx_ticks ? (1000000 / bp->rx_ticks) : 0;
}
2506
2507static void bnx2x_pf_tx_cl_prep(struct bnx2x *bp,
2508 struct bnx2x_fastpath *fp, struct bnx2x_txq_init_params *txq_init)
2509{
2510 u16 flags = bnx2x_get_cl_flags(bp, fp);
2511
2512 txq_init->flags = flags;
2513 txq_init->cxt = &bp->context.vcxt[fp->cid].eth;
2514 txq_init->dscr_map = fp->tx_desc_mapping;
2515 txq_init->stat_id = fp->cl_id;
2516 txq_init->cid = HW_CID(bp, fp->cid);
2517 txq_init->sb_cq_index = C_SB_ETH_TX_CQ_INDEX;
2518 txq_init->traffic_type = LLFC_TRAFFIC_TYPE_NW;
2519 txq_init->fw_sb_id = fp->fw_sb_id;
Vladislav Zolotarovec6ba942010-12-13 05:44:01 +00002520
2521 if (IS_FCOE_FP(fp)) {
2522 txq_init->sb_cq_index = HC_SP_INDEX_ETH_FCOE_TX_CQ_CONS;
2523 txq_init->traffic_type = LLFC_TRAFFIC_TYPE_FCOE;
2524 }
2525
Dmitry Kravkov523224a2010-10-06 03:23:26 +00002526 txq_init->hc_rate = bp->tx_ticks ? (1000000 / bp->tx_ticks) : 0;
2527}
2528
/* Per-function initialization done by the PF: outer-VLAN setting, IGU
 * statistics reset (E2), function/RSS/stats/SPQ configuration in the
 * STORMs, congestion management defaults, Rx mode and the event queue.
 */
static void bnx2x_pf_init(struct bnx2x *bp)
{
	struct bnx2x_func_init_params func_init = {0};
	struct bnx2x_rss_params rss = {0};
	struct event_ring_data eq_data = { {0} };
	u16 flags;

	/* pf specific setups */
	if (!CHIP_IS_E1(bp))
		storm_memset_ov(bp, bp->mf_ov, BP_FUNC(bp));

	if (CHIP_IS_E2(bp)) {
		/* reset IGU PF statistics: MSIX + ATTN */
		/* PF */
		REG_WR(bp, IGU_REG_STATISTIC_NUM_MESSAGE_SENT +
			   BNX2X_IGU_STAS_MSG_VF_CNT*4 +
			   (CHIP_MODE_IS_4_PORT(bp) ?
				BP_FUNC(bp) : BP_VN(bp))*4, 0);
		/* ATTN */
		REG_WR(bp, IGU_REG_STATISTIC_NUM_MESSAGE_SENT +
			   BNX2X_IGU_STAS_MSG_VF_CNT*4 +
			   BNX2X_IGU_STAS_MSG_PF_CNT*4 +
			   (CHIP_MODE_IS_4_PORT(bp) ?
				BP_FUNC(bp) : BP_VN(bp))*4, 0);
	}

	/* function setup flags */
	flags = (FUNC_FLG_STATS | FUNC_FLG_LEADING | FUNC_FLG_SPQ);

	/* on E1x TPA follows the driver flag; newer chips always enable it */
	if (CHIP_IS_E1x(bp))
		flags |= (bp->flags & TPA_ENABLE_FLAG) ? FUNC_FLG_TPA : 0;
	else
		flags |= FUNC_FLG_TPA;

	/* function setup */

	/*
	 * Although RSS is meaningless when there is a single HW queue we
	 * still need it enabled in order to have HW Rx hash generated.
	 */
	rss.cap = (RSS_IPV4_CAP | RSS_IPV4_TCP_CAP |
		   RSS_IPV6_CAP | RSS_IPV6_TCP_CAP);
	rss.mode = bp->multi_mode;
	rss.result_mask = MULTI_MASK;
	func_init.rss = &rss;

	func_init.func_flgs = flags;
	func_init.pf_id = BP_FUNC(bp);
	func_init.func_id = BP_FUNC(bp);
	func_init.fw_stat_map = bnx2x_sp_mapping(bp, fw_stats);
	func_init.spq_map = bp->spq_mapping;
	func_init.spq_prod = bp->spq_prod_idx;

	bnx2x_func_init(bp, &func_init);

	memset(&(bp->cmng), 0, sizeof(struct cmng_struct_per_port));

	/*
	   Congestion management values depend on the link rate
	   There is no active link so initial link rate is set to 10 Gbps.
	   When the link comes up The congestion management values are
	   re-calculated according to the actual link rate.
	 */
	bp->link_vars.line_speed = SPEED_10000;
	bnx2x_cmng_fns_init(bp, true, bnx2x_get_cmng_fns_mode(bp));

	/* Only the PMF sets the HW */
	if (bp->port.pmf)
		storm_memset_cmng(bp, &bp->cmng, BP_PORT(bp));

	/* no rx until link is up */
	bp->rx_mode = BNX2X_RX_MODE_NONE;
	bnx2x_set_storm_rx_mode(bp);

	/* init Event Queue */
	eq_data.base_addr.hi = U64_HI(bp->eq_mapping);
	eq_data.base_addr.lo = U64_LO(bp->eq_mapping);
	eq_data.producer = bp->eq_prod;
	eq_data.index_id = HC_SP_INDEX_EQ_CONS;
	eq_data.sb_id = DEF_SB_ID;
	storm_memset_eq_data(bp, &eq_data, BP_FUNC(bp));
}
2611
2612
/* Quiesce this function when the MCP disables it (E1H DCC flow):
 * stop the Tx path first, then close the NIG LLH gate for the port,
 * and finally drop the carrier indication.
 */
static void bnx2x_e1h_disable(struct bnx2x *bp)
{
	int port = BP_PORT(bp);

	netif_tx_disable(bp->dev);

	REG_WR(bp, NIG_REG_LLH0_FUNC_EN + port*8, 0);

	netif_carrier_off(bp->dev);
}
2623
/* Re-enable this function after the MCP un-disables it (E1H DCC flow):
 * re-open the NIG LLH gate and restart the Tx queues.
 */
static void bnx2x_e1h_enable(struct bnx2x *bp)
{
	int port = BP_PORT(bp);

	REG_WR(bp, NIG_REG_LLH0_FUNC_EN + port*8, 1);

	/* Tx queue should be only reenabled */
	netif_tx_wake_all_queues(bp->dev);

	/*
	 * Should not call netif_carrier_on since it will be called if the link
	 * is up when checking for link state
	 */
}
2638
/* called due to MCP event (on pmf):
 * reread new bandwidth configuration
 * configure FW
 * notify others function about the change
 */
static inline void bnx2x_config_mf_bw(struct bnx2x *bp)
{
	/* min/max rate shaping needs an actual link rate, so recalculate
	 * and notify the other functions only while the link is up */
	if (bp->link_vars.link_up) {
		bnx2x_cmng_fns_init(bp, true, CMNG_FNS_MINMAX);
		bnx2x_link_sync_notify(bp);
	}
	/* push the (possibly unchanged) cmng configuration to the FW */
	storm_memset_cmng(bp, &bp->cmng, BP_PORT(bp));
}
2652
/* Apply a new MF bandwidth configuration and ack the request to the MCP. */
static inline void bnx2x_set_mf_bw(struct bnx2x *bp)
{
	bnx2x_config_mf_bw(bp);
	bnx2x_fw_command(bp, DRV_MSG_CODE_SET_MF_BW_ACK, 0);
}
2658
/* Handle a DCC event from the MCP: enable/disable this PF and/or apply
 * a new bandwidth allocation. Each handled event bit is cleared from
 * @dcc_event; any bit left set means an unhandled event, reported to
 * the MCP as a failure.
 */
static void bnx2x_dcc_event(struct bnx2x *bp, u32 dcc_event)
{
	DP(BNX2X_MSG_MCP, "dcc_event 0x%x\n", dcc_event);

	if (dcc_event & DRV_STATUS_DCC_DISABLE_ENABLE_PF) {

		/*
		 * This is the only place besides the function initialization
		 * where the bp->flags can change so it is done without any
		 * locks
		 */
		if (bp->mf_config[BP_VN(bp)] & FUNC_MF_CFG_FUNC_DISABLED) {
			DP(NETIF_MSG_IFDOWN, "mf_cfg function disabled\n");
			bp->flags |= MF_FUNC_DIS;

			bnx2x_e1h_disable(bp);
		} else {
			DP(NETIF_MSG_IFUP, "mf_cfg function enabled\n");
			bp->flags &= ~MF_FUNC_DIS;

			bnx2x_e1h_enable(bp);
		}
		dcc_event &= ~DRV_STATUS_DCC_DISABLE_ENABLE_PF;
	}
	if (dcc_event & DRV_STATUS_DCC_BANDWIDTH_ALLOCATION) {
		bnx2x_config_mf_bw(bp);
		dcc_event &= ~DRV_STATUS_DCC_BANDWIDTH_ALLOCATION;
	}

	/* Report results to MCP */
	if (dcc_event)
		bnx2x_fw_command(bp, DRV_MSG_CODE_DCC_FAILURE, 0);
	else
		bnx2x_fw_command(bp, DRV_MSG_CODE_DCC_OK, 0);
}
2694
Michael Chan28912902009-10-10 13:46:53 +00002695/* must be called under the spq lock */
2696static inline struct eth_spe *bnx2x_sp_get_next(struct bnx2x *bp)
2697{
2698 struct eth_spe *next_spe = bp->spq_prod_bd;
2699
2700 if (bp->spq_prod_bd == bp->spq_last_bd) {
2701 bp->spq_prod_bd = bp->spq;
2702 bp->spq_prod_idx = 0;
2703 DP(NETIF_MSG_TIMER, "end of spq\n");
2704 } else {
2705 bp->spq_prod_bd++;
2706 bp->spq_prod_idx++;
2707 }
2708 return next_spe;
2709}
2710
/* must be called under the spq lock */
static inline void bnx2x_sp_prod_update(struct bnx2x *bp)
{
	int func = BP_FUNC(bp);

	/* Make sure that BD data is updated before writing the producer */
	wmb();

	/* publish the new producer index to the XSTORM */
	REG_WR16(bp, BAR_XSTRORM_INTMEM + XSTORM_SPQ_PROD_OFFSET(func),
		 bp->spq_prod_idx);
	/* order the MMIO write with the subsequent spq lock release */
	mmiowb();
}
2723
/* the slow path queue is odd since completions arrive on the fastpath ring */
/* Post a slow-path element (ramrod) on the SPQ.
 *
 * @command: ramrod command id
 * @cid:     connection id the ramrod applies to
 * @data_hi/@data_lo: DMA address of the ramrod data
 * @common:  non-zero for common (non-ETH-connection) ramrods
 *
 * Returns 0 on success, -EBUSY if the SPQ is full (panics the driver),
 * -EIO if a previous panic is pending (BNX2X_STOP_ON_ERROR builds).
 */
int bnx2x_sp_post(struct bnx2x *bp, int command, int cid,
		  u32 data_hi, u32 data_lo, int common)
{
	struct eth_spe *spe;
	u16 type;

#ifdef BNX2X_STOP_ON_ERROR
	if (unlikely(bp->panic))
		return -EIO;
#endif

	spin_lock_bh(&bp->spq_lock);

	if (!atomic_read(&bp->spq_left)) {
		BNX2X_ERR("BUG! SPQ ring full!\n");
		spin_unlock_bh(&bp->spq_lock);
		bnx2x_panic();
		return -EBUSY;
	}

	spe = bnx2x_sp_get_next(bp);

	/* CID needs port number to be encoded in it */
	spe->hdr.conn_and_cmd_data =
			cpu_to_le32((command << SPE_HDR_CMD_ID_SHIFT) |
				    HW_CID(bp, cid));

	if (common)
		/* Common ramrods:
		 * FUNC_START, FUNC_STOP, CFC_DEL, STATS, SET_MAC
		 * TRAFFIC_STOP, TRAFFIC_START
		 */
		type = (NONE_CONNECTION_TYPE << SPE_HDR_CONN_TYPE_SHIFT)
			& SPE_HDR_CONN_TYPE;
	else
		/* ETH ramrods: SETUP, HALT */
		type = (ETH_CONNECTION_TYPE << SPE_HDR_CONN_TYPE_SHIFT)
			& SPE_HDR_CONN_TYPE;

	type |= ((BP_FUNC(bp) << SPE_HDR_FUNCTION_ID_SHIFT) &
		 SPE_HDR_FUNCTION_ID);

	spe->hdr.type = cpu_to_le16(type);

	spe->data.update_data_addr.hi = cpu_to_le32(data_hi);
	spe->data.update_data_addr.lo = cpu_to_le32(data_lo);

	/* stats ramrod has its own slot on the spq */
	if (command != RAMROD_CMD_ID_COMMON_STAT_QUERY)
		/* It's ok if the actual decrement is issued towards the memory
		 * somewhere between the spin_lock and spin_unlock. Thus no
		 * more explicit memory barrier is needed.
		 */
		atomic_dec(&bp->spq_left);

	DP(BNX2X_MSG_SP/*NETIF_MSG_TIMER*/,
	   "SPQE[%x] (%x:%x) command %d hw_cid %x data (%x:%x) "
	   "type(0x%x) left %x\n",
	   bp->spq_prod_idx, (u32)U64_HI(bp->spq_mapping),
	   (u32)(U64_LO(bp->spq_mapping) +
	   (void *)bp->spq_prod_bd - (void *)bp->spq), command,
	   HW_CID(bp, cid), data_hi, data_lo, type, atomic_read(&bp->spq_left));

	bnx2x_sp_prod_update(bp);
	spin_unlock_bh(&bp->spq_lock);
	return 0;
}
2792
/* acquire split MCP access lock register */
static int bnx2x_acquire_alr(struct bnx2x *bp)
{
	u32 j, val;
	int rc = 0;

	might_sleep();
	/* poll for up to ~5 seconds (1000 * 5ms) */
	for (j = 0; j < 1000; j++) {
		val = (1UL << 31);
		REG_WR(bp, GRCBASE_MCP + 0x9c, val);
		val = REG_RD(bp, GRCBASE_MCP + 0x9c);
		/* bit 31 reads back set once the lock has been granted */
		if (val & (1L << 31))
			break;

		msleep(5);
	}
	if (!(val & (1L << 31))) {
		BNX2X_ERR("Cannot acquire MCP access lock register\n");
		rc = -EBUSY;
	}

	return rc;
}
2816
/* release split MCP access lock register */
static void bnx2x_release_alr(struct bnx2x *bp)
{
	/* clearing the register hands the lock back */
	REG_WR(bp, GRCBASE_MCP + 0x9c, 0);
}
2822
Dmitry Kravkov523224a2010-10-06 03:23:26 +00002823#define BNX2X_DEF_SB_ATT_IDX 0x0001
2824#define BNX2X_DEF_SB_IDX 0x0002
2825
/* Snapshot the default status block indices and return a bitmask of
 * which of them advanced since the last call (BNX2X_DEF_SB_ATT_IDX
 * for attention bits, BNX2X_DEF_SB_IDX for the slow-path index).
 */
static inline u16 bnx2x_update_dsb_idx(struct bnx2x *bp)
{
	struct host_sp_status_block *def_sb = bp->def_status_blk;
	u16 rc = 0;

	barrier(); /* status block is written to by the chip */
	if (bp->def_att_idx != def_sb->atten_status_block.attn_bits_index) {
		bp->def_att_idx = def_sb->atten_status_block.attn_bits_index;
		rc |= BNX2X_DEF_SB_ATT_IDX;
	}

	if (bp->def_idx != def_sb->sp_sb.running_index) {
		bp->def_idx = def_sb->sp_sb.running_index;
		rc |= BNX2X_DEF_SB_IDX;
	}

	/* Do not reorder: indices reading should complete before handling */
	barrier();
	return rc;
}
2846
2847/*
2848 * slow path service functions
2849 */
2850
/* Handle newly asserted attention bits: mask them in the AEU, record the
 * new attention state, service the hard-wired attentions (NIG/link,
 * GPIOs, general attentions) and finally latch the asserted bits in the
 * interrupt controller (HC or IGU, depending on the chip).
 */
static void bnx2x_attn_int_asserted(struct bnx2x *bp, u32 asserted)
{
	int port = BP_PORT(bp);
	u32 aeu_addr = port ? MISC_REG_AEU_MASK_ATTN_FUNC_1 :
			      MISC_REG_AEU_MASK_ATTN_FUNC_0;
	u32 nig_int_mask_addr = port ? NIG_REG_MASK_INTERRUPT_PORT1 :
				       NIG_REG_MASK_INTERRUPT_PORT0;
	u32 aeu_mask;
	u32 nig_mask = 0;
	u32 reg_addr;

	/* an already-asserted bit re-asserting indicates an IGU problem */
	if (bp->attn_state & asserted)
		BNX2X_ERR("IGU ERROR\n");

	/* mask the newly asserted bits in the AEU (under the HW lock) */
	bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_PORT0_ATT_MASK + port);
	aeu_mask = REG_RD(bp, aeu_addr);

	DP(NETIF_MSG_HW, "aeu_mask %x newly asserted %x\n",
	   aeu_mask, asserted);
	aeu_mask &= ~(asserted & 0x3ff);
	DP(NETIF_MSG_HW, "new mask %x\n", aeu_mask);

	REG_WR(bp, aeu_addr, aeu_mask);
	bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_PORT0_ATT_MASK + port);

	DP(NETIF_MSG_HW, "attn_state %x\n", bp->attn_state);
	bp->attn_state |= asserted;
	DP(NETIF_MSG_HW, "new state %x\n", bp->attn_state);

	if (asserted & ATTN_HARD_WIRED_MASK) {
		if (asserted & ATTN_NIG_FOR_FUNC) {

			bnx2x_acquire_phy_lock(bp);

			/* save nig interrupt mask */
			nig_mask = REG_RD(bp, nig_int_mask_addr);
			/* mask NIG interrupts while handling the link event */
			REG_WR(bp, nig_int_mask_addr, 0);

			bnx2x_link_attn(bp);

			/* handle unicore attn? */
		}
		if (asserted & ATTN_SW_TIMER_4_FUNC)
			DP(NETIF_MSG_HW, "ATTN_SW_TIMER_4_FUNC!\n");

		if (asserted & GPIO_2_FUNC)
			DP(NETIF_MSG_HW, "GPIO_2_FUNC!\n");

		if (asserted & GPIO_3_FUNC)
			DP(NETIF_MSG_HW, "GPIO_3_FUNC!\n");

		if (asserted & GPIO_4_FUNC)
			DP(NETIF_MSG_HW, "GPIO_4_FUNC!\n");

		/* clear the general attentions belonging to this port */
		if (port == 0) {
			if (asserted & ATTN_GENERAL_ATTN_1) {
				DP(NETIF_MSG_HW, "ATTN_GENERAL_ATTN_1!\n");
				REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_1, 0x0);
			}
			if (asserted & ATTN_GENERAL_ATTN_2) {
				DP(NETIF_MSG_HW, "ATTN_GENERAL_ATTN_2!\n");
				REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_2, 0x0);
			}
			if (asserted & ATTN_GENERAL_ATTN_3) {
				DP(NETIF_MSG_HW, "ATTN_GENERAL_ATTN_3!\n");
				REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_3, 0x0);
			}
		} else {
			if (asserted & ATTN_GENERAL_ATTN_4) {
				DP(NETIF_MSG_HW, "ATTN_GENERAL_ATTN_4!\n");
				REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_4, 0x0);
			}
			if (asserted & ATTN_GENERAL_ATTN_5) {
				DP(NETIF_MSG_HW, "ATTN_GENERAL_ATTN_5!\n");
				REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_5, 0x0);
			}
			if (asserted & ATTN_GENERAL_ATTN_6) {
				DP(NETIF_MSG_HW, "ATTN_GENERAL_ATTN_6!\n");
				REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_6, 0x0);
			}
		}

	} /* if hardwired */

	if (bp->common.int_block == INT_BLOCK_HC)
		reg_addr = (HC_REG_COMMAND_REG + port*32 +
			    COMMAND_REG_ATTN_BITS_SET);
	else
		reg_addr = (BAR_IGU_INTMEM + IGU_CMD_ATTN_BIT_SET_UPPER*8);

	DP(NETIF_MSG_HW, "about to mask 0x%08x at %s addr 0x%x\n", asserted,
	   (bp->common.int_block == INT_BLOCK_HC) ? "HC" : "IGU", reg_addr);
	REG_WR(bp, reg_addr, asserted);

	/* now set back the mask */
	if (asserted & ATTN_NIG_FOR_FUNC) {
		REG_WR(bp, nig_int_mask_addr, nig_mask);
		bnx2x_release_phy_lock(bp);
	}
}
2951
/* Record a fan failure in the shmem PHY configuration (so the failure is
 * visible across resets/other drivers) and warn the user.
 */
static inline void bnx2x_fan_failure(struct bnx2x *bp)
{
	int port = BP_PORT(bp);
	u32 ext_phy_config;
	/* mark the failure */
	ext_phy_config =
		SHMEM_RD(bp,
			 dev_info.port_hw_config[port].external_phy_config);

	/* replace the external PHY type with the FAILURE marker */
	ext_phy_config &= ~PORT_HW_CFG_XGXS_EXT_PHY_TYPE_MASK;
	ext_phy_config |= PORT_HW_CFG_XGXS_EXT_PHY_TYPE_FAILURE;
	SHMEM_WR(bp, dev_info.port_hw_config[port].external_phy_config,
		 ext_phy_config);

	/* log the failure */
	netdev_err(bp->dev, "Fan Failure on Network Controller has caused"
	       " the driver to shutdown the card to prevent permanent"
	       " damage. Please contact OEM Support for assistance\n");
}
Eilon Greensteinab6ad5a2009-08-12 08:24:29 +00002971
/* Handle deasserted attention bits from AEU group 0: SPIO5 (fan failure),
 * GPIO3 module-detect interrupts and fatal HW-block attentions.
 * Masks each handled source in the per-port AEU enable register before
 * acting on it so it cannot re-fire while being serviced.
 */
static inline void bnx2x_attn_int_deasserted0(struct bnx2x *bp, u32 attn)
{
	int port = BP_PORT(bp);
	int reg_offset;
	u32 val;

	reg_offset = (port ? MISC_REG_AEU_ENABLE1_FUNC_1_OUT_0 :
			     MISC_REG_AEU_ENABLE1_FUNC_0_OUT_0);

	if (attn & AEU_INPUTS_ATTN_BITS_SPIO5) {

		/* mask SPIO5 in the AEU enable register first */
		val = REG_RD(bp, reg_offset);
		val &= ~AEU_INPUTS_ATTN_BITS_SPIO5;
		REG_WR(bp, reg_offset, val);

		BNX2X_ERR("SPIO5 hw attention\n");

		/* Fan failure attention */
		bnx2x_hw_reset_phy(&bp->link_params);
		bnx2x_fan_failure(bp);
	}

	if (attn & (AEU_INPUTS_ATTN_BITS_GPIO3_FUNCTION_0 |
		    AEU_INPUTS_ATTN_BITS_GPIO3_FUNCTION_1)) {
		/* module detect needs the PHY lock held */
		bnx2x_acquire_phy_lock(bp);
		bnx2x_handle_module_detect_int(&bp->link_params);
		bnx2x_release_phy_lock(bp);
	}

	if (attn & HW_INTERRUT_ASSERT_SET_0) {

		/* mask the fatal sources, then bring the driver down */
		val = REG_RD(bp, reg_offset);
		val &= ~(attn & HW_INTERRUT_ASSERT_SET_0);
		REG_WR(bp, reg_offset, val);

		BNX2X_ERR("FATAL HW block attention set0 0x%x\n",
			  (u32)(attn & HW_INTERRUT_ASSERT_SET_0));
		bnx2x_panic();
	}
}
3012
/* Handle deasserted attention bits from AEU group 1: doorbell queue (DORQ)
 * HW interrupts and fatal HW-block attentions of set 1.
 */
static inline void bnx2x_attn_int_deasserted1(struct bnx2x *bp, u32 attn)
{
	u32 val;

	if (attn & AEU_INPUTS_ATTN_BITS_DOORBELLQ_HW_INTERRUPT) {

		/* reading the STS_CLR register also clears the interrupt */
		val = REG_RD(bp, DORQ_REG_DORQ_INT_STS_CLR);
		BNX2X_ERR("DB hw attention 0x%x\n", val);
		/* DORQ discard attention */
		if (val & 0x2)
			BNX2X_ERR("FATAL error from DORQ\n");
	}

	if (attn & HW_INTERRUT_ASSERT_SET_1) {

		int port = BP_PORT(bp);
		int reg_offset;

		reg_offset = (port ? MISC_REG_AEU_ENABLE1_FUNC_1_OUT_1 :
				     MISC_REG_AEU_ENABLE1_FUNC_0_OUT_1);

		/* mask the fatal sources in the AEU, then panic */
		val = REG_RD(bp, reg_offset);
		val &= ~(attn & HW_INTERRUT_ASSERT_SET_1);
		REG_WR(bp, reg_offset, val);

		BNX2X_ERR("FATAL HW block attention set1 0x%x\n",
			  (u32)(attn & HW_INTERRUT_ASSERT_SET_1));
		bnx2x_panic();
	}
}
3043
/* Handle deasserted attention bits from AEU group 2: CFC and PXP block
 * HW interrupts plus fatal HW-block attentions of set 2.
 */
static inline void bnx2x_attn_int_deasserted2(struct bnx2x *bp, u32 attn)
{
	u32 val;

	if (attn & AEU_INPUTS_ATTN_BITS_CFC_HW_INTERRUPT) {

		/* read-to-clear the CFC interrupt status */
		val = REG_RD(bp, CFC_REG_CFC_INT_STS_CLR);
		BNX2X_ERR("CFC hw attention 0x%x\n", val);
		/* CFC error attention */
		if (val & 0x2)
			BNX2X_ERR("FATAL error from CFC\n");
	}

	if (attn & AEU_INPUTS_ATTN_BITS_PXP_HW_INTERRUPT) {

		val = REG_RD(bp, PXP_REG_PXP_INT_STS_CLR_0);
		BNX2X_ERR("PXP hw attention 0x%x\n", val);
		/* RQ_USDMDP_FIFO_OVERFLOW */
		if (val & 0x18000)
			BNX2X_ERR("FATAL error from PXP\n");
		/* E2 chips have a second PXP interrupt status register */
		if (CHIP_IS_E2(bp)) {
			val = REG_RD(bp, PXP_REG_PXP_INT_STS_CLR_1);
			BNX2X_ERR("PXP hw attention-1 0x%x\n", val);
		}
	}

	if (attn & HW_INTERRUT_ASSERT_SET_2) {

		int port = BP_PORT(bp);
		int reg_offset;

		reg_offset = (port ? MISC_REG_AEU_ENABLE1_FUNC_1_OUT_2 :
				     MISC_REG_AEU_ENABLE1_FUNC_0_OUT_2);

		/* mask the fatal sources in the AEU, then panic */
		val = REG_RD(bp, reg_offset);
		val &= ~(attn & HW_INTERRUT_ASSERT_SET_2);
		REG_WR(bp, reg_offset, val);

		BNX2X_ERR("FATAL HW block attention set2 0x%x\n",
			  (u32)(attn & HW_INTERRUT_ASSERT_SET_2));
		bnx2x_panic();
	}
}
3087
/* Handle deasserted attention bits from AEU group 3: general attentions
 * (PMF link events from the MCP mailbox, MC/MCP asserts) and latched
 * attentions (GRC timeout/reserved).
 */
static inline void bnx2x_attn_int_deasserted3(struct bnx2x *bp, u32 attn)
{
	u32 val;

	if (attn & EVEREST_GEN_ATTN_IN_USE_MASK) {

		if (attn & BNX2X_PMF_LINK_ASSERT) {
			int func = BP_FUNC(bp);

			/* ack the general attention for this function */
			REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_12 + func*4, 0);
			/* refresh the MF config and read the driver-status
			 * mailbox to see what the MCP is signalling */
			bp->mf_config[BP_VN(bp)] = MF_CFG_RD(bp,
					func_mf_config[BP_ABS_FUNC(bp)].config);
			val = SHMEM_RD(bp,
				       func_mb[BP_FW_MB_IDX(bp)].drv_status);
			if (val & DRV_STATUS_DCC_EVENT_MASK)
				bnx2x_dcc_event(bp,
					    (val & DRV_STATUS_DCC_EVENT_MASK));

			if (val & DRV_STATUS_SET_MF_BW)
				bnx2x_set_mf_bw(bp);

			bnx2x__link_status_update(bp);
			/* become PMF if the MCP says so and we are not yet */
			if ((bp->port.pmf == 0) && (val & DRV_STATUS_PMF))
				bnx2x_pmf_update(bp);

			if (bp->port.pmf &&
			    (val & DRV_STATUS_DCBX_NEGOTIATION_RESULTS) &&
				bp->dcbx_enabled > 0)
				/* start dcbx state machine */
				bnx2x_dcbx_set_params(bp,
					BNX2X_DCBX_STATE_NEG_RECEIVED);
		} else if (attn & BNX2X_MC_ASSERT_BITS) {

			BNX2X_ERR("MC assert!\n");
			/* ack all four management-core assert attentions */
			REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_10, 0);
			REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_9, 0);
			REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_8, 0);
			REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_7, 0);
			bnx2x_panic();

		} else if (attn & BNX2X_MCP_ASSERT) {

			BNX2X_ERR("MCP assert!\n");
			REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_11, 0);
			bnx2x_fw_dump(bp);

		} else
			BNX2X_ERR("Unknown HW assert! (attn 0x%x)\n", attn);
	}

	if (attn & EVEREST_LATCHED_ATTN_IN_USE_MASK) {
		BNX2X_ERR("LATCHED attention 0x%08x (masked)\n", attn);
		if (attn & BNX2X_GRC_TIMEOUT) {
			/* E1 has no GRC timeout attention register */
			val = CHIP_IS_E1(bp) ? 0 :
					REG_RD(bp, MISC_REG_GRC_TIMEOUT_ATTN);
			BNX2X_ERR("GRC time-out 0x%08x\n", val);
		}
		if (attn & BNX2X_GRC_RSV) {
			val = CHIP_IS_E1(bp) ? 0 :
					REG_RD(bp, MISC_REG_GRC_RSV_ATTN);
			BNX2X_ERR("GRC reserved 0x%08x\n", val);
		}
		/* clear all latched attention signals */
		REG_WR(bp, MISC_REG_AEU_CLR_LATCH_SIGNAL, 0x7ff);
	}
}
3153
/* MISC_REG_GENERIC_POR_1 is used as a driver scratch-pad shared across
 * functions: the low LOAD_COUNTER_BITS bits count loaded driver instances
 * (see bnx2x_inc/dec_load_cnt()), the bits above flag a reset in progress
 * (see bnx2x_set_reset_in_progress()/bnx2x_reset_is_done()).
 */
#define BNX2X_MISC_GEN_REG MISC_REG_GENERIC_POR_1
#define LOAD_COUNTER_BITS 16 /* Number of bits for load counter */
#define LOAD_COUNTER_MASK (((u32)0x1 << LOAD_COUNTER_BITS) - 1)
#define RESET_DONE_FLAG_MASK (~LOAD_COUNTER_MASK)
#define RESET_DONE_FLAG_SHIFT LOAD_COUNTER_BITS
Dmitry Kravkovf85582f2010-10-06 03:34:21 +00003159
/*
 * Clear the reset-in-progress flag bit in the shared MISC generic
 * register, marking the global reset as completed.
 *
 * should be run under rtnl lock
 */
static inline void bnx2x_set_reset_done(struct bnx2x *bp)
{
	u32 val = REG_RD(bp, BNX2X_MISC_GEN_REG);
	val &= ~(1 << RESET_DONE_FLAG_SHIFT);
	REG_WR(bp, BNX2X_MISC_GEN_REG, val);
	barrier();
	mmiowb(); /* keep the MMIO write ordered */
}
3171
3172/*
3173 * should be run under rtnl lock
3174 */
3175static inline void bnx2x_set_reset_in_progress(struct bnx2x *bp)
3176{
3177 u32 val = REG_RD(bp, BNX2X_MISC_GEN_REG);
3178 val |= (1 << 16);
3179 REG_WR(bp, BNX2X_MISC_GEN_REG, val);
3180 barrier();
3181 mmiowb();
3182}
3183
3184/*
3185 * should be run under rtnl lock
3186 */
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00003187bool bnx2x_reset_is_done(struct bnx2x *bp)
Vladislav Zolotarov72fd0712010-04-19 01:13:12 +00003188{
3189 u32 val = REG_RD(bp, BNX2X_MISC_GEN_REG);
3190 DP(NETIF_MSG_HW, "GEN_REG_VAL=0x%08x\n", val);
3191 return (val & RESET_DONE_FLAG_MASK) ? false : true;
3192}
3193
/*
 * Increment the shared load counter (low LOAD_COUNTER_BITS bits of the
 * MISC generic register) while preserving the reset-flag bits above it.
 *
 * should be run under rtnl lock
 */
inline void bnx2x_inc_load_cnt(struct bnx2x *bp)
{
	u32 val1, val = REG_RD(bp, BNX2X_MISC_GEN_REG);

	DP(NETIF_MSG_HW, "Old GEN_REG_VAL=0x%08x\n", val);

	/* counter wraps within LOAD_COUNTER_BITS; flag bits are kept */
	val1 = ((val & LOAD_COUNTER_MASK) + 1) & LOAD_COUNTER_MASK;
	REG_WR(bp, BNX2X_MISC_GEN_REG, (val & RESET_DONE_FLAG_MASK) | val1);
	barrier();
	mmiowb();
}
3208
/*
 * Decrement the shared load counter (low LOAD_COUNTER_BITS bits of the
 * MISC generic register), preserving the reset-flag bits, and return
 * the new counter value.
 *
 * should be run under rtnl lock
 */
u32 bnx2x_dec_load_cnt(struct bnx2x *bp)
{
	u32 val1, val = REG_RD(bp, BNX2X_MISC_GEN_REG);

	DP(NETIF_MSG_HW, "Old GEN_REG_VAL=0x%08x\n", val);

	/* counter wraps within LOAD_COUNTER_BITS; flag bits are kept */
	val1 = ((val & LOAD_COUNTER_MASK) - 1) & LOAD_COUNTER_MASK;
	REG_WR(bp, BNX2X_MISC_GEN_REG, (val & RESET_DONE_FLAG_MASK) | val1);
	barrier();
	mmiowb();

	return val1;
}
3225
3226/*
3227 * should be run under rtnl lock
3228 */
3229static inline u32 bnx2x_get_load_cnt(struct bnx2x *bp)
3230{
3231 return REG_RD(bp, BNX2X_MISC_GEN_REG) & LOAD_COUNTER_MASK;
3232}
3233
3234static inline void bnx2x_clear_load_cnt(struct bnx2x *bp)
3235{
3236 u32 val = REG_RD(bp, BNX2X_MISC_GEN_REG);
3237 REG_WR(bp, BNX2X_MISC_GEN_REG, val & (~LOAD_COUNTER_MASK));
3238}
3239
/* Emit one block name in a comma-separated list on the current console
 * line; a separator is printed before every entry except the first.
 */
static inline void _print_next_block(int idx, const char *blk)
{
	const char *sep = idx ? ", " : "";

	pr_cont("%s%s", sep, blk);
}
3246
3247static inline int bnx2x_print_blocks_with_parity0(u32 sig, int par_num)
3248{
3249 int i = 0;
3250 u32 cur_bit = 0;
3251 for (i = 0; sig; i++) {
3252 cur_bit = ((u32)0x1 << i);
3253 if (sig & cur_bit) {
3254 switch (cur_bit) {
3255 case AEU_INPUTS_ATTN_BITS_BRB_PARITY_ERROR:
3256 _print_next_block(par_num++, "BRB");
3257 break;
3258 case AEU_INPUTS_ATTN_BITS_PARSER_PARITY_ERROR:
3259 _print_next_block(par_num++, "PARSER");
3260 break;
3261 case AEU_INPUTS_ATTN_BITS_TSDM_PARITY_ERROR:
3262 _print_next_block(par_num++, "TSDM");
3263 break;
3264 case AEU_INPUTS_ATTN_BITS_SEARCHER_PARITY_ERROR:
3265 _print_next_block(par_num++, "SEARCHER");
3266 break;
3267 case AEU_INPUTS_ATTN_BITS_TSEMI_PARITY_ERROR:
3268 _print_next_block(par_num++, "TSEMI");
3269 break;
3270 }
3271
3272 /* Clear the bit */
3273 sig &= ~cur_bit;
3274 }
3275 }
3276
3277 return par_num;
3278}
3279
/* Print the names of the HW blocks whose parity-error bits are set in
 * signal word 1; returns the updated count of names printed so far.
 */
static inline int bnx2x_print_blocks_with_parity1(u32 sig, int par_num)
{
	int i = 0;
	u32 cur_bit = 0;
	/* walk bits from LSB upwards; loop ends once sig is fully cleared */
	for (i = 0; sig; i++) {
		cur_bit = ((u32)0x1 << i);
		if (sig & cur_bit) {
			switch (cur_bit) {
			case AEU_INPUTS_ATTN_BITS_PBCLIENT_PARITY_ERROR:
				_print_next_block(par_num++, "PBCLIENT");
				break;
			case AEU_INPUTS_ATTN_BITS_QM_PARITY_ERROR:
				_print_next_block(par_num++, "QM");
				break;
			case AEU_INPUTS_ATTN_BITS_XSDM_PARITY_ERROR:
				_print_next_block(par_num++, "XSDM");
				break;
			case AEU_INPUTS_ATTN_BITS_XSEMI_PARITY_ERROR:
				_print_next_block(par_num++, "XSEMI");
				break;
			case AEU_INPUTS_ATTN_BITS_DOORBELLQ_PARITY_ERROR:
				_print_next_block(par_num++, "DOORBELLQ");
				break;
			case AEU_INPUTS_ATTN_BITS_VAUX_PCI_CORE_PARITY_ERROR:
				_print_next_block(par_num++, "VAUX PCI CORE");
				break;
			case AEU_INPUTS_ATTN_BITS_DEBUG_PARITY_ERROR:
				_print_next_block(par_num++, "DEBUG");
				break;
			case AEU_INPUTS_ATTN_BITS_USDM_PARITY_ERROR:
				_print_next_block(par_num++, "USDM");
				break;
			case AEU_INPUTS_ATTN_BITS_USEMI_PARITY_ERROR:
				_print_next_block(par_num++, "USEMI");
				break;
			case AEU_INPUTS_ATTN_BITS_UPB_PARITY_ERROR:
				_print_next_block(par_num++, "UPB");
				break;
			case AEU_INPUTS_ATTN_BITS_CSDM_PARITY_ERROR:
				_print_next_block(par_num++, "CSDM");
				break;
			}

			/* Clear the bit */
			sig &= ~cur_bit;
		}
	}

	return par_num;
}
3330
/* Print the names of the HW blocks whose parity-error bits are set in
 * signal word 2; returns the updated count of names printed so far.
 */
static inline int bnx2x_print_blocks_with_parity2(u32 sig, int par_num)
{
	int i = 0;
	u32 cur_bit = 0;
	/* walk bits from LSB upwards; loop ends once sig is fully cleared */
	for (i = 0; sig; i++) {
		cur_bit = ((u32)0x1 << i);
		if (sig & cur_bit) {
			switch (cur_bit) {
			case AEU_INPUTS_ATTN_BITS_CSEMI_PARITY_ERROR:
				_print_next_block(par_num++, "CSEMI");
				break;
			case AEU_INPUTS_ATTN_BITS_PXP_PARITY_ERROR:
				_print_next_block(par_num++, "PXP");
				break;
			case AEU_IN_ATTN_BITS_PXPPCICLOCKCLIENT_PARITY_ERROR:
				_print_next_block(par_num++,
					"PXPPCICLOCKCLIENT");
				break;
			case AEU_INPUTS_ATTN_BITS_CFC_PARITY_ERROR:
				_print_next_block(par_num++, "CFC");
				break;
			case AEU_INPUTS_ATTN_BITS_CDU_PARITY_ERROR:
				_print_next_block(par_num++, "CDU");
				break;
			case AEU_INPUTS_ATTN_BITS_IGU_PARITY_ERROR:
				_print_next_block(par_num++, "IGU");
				break;
			case AEU_INPUTS_ATTN_BITS_MISC_PARITY_ERROR:
				_print_next_block(par_num++, "MISC");
				break;
			}

			/* Clear the bit */
			sig &= ~cur_bit;
		}
	}

	return par_num;
}
3370
3371static inline int bnx2x_print_blocks_with_parity3(u32 sig, int par_num)
3372{
3373 int i = 0;
3374 u32 cur_bit = 0;
3375 for (i = 0; sig; i++) {
3376 cur_bit = ((u32)0x1 << i);
3377 if (sig & cur_bit) {
3378 switch (cur_bit) {
3379 case AEU_INPUTS_ATTN_BITS_MCP_LATCHED_ROM_PARITY:
3380 _print_next_block(par_num++, "MCP ROM");
3381 break;
3382 case AEU_INPUTS_ATTN_BITS_MCP_LATCHED_UMP_RX_PARITY:
3383 _print_next_block(par_num++, "MCP UMP RX");
3384 break;
3385 case AEU_INPUTS_ATTN_BITS_MCP_LATCHED_UMP_TX_PARITY:
3386 _print_next_block(par_num++, "MCP UMP TX");
3387 break;
3388 case AEU_INPUTS_ATTN_BITS_MCP_LATCHED_SCPAD_PARITY:
3389 _print_next_block(par_num++, "MCP SCPAD");
3390 break;
3391 }
3392
3393 /* Clear the bit */
3394 sig &= ~cur_bit;
3395 }
3396 }
3397
3398 return par_num;
3399}
3400
/* Check the four after-invert signal words for parity-error bits; when
 * any are set, log them and print a comma-separated list of the blocks
 * involved. Returns true when a parity error was detected.
 */
static inline bool bnx2x_parity_attn(struct bnx2x *bp, u32 sig0, u32 sig1,
				     u32 sig2, u32 sig3)
{
	if ((sig0 & HW_PRTY_ASSERT_SET_0) || (sig1 & HW_PRTY_ASSERT_SET_1) ||
	    (sig2 & HW_PRTY_ASSERT_SET_2) || (sig3 & HW_PRTY_ASSERT_SET_3)) {
		int par_num = 0;
		DP(NETIF_MSG_HW, "Was parity error: HW block parity attention: "
			"[0]:0x%08x [1]:0x%08x "
			"[2]:0x%08x [3]:0x%08x\n",
			sig0 & HW_PRTY_ASSERT_SET_0,
			sig1 & HW_PRTY_ASSERT_SET_1,
			sig2 & HW_PRTY_ASSERT_SET_2,
			sig3 & HW_PRTY_ASSERT_SET_3);
		printk(KERN_ERR"%s: Parity errors detected in blocks: ",
		       bp->dev->name);
		/* each helper appends to the same console line and keeps
		 * the running count so separators come out right */
		par_num = bnx2x_print_blocks_with_parity0(
			sig0 & HW_PRTY_ASSERT_SET_0, par_num);
		par_num = bnx2x_print_blocks_with_parity1(
			sig1 & HW_PRTY_ASSERT_SET_1, par_num);
		par_num = bnx2x_print_blocks_with_parity2(
			sig2 & HW_PRTY_ASSERT_SET_2, par_num);
		par_num = bnx2x_print_blocks_with_parity3(
			sig3 & HW_PRTY_ASSERT_SET_3, par_num);
		printk("\n");
		return true;
	} else
		return false;
}
3429
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00003430bool bnx2x_chk_parity_attn(struct bnx2x *bp)
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02003431{
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02003432 struct attn_route attn;
Vladislav Zolotarov72fd0712010-04-19 01:13:12 +00003433 int port = BP_PORT(bp);
3434
3435 attn.sig[0] = REG_RD(bp,
3436 MISC_REG_AEU_AFTER_INVERT_1_FUNC_0 +
3437 port*4);
3438 attn.sig[1] = REG_RD(bp,
3439 MISC_REG_AEU_AFTER_INVERT_2_FUNC_0 +
3440 port*4);
3441 attn.sig[2] = REG_RD(bp,
3442 MISC_REG_AEU_AFTER_INVERT_3_FUNC_0 +
3443 port*4);
3444 attn.sig[3] = REG_RD(bp,
3445 MISC_REG_AEU_AFTER_INVERT_4_FUNC_0 +
3446 port*4);
3447
3448 return bnx2x_parity_attn(bp, attn.sig[0], attn.sig[1], attn.sig[2],
3449 attn.sig[3]);
3450}
3451
Dmitry Kravkovf2e08992010-10-06 03:28:26 +00003452
/* Handle deasserted attention bits from AEU group 4 (E2 only): decode and
 * log PGLUE_B and ATC HW interrupt causes and report fatal set-4 parity
 * attentions. Reading the *_INT_STS_CLR registers clears the interrupts.
 */
static inline void bnx2x_attn_int_deasserted4(struct bnx2x *bp, u32 attn)
{
	u32 val;
	if (attn & AEU_INPUTS_ATTN_BITS_PGLUE_HW_INTERRUPT) {

		val = REG_RD(bp, PGLUE_B_REG_PGLUE_B_INT_STS_CLR);
		BNX2X_ERR("PGLUE hw attention 0x%x\n", val);
		/* decode every known PGLUE_B cause bit individually */
		if (val & PGLUE_B_PGLUE_B_INT_STS_REG_ADDRESS_ERROR)
			BNX2X_ERR("PGLUE_B_PGLUE_B_INT_STS_REG_"
				  "ADDRESS_ERROR\n");
		if (val & PGLUE_B_PGLUE_B_INT_STS_REG_INCORRECT_RCV_BEHAVIOR)
			BNX2X_ERR("PGLUE_B_PGLUE_B_INT_STS_REG_"
				  "INCORRECT_RCV_BEHAVIOR\n");
		if (val & PGLUE_B_PGLUE_B_INT_STS_REG_WAS_ERROR_ATTN)
			BNX2X_ERR("PGLUE_B_PGLUE_B_INT_STS_REG_"
				  "WAS_ERROR_ATTN\n");
		if (val & PGLUE_B_PGLUE_B_INT_STS_REG_VF_LENGTH_VIOLATION_ATTN)
			BNX2X_ERR("PGLUE_B_PGLUE_B_INT_STS_REG_"
				  "VF_LENGTH_VIOLATION_ATTN\n");
		if (val &
		    PGLUE_B_PGLUE_B_INT_STS_REG_VF_GRC_SPACE_VIOLATION_ATTN)
			BNX2X_ERR("PGLUE_B_PGLUE_B_INT_STS_REG_"
				  "VF_GRC_SPACE_VIOLATION_ATTN\n");
		if (val &
		    PGLUE_B_PGLUE_B_INT_STS_REG_VF_MSIX_BAR_VIOLATION_ATTN)
			BNX2X_ERR("PGLUE_B_PGLUE_B_INT_STS_REG_"
				  "VF_MSIX_BAR_VIOLATION_ATTN\n");
		if (val & PGLUE_B_PGLUE_B_INT_STS_REG_TCPL_ERROR_ATTN)
			BNX2X_ERR("PGLUE_B_PGLUE_B_INT_STS_REG_"
				  "TCPL_ERROR_ATTN\n");
		if (val & PGLUE_B_PGLUE_B_INT_STS_REG_TCPL_IN_TWO_RCBS_ATTN)
			BNX2X_ERR("PGLUE_B_PGLUE_B_INT_STS_REG_"
				  "TCPL_IN_TWO_RCBS_ATTN\n");
		if (val & PGLUE_B_PGLUE_B_INT_STS_REG_CSSNOOP_FIFO_OVERFLOW)
			BNX2X_ERR("PGLUE_B_PGLUE_B_INT_STS_REG_"
				  "CSSNOOP_FIFO_OVERFLOW\n");
	}
	if (attn & AEU_INPUTS_ATTN_BITS_ATC_HW_INTERRUPT) {
		val = REG_RD(bp, ATC_REG_ATC_INT_STS_CLR);
		BNX2X_ERR("ATC hw attention 0x%x\n", val);
		/* decode every known ATC cause bit individually */
		if (val & ATC_ATC_INT_STS_REG_ADDRESS_ERROR)
			BNX2X_ERR("ATC_ATC_INT_STS_REG_ADDRESS_ERROR\n");
		if (val & ATC_ATC_INT_STS_REG_ATC_TCPL_TO_NOT_PEND)
			BNX2X_ERR("ATC_ATC_INT_STS_REG"
				  "_ATC_TCPL_TO_NOT_PEND\n");
		if (val & ATC_ATC_INT_STS_REG_ATC_GPA_MULTIPLE_HITS)
			BNX2X_ERR("ATC_ATC_INT_STS_REG_"
				  "ATC_GPA_MULTIPLE_HITS\n");
		if (val & ATC_ATC_INT_STS_REG_ATC_RCPL_TO_EMPTY_CNT)
			BNX2X_ERR("ATC_ATC_INT_STS_REG_"
				  "ATC_RCPL_TO_EMPTY_CNT\n");
		if (val & ATC_ATC_INT_STS_REG_ATC_TCPL_ERROR)
			BNX2X_ERR("ATC_ATC_INT_STS_REG_ATC_TCPL_ERROR\n");
		if (val & ATC_ATC_INT_STS_REG_ATC_IREQ_LESS_THAN_STU)
			BNX2X_ERR("ATC_ATC_INT_STS_REG_"
				  "ATC_IREQ_LESS_THAN_STU\n");
	}

	if (attn & (AEU_INPUTS_ATTN_BITS_PGLUE_PARITY_ERROR |
		    AEU_INPUTS_ATTN_BITS_ATC_PARITY_ERROR)) {
		BNX2X_ERR("FATAL parity attention set4 0x%x\n",
		(u32)(attn & (AEU_INPUTS_ATTN_BITS_PGLUE_PARITY_ERROR |
			      AEU_INPUTS_ATTN_BITS_ATC_PARITY_ERROR)));
	}

}
3519
/* Top-level handler for deasserted attention group bits: checks for
 * parity errors (which trigger recovery and short-circuit everything
 * else), dispatches each deasserted group to the per-group handlers,
 * acks the bits towards the HC/IGU and re-enables them in the AEU mask.
 */
static void bnx2x_attn_int_deasserted(struct bnx2x *bp, u32 deasserted)
{
	struct attn_route attn, *group_mask;
	int port = BP_PORT(bp);
	int index;
	u32 reg_addr;
	u32 val;
	u32 aeu_mask;

	/* need to take HW lock because MCP or other port might also
	   try to handle this event */
	bnx2x_acquire_alr(bp);

	if (CHIP_PARITY_ENABLED(bp) && bnx2x_chk_parity_attn(bp)) {
		/* kick off the recovery flow and stop servicing attentions */
		bp->recovery_state = BNX2X_RECOVERY_INIT;
		bnx2x_set_reset_in_progress(bp);
		schedule_delayed_work(&bp->reset_task, 0);
		/* Disable HW interrupts */
		bnx2x_int_disable(bp);
		bnx2x_release_alr(bp);
		/* In case of parity errors don't handle attentions so that
		 * other function would "see" parity errors.
		 */
		return;
	}

	/* snapshot the after-invert signal words for this port */
	attn.sig[0] = REG_RD(bp, MISC_REG_AEU_AFTER_INVERT_1_FUNC_0 + port*4);
	attn.sig[1] = REG_RD(bp, MISC_REG_AEU_AFTER_INVERT_2_FUNC_0 + port*4);
	attn.sig[2] = REG_RD(bp, MISC_REG_AEU_AFTER_INVERT_3_FUNC_0 + port*4);
	attn.sig[3] = REG_RD(bp, MISC_REG_AEU_AFTER_INVERT_4_FUNC_0 + port*4);
	if (CHIP_IS_E2(bp))
		attn.sig[4] =
		      REG_RD(bp, MISC_REG_AEU_AFTER_INVERT_5_FUNC_0 + port*4);
	else
		attn.sig[4] = 0;

	DP(NETIF_MSG_HW, "attn: %08x %08x %08x %08x %08x\n",
	   attn.sig[0], attn.sig[1], attn.sig[2], attn.sig[3], attn.sig[4]);

	for (index = 0; index < MAX_DYNAMIC_ATTN_GRPS; index++) {
		if (deasserted & (1 << index)) {
			group_mask = &bp->attn_group[index];

			DP(NETIF_MSG_HW, "group[%d]: %08x %08x "
					 "%08x %08x %08x\n",
			   index,
			   group_mask->sig[0], group_mask->sig[1],
			   group_mask->sig[2], group_mask->sig[3],
			   group_mask->sig[4]);

			/* dispatch only the bits this group routes */
			bnx2x_attn_int_deasserted4(bp,
					attn.sig[4] & group_mask->sig[4]);
			bnx2x_attn_int_deasserted3(bp,
					attn.sig[3] & group_mask->sig[3]);
			bnx2x_attn_int_deasserted1(bp,
					attn.sig[1] & group_mask->sig[1]);
			bnx2x_attn_int_deasserted2(bp,
					attn.sig[2] & group_mask->sig[2]);
			bnx2x_attn_int_deasserted0(bp,
					attn.sig[0] & group_mask->sig[0]);
		}
	}

	bnx2x_release_alr(bp);

	/* ack the deasserted bits towards the interrupt controller */
	if (bp->common.int_block == INT_BLOCK_HC)
		reg_addr = (HC_REG_COMMAND_REG + port*32 +
			    COMMAND_REG_ATTN_BITS_CLR);
	else
		reg_addr = (BAR_IGU_INTMEM + IGU_CMD_ATTN_BIT_CLR_UPPER*8);

	val = ~deasserted;
	DP(NETIF_MSG_HW, "about to mask 0x%08x at %s addr 0x%x\n", val,
	   (bp->common.int_block == INT_BLOCK_HC) ? "HC" : "IGU", reg_addr);
	REG_WR(bp, reg_addr, val);

	if (~bp->attn_state & deasserted)
		BNX2X_ERR("IGU ERROR\n");

	reg_addr = port ? MISC_REG_AEU_MASK_ATTN_FUNC_1 :
			  MISC_REG_AEU_MASK_ATTN_FUNC_0;

	/* re-enable the handled attention lines in the AEU mask under the
	 * per-port HW lock */
	bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_PORT0_ATT_MASK + port);
	aeu_mask = REG_RD(bp, reg_addr);

	DP(NETIF_MSG_HW, "aeu_mask %x newly deasserted %x\n",
	   aeu_mask, deasserted);
	aeu_mask |= (deasserted & 0x3ff);
	DP(NETIF_MSG_HW, "new mask %x\n", aeu_mask);

	REG_WR(bp, reg_addr, aeu_mask);
	bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_PORT0_ATT_MASK + port);

	DP(NETIF_MSG_HW, "attn_state %x\n", bp->attn_state);
	bp->attn_state &= ~deasserted;
	DP(NETIF_MSG_HW, "new state %x\n", bp->attn_state);
}
3617
3618static void bnx2x_attn_int(struct bnx2x *bp)
3619{
3620 /* read local copy of bits */
Eilon Greenstein68d59482009-01-14 21:27:36 -08003621 u32 attn_bits = le32_to_cpu(bp->def_status_blk->atten_status_block.
3622 attn_bits);
3623 u32 attn_ack = le32_to_cpu(bp->def_status_blk->atten_status_block.
3624 attn_bits_ack);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02003625 u32 attn_state = bp->attn_state;
3626
3627 /* look for changed bits */
3628 u32 asserted = attn_bits & ~attn_ack & ~attn_state;
3629 u32 deasserted = ~attn_bits & attn_ack & attn_state;
3630
3631 DP(NETIF_MSG_HW,
3632 "attn_bits %x attn_ack %x asserted %x deasserted %x\n",
3633 attn_bits, attn_ack, asserted, deasserted);
3634
3635 if (~(attn_bits ^ attn_ack) & (attn_bits ^ attn_state))
Eilon Greenstein34f80b02008-06-23 20:33:01 -07003636 BNX2X_ERR("BAD attention state\n");
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02003637
3638 /* handle bits that were raised */
3639 if (asserted)
3640 bnx2x_attn_int_asserted(bp, asserted);
3641
3642 if (deasserted)
3643 bnx2x_attn_int_deasserted(bp, deasserted);
3644}
3645
/* Publish a new event-queue producer value for this function. */
static inline void bnx2x_update_eq_prod(struct bnx2x *bp, u16 prod)
{
	/* No memory barriers */
	storm_memset_eq_prod(bp, prod, BP_FUNC(bp));
	mmiowb(); /* keep prod updates ordered */
}
3652
3653#ifdef BCM_CNIC
/* Check whether a CFC-delete event belongs to a CNIC connection and, if
 * so, complete it towards the CNIC layer.
 *
 * Returns 1 when the CID is not a CNIC CID (caller must handle the
 * event itself), 0 when the event was consumed here.
 */
static int bnx2x_cnic_handle_cfc_del(struct bnx2x *bp, u32 cid,
				     union event_ring_elem *elem)
{
	/* CIDs below starting_cid belong to the L2 driver, not CNIC */
	if (!bp->cnic_eth_dev.starting_cid ||
	    cid < bp->cnic_eth_dev.starting_cid)
		return 1;

	DP(BNX2X_MSG_SP, "got delete ramrod for CNIC CID %d\n", cid);

	if (unlikely(elem->message.data.cfc_del_event.error)) {
		BNX2X_ERR("got delete ramrod for CNIC CID %d with error!\n",
			  cid);
		bnx2x_panic_dump(bp);
	}
	bnx2x_cnic_cfc_comp(bp, cid);
	return 0;
}
3671#endif
3672
/* Process pending event-queue (EQ) elements: walk the ring from the SW
 * consumer to the HW consumer, dispatch each completion (stats, CFC
 * delete, traffic stop/start, function/MAC ramrods), then return the
 * consumed credits to the SPQ and publish the new producer.
 */
static void bnx2x_eq_int(struct bnx2x *bp)
{
	u16 hw_cons, sw_cons, sw_prod;
	union event_ring_elem *elem;
	u32 cid;
	u8 opcode;
	int spqe_cnt = 0;

	hw_cons = le16_to_cpu(*bp->eq_cons_sb);

	/* The hw_cons range is 1-255, 257 - the sw_cons range is 0-254, 256.
	 * When we get the next-page we need to adjust so the loop
	 * condition below will be met. The next element is the size of a
	 * regular element and hence incrementing by 1
	 */
	if ((hw_cons & EQ_DESC_MAX_PAGE) == EQ_DESC_MAX_PAGE)
		hw_cons++;

	/* This function may never run in parallel with itself for a
	 * specific bp, thus there is no need in "paired" read memory
	 * barrier here.
	 */
	sw_cons = bp->eq_cons;
	sw_prod = bp->eq_prod;

	DP(BNX2X_MSG_SP, "EQ: hw_cons %u sw_cons %u bp->spq_left %u\n",
			hw_cons, sw_cons, atomic_read(&bp->spq_left));

	for (; sw_cons != hw_cons;
	      sw_prod = NEXT_EQ_IDX(sw_prod), sw_cons = NEXT_EQ_IDX(sw_cons)) {


		elem = &bp->eq_ring[EQ_DESC(sw_cons)];

		cid = SW_CID(elem->message.data.cfc_del_event.cid);
		opcode = elem->message.opcode;


		/* handle eq element */
		switch (opcode) {
		case EVENT_RING_OPCODE_STAT_QUERY:
			DP(NETIF_MSG_TIMER, "got statistics comp event\n");
			/* nothing to do with stats comp */
			continue;

		case EVENT_RING_OPCODE_CFC_DEL:
			/* handle according to cid range */
			/*
			 * we may want to verify here that the bp state is
			 * HALTING
			 */
			DP(NETIF_MSG_IFDOWN,
			   "got delete ramrod for MULTI[%d]\n", cid);
#ifdef BCM_CNIC
			/* CNIC CIDs are completed by the CNIC layer */
			if (!bnx2x_cnic_handle_cfc_del(bp, cid, elem))
				goto next_spqe;
			if (cid == BNX2X_FCOE_ETH_CID)
				bnx2x_fcoe(bp, state) = BNX2X_FP_STATE_CLOSED;
			else
#endif
				bnx2x_fp(bp, cid, state) =
						BNX2X_FP_STATE_CLOSED;

			goto next_spqe;

		case EVENT_RING_OPCODE_STOP_TRAFFIC:
			DP(NETIF_MSG_IFUP, "got STOP TRAFFIC\n");
			bnx2x_dcbx_set_params(bp, BNX2X_DCBX_STATE_TX_PAUSED);
			goto next_spqe;
		case EVENT_RING_OPCODE_START_TRAFFIC:
			DP(NETIF_MSG_IFUP, "got START TRAFFIC\n");
			bnx2x_dcbx_set_params(bp, BNX2X_DCBX_STATE_TX_RELEASED);
			goto next_spqe;
		}

		/* remaining opcodes are only valid in specific bp states,
		 * so dispatch on the (opcode | state) combination */
		switch (opcode | bp->state) {
		case (EVENT_RING_OPCODE_FUNCTION_START |
		      BNX2X_STATE_OPENING_WAIT4_PORT):
			DP(NETIF_MSG_IFUP, "got setup ramrod\n");
			bp->state = BNX2X_STATE_FUNC_STARTED;
			break;

		case (EVENT_RING_OPCODE_FUNCTION_STOP |
		      BNX2X_STATE_CLOSING_WAIT4_HALT):
			DP(NETIF_MSG_IFDOWN, "got halt ramrod\n");
			bp->state = BNX2X_STATE_CLOSING_WAIT4_UNLOAD;
			break;

		case (EVENT_RING_OPCODE_SET_MAC | BNX2X_STATE_OPEN):
		case (EVENT_RING_OPCODE_SET_MAC | BNX2X_STATE_DIAG):
			DP(NETIF_MSG_IFUP, "got set mac ramrod\n");
			bp->set_mac_pending = 0;
			break;

		case (EVENT_RING_OPCODE_SET_MAC |
		      BNX2X_STATE_CLOSING_WAIT4_HALT):
			DP(NETIF_MSG_IFDOWN, "got (un)set mac ramrod\n");
			bp->set_mac_pending = 0;
			break;
		default:
			/* unknown event log error and continue */
			BNX2X_ERR("Unknown EQ event %d\n",
				  elem->message.opcode);
		}
next_spqe:
		spqe_cnt++;
	} /* for */

	/* return the consumed entries to the slow-path queue credit */
	smp_mb__before_atomic_inc();
	atomic_add(spqe_cnt, &bp->spq_left);

	bp->eq_cons = sw_cons;
	bp->eq_prod = sw_prod;
	/* Make sure that above mem writes were issued towards the memory */
	smp_wmb();

	/* update producer */
	bnx2x_update_eq_prod(bp, bp->eq_prod);
}
3792
/* Slow-path work item: reads the default status block index, services
 * HW attentions and slow-path events (EQ completions, FCoE napi kick),
 * then acks the default status block back to the IGU.
 */
static void bnx2x_sp_task(struct work_struct *work)
{
	struct bnx2x *bp = container_of(work, struct bnx2x, sp_task.work);
	u16 status;

	/* Return here if interrupt is disabled */
	if (unlikely(atomic_read(&bp->intr_sem) != 0)) {
		DP(NETIF_MSG_INTR, "called but intr_sem not 0, returning\n");
		return;
	}

	status = bnx2x_update_dsb_idx(bp);
/*	if (status == 0)				     */
/*		BNX2X_ERR("spurious slowpath interrupt!\n"); */

	DP(NETIF_MSG_INTR, "got a slowpath interrupt (status 0x%x)\n", status);

	/* HW attentions */
	if (status & BNX2X_DEF_SB_ATT_IDX) {
		bnx2x_attn_int(bp);
		status &= ~BNX2X_DEF_SB_ATT_IDX;
	}

	/* SP events: STAT_QUERY and others */
	if (status & BNX2X_DEF_SB_IDX) {
#ifdef BCM_CNIC
		struct bnx2x_fastpath *fp = bnx2x_fcoe_fp(bp);

		/* poke the FCoE queue's napi if it has pending work */
		if ((!NO_FCOE(bp)) &&
			(bnx2x_has_rx_work(fp) || bnx2x_has_tx_work(fp)))
			napi_schedule(&bnx2x_fcoe(bp, napi));
#endif
		/* Handle EQ completions */
		bnx2x_eq_int(bp);

		bnx2x_ack_sb(bp, bp->igu_dsb_id, USTORM_ID,
			le16_to_cpu(bp->def_idx), IGU_INT_NOP, 1);

		status &= ~BNX2X_DEF_SB_IDX;
	}

	if (unlikely(status))
		DP(NETIF_MSG_INTR, "got an unknown interrupt! (status 0x%x)\n",
		   status);

	/* re-enable the attention line towards the IGU */
	bnx2x_ack_sb(bp, bp->igu_dsb_id, ATTENTION_ID,
	     le16_to_cpu(bp->def_att_idx), IGU_INT_ENABLE, 1);
}
3841
/*
 * bnx2x_msix_sp_int - MSI-X interrupt handler for the slow-path vector.
 *
 * Masks further default-SB interrupts via the IGU, gives the registered
 * CNIC handler (if any) a chance to run, and defers the real processing
 * to bnx2x_sp_task() on the bnx2x workqueue.
 */
irqreturn_t bnx2x_msix_sp_int(int irq, void *dev_instance)
{
	struct net_device *dev = dev_instance;
	struct bnx2x *bp = netdev_priv(dev);

	/* Return here if interrupt is disabled */
	if (unlikely(atomic_read(&bp->intr_sem) != 0)) {
		DP(NETIF_MSG_INTR, "called but intr_sem not 0, returning\n");
		return IRQ_HANDLED;
	}

	/* keep the default SB masked until the work item has run */
	bnx2x_ack_sb(bp, bp->igu_dsb_id, USTORM_ID, 0,
		     IGU_INT_DISABLE, 0);

#ifdef BNX2X_STOP_ON_ERROR
	if (unlikely(bp->panic))
		return IRQ_HANDLED;
#endif

#ifdef BCM_CNIC
	{
		struct cnic_ops *c_ops;

		/* notify the CNIC driver, if one is registered */
		rcu_read_lock();
		c_ops = rcu_dereference(bp->cnic_ops);
		if (c_ops)
			c_ops->cnic_handler(bp->cnic_data, NULL);
		rcu_read_unlock();
	}
#endif
	queue_delayed_work(bnx2x_wq, &bp->sp_task, 0);

	return IRQ_HANDLED;
}
3876
3877/* end of slow path */
3878
/*
 * bnx2x_timer - periodic driver timer.
 *
 * In poll mode it services queue 0 directly. When an MCP is present it
 * maintains the driver<->MCP heartbeat ("pulse") handshake and logs an
 * error if the sequence numbers drift apart. It triggers a statistics
 * update while the device is OPEN and always re-arms itself.
 */
static void bnx2x_timer(unsigned long data)
{
	struct bnx2x *bp = (struct bnx2x *) data;

	if (!netif_running(bp->dev))
		return;

	/* interrupts are disabled - just re-arm the timer */
	if (atomic_read(&bp->intr_sem) != 0)
		goto timer_restart;

	if (poll) {
		struct bnx2x_fastpath *fp = &bp->fp[0];
		int rc;

		bnx2x_tx_int(fp);
		rc = bnx2x_rx_int(fp, 1000);
	}

	if (!BP_NOMCP(bp)) {
		int mb_idx = BP_FW_MB_IDX(bp);
		u32 drv_pulse;
		u32 mcp_pulse;

		/* advance our heartbeat sequence and publish it via SHMEM */
		++bp->fw_drv_pulse_wr_seq;
		bp->fw_drv_pulse_wr_seq &= DRV_PULSE_SEQ_MASK;
		/* TBD - add SYSTEM_TIME */
		drv_pulse = bp->fw_drv_pulse_wr_seq;
		SHMEM_WR(bp, func_mb[mb_idx].drv_pulse_mb, drv_pulse);

		mcp_pulse = (SHMEM_RD(bp, func_mb[mb_idx].mcp_pulse_mb) &
			     MCP_PULSE_SEQ_MASK);
		/* The delta between driver pulse and mcp response
		 * should be 1 (before mcp response) or 0 (after mcp response)
		 */
		if ((drv_pulse != mcp_pulse) &&
		    (drv_pulse != ((mcp_pulse + 1) & MCP_PULSE_SEQ_MASK))) {
			/* someone lost a heartbeat... */
			BNX2X_ERR("drv_pulse (0x%x) != mcp_pulse (0x%x)\n",
				  drv_pulse, mcp_pulse);
		}
	}

	if (bp->state == BNX2X_STATE_OPEN)
		bnx2x_stats_handle(bp, STATS_EVENT_UPDATE);

timer_restart:
	mod_timer(&bp->timer, jiffies + bp->current_interval);
}
3927
3928/* end of Statistics */
3929
3930/* nic init */
3931
3932/*
3933 * nic init service functions
3934 */
3935
Dmitry Kravkov523224a2010-10-06 03:23:26 +00003936static inline void bnx2x_fill(struct bnx2x *bp, u32 addr, int fill, u32 len)
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02003937{
Dmitry Kravkov523224a2010-10-06 03:23:26 +00003938 u32 i;
3939 if (!(len%4) && !(addr%4))
3940 for (i = 0; i < len; i += 4)
3941 REG_WR(bp, addr + i, fill);
3942 else
3943 for (i = 0; i < len; i++)
3944 REG_WR8(bp, addr + i, fill);
Eilon Greenstein34f80b02008-06-23 20:33:01 -07003945
Eilon Greenstein34f80b02008-06-23 20:33:01 -07003946}
3947
Dmitry Kravkov523224a2010-10-06 03:23:26 +00003948/* helper: writes FP SP data to FW - data_size in dwords */
3949static inline void bnx2x_wr_fp_sb_data(struct bnx2x *bp,
3950 int fw_sb_id,
3951 u32 *sb_data_p,
3952 u32 data_size)
Eilon Greenstein34f80b02008-06-23 20:33:01 -07003953{
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02003954 int index;
Dmitry Kravkov523224a2010-10-06 03:23:26 +00003955 for (index = 0; index < data_size; index++)
3956 REG_WR(bp, BAR_CSTRORM_INTMEM +
3957 CSTORM_STATUS_BLOCK_DATA_OFFSET(fw_sb_id) +
3958 sizeof(u32)*index,
3959 *(sb_data_p + index));
Eilon Greenstein34f80b02008-06-23 20:33:01 -07003960}
3961
/*
 * Disable a fastpath status block in FW: write a chip-specific
 * (E2 vs E1x) "function disabled" descriptor for it and zero its
 * status-block and sync-line areas in CSTORM internal memory.
 */
static inline void bnx2x_zero_fp_sb(struct bnx2x *bp, int fw_sb_id)
{
	u32 *sb_data_p;
	u32 data_size = 0;
	struct hc_status_block_data_e2 sb_data_e2;
	struct hc_status_block_data_e1x sb_data_e1x;

	/* disable the function first */
	if (CHIP_IS_E2(bp)) {
		memset(&sb_data_e2, 0, sizeof(struct hc_status_block_data_e2));
		sb_data_e2.common.p_func.pf_id = HC_FUNCTION_DISABLED;
		sb_data_e2.common.p_func.vf_id = HC_FUNCTION_DISABLED;
		sb_data_e2.common.p_func.vf_valid = false;
		sb_data_p = (u32 *)&sb_data_e2;
		data_size = sizeof(struct hc_status_block_data_e2)/sizeof(u32);
	} else {
		memset(&sb_data_e1x, 0,
		       sizeof(struct hc_status_block_data_e1x));
		sb_data_e1x.common.p_func.pf_id = HC_FUNCTION_DISABLED;
		sb_data_e1x.common.p_func.vf_id = HC_FUNCTION_DISABLED;
		sb_data_e1x.common.p_func.vf_valid = false;
		sb_data_p = (u32 *)&sb_data_e1x;
		data_size = sizeof(struct hc_status_block_data_e1x)/sizeof(u32);
	}
	bnx2x_wr_fp_sb_data(bp, fw_sb_id, sb_data_p, data_size);

	/* clear the SB itself and its sync line */
	bnx2x_fill(bp, BAR_CSTRORM_INTMEM +
			CSTORM_STATUS_BLOCK_OFFSET(fw_sb_id), 0,
			CSTORM_STATUS_BLOCK_SIZE);
	bnx2x_fill(bp, BAR_CSTRORM_INTMEM +
			CSTORM_SYNC_BLOCK_OFFSET(fw_sb_id), 0,
			CSTORM_SYNC_BLOCK_SIZE);
}
3995
3996/* helper: writes SP SB data to FW */
3997static inline void bnx2x_wr_sp_sb_data(struct bnx2x *bp,
3998 struct hc_sp_status_block_data *sp_sb_data)
Eilon Greenstein34f80b02008-06-23 20:33:01 -07003999{
4000 int func = BP_FUNC(bp);
Dmitry Kravkov523224a2010-10-06 03:23:26 +00004001 int i;
4002 for (i = 0; i < sizeof(struct hc_sp_status_block_data)/sizeof(u32); i++)
4003 REG_WR(bp, BAR_CSTRORM_INTMEM +
4004 CSTORM_SP_STATUS_BLOCK_DATA_OFFSET(func) +
4005 i*sizeof(u32),
4006 *((u32 *)sp_sb_data + i));
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02004007}
4008
/*
 * Disable the slow-path status block in FW: write a "function
 * disabled" descriptor and zero the SP SB and its sync-line area in
 * CSTORM internal memory.
 */
static inline void bnx2x_zero_sp_sb(struct bnx2x *bp)
{
	int func = BP_FUNC(bp);
	struct hc_sp_status_block_data sp_sb_data;
	memset(&sp_sb_data, 0, sizeof(struct hc_sp_status_block_data));

	sp_sb_data.p_func.pf_id = HC_FUNCTION_DISABLED;
	sp_sb_data.p_func.vf_id = HC_FUNCTION_DISABLED;
	sp_sb_data.p_func.vf_valid = false;

	bnx2x_wr_sp_sb_data(bp, &sp_sb_data);

	bnx2x_fill(bp, BAR_CSTRORM_INTMEM +
		       CSTORM_SP_STATUS_BLOCK_OFFSET(func), 0,
		       CSTORM_SP_STATUS_BLOCK_SIZE);
	bnx2x_fill(bp, BAR_CSTRORM_INTMEM +
		       CSTORM_SP_SYNC_BLOCK_OFFSET(func), 0,
		       CSTORM_SP_SYNC_BLOCK_SIZE);

}
4029
4030
4031static inline
4032void bnx2x_setup_ndsb_state_machine(struct hc_status_block_sm *hc_sm,
4033 int igu_sb_id, int igu_seg_id)
4034{
4035 hc_sm->igu_sb_id = igu_sb_id;
4036 hc_sm->igu_seg_id = igu_seg_id;
4037 hc_sm->timer_value = 0xFF;
4038 hc_sm->time_to_expire = 0xFFFFFFFF;
4039}
4040
/*
 * bnx2x_init_sb - initialize one (non-default) status block in FW.
 * @mapping:	DMA address of the host-side status block
 * @vfid:	VF id to program (used on E2 only)
 * @vf_valid:	whether @vfid is valid (E2 only)
 * @fw_sb_id:	FW (CSTORM) status block id
 * @igu_sb_id:	IGU status block id
 *
 * Builds the chip-specific (E2 vs E1x) status block descriptor, sets up
 * the Rx and Tx state machines and writes everything to CSTORM memory.
 */
static void bnx2x_init_sb(struct bnx2x *bp, dma_addr_t mapping, int vfid,
			  u8 vf_valid, int fw_sb_id, int igu_sb_id)
{
	int igu_seg_id;

	struct hc_status_block_data_e2 sb_data_e2;
	struct hc_status_block_data_e1x sb_data_e1x;
	struct hc_status_block_sm  *hc_sm_p;
	struct hc_index_data *hc_index_p;
	int data_size;
	u32 *sb_data_p;

	if (CHIP_INT_MODE_IS_BC(bp))
		igu_seg_id = HC_SEG_ACCESS_NORM;
	else
		igu_seg_id = IGU_SEG_ACCESS_NORM;

	/* start from a clean, disabled SB */
	bnx2x_zero_fp_sb(bp, fw_sb_id);

	if (CHIP_IS_E2(bp)) {
		memset(&sb_data_e2, 0, sizeof(struct hc_status_block_data_e2));
		sb_data_e2.common.p_func.pf_id = BP_FUNC(bp);
		sb_data_e2.common.p_func.vf_id = vfid;
		sb_data_e2.common.p_func.vf_valid = vf_valid;
		sb_data_e2.common.p_func.vnic_id = BP_VN(bp);
		sb_data_e2.common.same_igu_sb_1b = true;
		sb_data_e2.common.host_sb_addr.hi = U64_HI(mapping);
		sb_data_e2.common.host_sb_addr.lo = U64_LO(mapping);
		hc_sm_p = sb_data_e2.common.state_machine;
		hc_index_p = sb_data_e2.index_data;
		sb_data_p = (u32 *)&sb_data_e2;
		data_size = sizeof(struct hc_status_block_data_e2)/sizeof(u32);
	} else {
		memset(&sb_data_e1x, 0,
		       sizeof(struct hc_status_block_data_e1x));
		sb_data_e1x.common.p_func.pf_id = BP_FUNC(bp);
		sb_data_e1x.common.p_func.vf_id = 0xff;
		sb_data_e1x.common.p_func.vf_valid = false;
		sb_data_e1x.common.p_func.vnic_id = BP_VN(bp);
		sb_data_e1x.common.same_igu_sb_1b = true;
		sb_data_e1x.common.host_sb_addr.hi = U64_HI(mapping);
		sb_data_e1x.common.host_sb_addr.lo = U64_LO(mapping);
		hc_sm_p = sb_data_e1x.common.state_machine;
		hc_index_p = sb_data_e1x.index_data;
		sb_data_p = (u32 *)&sb_data_e1x;
		data_size = sizeof(struct hc_status_block_data_e1x)/sizeof(u32);
	}

	bnx2x_setup_ndsb_state_machine(&hc_sm_p[SM_RX_ID],
				       igu_sb_id, igu_seg_id);
	bnx2x_setup_ndsb_state_machine(&hc_sm_p[SM_TX_ID],
				       igu_sb_id, igu_seg_id);

	DP(NETIF_MSG_HW, "Init FW SB %d\n", fw_sb_id);

	/* write indices to HW */
	bnx2x_wr_fp_sb_data(bp, fw_sb_id, sb_data_p, data_size);
}
4099
4100static void bnx2x_update_coalesce_sb_index(struct bnx2x *bp, u16 fw_sb_id,
4101 u8 sb_index, u8 disable, u16 usec)
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02004102{
Eilon Greenstein34f80b02008-06-23 20:33:01 -07004103 int port = BP_PORT(bp);
Dmitry Kravkov523224a2010-10-06 03:23:26 +00004104 u8 ticks = usec / BNX2X_BTR;
4105
4106 storm_memset_hc_timeout(bp, port, fw_sb_id, sb_index, ticks);
4107
4108 disable = disable ? 1 : (usec ? 0 : 1);
4109 storm_memset_hc_disable(bp, port, fw_sb_id, sb_index, disable);
4110}
4111
/*
 * Program both coalescing intervals (Rx and Tx CQ indices) of one
 * status block; a zero interval disables coalescing for that index
 * (see bnx2x_update_coalesce_sb_index()).
 */
static void bnx2x_update_coalesce_sb(struct bnx2x *bp, u16 fw_sb_id,
				     u16 tx_usec, u16 rx_usec)
{
	bnx2x_update_coalesce_sb_index(bp, fw_sb_id, U_SB_ETH_RX_CQ_INDEX,
				       false, rx_usec);
	bnx2x_update_coalesce_sb_index(bp, fw_sb_id, C_SB_ETH_TX_CQ_INDEX,
				       false, tx_usec);
}
Dmitry Kravkovf2e08992010-10-06 03:28:26 +00004120
/*
 * bnx2x_init_def_sb - initialize the default (slow-path) status block.
 *
 * Sets up the attention area and the per-group attention signal masks,
 * programs the attention message address into the HC or IGU block,
 * writes the SP status block descriptor to CSTORM, and finally enables
 * the default SB interrupt in the IGU.
 */
static void bnx2x_init_def_sb(struct bnx2x *bp)
{
	struct host_sp_status_block *def_sb = bp->def_status_blk;
	dma_addr_t mapping = bp->def_status_blk_mapping;
	int igu_sp_sb_index;
	int igu_seg_id;
	int port = BP_PORT(bp);
	int func = BP_FUNC(bp);
	int reg_offset;
	u64 section;
	int index;
	struct hc_sp_status_block_data sp_sb_data;
	memset(&sp_sb_data, 0, sizeof(struct hc_sp_status_block_data));

	/* backward-compatible (HC) vs IGU interrupt mode addressing */
	if (CHIP_INT_MODE_IS_BC(bp)) {
		igu_sp_sb_index = DEF_SB_IGU_ID;
		igu_seg_id = HC_SEG_ACCESS_DEF;
	} else {
		igu_sp_sb_index = bp->igu_dsb_id;
		igu_seg_id = IGU_SEG_ACCESS_DEF;
	}

	/* ATTN */
	section = ((u64)mapping) + offsetof(struct host_sp_status_block,
					    atten_status_block);
	def_sb->atten_status_block.status_block_id = igu_sp_sb_index;

	bp->attn_state = 0;

	/* latch the AEU enable masks for each dynamic attention group */
	reg_offset = (port ? MISC_REG_AEU_ENABLE1_FUNC_1_OUT_0 :
			     MISC_REG_AEU_ENABLE1_FUNC_0_OUT_0);
	for (index = 0; index < MAX_DYNAMIC_ATTN_GRPS; index++) {
		int sindex;
		/* take care of sig[0]..sig[4] */
		for (sindex = 0; sindex < 4; sindex++)
			bp->attn_group[index].sig[sindex] =
			   REG_RD(bp, reg_offset + sindex*0x4 + 0x10*index);

		if (CHIP_IS_E2(bp))
			/*
			 * enable5 is separate from the rest of the registers,
			 * and therefore the address skip is 4
			 * and not 16 between the different groups
			 */
			bp->attn_group[index].sig[4] = REG_RD(bp,
					reg_offset + 0x10 + 0x4*index);
		else
			bp->attn_group[index].sig[4] = 0;
	}

	/* tell HC/IGU where to deliver attention messages */
	if (bp->common.int_block == INT_BLOCK_HC) {
		reg_offset = (port ? HC_REG_ATTN_MSG1_ADDR_L :
				     HC_REG_ATTN_MSG0_ADDR_L);

		REG_WR(bp, reg_offset, U64_LO(section));
		REG_WR(bp, reg_offset + 4, U64_HI(section));
	} else if (CHIP_IS_E2(bp)) {
		REG_WR(bp, IGU_REG_ATTN_MSG_ADDR_L, U64_LO(section));
		REG_WR(bp, IGU_REG_ATTN_MSG_ADDR_H, U64_HI(section));
	}

	section = ((u64)mapping) + offsetof(struct host_sp_status_block,
					    sp_sb);

	/* start from a clean, disabled SP SB before programming it */
	bnx2x_zero_sp_sb(bp);

	sp_sb_data.host_sb_addr.lo = U64_LO(section);
	sp_sb_data.host_sb_addr.hi = U64_HI(section);
	sp_sb_data.igu_sb_id = igu_sp_sb_index;
	sp_sb_data.igu_seg_id = igu_seg_id;
	sp_sb_data.p_func.pf_id = func;
	sp_sb_data.p_func.vnic_id = BP_VN(bp);
	sp_sb_data.p_func.vf_id = 0xff;

	bnx2x_wr_sp_sb_data(bp, &sp_sb_data);

	bp->stats_pending = 0;
	bp->set_mac_pending = 0;

	bnx2x_ack_sb(bp, bp->igu_dsb_id, USTORM_ID, 0, IGU_INT_ENABLE, 0);
}
4202
/*
 * bnx2x_update_coalesce - program the interrupt coalescing intervals
 * of every Ethernet queue's status block from bp->rx_ticks/tx_ticks.
 */
void bnx2x_update_coalesce(struct bnx2x *bp)
{
	int i;

	for_each_eth_queue(bp, i)
		/* NOTE(review): bnx2x_update_coalesce_sb() takes
		 * (tx_usec, rx_usec) but is handed (rx_ticks, tx_ticks)
		 * here - the pairing looks swapped; confirm which index
		 * each value is meant for before changing anything.
		 */
		bnx2x_update_coalesce_sb(bp, bp->fp[i].fw_sb_id,
					 bp->rx_ticks, bp->tx_ticks);
}
4211
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02004212static void bnx2x_init_sp_ring(struct bnx2x *bp)
4213{
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02004214 spin_lock_init(&bp->spq_lock);
Dmitry Kravkov8fe23fb2010-10-06 03:27:41 +00004215 atomic_set(&bp->spq_left, MAX_SPQ_PENDING);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02004216
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02004217 bp->spq_prod_idx = 0;
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02004218 bp->dsb_sp_prod = BNX2X_SP_DSB_INDEX;
4219 bp->spq_prod_bd = bp->spq;
4220 bp->spq_last_bd = bp->spq_prod_bd + MAX_SP_DESC_CNT;
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02004221}
4222
/*
 * bnx2x_init_eq_ring - initialize the event queue (EQ) ring.
 *
 * Chains the EQ pages: the last descriptor of each page is written as a
 * next-page pointer to the following page (the last page wraps back to
 * page 0 via the modulo), then the consumer/producer indices are reset.
 */
static void bnx2x_init_eq_ring(struct bnx2x *bp)
{
	int i;
	for (i = 1; i <= NUM_EQ_PAGES; i++) {
		/* last element of page (i-1) holds the next-page pointer */
		union event_ring_elem *elem =
			&bp->eq_ring[EQ_DESC_CNT_PAGE * i - 1];

		elem->next_page.addr.hi =
			cpu_to_le32(U64_HI(bp->eq_mapping +
				   BCM_PAGE_SIZE * (i % NUM_EQ_PAGES)));
		elem->next_page.addr.lo =
			cpu_to_le32(U64_LO(bp->eq_mapping +
				   BCM_PAGE_SIZE*(i % NUM_EQ_PAGES)));
	}
	bp->eq_cons = 0;
	bp->eq_prod = NUM_EQ_DESC;
	bp->eq_cons_sb = BNX2X_EQ_INDEX;
}
4241
/*
 * bnx2x_init_ind_table - program the TSTORM RSS indirection table.
 *
 * No-op when RSS is disabled; otherwise fills every table entry with a
 * client id, spread round-robin over the Ethernet queues (num_queues
 * minus the non-Ethernet contexts).
 */
static void bnx2x_init_ind_table(struct bnx2x *bp)
{
	int func = BP_FUNC(bp);
	int i;

	if (bp->multi_mode == ETH_RSS_MODE_DISABLED)
		return;

	DP(NETIF_MSG_IFUP,
	   "Initializing indirection table multi_mode %d\n", bp->multi_mode);
	for (i = 0; i < TSTORM_INDIRECTION_TABLE_SIZE; i++)
		REG_WR8(bp, BAR_TSTRORM_INTMEM +
			TSTORM_INDIRECTION_TABLE_OFFSET(func) + i,
			bp->fp->cl_id + (i % (bp->num_queues -
				NONE_ETH_CONTEXT_USE)));
}
4258
/*
 * bnx2x_set_storm_rx_mode - program the Rx filtering that matches
 * bp->rx_mode (none / normal / allmulti / promisc).
 *
 * Builds the accept-flags for the default L2 client (and, when CNIC is
 * compiled in, for the FCoE L2 client), programs the port's NIG LLH
 * mask, and writes the resulting MAC filter configuration to FW.
 */
void bnx2x_set_storm_rx_mode(struct bnx2x *bp)
{
	int mode = bp->rx_mode;
	int port = BP_PORT(bp);
	u16 cl_id;
	u32 def_q_filters = 0;

	/* All but management unicast packets should pass to the host as well */
	u32 llh_mask =
		NIG_LLH0_BRB1_DRV_MASK_REG_LLH0_BRB1_DRV_MASK_BRCST |
		NIG_LLH0_BRB1_DRV_MASK_REG_LLH0_BRB1_DRV_MASK_MLCST |
		NIG_LLH0_BRB1_DRV_MASK_REG_LLH0_BRB1_DRV_MASK_VLAN |
		NIG_LLH0_BRB1_DRV_MASK_REG_LLH0_BRB1_DRV_MASK_NO_VLAN;

	switch (mode) {
	case BNX2X_RX_MODE_NONE: /* no Rx */
		def_q_filters = BNX2X_ACCEPT_NONE;
#ifdef BCM_CNIC
		if (!NO_FCOE(bp)) {
			cl_id = bnx2x_fcoe(bp, cl_id);
			bnx2x_rxq_set_mac_filters(bp, cl_id, BNX2X_ACCEPT_NONE);
		}
#endif
		break;

	case BNX2X_RX_MODE_NORMAL:
		def_q_filters |= BNX2X_ACCEPT_UNICAST | BNX2X_ACCEPT_BROADCAST |
				BNX2X_ACCEPT_MULTICAST;
#ifdef BCM_CNIC
		if (!NO_FCOE(bp)) {
			cl_id = bnx2x_fcoe(bp, cl_id);
			bnx2x_rxq_set_mac_filters(bp, cl_id,
						  BNX2X_ACCEPT_UNICAST |
						  BNX2X_ACCEPT_MULTICAST);
		}
#endif
		break;

	case BNX2X_RX_MODE_ALLMULTI:
		def_q_filters |= BNX2X_ACCEPT_UNICAST | BNX2X_ACCEPT_BROADCAST |
				BNX2X_ACCEPT_ALL_MULTICAST;
#ifdef BCM_CNIC
		/*
		 * Prevent duplication of multicast packets by configuring FCoE
		 * L2 Client to receive only matched unicast frames.
		 */
		if (!NO_FCOE(bp)) {
			cl_id = bnx2x_fcoe(bp, cl_id);
			bnx2x_rxq_set_mac_filters(bp, cl_id,
						  BNX2X_ACCEPT_UNICAST);
		}
#endif
		break;

	case BNX2X_RX_MODE_PROMISC:
		def_q_filters |= BNX2X_PROMISCUOUS_MODE;
#ifdef BCM_CNIC
		/*
		 * Prevent packets duplication by configuring DROP_ALL for FCoE
		 * L2 Client.
		 */
		if (!NO_FCOE(bp)) {
			cl_id = bnx2x_fcoe(bp, cl_id);
			bnx2x_rxq_set_mac_filters(bp, cl_id, BNX2X_ACCEPT_NONE);
		}
#endif
		/* pass management unicast packets as well */
		llh_mask |= NIG_LLH0_BRB1_DRV_MASK_REG_LLH0_BRB1_DRV_MASK_UNCST;
		break;

	default:
		BNX2X_ERR("BAD rx mode (%d)\n", mode);
		break;
	}

	/* apply the computed filters to the default L2 client */
	cl_id = BP_L_ID(bp);
	bnx2x_rxq_set_mac_filters(bp, cl_id, def_q_filters);

	REG_WR(bp,
	       (port ? NIG_REG_LLH1_BRB1_DRV_MASK :
		       NIG_REG_LLH0_BRB1_DRV_MASK), llh_mask);

	DP(NETIF_MSG_IFUP, "rx mode %d\n"
		"drop_ucast 0x%x\ndrop_mcast 0x%x\ndrop_bcast 0x%x\n"
		"accp_ucast 0x%x\naccp_mcast 0x%x\naccp_bcast 0x%x\n"
		"unmatched_ucast 0x%x\n", mode,
		bp->mac_filters.ucast_drop_all,
		bp->mac_filters.mcast_drop_all,
		bp->mac_filters.bcast_drop_all,
		bp->mac_filters.ucast_accept_all,
		bp->mac_filters.mcast_accept_all,
		bp->mac_filters.bcast_accept_all,
		bp->mac_filters.unmatched_unicast
	);

	storm_memset_mac_filters(bp, &bp->mac_filters, BP_FUNC(bp));
}
4356
/*
 * Initialize storm internal memory that is common to the whole device
 * (multi-function mode, classification behavior, aggregation data,
 * and on E2 the IGU mode).
 */
static void bnx2x_init_internal_common(struct bnx2x *bp)
{
	int i;

	if (!CHIP_IS_E1(bp)) {

		/* xstorm needs to know whether to add ovlan to packets or not,
		 * in switch-independent we'll write 0 to here... */
		REG_WR8(bp, BAR_XSTRORM_INTMEM + XSTORM_FUNCTION_MODE_OFFSET,
			bp->mf_mode);
		REG_WR8(bp, BAR_TSTRORM_INTMEM + TSTORM_FUNCTION_MODE_OFFSET,
			bp->mf_mode);
		REG_WR8(bp, BAR_CSTRORM_INTMEM + CSTORM_FUNCTION_MODE_OFFSET,
			bp->mf_mode);
		REG_WR8(bp, BAR_USTRORM_INTMEM + USTORM_FUNCTION_MODE_OFFSET,
			bp->mf_mode);
	}

	if (IS_MF_SI(bp))
		/*
		 * In switch independent mode, the TSTORM needs to accept
		 * packets that failed classification, since approximate match
		 * mac addresses aren't written to NIG LLH
		 */
		REG_WR8(bp, BAR_TSTRORM_INTMEM +
			    TSTORM_ACCEPT_CLASSIFY_FAILED_OFFSET, 2);

	/* Zero this manually as its initialization is
	   currently missing in the initTool */
	for (i = 0; i < (USTORM_AGG_DATA_SIZE >> 2); i++)
		REG_WR(bp, BAR_USTRORM_INTMEM +
		       USTORM_AGG_DATA_OFFSET + i * 4, 0);
	if (CHIP_IS_E2(bp)) {
		/* tell CSTORM which IGU mode (BC vs non-BC) is in use */
		REG_WR8(bp, BAR_CSTRORM_INTMEM + CSTORM_IGU_MODE_OFFSET,
			CHIP_INT_MODE_IS_BC(bp) ?
			HC_IGU_BC_MODE : HC_IGU_NBC_MODE);
	}
}
4395
/* Per-port internal memory init - currently only the DCB/PFC area. */
static void bnx2x_init_internal_port(struct bnx2x *bp)
{
	/* port */
	bnx2x_dcb_init_intmem_pfc(bp);
}
4401
/*
 * Dispatch internal-memory initialization according to the load level
 * granted by the MCP: COMMON (or COMMON_CHIP) initializes the common
 * and port parts, PORT only the port part, FUNCTION nothing here (the
 * per-function part is done in bnx2x_pf_init()). The cases fall
 * through on purpose.
 */
static void bnx2x_init_internal(struct bnx2x *bp, u32 load_code)
{
	switch (load_code) {
	case FW_MSG_CODE_DRV_LOAD_COMMON:
	case FW_MSG_CODE_DRV_LOAD_COMMON_CHIP:
		bnx2x_init_internal_common(bp);
		/* no break */

	case FW_MSG_CODE_DRV_LOAD_PORT:
		bnx2x_init_internal_port(bp);
		/* no break */

	case FW_MSG_CODE_DRV_LOAD_FUNCTION:
		/* internal memory per function is
		   initialized inside bnx2x_pf_init */
		break;

	default:
		BNX2X_ERR("Unknown load_code (0x%x) from MCP\n", load_code);
		break;
	}
}
4424
/*
 * bnx2x_init_fp_sb - initialize one Ethernet fastpath queue and its
 * status block: its ids (client, FW SB, IGU SB, qZone), the USTORM Rx
 * producers shortcut, the SB consumer indices, and the SB itself in FW.
 */
static void bnx2x_init_fp_sb(struct bnx2x *bp, int fp_idx)
{
	struct bnx2x_fastpath *fp = &bp->fp[fp_idx];

	fp->state = BNX2X_FP_STATE_CLOSED;

	fp->index = fp->cid = fp_idx;
	fp->cl_id = BP_L_ID(bp) + fp_idx;
	fp->fw_sb_id = bp->base_fw_ndsb + fp->cl_id + CNIC_CONTEXT_USE;
	fp->igu_sb_id = bp->igu_base_sb + fp_idx + CNIC_CONTEXT_USE;
	/* qZone id equals to FW (per path) client id */
	fp->cl_qzone_id = fp->cl_id +
			   BP_PORT(bp)*(CHIP_IS_E2(bp) ? ETH_MAX_RX_CLIENTS_E2 :
				ETH_MAX_RX_CLIENTS_E1H);
	/* init shortcut */
	fp->ustorm_rx_prods_offset = CHIP_IS_E2(bp) ?
			    USTORM_RX_PRODS_E2_OFFSET(fp->cl_qzone_id) :
			    USTORM_RX_PRODS_E1X_OFFSET(BP_PORT(bp), fp->cl_id);
	/* Setup SB indices */
	fp->rx_cons_sb = BNX2X_RX_SB_INDEX;
	fp->tx_cons_sb = BNX2X_TX_SB_INDEX;

	DP(NETIF_MSG_IFUP, "queue[%d]: bnx2x_init_sb(%p,%p) "
				   "cl_id %d fw_sb %d igu_sb %d\n",
		   fp_idx, bp, fp->status_blk.e1x_sb, fp->cl_id, fp->fw_sb_id,
		   fp->igu_sb_id);
	bnx2x_init_sb(bp, fp->status_blk_mapping, BNX2X_VF_ID_INVALID, false,
		      fp->fw_sb_id, fp->igu_sb_id);

	bnx2x_update_fpsb_idx(fp);
}
4456
/*
 * bnx2x_nic_init - top-level NIC data-structure initialization.
 *
 * Sets up all fastpath SBs (including the FCoE/CNIC ones when compiled
 * in), the default SB, Rx/Tx rings, SP and EQ rings, internal storm
 * memory, PF configuration, the RSS indirection table and statistics;
 * then enables interrupts and checks the SPIO5 attention line.
 */
void bnx2x_nic_init(struct bnx2x *bp, u32 load_code)
{
	int i;

	for_each_eth_queue(bp, i)
		bnx2x_init_fp_sb(bp, i);
#ifdef BCM_CNIC
	if (!NO_FCOE(bp))
		bnx2x_init_fcoe_fp(bp);

	bnx2x_init_sb(bp, bp->cnic_sb_mapping,
		      BNX2X_VF_ID_INVALID, false,
		      CNIC_SB_ID(bp), CNIC_IGU_SB_ID(bp));

#endif

	/* ensure status block indices were read */
	rmb();

	bnx2x_init_def_sb(bp);
	bnx2x_update_dsb_idx(bp);
	bnx2x_init_rx_rings(bp);
	bnx2x_init_tx_rings(bp);
	bnx2x_init_sp_ring(bp);
	bnx2x_init_eq_ring(bp);
	bnx2x_init_internal(bp, load_code);
	bnx2x_pf_init(bp);
	bnx2x_init_ind_table(bp);
	bnx2x_stats_init(bp);

	/* At this point, we are ready for interrupts */
	atomic_set(&bp->intr_sem, 0);

	/* flush all before enabling interrupts */
	mb();
	mmiowb();

	bnx2x_int_enable(bp);

	/* Check for SPIO5 */
	bnx2x_attn_int_deasserted0(bp,
		REG_RD(bp, MISC_REG_AEU_AFTER_INVERT_1_FUNC_0 + BP_PORT(bp)*4) &
				   AEU_INPUTS_ATTN_BITS_SPIO5);
}
4501
4502/* end of nic init */
4503
4504/*
4505 * gzip service functions
4506 */
4507
4508static int bnx2x_gunzip_init(struct bnx2x *bp)
4509{
FUJITA Tomonori1a983142010-04-04 01:51:03 +00004510 bp->gunzip_buf = dma_alloc_coherent(&bp->pdev->dev, FW_BUF_SIZE,
4511 &bp->gunzip_mapping, GFP_KERNEL);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02004512 if (bp->gunzip_buf == NULL)
4513 goto gunzip_nomem1;
4514
4515 bp->strm = kmalloc(sizeof(*bp->strm), GFP_KERNEL);
4516 if (bp->strm == NULL)
4517 goto gunzip_nomem2;
4518
4519 bp->strm->workspace = kmalloc(zlib_inflate_workspacesize(),
4520 GFP_KERNEL);
4521 if (bp->strm->workspace == NULL)
4522 goto gunzip_nomem3;
4523
4524 return 0;
4525
4526gunzip_nomem3:
4527 kfree(bp->strm);
4528 bp->strm = NULL;
4529
4530gunzip_nomem2:
FUJITA Tomonori1a983142010-04-04 01:51:03 +00004531 dma_free_coherent(&bp->pdev->dev, FW_BUF_SIZE, bp->gunzip_buf,
4532 bp->gunzip_mapping);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02004533 bp->gunzip_buf = NULL;
4534
4535gunzip_nomem1:
Vladislav Zolotarovcdaa7cb2010-04-19 01:13:57 +00004536 netdev_err(bp->dev, "Cannot allocate firmware buffer for"
4537 " un-compression\n");
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02004538 return -ENOMEM;
4539}
4540
4541static void bnx2x_gunzip_end(struct bnx2x *bp)
4542{
4543 kfree(bp->strm->workspace);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02004544 kfree(bp->strm);
4545 bp->strm = NULL;
4546
4547 if (bp->gunzip_buf) {
FUJITA Tomonori1a983142010-04-04 01:51:03 +00004548 dma_free_coherent(&bp->pdev->dev, FW_BUF_SIZE, bp->gunzip_buf,
4549 bp->gunzip_mapping);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02004550 bp->gunzip_buf = NULL;
4551 }
4552}
4553
Vladislav Zolotarov94a78b72009-04-27 03:27:43 -07004554static int bnx2x_gunzip(struct bnx2x *bp, const u8 *zbuf, int len)
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02004555{
4556 int n, rc;
4557
4558 /* check gzip header */
Vladislav Zolotarov94a78b72009-04-27 03:27:43 -07004559 if ((zbuf[0] != 0x1f) || (zbuf[1] != 0x8b) || (zbuf[2] != Z_DEFLATED)) {
4560 BNX2X_ERR("Bad gzip header\n");
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02004561 return -EINVAL;
Vladislav Zolotarov94a78b72009-04-27 03:27:43 -07004562 }
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02004563
4564 n = 10;
4565
Eilon Greenstein34f80b02008-06-23 20:33:01 -07004566#define FNAME 0x8
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02004567
4568 if (zbuf[3] & FNAME)
4569 while ((zbuf[n++] != 0) && (n < len));
4570
Vladislav Zolotarov94a78b72009-04-27 03:27:43 -07004571 bp->strm->next_in = (typeof(bp->strm->next_in))zbuf + n;
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02004572 bp->strm->avail_in = len - n;
4573 bp->strm->next_out = bp->gunzip_buf;
4574 bp->strm->avail_out = FW_BUF_SIZE;
4575
4576 rc = zlib_inflateInit2(bp->strm, -MAX_WBITS);
4577 if (rc != Z_OK)
4578 return rc;
4579
4580 rc = zlib_inflate(bp->strm, Z_FINISH);
4581 if ((rc != Z_OK) && (rc != Z_STREAM_END))
Joe Perches7995c642010-02-17 15:01:52 +00004582 netdev_err(bp->dev, "Firmware decompression error: %s\n",
4583 bp->strm->msg);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02004584
4585 bp->gunzip_outlen = (FW_BUF_SIZE - bp->strm->avail_out);
4586 if (bp->gunzip_outlen & 0x3)
Vladislav Zolotarovcdaa7cb2010-04-19 01:13:57 +00004587 netdev_err(bp->dev, "Firmware decompression error:"
4588 " gunzip_outlen (%d) not aligned\n",
4589 bp->gunzip_outlen);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02004590 bp->gunzip_outlen >>= 2;
4591
4592 zlib_inflateEnd(bp->strm);
4593
4594 if (rc == Z_STREAM_END)
4595 return 0;
4596
4597 return rc;
4598}
4599
4600/* nic load/unload */
4601
4602/*
Eilon Greenstein34f80b02008-06-23 20:33:01 -07004603 * General service functions
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02004604 */
4605
4606/* send a NIG loopback debug packet */
4607static void bnx2x_lb_pckt(struct bnx2x *bp)
4608{
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02004609 u32 wb_write[3];
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02004610
4611 /* Ethernet source and destination addresses */
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02004612 wb_write[0] = 0x55555555;
4613 wb_write[1] = 0x55555555;
Eilon Greenstein34f80b02008-06-23 20:33:01 -07004614 wb_write[2] = 0x20; /* SOP */
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02004615 REG_WR_DMAE(bp, NIG_REG_DEBUG_PACKET_LB, wb_write, 3);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02004616
4617 /* NON-IP protocol */
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02004618 wb_write[0] = 0x09000000;
4619 wb_write[1] = 0x55555555;
Eilon Greenstein34f80b02008-06-23 20:33:01 -07004620 wb_write[2] = 0x10; /* EOP, eop_bvalid = 0 */
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02004621 REG_WR_DMAE(bp, NIG_REG_DEBUG_PACKET_LB, wb_write, 3);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02004622}
4623
4624/* some of the internal memories
4625 * are not directly readable from the driver
4626 * to test them we send debug packets
4627 */
/* Self-test of internal memories via debug loopback packets.
 * Runs a strictly ordered HW sequence: isolate the parser, send
 * packets, poll NIG/PRS counters, then reset and re-enable the path.
 * Returns 0 on success or a distinct negative code (-1..-4)
 * identifying which stage timed out/failed.
 */
static int bnx2x_int_mem_test(struct bnx2x *bp)
{
	int factor;		/* poll-timeout multiplier for slow platforms */
	int count, i;
	u32 val = 0;

	/* FPGA/emulation run much slower than silicon - stretch timeouts */
	if (CHIP_REV_IS_FPGA(bp))
		factor = 120;
	else if (CHIP_REV_IS_EMUL(bp))
		factor = 200;
	else
		factor = 1;

	/* Disable inputs of parser neighbor blocks */
	REG_WR(bp, TSDM_REG_ENABLE_IN1, 0x0);
	REG_WR(bp, TCM_REG_PRS_IFEN, 0x0);
	REG_WR(bp, CFC_REG_DEBUG0, 0x1);
	REG_WR(bp, NIG_REG_PRS_REQ_IN_EN, 0x0);

	/* Write 0 to parser credits for CFC search request */
	REG_WR(bp, PRS_REG_CFC_SEARCH_INITIAL_CREDIT, 0x0);

	/* send Ethernet packet */
	bnx2x_lb_pckt(bp);

	/* TODO do i reset NIG statistic? */
	/* Wait until NIG register shows 1 packet of size 0x10 */
	count = 1000 * factor;
	while (count) {

		/* counter is read via DMAE into the slowpath scratch area */
		bnx2x_read_dmae(bp, NIG_REG_STAT2_BRB_OCTET, 2);
		val = *bnx2x_sp(bp, wb_data[0]);
		if (val == 0x10)
			break;

		msleep(10);
		count--;
	}
	if (val != 0x10) {
		BNX2X_ERR("NIG timeout val = 0x%x\n", val);
		return -1;
	}

	/* Wait until PRS register shows 1 packet */
	count = 1000 * factor;
	while (count) {
		val = REG_RD(bp, PRS_REG_NUM_OF_PACKETS);
		if (val == 1)
			break;

		msleep(10);
		count--;
	}
	if (val != 0x1) {
		BNX2X_ERR("PRS timeout val = 0x%x\n", val);
		return -2;
	}

	/* Reset and init BRB, PRS */
	REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_CLEAR, 0x03);
	msleep(50);
	REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_SET, 0x03);
	msleep(50);
	bnx2x_init_block(bp, BRB1_BLOCK, COMMON_STAGE);
	bnx2x_init_block(bp, PRS_BLOCK, COMMON_STAGE);

	DP(NETIF_MSG_HW, "part2\n");

	/* Disable inputs of parser neighbor blocks */
	REG_WR(bp, TSDM_REG_ENABLE_IN1, 0x0);
	REG_WR(bp, TCM_REG_PRS_IFEN, 0x0);
	REG_WR(bp, CFC_REG_DEBUG0, 0x1);
	REG_WR(bp, NIG_REG_PRS_REQ_IN_EN, 0x0);

	/* Write 0 to parser credits for CFC search request */
	REG_WR(bp, PRS_REG_CFC_SEARCH_INITIAL_CREDIT, 0x0);

	/* send 10 Ethernet packets */
	for (i = 0; i < 10; i++)
		bnx2x_lb_pckt(bp);

	/* Wait until NIG register shows 10 + 1
	   packets of size 11*0x10 = 0xb0 */
	count = 1000 * factor;
	while (count) {

		bnx2x_read_dmae(bp, NIG_REG_STAT2_BRB_OCTET, 2);
		val = *bnx2x_sp(bp, wb_data[0]);
		if (val == 0xb0)
			break;

		msleep(10);
		count--;
	}
	if (val != 0xb0) {
		BNX2X_ERR("NIG timeout val = 0x%x\n", val);
		return -3;
	}

	/* Wait until PRS register shows 2 packets */
	val = REG_RD(bp, PRS_REG_NUM_OF_PACKETS);
	if (val != 2)
		BNX2X_ERR("PRS timeout val = 0x%x\n", val);

	/* Write 1 to parser credits for CFC search request */
	REG_WR(bp, PRS_REG_CFC_SEARCH_INITIAL_CREDIT, 0x1);

	/* Wait until PRS register shows 3 packets */
	msleep(10 * factor);
	/* re-read the PRS packet counter after the credit write */
	val = REG_RD(bp, PRS_REG_NUM_OF_PACKETS);
	if (val != 3)
		BNX2X_ERR("PRS timeout val = 0x%x\n", val);

	/* clear NIG EOP FIFO */
	for (i = 0; i < 11; i++)
		REG_RD(bp, NIG_REG_INGRESS_EOP_LB_FIFO);
	val = REG_RD(bp, NIG_REG_INGRESS_EOP_LB_EMPTY);
	if (val != 1) {
		BNX2X_ERR("clear of NIG failed\n");
		return -4;
	}

	/* Reset and init BRB, PRS, NIG */
	REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_CLEAR, 0x03);
	msleep(50);
	REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_SET, 0x03);
	msleep(50);
	bnx2x_init_block(bp, BRB1_BLOCK, COMMON_STAGE);
	bnx2x_init_block(bp, PRS_BLOCK, COMMON_STAGE);
#ifndef BCM_CNIC
	/* set NIC mode */
	REG_WR(bp, PRS_REG_NIC_MODE, 1);
#endif

	/* Enable inputs of parser neighbor blocks */
	REG_WR(bp, TSDM_REG_ENABLE_IN1, 0x7fffffff);
	REG_WR(bp, TCM_REG_PRS_IFEN, 0x1);
	REG_WR(bp, CFC_REG_DEBUG0, 0x0);
	REG_WR(bp, NIG_REG_PRS_REQ_IN_EN, 0x1);

	DP(NETIF_MSG_HW, "done\n");

	return 0; /* OK */
}
4773
/* Unmask attention interrupts of the HW blocks.  Writing 0 to a
 * *_INT_MASK register unmasks all of that block's attention sources;
 * a non-zero value keeps the set bits masked.  Lines left commented
 * out are sources that are intentionally not touched here.
 */
static void bnx2x_enable_blocks_attention(struct bnx2x *bp)
{
	REG_WR(bp, PXP_REG_PXP_INT_MASK_0, 0);
	if (CHIP_IS_E2(bp))
		/* NOTE(review): 0x40 keeps one PXP source masked on E2 -
		 * presumably a known benign attention; confirm vs. reg spec */
		REG_WR(bp, PXP_REG_PXP_INT_MASK_1, 0x40);
	else
		REG_WR(bp, PXP_REG_PXP_INT_MASK_1, 0);
	REG_WR(bp, DORQ_REG_DORQ_INT_MASK, 0);
	REG_WR(bp, CFC_REG_CFC_INT_MASK, 0);
	/*
	 * mask read length error interrupts in brb for parser
	 * (parsing unit and 'checksum and crc' unit)
	 * these errors are legal (PU reads fixed length and CAC can cause
	 * read length error on truncated packets)
	 */
	REG_WR(bp, BRB1_REG_BRB1_INT_MASK, 0xFC00);
	REG_WR(bp, QM_REG_QM_INT_MASK, 0);
	REG_WR(bp, TM_REG_TM_INT_MASK, 0);
	REG_WR(bp, XSDM_REG_XSDM_INT_MASK_0, 0);
	REG_WR(bp, XSDM_REG_XSDM_INT_MASK_1, 0);
	REG_WR(bp, XCM_REG_XCM_INT_MASK, 0);
/*	REG_WR(bp, XSEM_REG_XSEM_INT_MASK_0, 0); */
/*	REG_WR(bp, XSEM_REG_XSEM_INT_MASK_1, 0); */
	REG_WR(bp, USDM_REG_USDM_INT_MASK_0, 0);
	REG_WR(bp, USDM_REG_USDM_INT_MASK_1, 0);
	REG_WR(bp, UCM_REG_UCM_INT_MASK, 0);
/*	REG_WR(bp, USEM_REG_USEM_INT_MASK_0, 0); */
/*	REG_WR(bp, USEM_REG_USEM_INT_MASK_1, 0); */
	REG_WR(bp, GRCBASE_UPB + PB_REG_PB_INT_MASK, 0);
	REG_WR(bp, CSDM_REG_CSDM_INT_MASK_0, 0);
	REG_WR(bp, CSDM_REG_CSDM_INT_MASK_1, 0);
	REG_WR(bp, CCM_REG_CCM_INT_MASK, 0);
/*	REG_WR(bp, CSEM_REG_CSEM_INT_MASK_0, 0); */
/*	REG_WR(bp, CSEM_REG_CSEM_INT_MASK_1, 0); */

	/* PXP2 mask value differs per platform/chip revision */
	if (CHIP_REV_IS_FPGA(bp))
		REG_WR(bp, PXP2_REG_PXP2_INT_MASK_0, 0x580000);
	else if (CHIP_IS_E2(bp))
		REG_WR(bp, PXP2_REG_PXP2_INT_MASK_0,
			(PXP2_PXP2_INT_MASK_0_REG_PGL_CPL_OF
				| PXP2_PXP2_INT_MASK_0_REG_PGL_CPL_AFT
				| PXP2_PXP2_INT_MASK_0_REG_PGL_PCIE_ATTN
				| PXP2_PXP2_INT_MASK_0_REG_PGL_READ_BLOCKED
				| PXP2_PXP2_INT_MASK_0_REG_PGL_WRITE_BLOCKED));
	else
		REG_WR(bp, PXP2_REG_PXP2_INT_MASK_0, 0x480000);
	REG_WR(bp, TSDM_REG_TSDM_INT_MASK_0, 0);
	REG_WR(bp, TSDM_REG_TSDM_INT_MASK_1, 0);
	REG_WR(bp, TCM_REG_TCM_INT_MASK, 0);
/*	REG_WR(bp, TSEM_REG_TSEM_INT_MASK_0, 0); */
/*	REG_WR(bp, TSEM_REG_TSEM_INT_MASK_1, 0); */
	REG_WR(bp, CDU_REG_CDU_INT_MASK, 0);
	REG_WR(bp, DMAE_REG_DMAE_INT_MASK, 0);
/*	REG_WR(bp, MISC_REG_MISC_INT_MASK, 0); */
	REG_WR(bp, PBF_REG_PBF_INT_MASK, 0x18);		/* bit 3,4 masked */
}
4830
Eilon Greenstein81f75bb2009-01-22 03:37:31 +00004831static void bnx2x_reset_common(struct bnx2x *bp)
4832{
4833 /* reset_common */
4834 REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_CLEAR,
4835 0xd3ffff7f);
4836 REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_2_CLEAR, 0x1403);
4837}
4838
Eilon Greenstein573f2032009-08-12 08:24:14 +00004839static void bnx2x_init_pxp(struct bnx2x *bp)
4840{
4841 u16 devctl;
4842 int r_order, w_order;
4843
4844 pci_read_config_word(bp->pdev,
4845 bp->pcie_cap + PCI_EXP_DEVCTL, &devctl);
4846 DP(NETIF_MSG_HW, "read 0x%x from devctl\n", devctl);
4847 w_order = ((devctl & PCI_EXP_DEVCTL_PAYLOAD) >> 5);
4848 if (bp->mrrs == -1)
4849 r_order = ((devctl & PCI_EXP_DEVCTL_READRQ) >> 12);
4850 else {
4851 DP(NETIF_MSG_HW, "force read order to %d\n", bp->mrrs);
4852 r_order = bp->mrrs;
4853 }
4854
4855 bnx2x_init_pxp_arb(bp, r_order, w_order);
4856}
Eilon Greensteinfd4ef402009-07-21 05:47:27 +00004857
4858static void bnx2x_setup_fan_failure_detection(struct bnx2x *bp)
4859{
Vladislav Zolotarov2145a922010-04-19 01:13:49 +00004860 int is_required;
Eilon Greensteinfd4ef402009-07-21 05:47:27 +00004861 u32 val;
Vladislav Zolotarov2145a922010-04-19 01:13:49 +00004862 int port;
Eilon Greensteinfd4ef402009-07-21 05:47:27 +00004863
Vladislav Zolotarov2145a922010-04-19 01:13:49 +00004864 if (BP_NOMCP(bp))
4865 return;
4866
4867 is_required = 0;
Eilon Greensteinfd4ef402009-07-21 05:47:27 +00004868 val = SHMEM_RD(bp, dev_info.shared_hw_config.config2) &
4869 SHARED_HW_CFG_FAN_FAILURE_MASK;
4870
4871 if (val == SHARED_HW_CFG_FAN_FAILURE_ENABLED)
4872 is_required = 1;
4873
4874 /*
4875 * The fan failure mechanism is usually related to the PHY type since
4876 * the power consumption of the board is affected by the PHY. Currently,
4877 * fan is required for most designs with SFX7101, BCM8727 and BCM8481.
4878 */
4879 else if (val == SHARED_HW_CFG_FAN_FAILURE_PHY_TYPE)
4880 for (port = PORT_0; port < PORT_MAX; port++) {
Eilon Greensteinfd4ef402009-07-21 05:47:27 +00004881 is_required |=
Yaniv Rosnerd90d96b2010-09-07 11:41:04 +00004882 bnx2x_fan_failure_det_req(
4883 bp,
4884 bp->common.shmem_base,
Yaniv Rosnera22f0782010-09-07 11:41:20 +00004885 bp->common.shmem2_base,
Yaniv Rosnerd90d96b2010-09-07 11:41:04 +00004886 port);
Eilon Greensteinfd4ef402009-07-21 05:47:27 +00004887 }
4888
4889 DP(NETIF_MSG_HW, "fan detection setting: %d\n", is_required);
4890
4891 if (is_required == 0)
4892 return;
4893
4894 /* Fan failure is indicated by SPIO 5 */
4895 bnx2x_set_spio(bp, MISC_REGISTERS_SPIO_5,
4896 MISC_REGISTERS_SPIO_INPUT_HI_Z);
4897
4898 /* set to active low mode */
4899 val = REG_RD(bp, MISC_REG_SPIO_INT);
4900 val |= ((1 << MISC_REGISTERS_SPIO_5) <<
Vladislav Zolotarovcdaa7cb2010-04-19 01:13:57 +00004901 MISC_REGISTERS_SPIO_INT_OLD_SET_POS);
Eilon Greensteinfd4ef402009-07-21 05:47:27 +00004902 REG_WR(bp, MISC_REG_SPIO_INT, val);
4903
4904 /* enable interrupt to signal the IGU */
4905 val = REG_RD(bp, MISC_REG_SPIO_EVENT_EN);
4906 val |= (1 << MISC_REGISTERS_SPIO_5);
4907 REG_WR(bp, MISC_REG_SPIO_EVENT_EN, val);
4908}
4909
Dmitry Kravkovf2e08992010-10-06 03:28:26 +00004910static void bnx2x_pretend_func(struct bnx2x *bp, u8 pretend_func_num)
4911{
4912 u32 offset = 0;
4913
4914 if (CHIP_IS_E1(bp))
4915 return;
4916 if (CHIP_IS_E1H(bp) && (pretend_func_num >= E1H_FUNC_MAX))
4917 return;
4918
4919 switch (BP_ABS_FUNC(bp)) {
4920 case 0:
4921 offset = PXP2_REG_PGL_PRETEND_FUNC_F0;
4922 break;
4923 case 1:
4924 offset = PXP2_REG_PGL_PRETEND_FUNC_F1;
4925 break;
4926 case 2:
4927 offset = PXP2_REG_PGL_PRETEND_FUNC_F2;
4928 break;
4929 case 3:
4930 offset = PXP2_REG_PGL_PRETEND_FUNC_F3;
4931 break;
4932 case 4:
4933 offset = PXP2_REG_PGL_PRETEND_FUNC_F4;
4934 break;
4935 case 5:
4936 offset = PXP2_REG_PGL_PRETEND_FUNC_F5;
4937 break;
4938 case 6:
4939 offset = PXP2_REG_PGL_PRETEND_FUNC_F6;
4940 break;
4941 case 7:
4942 offset = PXP2_REG_PGL_PRETEND_FUNC_F7;
4943 break;
4944 default:
4945 return;
4946 }
4947
4948 REG_WR(bp, offset, pretend_func_num);
4949 REG_RD(bp, offset);
4950 DP(NETIF_MSG_HW, "Pretending to func %d\n", pretend_func_num);
4951}
4952
4953static void bnx2x_pf_disable(struct bnx2x *bp)
4954{
4955 u32 val = REG_RD(bp, IGU_REG_PF_CONFIGURATION);
4956 val &= ~IGU_PF_CONF_FUNC_EN;
4957
4958 REG_WR(bp, IGU_REG_PF_CONFIGURATION, val);
4959 REG_WR(bp, PGLUE_B_REG_INTERNAL_PFID_ENABLE_MASTER, 0);
4960 REG_WR(bp, CFC_REG_WEAK_ENABLE_PF, 0);
4961}
4962
Dmitry Kravkov523224a2010-10-06 03:23:26 +00004963static int bnx2x_init_hw_common(struct bnx2x *bp, u32 load_code)
Eilon Greenstein34f80b02008-06-23 20:33:01 -07004964{
4965 u32 val, i;
4966
Dmitry Kravkovf2e08992010-10-06 03:28:26 +00004967 DP(BNX2X_MSG_MCP, "starting common init func %d\n", BP_ABS_FUNC(bp));
Eilon Greenstein34f80b02008-06-23 20:33:01 -07004968
Eilon Greenstein81f75bb2009-01-22 03:37:31 +00004969 bnx2x_reset_common(bp);
Eilon Greenstein34f80b02008-06-23 20:33:01 -07004970 REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_SET, 0xffffffff);
4971 REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_2_SET, 0xfffc);
4972
Vladislav Zolotarov94a78b72009-04-27 03:27:43 -07004973 bnx2x_init_block(bp, MISC_BLOCK, COMMON_STAGE);
Dmitry Kravkovf2e08992010-10-06 03:28:26 +00004974 if (!CHIP_IS_E1(bp))
Dmitry Kravkovfb3bff12010-10-06 03:26:40 +00004975 REG_WR(bp, MISC_REG_E1HMF_MODE, IS_MF(bp));
Eilon Greenstein34f80b02008-06-23 20:33:01 -07004976
Dmitry Kravkovf2e08992010-10-06 03:28:26 +00004977 if (CHIP_IS_E2(bp)) {
4978 u8 fid;
4979
4980 /**
4981 * 4-port mode or 2-port mode we need to turn of master-enable
4982 * for everyone, after that, turn it back on for self.
4983 * so, we disregard multi-function or not, and always disable
4984 * for all functions on the given path, this means 0,2,4,6 for
4985 * path 0 and 1,3,5,7 for path 1
4986 */
4987 for (fid = BP_PATH(bp); fid < E2_FUNC_MAX*2; fid += 2) {
4988 if (fid == BP_ABS_FUNC(bp)) {
4989 REG_WR(bp,
4990 PGLUE_B_REG_INTERNAL_PFID_ENABLE_MASTER,
4991 1);
4992 continue;
4993 }
4994
4995 bnx2x_pretend_func(bp, fid);
4996 /* clear pf enable */
4997 bnx2x_pf_disable(bp);
4998 bnx2x_pretend_func(bp, BP_ABS_FUNC(bp));
4999 }
5000 }
Eilon Greenstein34f80b02008-06-23 20:33:01 -07005001
Vladislav Zolotarov94a78b72009-04-27 03:27:43 -07005002 bnx2x_init_block(bp, PXP_BLOCK, COMMON_STAGE);
Eilon Greenstein34f80b02008-06-23 20:33:01 -07005003 if (CHIP_IS_E1(bp)) {
5004 /* enable HW interrupt from PXP on USDM overflow
5005 bit 16 on INT_MASK_0 */
5006 REG_WR(bp, PXP_REG_PXP_INT_MASK_0, 0);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02005007 }
5008
Vladislav Zolotarov94a78b72009-04-27 03:27:43 -07005009 bnx2x_init_block(bp, PXP2_BLOCK, COMMON_STAGE);
Eilon Greenstein34f80b02008-06-23 20:33:01 -07005010 bnx2x_init_pxp(bp);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02005011
5012#ifdef __BIG_ENDIAN
Eilon Greenstein34f80b02008-06-23 20:33:01 -07005013 REG_WR(bp, PXP2_REG_RQ_QM_ENDIAN_M, 1);
5014 REG_WR(bp, PXP2_REG_RQ_TM_ENDIAN_M, 1);
5015 REG_WR(bp, PXP2_REG_RQ_SRC_ENDIAN_M, 1);
5016 REG_WR(bp, PXP2_REG_RQ_CDU_ENDIAN_M, 1);
5017 REG_WR(bp, PXP2_REG_RQ_DBG_ENDIAN_M, 1);
Eilon Greenstein8badd272009-02-12 08:36:15 +00005018 /* make sure this value is 0 */
5019 REG_WR(bp, PXP2_REG_RQ_HC_ENDIAN_M, 0);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02005020
Eilon Greenstein34f80b02008-06-23 20:33:01 -07005021/* REG_WR(bp, PXP2_REG_RD_PBF_SWAP_MODE, 1); */
5022 REG_WR(bp, PXP2_REG_RD_QM_SWAP_MODE, 1);
5023 REG_WR(bp, PXP2_REG_RD_TM_SWAP_MODE, 1);
5024 REG_WR(bp, PXP2_REG_RD_SRC_SWAP_MODE, 1);
5025 REG_WR(bp, PXP2_REG_RD_CDURD_SWAP_MODE, 1);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02005026#endif
5027
Dmitry Kravkov523224a2010-10-06 03:23:26 +00005028 bnx2x_ilt_init_page_size(bp, INITOP_SET);
5029
Eilon Greenstein34f80b02008-06-23 20:33:01 -07005030 if (CHIP_REV_IS_FPGA(bp) && CHIP_IS_E1H(bp))
5031 REG_WR(bp, PXP2_REG_PGL_TAGS_LIMIT, 0x1);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02005032
Eilon Greenstein34f80b02008-06-23 20:33:01 -07005033 /* let the HW do it's magic ... */
5034 msleep(100);
5035 /* finish PXP init */
5036 val = REG_RD(bp, PXP2_REG_RQ_CFG_DONE);
5037 if (val != 1) {
5038 BNX2X_ERR("PXP2 CFG failed\n");
5039 return -EBUSY;
5040 }
5041 val = REG_RD(bp, PXP2_REG_RD_INIT_DONE);
5042 if (val != 1) {
5043 BNX2X_ERR("PXP2 RD_INIT failed\n");
5044 return -EBUSY;
5045 }
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02005046
Dmitry Kravkovf2e08992010-10-06 03:28:26 +00005047 /* Timers bug workaround E2 only. We need to set the entire ILT to
5048 * have entries with value "0" and valid bit on.
5049 * This needs to be done by the first PF that is loaded in a path
5050 * (i.e. common phase)
5051 */
5052 if (CHIP_IS_E2(bp)) {
5053 struct ilt_client_info ilt_cli;
5054 struct bnx2x_ilt ilt;
5055 memset(&ilt_cli, 0, sizeof(struct ilt_client_info));
5056 memset(&ilt, 0, sizeof(struct bnx2x_ilt));
5057
Uwe Kleine-Königb5950762010-11-01 15:38:34 -04005058 /* initialize dummy TM client */
Dmitry Kravkovf2e08992010-10-06 03:28:26 +00005059 ilt_cli.start = 0;
5060 ilt_cli.end = ILT_NUM_PAGE_ENTRIES - 1;
5061 ilt_cli.client_num = ILT_CLIENT_TM;
5062
5063 /* Step 1: set zeroes to all ilt page entries with valid bit on
5064 * Step 2: set the timers first/last ilt entry to point
5065 * to the entire range to prevent ILT range error for 3rd/4th
5066 * vnic (this code assumes existance of the vnic)
5067 *
5068 * both steps performed by call to bnx2x_ilt_client_init_op()
5069 * with dummy TM client
5070 *
5071 * we must use pretend since PXP2_REG_RQ_##blk##_FIRST_ILT
5072 * and his brother are split registers
5073 */
5074 bnx2x_pretend_func(bp, (BP_PATH(bp) + 6));
5075 bnx2x_ilt_client_init_op_ilt(bp, &ilt, &ilt_cli, INITOP_CLEAR);
5076 bnx2x_pretend_func(bp, BP_ABS_FUNC(bp));
5077
5078 REG_WR(bp, PXP2_REG_RQ_DRAM_ALIGN, BNX2X_PXP_DRAM_ALIGN);
5079 REG_WR(bp, PXP2_REG_RQ_DRAM_ALIGN_RD, BNX2X_PXP_DRAM_ALIGN);
5080 REG_WR(bp, PXP2_REG_RQ_DRAM_ALIGN_SEL, 1);
5081 }
5082
5083
Eilon Greenstein34f80b02008-06-23 20:33:01 -07005084 REG_WR(bp, PXP2_REG_RQ_DISABLE_INPUTS, 0);
5085 REG_WR(bp, PXP2_REG_RD_DISABLE_INPUTS, 0);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02005086
Dmitry Kravkovf2e08992010-10-06 03:28:26 +00005087 if (CHIP_IS_E2(bp)) {
5088 int factor = CHIP_REV_IS_EMUL(bp) ? 1000 :
5089 (CHIP_REV_IS_FPGA(bp) ? 400 : 0);
5090 bnx2x_init_block(bp, PGLUE_B_BLOCK, COMMON_STAGE);
5091
5092 bnx2x_init_block(bp, ATC_BLOCK, COMMON_STAGE);
5093
5094 /* let the HW do it's magic ... */
5095 do {
5096 msleep(200);
5097 val = REG_RD(bp, ATC_REG_ATC_INIT_DONE);
5098 } while (factor-- && (val != 1));
5099
5100 if (val != 1) {
5101 BNX2X_ERR("ATC_INIT failed\n");
5102 return -EBUSY;
5103 }
5104 }
5105
Vladislav Zolotarov94a78b72009-04-27 03:27:43 -07005106 bnx2x_init_block(bp, DMAE_BLOCK, COMMON_STAGE);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02005107
Eilon Greenstein34f80b02008-06-23 20:33:01 -07005108 /* clean the DMAE memory */
5109 bp->dmae_ready = 1;
5110 bnx2x_init_fill(bp, TSEM_REG_PRAM, 0, 8);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02005111
Vladislav Zolotarov94a78b72009-04-27 03:27:43 -07005112 bnx2x_init_block(bp, TCM_BLOCK, COMMON_STAGE);
5113 bnx2x_init_block(bp, UCM_BLOCK, COMMON_STAGE);
5114 bnx2x_init_block(bp, CCM_BLOCK, COMMON_STAGE);
5115 bnx2x_init_block(bp, XCM_BLOCK, COMMON_STAGE);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02005116
Eilon Greenstein34f80b02008-06-23 20:33:01 -07005117 bnx2x_read_dmae(bp, XSEM_REG_PASSIVE_BUFFER, 3);
5118 bnx2x_read_dmae(bp, CSEM_REG_PASSIVE_BUFFER, 3);
5119 bnx2x_read_dmae(bp, TSEM_REG_PASSIVE_BUFFER, 3);
5120 bnx2x_read_dmae(bp, USEM_REG_PASSIVE_BUFFER, 3);
5121
Vladislav Zolotarov94a78b72009-04-27 03:27:43 -07005122 bnx2x_init_block(bp, QM_BLOCK, COMMON_STAGE);
Michael Chan37b091b2009-10-10 13:46:55 +00005123
Dmitry Kravkovf2e08992010-10-06 03:28:26 +00005124 if (CHIP_MODE_IS_4_PORT(bp))
5125 bnx2x_init_block(bp, QM_4PORT_BLOCK, COMMON_STAGE);
Dmitry Kravkovf85582f2010-10-06 03:34:21 +00005126
Dmitry Kravkov523224a2010-10-06 03:23:26 +00005127 /* QM queues pointers table */
5128 bnx2x_qm_init_ptr_table(bp, bp->qm_cid_count, INITOP_SET);
Michael Chan37b091b2009-10-10 13:46:55 +00005129
Eilon Greenstein34f80b02008-06-23 20:33:01 -07005130 /* soft reset pulse */
5131 REG_WR(bp, QM_REG_SOFT_RESET, 1);
5132 REG_WR(bp, QM_REG_SOFT_RESET, 0);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02005133
Michael Chan37b091b2009-10-10 13:46:55 +00005134#ifdef BCM_CNIC
Vladislav Zolotarov94a78b72009-04-27 03:27:43 -07005135 bnx2x_init_block(bp, TIMERS_BLOCK, COMMON_STAGE);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02005136#endif
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02005137
Vladislav Zolotarov94a78b72009-04-27 03:27:43 -07005138 bnx2x_init_block(bp, DQ_BLOCK, COMMON_STAGE);
Dmitry Kravkov523224a2010-10-06 03:23:26 +00005139 REG_WR(bp, DORQ_REG_DPM_CID_OFST, BNX2X_DB_SHIFT);
5140
Eilon Greenstein34f80b02008-06-23 20:33:01 -07005141 if (!CHIP_REV_IS_SLOW(bp)) {
5142 /* enable hw interrupt from doorbell Q */
5143 REG_WR(bp, DORQ_REG_DORQ_INT_MASK, 0);
5144 }
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02005145
Vladislav Zolotarov94a78b72009-04-27 03:27:43 -07005146 bnx2x_init_block(bp, BRB1_BLOCK, COMMON_STAGE);
Dmitry Kravkovf2e08992010-10-06 03:28:26 +00005147 if (CHIP_MODE_IS_4_PORT(bp)) {
5148 REG_WR(bp, BRB1_REG_FULL_LB_XOFF_THRESHOLD, 248);
5149 REG_WR(bp, BRB1_REG_FULL_LB_XON_THRESHOLD, 328);
5150 }
5151
Vladislav Zolotarov94a78b72009-04-27 03:27:43 -07005152 bnx2x_init_block(bp, PRS_BLOCK, COMMON_STAGE);
Eilon Greenstein26c8fa42009-01-14 21:29:55 -08005153 REG_WR(bp, PRS_REG_A_PRSU_20, 0xf);
Michael Chan37b091b2009-10-10 13:46:55 +00005154#ifndef BCM_CNIC
Eilon Greenstein3196a882008-08-13 15:58:49 -07005155 /* set NIC mode */
5156 REG_WR(bp, PRS_REG_NIC_MODE, 1);
Michael Chan37b091b2009-10-10 13:46:55 +00005157#endif
Dmitry Kravkovf2e08992010-10-06 03:28:26 +00005158 if (!CHIP_IS_E1(bp))
Dmitry Kravkov0793f83f2010-12-01 12:39:28 -08005159 REG_WR(bp, PRS_REG_E1HOV_MODE, IS_MF_SD(bp));
Dmitry Kravkovf85582f2010-10-06 03:34:21 +00005160
Dmitry Kravkovf2e08992010-10-06 03:28:26 +00005161 if (CHIP_IS_E2(bp)) {
5162 /* Bit-map indicating which L2 hdrs may appear after the
5163 basic Ethernet header */
Dmitry Kravkov0793f83f2010-12-01 12:39:28 -08005164 int has_ovlan = IS_MF_SD(bp);
Dmitry Kravkovf2e08992010-10-06 03:28:26 +00005165 REG_WR(bp, PRS_REG_HDRS_AFTER_BASIC, (has_ovlan ? 7 : 6));
5166 REG_WR(bp, PRS_REG_MUST_HAVE_HDRS, (has_ovlan ? 1 : 0));
5167 }
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02005168
Vladislav Zolotarov94a78b72009-04-27 03:27:43 -07005169 bnx2x_init_block(bp, TSDM_BLOCK, COMMON_STAGE);
5170 bnx2x_init_block(bp, CSDM_BLOCK, COMMON_STAGE);
5171 bnx2x_init_block(bp, USDM_BLOCK, COMMON_STAGE);
5172 bnx2x_init_block(bp, XSDM_BLOCK, COMMON_STAGE);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02005173
Eilon Greensteinca003922009-08-12 22:53:28 -07005174 bnx2x_init_fill(bp, TSEM_REG_FAST_MEMORY, 0, STORM_INTMEM_SIZE(bp));
5175 bnx2x_init_fill(bp, USEM_REG_FAST_MEMORY, 0, STORM_INTMEM_SIZE(bp));
5176 bnx2x_init_fill(bp, CSEM_REG_FAST_MEMORY, 0, STORM_INTMEM_SIZE(bp));
5177 bnx2x_init_fill(bp, XSEM_REG_FAST_MEMORY, 0, STORM_INTMEM_SIZE(bp));
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02005178
Vladislav Zolotarov94a78b72009-04-27 03:27:43 -07005179 bnx2x_init_block(bp, TSEM_BLOCK, COMMON_STAGE);
5180 bnx2x_init_block(bp, USEM_BLOCK, COMMON_STAGE);
5181 bnx2x_init_block(bp, CSEM_BLOCK, COMMON_STAGE);
5182 bnx2x_init_block(bp, XSEM_BLOCK, COMMON_STAGE);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02005183
Dmitry Kravkovf2e08992010-10-06 03:28:26 +00005184 if (CHIP_MODE_IS_4_PORT(bp))
5185 bnx2x_init_block(bp, XSEM_4PORT_BLOCK, COMMON_STAGE);
5186
Eilon Greenstein34f80b02008-06-23 20:33:01 -07005187 /* sync semi rtc */
5188 REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_CLEAR,
5189 0x80000000);
5190 REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_SET,
5191 0x80000000);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02005192
Vladislav Zolotarov94a78b72009-04-27 03:27:43 -07005193 bnx2x_init_block(bp, UPB_BLOCK, COMMON_STAGE);
5194 bnx2x_init_block(bp, XPB_BLOCK, COMMON_STAGE);
5195 bnx2x_init_block(bp, PBF_BLOCK, COMMON_STAGE);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02005196
Dmitry Kravkovf2e08992010-10-06 03:28:26 +00005197 if (CHIP_IS_E2(bp)) {
Dmitry Kravkov0793f83f2010-12-01 12:39:28 -08005198 int has_ovlan = IS_MF_SD(bp);
Dmitry Kravkovf2e08992010-10-06 03:28:26 +00005199 REG_WR(bp, PBF_REG_HDRS_AFTER_BASIC, (has_ovlan ? 7 : 6));
5200 REG_WR(bp, PBF_REG_MUST_HAVE_HDRS, (has_ovlan ? 1 : 0));
5201 }
5202
Eilon Greenstein34f80b02008-06-23 20:33:01 -07005203 REG_WR(bp, SRC_REG_SOFT_RST, 1);
Tom Herbertc68ed252010-04-23 00:10:52 -07005204 for (i = SRC_REG_KEYRSS0_0; i <= SRC_REG_KEYRSS1_9; i += 4)
5205 REG_WR(bp, i, random32());
Dmitry Kravkovf85582f2010-10-06 03:34:21 +00005206
Vladislav Zolotarov94a78b72009-04-27 03:27:43 -07005207 bnx2x_init_block(bp, SRCH_BLOCK, COMMON_STAGE);
Michael Chan37b091b2009-10-10 13:46:55 +00005208#ifdef BCM_CNIC
5209 REG_WR(bp, SRC_REG_KEYSEARCH_0, 0x63285672);
5210 REG_WR(bp, SRC_REG_KEYSEARCH_1, 0x24b8f2cc);
5211 REG_WR(bp, SRC_REG_KEYSEARCH_2, 0x223aef9b);
5212 REG_WR(bp, SRC_REG_KEYSEARCH_3, 0x26001e3a);
5213 REG_WR(bp, SRC_REG_KEYSEARCH_4, 0x7ae91116);
5214 REG_WR(bp, SRC_REG_KEYSEARCH_5, 0x5ce5230b);
5215 REG_WR(bp, SRC_REG_KEYSEARCH_6, 0x298d8adf);
5216 REG_WR(bp, SRC_REG_KEYSEARCH_7, 0x6eb0ff09);
5217 REG_WR(bp, SRC_REG_KEYSEARCH_8, 0x1830f82f);
5218 REG_WR(bp, SRC_REG_KEYSEARCH_9, 0x01e46be7);
5219#endif
Eilon Greenstein34f80b02008-06-23 20:33:01 -07005220 REG_WR(bp, SRC_REG_SOFT_RST, 0);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02005221
Eilon Greenstein34f80b02008-06-23 20:33:01 -07005222 if (sizeof(union cdu_context) != 1024)
5223 /* we currently assume that a context is 1024 bytes */
Vladislav Zolotarovcdaa7cb2010-04-19 01:13:57 +00005224 dev_alert(&bp->pdev->dev, "please adjust the size "
5225 "of cdu_context(%ld)\n",
Joe Perches7995c642010-02-17 15:01:52 +00005226 (long)sizeof(union cdu_context));
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02005227
Vladislav Zolotarov94a78b72009-04-27 03:27:43 -07005228 bnx2x_init_block(bp, CDU_BLOCK, COMMON_STAGE);
Eilon Greenstein34f80b02008-06-23 20:33:01 -07005229 val = (4 << 24) + (0 << 12) + 1024;
5230 REG_WR(bp, CDU_REG_CDU_GLOBAL_PARAMS, val);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02005231
Vladislav Zolotarov94a78b72009-04-27 03:27:43 -07005232 bnx2x_init_block(bp, CFC_BLOCK, COMMON_STAGE);
Eilon Greenstein34f80b02008-06-23 20:33:01 -07005233 REG_WR(bp, CFC_REG_INIT_REG, 0x7FF);
Eilon Greenstein8d9c5f32009-02-15 23:24:08 -08005234 /* enable context validation interrupt from CFC */
5235 REG_WR(bp, CFC_REG_CFC_INT_MASK, 0);
5236
5237 /* set the thresholds to prevent CFC/CDU race */
5238 REG_WR(bp, CFC_REG_DEBUG0, 0x20020000);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02005239
Vladislav Zolotarov94a78b72009-04-27 03:27:43 -07005240 bnx2x_init_block(bp, HC_BLOCK, COMMON_STAGE);
Dmitry Kravkovf2e08992010-10-06 03:28:26 +00005241
5242 if (CHIP_IS_E2(bp) && BP_NOMCP(bp))
5243 REG_WR(bp, IGU_REG_RESET_MEMORIES, 0x36);
5244
5245 bnx2x_init_block(bp, IGU_BLOCK, COMMON_STAGE);
Vladislav Zolotarov94a78b72009-04-27 03:27:43 -07005246 bnx2x_init_block(bp, MISC_AEU_BLOCK, COMMON_STAGE);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02005247
Vladislav Zolotarov94a78b72009-04-27 03:27:43 -07005248 bnx2x_init_block(bp, PXPCS_BLOCK, COMMON_STAGE);
Eilon Greenstein34f80b02008-06-23 20:33:01 -07005249 /* Reset PCIE errors for debug */
5250 REG_WR(bp, 0x2814, 0xffffffff);
5251 REG_WR(bp, 0x3820, 0xffffffff);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02005252
Dmitry Kravkovf2e08992010-10-06 03:28:26 +00005253 if (CHIP_IS_E2(bp)) {
5254 REG_WR(bp, PCICFG_OFFSET + PXPCS_TL_CONTROL_5,
5255 (PXPCS_TL_CONTROL_5_ERR_UNSPPORT1 |
5256 PXPCS_TL_CONTROL_5_ERR_UNSPPORT));
5257 REG_WR(bp, PCICFG_OFFSET + PXPCS_TL_FUNC345_STAT,
5258 (PXPCS_TL_FUNC345_STAT_ERR_UNSPPORT4 |
5259 PXPCS_TL_FUNC345_STAT_ERR_UNSPPORT3 |
5260 PXPCS_TL_FUNC345_STAT_ERR_UNSPPORT2));
5261 REG_WR(bp, PCICFG_OFFSET + PXPCS_TL_FUNC678_STAT,
5262 (PXPCS_TL_FUNC678_STAT_ERR_UNSPPORT7 |
5263 PXPCS_TL_FUNC678_STAT_ERR_UNSPPORT6 |
5264 PXPCS_TL_FUNC678_STAT_ERR_UNSPPORT5));
5265 }
5266
Vladislav Zolotarov94a78b72009-04-27 03:27:43 -07005267 bnx2x_init_block(bp, EMAC0_BLOCK, COMMON_STAGE);
Vladislav Zolotarov94a78b72009-04-27 03:27:43 -07005268 bnx2x_init_block(bp, EMAC1_BLOCK, COMMON_STAGE);
Vladislav Zolotarov94a78b72009-04-27 03:27:43 -07005269 bnx2x_init_block(bp, DBU_BLOCK, COMMON_STAGE);
Vladislav Zolotarov94a78b72009-04-27 03:27:43 -07005270 bnx2x_init_block(bp, DBG_BLOCK, COMMON_STAGE);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02005271
Vladislav Zolotarov94a78b72009-04-27 03:27:43 -07005272 bnx2x_init_block(bp, NIG_BLOCK, COMMON_STAGE);
Dmitry Kravkovf2e08992010-10-06 03:28:26 +00005273 if (!CHIP_IS_E1(bp)) {
Dmitry Kravkovfb3bff12010-10-06 03:26:40 +00005274 REG_WR(bp, NIG_REG_LLH_MF_MODE, IS_MF(bp));
Dmitry Kravkov0793f83f2010-12-01 12:39:28 -08005275 REG_WR(bp, NIG_REG_LLH_E1HOV_MODE, IS_MF_SD(bp));
Eilon Greenstein34f80b02008-06-23 20:33:01 -07005276 }
Dmitry Kravkovf2e08992010-10-06 03:28:26 +00005277 if (CHIP_IS_E2(bp)) {
5278 /* Bit-map indicating which L2 hdrs may appear after the
5279 basic Ethernet header */
Dmitry Kravkov0793f83f2010-12-01 12:39:28 -08005280 REG_WR(bp, NIG_REG_P0_HDRS_AFTER_BASIC, (IS_MF_SD(bp) ? 7 : 6));
Dmitry Kravkovf2e08992010-10-06 03:28:26 +00005281 }
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02005282
Eilon Greenstein34f80b02008-06-23 20:33:01 -07005283 if (CHIP_REV_IS_SLOW(bp))
5284 msleep(200);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02005285
Eilon Greenstein34f80b02008-06-23 20:33:01 -07005286 /* finish CFC init */
5287 val = reg_poll(bp, CFC_REG_LL_INIT_DONE, 1, 100, 10);
5288 if (val != 1) {
5289 BNX2X_ERR("CFC LL_INIT failed\n");
5290 return -EBUSY;
5291 }
5292 val = reg_poll(bp, CFC_REG_AC_INIT_DONE, 1, 100, 10);
5293 if (val != 1) {
5294 BNX2X_ERR("CFC AC_INIT failed\n");
5295 return -EBUSY;
5296 }
5297 val = reg_poll(bp, CFC_REG_CAM_INIT_DONE, 1, 100, 10);
5298 if (val != 1) {
5299 BNX2X_ERR("CFC CAM_INIT failed\n");
5300 return -EBUSY;
5301 }
5302 REG_WR(bp, CFC_REG_DEBUG0, 0);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02005303
Dmitry Kravkovf2e08992010-10-06 03:28:26 +00005304 if (CHIP_IS_E1(bp)) {
5305 /* read NIG statistic
5306 to see if this is our first up since powerup */
5307 bnx2x_read_dmae(bp, NIG_REG_STAT2_BRB_OCTET, 2);
5308 val = *bnx2x_sp(bp, wb_data[0]);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02005309
Dmitry Kravkovf2e08992010-10-06 03:28:26 +00005310 /* do internal memory self test */
5311 if ((val == 0) && bnx2x_int_mem_test(bp)) {
5312 BNX2X_ERR("internal mem self test failed\n");
5313 return -EBUSY;
5314 }
Eilon Greenstein34f80b02008-06-23 20:33:01 -07005315 }
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02005316
Eilon Greensteinfd4ef402009-07-21 05:47:27 +00005317 bnx2x_setup_fan_failure_detection(bp);
5318
Eilon Greenstein34f80b02008-06-23 20:33:01 -07005319 /* clear PXP2 attentions */
5320 REG_RD(bp, PXP2_REG_PXP2_INT_STS_CLR_0);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02005321
Vladislav Zolotarov4a33bc02011-01-09 02:20:04 +00005322 bnx2x_enable_blocks_attention(bp);
5323 if (CHIP_PARITY_ENABLED(bp))
5324 bnx2x_enable_blocks_parity(bp);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02005325
Yaniv Rosner6bbca912008-08-13 15:57:28 -07005326 if (!BP_NOMCP(bp)) {
Dmitry Kravkovf2e08992010-10-06 03:28:26 +00005327 /* In E2 2-PORT mode, same ext phy is used for the two paths */
5328 if ((load_code == FW_MSG_CODE_DRV_LOAD_COMMON_CHIP) ||
5329 CHIP_IS_E1x(bp)) {
5330 u32 shmem_base[2], shmem2_base[2];
5331 shmem_base[0] = bp->common.shmem_base;
5332 shmem2_base[0] = bp->common.shmem2_base;
5333 if (CHIP_IS_E2(bp)) {
5334 shmem_base[1] =
5335 SHMEM2_RD(bp, other_shmem_base_addr);
5336 shmem2_base[1] =
5337 SHMEM2_RD(bp, other_shmem2_base_addr);
5338 }
5339 bnx2x_acquire_phy_lock(bp);
5340 bnx2x_common_init_phy(bp, shmem_base, shmem2_base,
5341 bp->common.chip_id);
5342 bnx2x_release_phy_lock(bp);
5343 }
Yaniv Rosner6bbca912008-08-13 15:57:28 -07005344 } else
5345 BNX2X_ERR("Bootcode is missing - can not initialize link\n");
5346
Eilon Greenstein34f80b02008-06-23 20:33:01 -07005347 return 0;
5348}
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02005349
/*
 * bnx2x_init_hw_port - per-port hardware initialization stage.
 *
 * Runs the PORT init stage for the port this function lives on: brings up
 * each HW block for the port, programs BRB pause thresholds based on mode
 * and MTU, configures PBF credits (non-E2), sets the AEU attention mask,
 * NIG classification, fan-failure attention and finally resets the link.
 *
 * NOTE(review): the order of bnx2x_init_block()/REG_WR() calls mirrors the
 * chip's documented init sequence — do not reorder.
 *
 * Returns 0 (no failure paths in this stage).
 */
static int bnx2x_init_hw_port(struct bnx2x *bp)
{
	int port = BP_PORT(bp);
	/* select the per-port init stage table for block init */
	int init_stage = port ? PORT1_STAGE : PORT0_STAGE;
	u32 low, high;
	u32 val;

	DP(BNX2X_MSG_MCP, "starting port init port %d\n", port);

	/* mask NIG interrupts for this port during init */
	REG_WR(bp, NIG_REG_MASK_INTERRUPT_PORT0 + port*4, 0);

	bnx2x_init_block(bp, PXP_BLOCK, init_stage);
	bnx2x_init_block(bp, PXP2_BLOCK, init_stage);

	/* Timers bug workaround: disables the pf_master bit in pglue at
	 * common phase, we need to enable it here before any dmae access are
	 * attempted. Therefore we manually added the enable-master to the
	 * port phase (it also happens in the function phase)
	 */
	if (CHIP_IS_E2(bp))
		REG_WR(bp, PGLUE_B_REG_INTERNAL_PFID_ENABLE_MASTER, 1);

	bnx2x_init_block(bp, TCM_BLOCK, init_stage);
	bnx2x_init_block(bp, UCM_BLOCK, init_stage);
	bnx2x_init_block(bp, CCM_BLOCK, init_stage);
	bnx2x_init_block(bp, XCM_BLOCK, init_stage);

	/* QM cid (connection) count */
	bnx2x_qm_init_cid_count(bp, bp->qm_cid_count, INITOP_SET);

#ifdef BCM_CNIC
	/* TM (timers) block is only needed for the CNIC (iSCSI/FCoE) path */
	bnx2x_init_block(bp, TIMERS_BLOCK, init_stage);
	REG_WR(bp, TM_REG_LIN0_SCAN_TIME + port*4, 20);
	REG_WR(bp, TM_REG_LIN0_MAX_ACTIVE_CID + port*4, 31);
#endif

	bnx2x_init_block(bp, DQ_BLOCK, init_stage);

	if (CHIP_MODE_IS_4_PORT(bp))
		bnx2x_init_block(bp, QM_4PORT_BLOCK, init_stage);

	if (CHIP_IS_E1(bp) || CHIP_IS_E1H(bp)) {
		bnx2x_init_block(bp, BRB1_BLOCK, init_stage);
		if (CHIP_REV_IS_SLOW(bp) && CHIP_IS_E1(bp)) {
			/* no pause for emulation and FPGA */
			low = 0;
			high = 513;
		} else {
			/* BRB pause thresholds (in 256-byte units), chosen
			 * by mode (MF / one-port) and MTU */
			if (IS_MF(bp))
				low = ((bp->flags & ONE_PORT_FLAG) ? 160 : 246);
			else if (bp->dev->mtu > 4096) {
				if (bp->flags & ONE_PORT_FLAG)
					low = 160;
				else {
					val = bp->dev->mtu;
					/* (24*1024 + val*4)/256 */
					low = 96 + (val/64) +
					      ((val % 64) ? 1 : 0);
				}
			} else
				low = ((bp->flags & ONE_PORT_FLAG) ? 80 : 160);
			high = low + 56; /* 14*1024/256 */
		}
		REG_WR(bp, BRB1_REG_PAUSE_LOW_THRESHOLD_0 + port*4, low);
		REG_WR(bp, BRB1_REG_PAUSE_HIGH_THRESHOLD_0 + port*4, high);
	}

	if (CHIP_MODE_IS_4_PORT(bp)) {
		/* fixed xoff/xon thresholds and guaranteed MAC credit
		 * in 4-port mode */
		REG_WR(bp, BRB1_REG_PAUSE_0_XOFF_THRESHOLD_0 + port*8, 248);
		REG_WR(bp, BRB1_REG_PAUSE_0_XON_THRESHOLD_0 + port*8, 328);
		REG_WR(bp, (BP_PORT(bp) ? BRB1_REG_MAC_GUARANTIED_1 :
				   BRB1_REG_MAC_GUARANTIED_0), 40);
	}

	bnx2x_init_block(bp, PRS_BLOCK, init_stage);

	bnx2x_init_block(bp, TSDM_BLOCK, init_stage);
	bnx2x_init_block(bp, CSDM_BLOCK, init_stage);
	bnx2x_init_block(bp, USDM_BLOCK, init_stage);
	bnx2x_init_block(bp, XSDM_BLOCK, init_stage);

	bnx2x_init_block(bp, TSEM_BLOCK, init_stage);
	bnx2x_init_block(bp, USEM_BLOCK, init_stage);
	bnx2x_init_block(bp, CSEM_BLOCK, init_stage);
	bnx2x_init_block(bp, XSEM_BLOCK, init_stage);
	if (CHIP_MODE_IS_4_PORT(bp))
		bnx2x_init_block(bp, XSEM_4PORT_BLOCK, init_stage);

	bnx2x_init_block(bp, UPB_BLOCK, init_stage);
	bnx2x_init_block(bp, XPB_BLOCK, init_stage);

	bnx2x_init_block(bp, PBF_BLOCK, init_stage);

	if (!CHIP_IS_E2(bp)) {
		/* configure PBF to work without PAUSE mtu 9000 */
		REG_WR(bp, PBF_REG_P0_PAUSE_ENABLE + port*4, 0);

		/* update threshold */
		REG_WR(bp, PBF_REG_P0_ARB_THRSH + port*4, (9040/16));
		/* update init credit */
		REG_WR(bp, PBF_REG_P0_INIT_CRD + port*4, (9040/16) + 553 - 22);

		/* probe changes */
		REG_WR(bp, PBF_REG_INIT_P0 + port*4, 1);
		udelay(50);
		REG_WR(bp, PBF_REG_INIT_P0 + port*4, 0);
	}

#ifdef BCM_CNIC
	bnx2x_init_block(bp, SRCH_BLOCK, init_stage);
#endif
	bnx2x_init_block(bp, CDU_BLOCK, init_stage);
	bnx2x_init_block(bp, CFC_BLOCK, init_stage);

	if (CHIP_IS_E1(bp)) {
		/* clear HC leading/trailing edge registers on E1 only */
		REG_WR(bp, HC_REG_LEADING_EDGE_0 + port*8, 0);
		REG_WR(bp, HC_REG_TRAILING_EDGE_0 + port*8, 0);
	}
	bnx2x_init_block(bp, HC_BLOCK, init_stage);

	bnx2x_init_block(bp, IGU_BLOCK, init_stage);

	bnx2x_init_block(bp, MISC_AEU_BLOCK, init_stage);
	/* init aeu_mask_attn_func_0/1:
	 * - SF mode: bits 3-7 are masked. only bits 0-2 are in use
	 * - MF mode: bit 3 is masked. bits 0-2 are in use as in SF
	 *   bits 4-7 are used for "per vn group attention" */
	val = IS_MF(bp) ? 0xF7 : 0x7;
	/* Enable DCBX attention for all but E1 */
	val |= CHIP_IS_E1(bp) ? 0 : 0x10;
	REG_WR(bp, MISC_REG_AEU_MASK_ATTN_FUNC_0 + port*4, val);

	bnx2x_init_block(bp, PXPCS_BLOCK, init_stage);
	bnx2x_init_block(bp, EMAC0_BLOCK, init_stage);
	bnx2x_init_block(bp, EMAC1_BLOCK, init_stage);
	bnx2x_init_block(bp, DBU_BLOCK, init_stage);
	bnx2x_init_block(bp, DBG_BLOCK, init_stage);

	bnx2x_init_block(bp, NIG_BLOCK, init_stage);

	REG_WR(bp, NIG_REG_XGXS_SERDES0_MODE_SEL + port*4, 1);

	if (!CHIP_IS_E1(bp)) {
		/* 0x2 disable mf_ov, 0x1 enable */
		REG_WR(bp, NIG_REG_LLH0_BRB1_DRV_MASK_MF + port*4,
		       (IS_MF_SD(bp) ? 0x1 : 0x2));

		if (CHIP_IS_E2(bp)) {
			/* LLH classification type: 0 - none, 1 - outer VLAN
			 * (switch-dependent MF), 2 - MAC (switch-independent
			 * MF) */
			val = 0;
			switch (bp->mf_mode) {
			case MULTI_FUNCTION_SD:
				val = 1;
				break;
			case MULTI_FUNCTION_SI:
				val = 2;
				break;
			}

			REG_WR(bp, (BP_PORT(bp) ? NIG_REG_LLH1_CLS_TYPE :
						  NIG_REG_LLH0_CLS_TYPE), val);
		}
		{
			/* disable LLFC, enable plain PAUSE for this port */
			REG_WR(bp, NIG_REG_LLFC_ENABLE_0 + port*4, 0);
			REG_WR(bp, NIG_REG_LLFC_OUT_EN_0 + port*4, 0);
			REG_WR(bp, NIG_REG_PAUSE_ENABLE_0 + port*4, 1);
		}
	}

	bnx2x_init_block(bp, MCP_BLOCK, init_stage);
	bnx2x_init_block(bp, DMAE_BLOCK, init_stage);
	/* if this board uses the SPIO5 fan-failure signal, unmask its
	 * attention bit in the AEU */
	if (bnx2x_fan_failure_det_req(bp, bp->common.shmem_base,
				      bp->common.shmem2_base, port)) {
		u32 reg_addr = (port ? MISC_REG_AEU_ENABLE1_FUNC_1_OUT_0 :
				       MISC_REG_AEU_ENABLE1_FUNC_0_OUT_0);
		val = REG_RD(bp, reg_addr);
		val |= AEU_INPUTS_ATTN_BITS_SPIO5;
		REG_WR(bp, reg_addr, val);
	}
	/* leave the link in reset; it is brought up later */
	bnx2x__link_reset(bp);

	return 0;
}
5532
Eilon Greenstein34f80b02008-06-23 20:33:01 -07005533static void bnx2x_ilt_wr(struct bnx2x *bp, u32 index, dma_addr_t addr)
5534{
5535 int reg;
5536
Dmitry Kravkovf2e08992010-10-06 03:28:26 +00005537 if (CHIP_IS_E1(bp))
Eilon Greenstein34f80b02008-06-23 20:33:01 -07005538 reg = PXP2_REG_RQ_ONCHIP_AT + index*8;
Dmitry Kravkovf2e08992010-10-06 03:28:26 +00005539 else
5540 reg = PXP2_REG_RQ_ONCHIP_AT_B0 + index*8;
Eilon Greenstein34f80b02008-06-23 20:33:01 -07005541
5542 bnx2x_wb_wr(bp, reg, ONCHIP_ADDR1(addr), ONCHIP_ADDR2(addr));
5543}
5544
Dmitry Kravkovf2e08992010-10-06 03:28:26 +00005545static inline void bnx2x_igu_clear_sb(struct bnx2x *bp, u8 idu_sb_id)
5546{
5547 bnx2x_igu_clear_sb_gen(bp, idu_sb_id, true /*PF*/);
5548}
5549
5550static inline void bnx2x_clear_func_ilt(struct bnx2x *bp, u32 func)
5551{
5552 u32 i, base = FUNC_ILT_BASE(func);
5553 for (i = base; i < base + ILT_PER_FUNC; i++)
5554 bnx2x_ilt_wr(bp, i, 0);
5555}
5556
/*
 * bnx2x_init_hw_func - per-function (PF) hardware initialization stage.
 *
 * Runs the FUNCTION init stage: maps the CDU context pages into the ILT,
 * enables the function in the IGU (E2), runs every HW block's function-stage
 * init, sets up the interrupt controller (HC or IGU depending on the chip's
 * int_block), clears PCIE error latches and finally probes the PHY.
 *
 * NOTE(review): statement ordering here is part of the chip init contract
 * (e.g. the 20ms timers-bug sleep must follow ILT init and precede the
 * master-enable write) — do not reorder.
 *
 * Returns 0 (no failure paths in this stage).
 */
static int bnx2x_init_hw_func(struct bnx2x *bp)
{
	int port = BP_PORT(bp);
	int func = BP_FUNC(bp);
	struct bnx2x_ilt *ilt = BP_ILT(bp);
	u16 cdu_ilt_start;
	u32 addr, val;
	u32 main_mem_base, main_mem_size, main_mem_prty_clr;
	int i, main_mem_width;

	DP(BNX2X_MSG_MCP, "starting func init func %d\n", func);

	/* set MSI reconfigure capability */
	if (bp->common.int_block == INT_BLOCK_HC) {
		addr = (port ? HC_REG_CONFIG_1 : HC_REG_CONFIG_0);
		val = REG_RD(bp, addr);
		val |= HC_CONFIG_0_REG_MSI_ATTN_EN_0;
		REG_WR(bp, addr, val);
	}

	/* point the CDU client's ILT lines at this function's pre-allocated
	 * context pages */
	ilt = BP_ILT(bp);
	cdu_ilt_start = ilt->clients[ILT_CLIENT_CDU].start;

	for (i = 0; i < L2_ILT_LINES(bp); i++) {
		ilt->lines[cdu_ilt_start + i].page =
			bp->context.vcxt + (ILT_PAGE_CIDS * i);
		ilt->lines[cdu_ilt_start + i].page_mapping =
			bp->context.cxt_mapping + (CDU_ILT_PAGE_SZ * i);
		/* cdu ilt pages are allocated manually so there's no need to
		set the size */
	}
	bnx2x_ilt_init_op(bp, INITOP_SET);

#ifdef BCM_CNIC
	bnx2x_src_init_t2(bp, bp->t2, bp->t2_mapping, SRC_CONN_NUM);

	/* T1 hash bits value determines the T1 number of entries */
	REG_WR(bp, SRC_REG_NUMBER_HASH_BITS0 + port*4, SRC_HASH_BITS);
#endif

#ifndef BCM_CNIC
	/* set NIC mode */
	REG_WR(bp, PRS_REG_NIC_MODE, 1);
#endif /* BCM_CNIC */

	if (CHIP_IS_E2(bp)) {
		u32 pf_conf = IGU_PF_CONF_FUNC_EN;

		/* Turn on a single ISR mode in IGU if driver is going to use
		 * INT#x or MSI
		 */
		if (!(bp->flags & USING_MSIX_FLAG))
			pf_conf |= IGU_PF_CONF_SINGLE_ISR_EN;
		/*
		 * Timers workaround bug: function init part.
		 * Need to wait 20msec after initializing ILT,
		 * needed to make sure there are no requests in
		 * one of the PXP internal queues with "old" ILT addresses
		 */
		msleep(20);
		/*
		 * Master enable - Due to WB DMAE writes performed before this
		 * register is re-initialized as part of the regular function
		 * init
		 */
		REG_WR(bp, PGLUE_B_REG_INTERNAL_PFID_ENABLE_MASTER, 1);
		/* Enable the function in IGU */
		REG_WR(bp, IGU_REG_PF_CONFIGURATION, pf_conf);
	}

	/* DMAE may be used from this point on */
	bp->dmae_ready = 1;

	bnx2x_init_block(bp, PGLUE_B_BLOCK, FUNC0_STAGE + func);

	if (CHIP_IS_E2(bp))
		REG_WR(bp, PGLUE_B_REG_WAS_ERROR_PF_7_0_CLR, func);

	bnx2x_init_block(bp, MISC_BLOCK, FUNC0_STAGE + func);
	bnx2x_init_block(bp, TCM_BLOCK, FUNC0_STAGE + func);
	bnx2x_init_block(bp, UCM_BLOCK, FUNC0_STAGE + func);
	bnx2x_init_block(bp, CCM_BLOCK, FUNC0_STAGE + func);
	bnx2x_init_block(bp, XCM_BLOCK, FUNC0_STAGE + func);
	bnx2x_init_block(bp, TSEM_BLOCK, FUNC0_STAGE + func);
	bnx2x_init_block(bp, USEM_BLOCK, FUNC0_STAGE + func);
	bnx2x_init_block(bp, CSEM_BLOCK, FUNC0_STAGE + func);
	bnx2x_init_block(bp, XSEM_BLOCK, FUNC0_STAGE + func);

	if (CHIP_IS_E2(bp)) {
		/* tell the X/C storm firmware which path this PF is on */
		REG_WR(bp, BAR_XSTRORM_INTMEM + XSTORM_PATH_ID_OFFSET,
							BP_PATH(bp));
		REG_WR(bp, BAR_CSTRORM_INTMEM + CSTORM_PATH_ID_OFFSET,
							BP_PATH(bp));
	}

	if (CHIP_MODE_IS_4_PORT(bp))
		bnx2x_init_block(bp, XSEM_4PORT_BLOCK, FUNC0_STAGE + func);

	if (CHIP_IS_E2(bp))
		REG_WR(bp, QM_REG_PF_EN, 1);

	bnx2x_init_block(bp, QM_BLOCK, FUNC0_STAGE + func);

	if (CHIP_MODE_IS_4_PORT(bp))
		bnx2x_init_block(bp, QM_4PORT_BLOCK, FUNC0_STAGE + func);

	bnx2x_init_block(bp, TIMERS_BLOCK, FUNC0_STAGE + func);
	bnx2x_init_block(bp, DQ_BLOCK, FUNC0_STAGE + func);
	bnx2x_init_block(bp, BRB1_BLOCK, FUNC0_STAGE + func);
	bnx2x_init_block(bp, PRS_BLOCK, FUNC0_STAGE + func);
	bnx2x_init_block(bp, TSDM_BLOCK, FUNC0_STAGE + func);
	bnx2x_init_block(bp, CSDM_BLOCK, FUNC0_STAGE + func);
	bnx2x_init_block(bp, USDM_BLOCK, FUNC0_STAGE + func);
	bnx2x_init_block(bp, XSDM_BLOCK, FUNC0_STAGE + func);
	bnx2x_init_block(bp, UPB_BLOCK, FUNC0_STAGE + func);
	bnx2x_init_block(bp, XPB_BLOCK, FUNC0_STAGE + func);
	bnx2x_init_block(bp, PBF_BLOCK, FUNC0_STAGE + func);
	if (CHIP_IS_E2(bp))
		REG_WR(bp, PBF_REG_DISABLE_PF, 0);

	bnx2x_init_block(bp, CDU_BLOCK, FUNC0_STAGE + func);

	bnx2x_init_block(bp, CFC_BLOCK, FUNC0_STAGE + func);

	if (CHIP_IS_E2(bp))
		REG_WR(bp, CFC_REG_WEAK_ENABLE_PF, 1);

	if (IS_MF(bp)) {
		/* multi-function: enable LLH for this function and program
		 * its outer-VLAN id */
		REG_WR(bp, NIG_REG_LLH0_FUNC_EN + port*8, 1);
		REG_WR(bp, NIG_REG_LLH0_FUNC_VLAN_ID + port*8, bp->mf_ov);
	}

	bnx2x_init_block(bp, MISC_AEU_BLOCK, FUNC0_STAGE + func);

	/* HC init per function */
	if (bp->common.int_block == INT_BLOCK_HC) {
		if (CHIP_IS_E1H(bp)) {
			REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_12 + func*4, 0);

			REG_WR(bp, HC_REG_LEADING_EDGE_0 + port*8, 0);
			REG_WR(bp, HC_REG_TRAILING_EDGE_0 + port*8, 0);
		}
		bnx2x_init_block(bp, HC_BLOCK, FUNC0_STAGE + func);

	} else {
		/* IGU-based interrupt handling (E2 / E1.5 BC mode) */
		int num_segs, sb_idx, prod_offset;

		REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_12 + func*4, 0);

		if (CHIP_IS_E2(bp)) {
			REG_WR(bp, IGU_REG_LEADING_EDGE_LATCH, 0);
			REG_WR(bp, IGU_REG_TRAILING_EDGE_LATCH, 0);
		}

		bnx2x_init_block(bp, IGU_BLOCK, FUNC0_STAGE + func);

		if (CHIP_IS_E2(bp)) {
			int dsb_idx = 0;
			/**
			 * Producer memory:
			 * E2 mode: address 0-135 match to the mapping memory;
			 * 136 - PF0 default prod; 137 - PF1 default prod;
			 * 138 - PF2 default prod; 139 - PF3 default prod;
			 * 140 - PF0 attn prod; 141 - PF1 attn prod;
			 * 142 - PF2 attn prod; 143 - PF3 attn prod;
			 * 144-147 reserved.
			 *
			 * E1.5 mode - In backward compatible mode;
			 * for non default SB; each even line in the memory
			 * holds the U producer and each odd line hold
			 * the C producer. The first 128 producers are for
			 * NDSB (PF0 - 0-31; PF1 - 32-63 and so on). The last 20
			 * producers are for the DSB for each PF.
			 * Each PF has five segments: (the order inside each
			 * segment is PF0; PF1; PF2; PF3) - 128-131 U prods;
			 * 132-135 C prods; 136-139 X prods; 140-143 T prods;
			 * 144-147 attn prods;
			 */
			/* non-default-status-blocks */
			num_segs = CHIP_INT_MODE_IS_BC(bp) ?
				IGU_BC_NDSB_NUM_SEGS : IGU_NORM_NDSB_NUM_SEGS;
			for (sb_idx = 0; sb_idx < bp->igu_sb_cnt; sb_idx++) {
				prod_offset = (bp->igu_base_sb + sb_idx) *
					num_segs;

				/* zero all producer segments of this SB */
				for (i = 0; i < num_segs; i++) {
					addr = IGU_REG_PROD_CONS_MEMORY +
							(prod_offset + i) * 4;
					REG_WR(bp, addr, 0);
				}
				/* send consumer update with value 0 */
				bnx2x_ack_sb(bp, bp->igu_base_sb + sb_idx,
					     USTORM_ID, 0, IGU_INT_NOP, 1);
				bnx2x_igu_clear_sb(bp,
						   bp->igu_base_sb + sb_idx);
			}

			/* default-status-blocks */
			num_segs = CHIP_INT_MODE_IS_BC(bp) ?
				IGU_BC_DSB_NUM_SEGS : IGU_NORM_DSB_NUM_SEGS;

			if (CHIP_MODE_IS_4_PORT(bp))
				dsb_idx = BP_FUNC(bp);
			else
				dsb_idx = BP_E1HVN(bp);

			prod_offset = (CHIP_INT_MODE_IS_BC(bp) ?
				       IGU_BC_BASE_DSB_PROD + dsb_idx :
				       IGU_NORM_BASE_DSB_PROD + dsb_idx);

			for (i = 0; i < (num_segs * E1HVN_MAX);
			     i += E1HVN_MAX) {
				addr = IGU_REG_PROD_CONS_MEMORY +
							(prod_offset + i)*4;
				REG_WR(bp, addr, 0);
			}
			/* send consumer update with 0 */
			if (CHIP_INT_MODE_IS_BC(bp)) {
				/* BC mode: ack every storm segment */
				bnx2x_ack_sb(bp, bp->igu_dsb_id,
					     USTORM_ID, 0, IGU_INT_NOP, 1);
				bnx2x_ack_sb(bp, bp->igu_dsb_id,
					     CSTORM_ID, 0, IGU_INT_NOP, 1);
				bnx2x_ack_sb(bp, bp->igu_dsb_id,
					     XSTORM_ID, 0, IGU_INT_NOP, 1);
				bnx2x_ack_sb(bp, bp->igu_dsb_id,
					     TSTORM_ID, 0, IGU_INT_NOP, 1);
				bnx2x_ack_sb(bp, bp->igu_dsb_id,
					     ATTENTION_ID, 0, IGU_INT_NOP, 1);
			} else {
				bnx2x_ack_sb(bp, bp->igu_dsb_id,
					     USTORM_ID, 0, IGU_INT_NOP, 1);
				bnx2x_ack_sb(bp, bp->igu_dsb_id,
					     ATTENTION_ID, 0, IGU_INT_NOP, 1);
			}
			bnx2x_igu_clear_sb(bp, bp->igu_dsb_id);

			/* !!! these should become driver const once
			   rf-tool supports split-68 const */
			REG_WR(bp, IGU_REG_SB_INT_BEFORE_MASK_LSB, 0);
			REG_WR(bp, IGU_REG_SB_INT_BEFORE_MASK_MSB, 0);
			REG_WR(bp, IGU_REG_SB_MASK_LSB, 0);
			REG_WR(bp, IGU_REG_SB_MASK_MSB, 0);
			REG_WR(bp, IGU_REG_PBA_STATUS_LSB, 0);
			REG_WR(bp, IGU_REG_PBA_STATUS_MSB, 0);
		}
	}

	/* Reset PCIE errors for debug */
	REG_WR(bp, 0x2114, 0xffffffff);
	REG_WR(bp, 0x2120, 0xffffffff);

	bnx2x_init_block(bp, EMAC0_BLOCK, FUNC0_STAGE + func);
	bnx2x_init_block(bp, EMAC1_BLOCK, FUNC0_STAGE + func);
	bnx2x_init_block(bp, DBU_BLOCK, FUNC0_STAGE + func);
	bnx2x_init_block(bp, DBG_BLOCK, FUNC0_STAGE + func);
	bnx2x_init_block(bp, MCP_BLOCK, FUNC0_STAGE + func);
	bnx2x_init_block(bp, DMAE_BLOCK, FUNC0_STAGE + func);

	if (CHIP_IS_E1x(bp)) {
		main_mem_size = HC_REG_MAIN_MEMORY_SIZE / 2; /*dwords*/
		main_mem_base = HC_REG_MAIN_MEMORY +
				BP_PORT(bp) * (main_mem_size * 4);
		main_mem_prty_clr = HC_REG_HC_PRTY_STS_CLR;
		main_mem_width = 8;

		/* reading the clear register also clears latched parity */
		val = REG_RD(bp, main_mem_prty_clr);
		if (val)
			DP(BNX2X_MSG_MCP, "Hmmm... Parity errors in HC "
					  "block during "
					  "function init (0x%x)!\n", val);

		/* Clear "false" parity errors in MSI-X table */
		for (i = main_mem_base;
		     i < main_mem_base + main_mem_size * 4;
		     i += main_mem_width) {
			/* read/rewrite each row via DMAE to refresh parity */
			bnx2x_read_dmae(bp, i, main_mem_width / 4);
			bnx2x_write_dmae(bp, bnx2x_sp_mapping(bp, wb_data),
					 i, main_mem_width / 4);
		}
		/* Clear HC parity attention */
		REG_RD(bp, main_mem_prty_clr);
	}

	bnx2x_phy_probe(&bp->link_params);

	return 0;
}
5843
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00005844int bnx2x_init_hw(struct bnx2x *bp, u32 load_code)
Eilon Greenstein34f80b02008-06-23 20:33:01 -07005845{
Dmitry Kravkov523224a2010-10-06 03:23:26 +00005846 int rc = 0;
Eilon Greenstein34f80b02008-06-23 20:33:01 -07005847
5848 DP(BNX2X_MSG_MCP, "function %d load_code %x\n",
Dmitry Kravkovf2e08992010-10-06 03:28:26 +00005849 BP_ABS_FUNC(bp), load_code);
Eilon Greenstein34f80b02008-06-23 20:33:01 -07005850
5851 bp->dmae_ready = 0;
5852 mutex_init(&bp->dmae_mutex);
Eilon Greenstein54016b22009-08-12 08:23:48 +00005853 rc = bnx2x_gunzip_init(bp);
5854 if (rc)
5855 return rc;
Eilon Greenstein34f80b02008-06-23 20:33:01 -07005856
5857 switch (load_code) {
5858 case FW_MSG_CODE_DRV_LOAD_COMMON:
Dmitry Kravkovf2e08992010-10-06 03:28:26 +00005859 case FW_MSG_CODE_DRV_LOAD_COMMON_CHIP:
Dmitry Kravkov523224a2010-10-06 03:23:26 +00005860 rc = bnx2x_init_hw_common(bp, load_code);
Eilon Greenstein34f80b02008-06-23 20:33:01 -07005861 if (rc)
5862 goto init_hw_err;
5863 /* no break */
5864
5865 case FW_MSG_CODE_DRV_LOAD_PORT:
Dmitry Kravkov523224a2010-10-06 03:23:26 +00005866 rc = bnx2x_init_hw_port(bp);
Eilon Greenstein34f80b02008-06-23 20:33:01 -07005867 if (rc)
5868 goto init_hw_err;
5869 /* no break */
5870
5871 case FW_MSG_CODE_DRV_LOAD_FUNCTION:
Dmitry Kravkov523224a2010-10-06 03:23:26 +00005872 rc = bnx2x_init_hw_func(bp);
Eilon Greenstein34f80b02008-06-23 20:33:01 -07005873 if (rc)
5874 goto init_hw_err;
5875 break;
5876
5877 default:
5878 BNX2X_ERR("Unknown load_code (0x%x) from MCP\n", load_code);
5879 break;
5880 }
5881
5882 if (!BP_NOMCP(bp)) {
Dmitry Kravkovf2e08992010-10-06 03:28:26 +00005883 int mb_idx = BP_FW_MB_IDX(bp);
Eilon Greenstein34f80b02008-06-23 20:33:01 -07005884
5885 bp->fw_drv_pulse_wr_seq =
Dmitry Kravkovf2e08992010-10-06 03:28:26 +00005886 (SHMEM_RD(bp, func_mb[mb_idx].drv_pulse_mb) &
Eilon Greenstein34f80b02008-06-23 20:33:01 -07005887 DRV_PULSE_SEQ_MASK);
Eilon Greenstein6fe49bb2009-08-12 08:23:17 +00005888 DP(BNX2X_MSG_MCP, "drv_pulse 0x%x\n", bp->fw_drv_pulse_wr_seq);
5889 }
Eilon Greenstein34f80b02008-06-23 20:33:01 -07005890
Eilon Greenstein34f80b02008-06-23 20:33:01 -07005891init_hw_err:
5892 bnx2x_gunzip_end(bp);
5893
5894 return rc;
5895}
5896
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00005897void bnx2x_free_mem(struct bnx2x *bp)
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02005898{
5899
5900#define BNX2X_PCI_FREE(x, y, size) \
5901 do { \
5902 if (x) { \
Dmitry Kravkov523224a2010-10-06 03:23:26 +00005903 dma_free_coherent(&bp->pdev->dev, size, (void *)x, y); \
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02005904 x = NULL; \
5905 y = 0; \
5906 } \
5907 } while (0)
5908
5909#define BNX2X_FREE(x) \
5910 do { \
5911 if (x) { \
Dmitry Kravkov523224a2010-10-06 03:23:26 +00005912 kfree((void *)x); \
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02005913 x = NULL; \
5914 } \
5915 } while (0)
5916
5917 int i;
5918
5919 /* fastpath */
Eilon Greenstein555f6c72009-02-12 08:36:11 +00005920 /* Common */
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02005921 for_each_queue(bp, i) {
Vladislav Zolotarovec6ba942010-12-13 05:44:01 +00005922#ifdef BCM_CNIC
5923 /* FCoE client uses default status block */
5924 if (IS_FCOE_IDX(i)) {
5925 union host_hc_status_block *sb =
5926 &bnx2x_fp(bp, i, status_blk);
5927 memset(sb, 0, sizeof(union host_hc_status_block));
5928 bnx2x_fp(bp, i, status_blk_mapping) = 0;
5929 } else {
5930#endif
Eilon Greenstein555f6c72009-02-12 08:36:11 +00005931 /* status blocks */
Dmitry Kravkovf2e08992010-10-06 03:28:26 +00005932 if (CHIP_IS_E2(bp))
5933 BNX2X_PCI_FREE(bnx2x_fp(bp, i, status_blk.e2_sb),
5934 bnx2x_fp(bp, i, status_blk_mapping),
5935 sizeof(struct host_hc_status_block_e2));
5936 else
5937 BNX2X_PCI_FREE(bnx2x_fp(bp, i, status_blk.e1x_sb),
5938 bnx2x_fp(bp, i, status_blk_mapping),
5939 sizeof(struct host_hc_status_block_e1x));
Vladislav Zolotarovec6ba942010-12-13 05:44:01 +00005940#ifdef BCM_CNIC
5941 }
5942#endif
Eilon Greenstein555f6c72009-02-12 08:36:11 +00005943 }
5944 /* Rx */
Vladislav Zolotarovec6ba942010-12-13 05:44:01 +00005945 for_each_rx_queue(bp, i) {
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02005946
Eilon Greenstein555f6c72009-02-12 08:36:11 +00005947 /* fastpath rx rings: rx_buf rx_desc rx_comp */
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02005948 BNX2X_FREE(bnx2x_fp(bp, i, rx_buf_ring));
5949 BNX2X_PCI_FREE(bnx2x_fp(bp, i, rx_desc_ring),
5950 bnx2x_fp(bp, i, rx_desc_mapping),
5951 sizeof(struct eth_rx_bd) * NUM_RX_BD);
5952
5953 BNX2X_PCI_FREE(bnx2x_fp(bp, i, rx_comp_ring),
5954 bnx2x_fp(bp, i, rx_comp_mapping),
5955 sizeof(struct eth_fast_path_rx_cqe) *
5956 NUM_RCQ_BD);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02005957
Vladislav Zolotarov7a9b2552008-06-23 20:34:36 -07005958 /* SGE ring */
Eilon Greenstein32626232008-08-13 15:51:07 -07005959 BNX2X_FREE(bnx2x_fp(bp, i, rx_page_ring));
Vladislav Zolotarov7a9b2552008-06-23 20:34:36 -07005960 BNX2X_PCI_FREE(bnx2x_fp(bp, i, rx_sge_ring),
5961 bnx2x_fp(bp, i, rx_sge_mapping),
5962 BCM_PAGE_SIZE * NUM_RX_SGE_PAGES);
5963 }
Eilon Greenstein555f6c72009-02-12 08:36:11 +00005964 /* Tx */
Vladislav Zolotarovec6ba942010-12-13 05:44:01 +00005965 for_each_tx_queue(bp, i) {
Eilon Greenstein555f6c72009-02-12 08:36:11 +00005966
5967 /* fastpath tx rings: tx_buf tx_desc */
5968 BNX2X_FREE(bnx2x_fp(bp, i, tx_buf_ring));
5969 BNX2X_PCI_FREE(bnx2x_fp(bp, i, tx_desc_ring),
5970 bnx2x_fp(bp, i, tx_desc_mapping),
Eilon Greensteinca003922009-08-12 22:53:28 -07005971 sizeof(union eth_tx_bd_types) * NUM_TX_BD);
Eilon Greenstein555f6c72009-02-12 08:36:11 +00005972 }
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02005973 /* end of fastpath */
5974
5975 BNX2X_PCI_FREE(bp->def_status_blk, bp->def_status_blk_mapping,
Dmitry Kravkov523224a2010-10-06 03:23:26 +00005976 sizeof(struct host_sp_status_block));
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02005977
5978 BNX2X_PCI_FREE(bp->slowpath, bp->slowpath_mapping,
Eilon Greenstein34f80b02008-06-23 20:33:01 -07005979 sizeof(struct bnx2x_slowpath));
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02005980
Dmitry Kravkov523224a2010-10-06 03:23:26 +00005981 BNX2X_PCI_FREE(bp->context.vcxt, bp->context.cxt_mapping,
5982 bp->context.size);
5983
5984 bnx2x_ilt_mem_op(bp, ILT_MEMOP_FREE);
5985
5986 BNX2X_FREE(bp->ilt->lines);
Dmitry Kravkovf85582f2010-10-06 03:34:21 +00005987
Michael Chan37b091b2009-10-10 13:46:55 +00005988#ifdef BCM_CNIC
Dmitry Kravkovf2e08992010-10-06 03:28:26 +00005989 if (CHIP_IS_E2(bp))
5990 BNX2X_PCI_FREE(bp->cnic_sb.e2_sb, bp->cnic_sb_mapping,
5991 sizeof(struct host_hc_status_block_e2));
5992 else
5993 BNX2X_PCI_FREE(bp->cnic_sb.e1x_sb, bp->cnic_sb_mapping,
5994 sizeof(struct host_hc_status_block_e1x));
Dmitry Kravkovf85582f2010-10-06 03:34:21 +00005995
Dmitry Kravkov523224a2010-10-06 03:23:26 +00005996 BNX2X_PCI_FREE(bp->t2, bp->t2_mapping, SRC_T2_SZ);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02005997#endif
Dmitry Kravkovf85582f2010-10-06 03:34:21 +00005998
Vladislav Zolotarov7a9b2552008-06-23 20:34:36 -07005999 BNX2X_PCI_FREE(bp->spq, bp->spq_mapping, BCM_PAGE_SIZE);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02006000
Dmitry Kravkov523224a2010-10-06 03:23:26 +00006001 BNX2X_PCI_FREE(bp->eq_ring, bp->eq_mapping,
6002 BCM_PAGE_SIZE * NUM_EQ_PAGES);
6003
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02006004#undef BNX2X_PCI_FREE
6005#undef BNX2X_KFREE
6006}
6007
Dmitry Kravkovf2e08992010-10-06 03:28:26 +00006008static inline void set_sb_shortcuts(struct bnx2x *bp, int index)
6009{
6010 union host_hc_status_block status_blk = bnx2x_fp(bp, index, status_blk);
6011 if (CHIP_IS_E2(bp)) {
6012 bnx2x_fp(bp, index, sb_index_values) =
6013 (__le16 *)status_blk.e2_sb->sb.index_values;
6014 bnx2x_fp(bp, index, sb_running_index) =
6015 (__le16 *)status_blk.e2_sb->sb.running_index;
6016 } else {
6017 bnx2x_fp(bp, index, sb_index_values) =
6018 (__le16 *)status_blk.e1x_sb->sb.index_values;
6019 bnx2x_fp(bp, index, sb_running_index) =
6020 (__le16 *)status_blk.e1x_sb->sb.running_index;
6021 }
6022}
6023
/*
 * bnx2x_alloc_mem - allocate all DMA-coherent and kernel memory the
 * driver instance needs: per-queue status blocks, Rx/Tx/SGE rings, CNIC
 * status block and searcher table, default status block, slowpath buffer,
 * CDU context, ILT lines, slowpath (SPQ) ring and event queue.
 *
 * Returns 0 on success, -ENOMEM on any failure; on failure everything
 * already allocated is unwound via bnx2x_free_mem() (the helper macros
 * jump to alloc_mem_err on a NULL result).
 */
int bnx2x_alloc_mem(struct bnx2x *bp)
{
/* DMA-coherent allocation; zeroed explicitly, bails out on failure */
#define BNX2X_PCI_ALLOC(x, y, size) \
	do { \
		x = dma_alloc_coherent(&bp->pdev->dev, size, y, GFP_KERNEL); \
		if (x == NULL) \
			goto alloc_mem_err; \
		memset(x, 0, size); \
	} while (0)

/* Zeroed kernel allocation; bails out on failure */
#define BNX2X_ALLOC(x, size) \
	do { \
		x = kzalloc(size, GFP_KERNEL); \
		if (x == NULL) \
			goto alloc_mem_err; \
	} while (0)

	int i;

	/* fastpath */
	/* Common: per-queue HW status blocks + back-pointer to bp */
	for_each_queue(bp, i) {
		union host_hc_status_block *sb = &bnx2x_fp(bp, i, status_blk);
		bnx2x_fp(bp, i, bp) = bp;
		/* status blocks */
#ifdef BCM_CNIC
		/* FCoE client shares the default status block - no
		 * private allocation for it */
		if (!IS_FCOE_IDX(i)) {
#endif
			if (CHIP_IS_E2(bp))
				BNX2X_PCI_ALLOC(sb->e2_sb,
					&bnx2x_fp(bp, i, status_blk_mapping),
					sizeof(struct host_hc_status_block_e2));
			else
				BNX2X_PCI_ALLOC(sb->e1x_sb,
					&bnx2x_fp(bp, i, status_blk_mapping),
					sizeof(struct host_hc_status_block_e1x));
#ifdef BCM_CNIC
		}
#endif
		set_sb_shortcuts(bp, i);
	}
	/* Rx */
	for_each_queue(bp, i) {

		/* fastpath rx rings: rx_buf rx_desc rx_comp */
		BNX2X_ALLOC(bnx2x_fp(bp, i, rx_buf_ring),
			    sizeof(struct sw_rx_bd) * NUM_RX_BD);
		BNX2X_PCI_ALLOC(bnx2x_fp(bp, i, rx_desc_ring),
				&bnx2x_fp(bp, i, rx_desc_mapping),
				sizeof(struct eth_rx_bd) * NUM_RX_BD);

		BNX2X_PCI_ALLOC(bnx2x_fp(bp, i, rx_comp_ring),
				&bnx2x_fp(bp, i, rx_comp_mapping),
				sizeof(struct eth_fast_path_rx_cqe) *
				NUM_RCQ_BD);

		/* SGE ring (TPA aggregation pages) */
		BNX2X_ALLOC(bnx2x_fp(bp, i, rx_page_ring),
			    sizeof(struct sw_rx_page) * NUM_RX_SGE);
		BNX2X_PCI_ALLOC(bnx2x_fp(bp, i, rx_sge_ring),
				&bnx2x_fp(bp, i, rx_sge_mapping),
				BCM_PAGE_SIZE * NUM_RX_SGE_PAGES);
	}
	/* Tx */
	for_each_queue(bp, i) {

		/* fastpath tx rings: tx_buf tx_desc */
		BNX2X_ALLOC(bnx2x_fp(bp, i, tx_buf_ring),
			    sizeof(struct sw_tx_bd) * NUM_TX_BD);
		BNX2X_PCI_ALLOC(bnx2x_fp(bp, i, tx_desc_ring),
				&bnx2x_fp(bp, i, tx_desc_mapping),
				sizeof(union eth_tx_bd_types) * NUM_TX_BD);
	}
	/* end of fastpath */

#ifdef BCM_CNIC
	/* CNIC status block - chip-dependent layout, like the fastpath ones */
	if (CHIP_IS_E2(bp))
		BNX2X_PCI_ALLOC(bp->cnic_sb.e2_sb, &bp->cnic_sb_mapping,
				sizeof(struct host_hc_status_block_e2));
	else
		BNX2X_PCI_ALLOC(bp->cnic_sb.e1x_sb, &bp->cnic_sb_mapping,
				sizeof(struct host_hc_status_block_e1x));

	/* allocate searcher T2 table */
	BNX2X_PCI_ALLOC(bp->t2, &bp->t2_mapping, SRC_T2_SZ);
#endif


	BNX2X_PCI_ALLOC(bp->def_status_blk, &bp->def_status_blk_mapping,
			sizeof(struct host_sp_status_block));

	BNX2X_PCI_ALLOC(bp->slowpath, &bp->slowpath_mapping,
			sizeof(struct bnx2x_slowpath));

	/* CDU context area - one entry per L2 connection */
	bp->context.size = sizeof(union cdu_context) * bp->l2_cid_count;

	BNX2X_PCI_ALLOC(bp->context.vcxt, &bp->context.cxt_mapping,
			bp->context.size);

	BNX2X_ALLOC(bp->ilt->lines, sizeof(struct ilt_line) * ILT_MAX_LINES);

	if (bnx2x_ilt_mem_op(bp, ILT_MEMOP_ALLOC))
		goto alloc_mem_err;

	/* Slow path ring */
	BNX2X_PCI_ALLOC(bp->spq, &bp->spq_mapping, BCM_PAGE_SIZE);

	/* EQ */
	BNX2X_PCI_ALLOC(bp->eq_ring, &bp->eq_mapping,
			BCM_PAGE_SIZE * NUM_EQ_PAGES);
	return 0;

alloc_mem_err:
	/* bnx2x_free_mem() tolerates partially-allocated state */
	bnx2x_free_mem(bp);
	return -ENOMEM;

#undef BNX2X_PCI_ALLOC
#undef BNX2X_ALLOC
}
6143
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02006144/*
6145 * Init service functions
6146 */
stephen hemminger8d962862010-10-21 07:50:56 +00006147static int bnx2x_wait_ramrod(struct bnx2x *bp, int state, int idx,
6148 int *state_p, int flags);
6149
Dmitry Kravkov523224a2010-10-06 03:23:26 +00006150int bnx2x_func_start(struct bnx2x *bp)
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02006151{
Dmitry Kravkov523224a2010-10-06 03:23:26 +00006152 bnx2x_sp_post(bp, RAMROD_CMD_ID_COMMON_FUNCTION_START, 0, 0, 0, 1);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02006153
Dmitry Kravkov523224a2010-10-06 03:23:26 +00006154 /* Wait for completion */
6155 return bnx2x_wait_ramrod(bp, BNX2X_STATE_FUNC_STARTED, 0, &(bp->state),
6156 WAIT_RAMROD_COMMON);
6157}
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02006158
stephen hemminger8d962862010-10-21 07:50:56 +00006159static int bnx2x_func_stop(struct bnx2x *bp)
Dmitry Kravkov523224a2010-10-06 03:23:26 +00006160{
6161 bnx2x_sp_post(bp, RAMROD_CMD_ID_COMMON_FUNCTION_STOP, 0, 0, 0, 1);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02006162
Dmitry Kravkov523224a2010-10-06 03:23:26 +00006163 /* Wait for completion */
6164 return bnx2x_wait_ramrod(bp, BNX2X_STATE_CLOSING_WAIT4_UNLOAD,
6165 0, &(bp->state), WAIT_RAMROD_COMMON);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02006166}
6167
/**
 * Sets a MAC in a CAM for a few L2 Clients for E1x chips
 *
 * This builds a single-entry mac_configuration_cmd in the slowpath
 * buffer, posts a SET_MAC ramrod and blocks until the completion
 * clears bp->set_mac_pending.
 *
 * @param bp driver descriptor
 * @param set set or clear an entry (1 or 0)
 * @param mac pointer to a buffer containing a MAC
 * @param cl_bit_vec bit vector of clients to register a MAC for
 * @param cam_offset offset in a CAM to use
 * @param is_bcast is the set MAC a broadcast address (for E1 only)
 */
static void bnx2x_set_mac_addr_gen(struct bnx2x *bp, int set, const u8 *mac,
				   u32 cl_bit_vec, u8 cam_offset,
				   u8 is_bcast)
{
	struct mac_configuration_cmd *config =
		(struct mac_configuration_cmd *)bnx2x_sp(bp, mac_config);
	int ramrod_flags = WAIT_RAMROD_COMMON;

	/* Mark a pending operation before posting; completion handler
	 * clears the flag - wmb orders the flag vs. the command setup */
	bp->set_mac_pending = 1;
	smp_wmb();

	/* single CAM entry starting at cam_offset */
	config->hdr.length = 1;
	config->hdr.offset = cam_offset;
	config->hdr.client_id = 0xff;
	config->hdr.reserved1 = 0;

	/* primary MAC - bytes are swabbed into 16-bit FW fields */
	config->config_table[0].msb_mac_addr =
					swab16(*(u16 *)&mac[0]);
	config->config_table[0].middle_mac_addr =
					swab16(*(u16 *)&mac[2]);
	config->config_table[0].lsb_mac_addr =
					swab16(*(u16 *)&mac[4]);
	config->config_table[0].clients_bit_vector =
					cpu_to_le32(cl_bit_vec);
	config->config_table[0].vlan_id = 0;
	config->config_table[0].pf_id = BP_FUNC(bp);
	if (set)
		SET_FLAG(config->config_table[0].flags,
			MAC_CONFIGURATION_ENTRY_ACTION_TYPE,
			T_ETH_MAC_COMMAND_SET);
	else
		SET_FLAG(config->config_table[0].flags,
			MAC_CONFIGURATION_ENTRY_ACTION_TYPE,
			T_ETH_MAC_COMMAND_INVALIDATE);

	if (is_bcast)
		SET_FLAG(config->config_table[0].flags,
			MAC_CONFIGURATION_ENTRY_BROADCAST, 1);

	DP(NETIF_MSG_IFUP, "%s MAC (%04x:%04x:%04x) PF_ID %d CLID mask %d\n",
	   (set ? "setting" : "clearing"),
	   config->config_table[0].msb_mac_addr,
	   config->config_table[0].middle_mac_addr,
	   config->config_table[0].lsb_mac_addr, BP_FUNC(bp), cl_bit_vec);

	bnx2x_sp_post(bp, RAMROD_CMD_ID_COMMON_SET_MAC, 0,
		      U64_HI(bnx2x_sp_mapping(bp, mac_config)),
		      U64_LO(bnx2x_sp_mapping(bp, mac_config)), 1);

	/* Wait for a completion */
	bnx2x_wait_ramrod(bp, 0, 0, &bp->set_mac_pending, ramrod_flags);
}
6231
/*
 * bnx2x_wait_ramrod - wait (up to ~5 seconds) for *state_p to reach
 * the given state, as updated by the ramrod completion path.
 *
 * @state:   value *state_p must reach
 * @idx:     fastpath index carrying the reply (0 = default queue)
 * @state_p: pointer polled for the state transition
 * @flags:   WAIT_RAMROD_POLL to actively drain completions (EQ for
 *           common commands, Rx CQ otherwise); WAIT_RAMROD_COMMON to
 *           process the event queue instead of the Rx path
 *
 * Returns 0 on success, -EIO if the driver paniced, -EBUSY on timeout.
 * May sleep (msleep between polls).
 */
static int bnx2x_wait_ramrod(struct bnx2x *bp, int state, int idx,
			     int *state_p, int flags)
{
	/* can take a while if any port is running */
	int cnt = 5000;
	u8 poll = flags & WAIT_RAMROD_POLL;
	u8 common = flags & WAIT_RAMROD_COMMON;

	DP(NETIF_MSG_IFUP, "%s for state to become %x on IDX [%d]\n",
	   poll ? "polling" : "waiting", state, idx);

	might_sleep();
	while (cnt--) {
		if (poll) {
			if (common)
				bnx2x_eq_int(bp);
			else {
				bnx2x_rx_int(bp->fp, 10);
				/* if index is different from 0
				 * the reply for some commands will
				 * be on the non default queue
				 */
				if (idx)
					bnx2x_rx_int(&bp->fp[idx], 10);
			}
		}

		mb(); /* state is changed by bnx2x_sp_event() */
		if (*state_p == state) {
#ifdef BNX2X_STOP_ON_ERROR
			DP(NETIF_MSG_IFUP, "exit  (cnt %d)\n", 5000 - cnt);
#endif
			return 0;
		}

		msleep(1);

		/* abort early if the chip was declared dead meanwhile */
		if (bp->panic)
			return -EIO;
	}

	/* timeout! */
	BNX2X_ERR("timeout %s for state %x on IDX [%d]\n",
		  poll ? "polling" : "waiting", state, idx);
#ifdef BNX2X_STOP_ON_ERROR
	bnx2x_panic();
#endif

	return -EBUSY;
}
6282
stephen hemminger8d962862010-10-21 07:50:56 +00006283static u8 bnx2x_e1h_cam_offset(struct bnx2x *bp, u8 rel_offset)
Michael Chane665bfd2009-10-10 13:46:54 +00006284{
Dmitry Kravkovf2e08992010-10-06 03:28:26 +00006285 if (CHIP_IS_E1H(bp))
6286 return E1H_FUNC_MAX * rel_offset + BP_FUNC(bp);
6287 else if (CHIP_MODE_IS_4_PORT(bp))
6288 return BP_FUNC(bp) * 32 + rel_offset;
6289 else
6290 return BP_VN(bp) * 32 + rel_offset;
Michael Chane665bfd2009-10-10 13:46:54 +00006291}
6292
/**
 * LLH CAM line allocations: currently only iSCSI and ETH macs are
 * relevant. In addition, current implementation is tuned for a
 * single ETH MAC.
 *
 * When multiple unicast ETH MACs PF configuration in switch
 * independent mode is required (NetQ, multiple netdev MACs,
 * etc.), consider better utilisation of 16 per function MAC
 * entries in the LLH memory.
 *
 * LLH_CAM_MAX_PF_LINE is the number of LLH CAM lines available to a
 * PF (taken from the NIG register-file size constant) and is used as
 * the upper bound in bnx2x_set_mac_in_nig().
 */
enum {
	LLH_CAM_ISCSI_ETH_LINE = 0,
	LLH_CAM_ETH_LINE,
	LLH_CAM_MAX_PF_LINE = NIG_REG_LLH1_FUNC_MEM_SIZE
};
6308
6309static void bnx2x_set_mac_in_nig(struct bnx2x *bp,
6310 int set,
6311 unsigned char *dev_addr,
6312 int index)
6313{
6314 u32 wb_data[2];
6315 u32 mem_offset, ena_offset, mem_index;
6316 /**
6317 * indexes mapping:
6318 * 0..7 - goes to MEM
6319 * 8..15 - goes to MEM2
6320 */
6321
6322 if (!IS_MF_SI(bp) || index > LLH_CAM_MAX_PF_LINE)
6323 return;
6324
6325 /* calculate memory start offset according to the mapping
6326 * and index in the memory */
6327 if (index < NIG_LLH_FUNC_MEM_MAX_OFFSET) {
6328 mem_offset = BP_PORT(bp) ? NIG_REG_LLH1_FUNC_MEM :
6329 NIG_REG_LLH0_FUNC_MEM;
6330 ena_offset = BP_PORT(bp) ? NIG_REG_LLH1_FUNC_MEM_ENABLE :
6331 NIG_REG_LLH0_FUNC_MEM_ENABLE;
6332 mem_index = index;
6333 } else {
6334 mem_offset = BP_PORT(bp) ? NIG_REG_P1_LLH_FUNC_MEM2 :
6335 NIG_REG_P0_LLH_FUNC_MEM2;
6336 ena_offset = BP_PORT(bp) ? NIG_REG_P1_LLH_FUNC_MEM2_ENABLE :
6337 NIG_REG_P0_LLH_FUNC_MEM2_ENABLE;
6338 mem_index = index - NIG_LLH_FUNC_MEM_MAX_OFFSET;
6339 }
6340
6341 if (set) {
6342 /* LLH_FUNC_MEM is a u64 WB register */
6343 mem_offset += 8*mem_index;
6344
6345 wb_data[0] = ((dev_addr[2] << 24) | (dev_addr[3] << 16) |
6346 (dev_addr[4] << 8) | dev_addr[5]);
6347 wb_data[1] = ((dev_addr[0] << 8) | dev_addr[1]);
6348
6349 REG_WR_DMAE(bp, mem_offset, wb_data, 2);
6350 }
6351
6352 /* enable/disable the entry */
6353 REG_WR(bp, ena_offset + 4*mem_index, set);
6354
6355}
6356
Dmitry Kravkov523224a2010-10-06 03:23:26 +00006357void bnx2x_set_eth_mac(struct bnx2x *bp, int set)
Michael Chane665bfd2009-10-10 13:46:54 +00006358{
Dmitry Kravkov523224a2010-10-06 03:23:26 +00006359 u8 cam_offset = (CHIP_IS_E1(bp) ? (BP_PORT(bp) ? 32 : 0) :
6360 bnx2x_e1h_cam_offset(bp, CAM_ETH_LINE));
6361
6362 /* networking MAC */
6363 bnx2x_set_mac_addr_gen(bp, set, bp->dev->dev_addr,
6364 (1 << bp->fp->cl_id), cam_offset , 0);
6365
Dmitry Kravkov0793f83f2010-12-01 12:39:28 -08006366 bnx2x_set_mac_in_nig(bp, set, bp->dev->dev_addr, LLH_CAM_ETH_LINE);
6367
Dmitry Kravkov523224a2010-10-06 03:23:26 +00006368 if (CHIP_IS_E1(bp)) {
6369 /* broadcast MAC */
Joe Perches215faf92010-12-21 02:16:10 -08006370 static const u8 bcast[ETH_ALEN] = {
6371 0xff, 0xff, 0xff, 0xff, 0xff, 0xff
6372 };
Dmitry Kravkov523224a2010-10-06 03:23:26 +00006373 bnx2x_set_mac_addr_gen(bp, set, bcast, 0, cam_offset + 1, 1);
6374 }
6375}
6376static void bnx2x_set_e1_mc_list(struct bnx2x *bp, u8 offset)
6377{
6378 int i = 0, old;
6379 struct net_device *dev = bp->dev;
6380 struct netdev_hw_addr *ha;
6381 struct mac_configuration_cmd *config_cmd = bnx2x_sp(bp, mcast_config);
6382 dma_addr_t config_cmd_map = bnx2x_sp_mapping(bp, mcast_config);
6383
6384 netdev_for_each_mc_addr(ha, dev) {
6385 /* copy mac */
6386 config_cmd->config_table[i].msb_mac_addr =
6387 swab16(*(u16 *)&bnx2x_mc_addr(ha)[0]);
6388 config_cmd->config_table[i].middle_mac_addr =
6389 swab16(*(u16 *)&bnx2x_mc_addr(ha)[2]);
6390 config_cmd->config_table[i].lsb_mac_addr =
6391 swab16(*(u16 *)&bnx2x_mc_addr(ha)[4]);
6392
6393 config_cmd->config_table[i].vlan_id = 0;
6394 config_cmd->config_table[i].pf_id = BP_FUNC(bp);
6395 config_cmd->config_table[i].clients_bit_vector =
6396 cpu_to_le32(1 << BP_L_ID(bp));
6397
6398 SET_FLAG(config_cmd->config_table[i].flags,
6399 MAC_CONFIGURATION_ENTRY_ACTION_TYPE,
6400 T_ETH_MAC_COMMAND_SET);
6401
6402 DP(NETIF_MSG_IFUP,
6403 "setting MCAST[%d] (%04x:%04x:%04x)\n", i,
6404 config_cmd->config_table[i].msb_mac_addr,
6405 config_cmd->config_table[i].middle_mac_addr,
6406 config_cmd->config_table[i].lsb_mac_addr);
6407 i++;
6408 }
6409 old = config_cmd->hdr.length;
6410 if (old > i) {
6411 for (; i < old; i++) {
6412 if (CAM_IS_INVALID(config_cmd->
6413 config_table[i])) {
6414 /* already invalidated */
6415 break;
6416 }
6417 /* invalidate */
6418 SET_FLAG(config_cmd->config_table[i].flags,
6419 MAC_CONFIGURATION_ENTRY_ACTION_TYPE,
6420 T_ETH_MAC_COMMAND_INVALIDATE);
6421 }
6422 }
6423
6424 config_cmd->hdr.length = i;
6425 config_cmd->hdr.offset = offset;
6426 config_cmd->hdr.client_id = 0xff;
6427 config_cmd->hdr.reserved1 = 0;
6428
6429 bp->set_mac_pending = 1;
Michael Chane665bfd2009-10-10 13:46:54 +00006430 smp_wmb();
6431
Dmitry Kravkov523224a2010-10-06 03:23:26 +00006432 bnx2x_sp_post(bp, RAMROD_CMD_ID_COMMON_SET_MAC, 0,
6433 U64_HI(config_cmd_map), U64_LO(config_cmd_map), 1);
6434}
/*
 * bnx2x_invlidate_e1_mc_list - invalidate every multicast CAM entry
 * previously programmed by bnx2x_set_e1_mc_list() and wait for the
 * SET_MAC ramrod completion.
 *
 * (The misspelled name - "invlidate" - is kept as-is: it is the symbol
 * callers use.)
 */
static void bnx2x_invlidate_e1_mc_list(struct bnx2x *bp)
{
	int i;
	struct mac_configuration_cmd *config_cmd = bnx2x_sp(bp, mcast_config);
	dma_addr_t config_cmd_map = bnx2x_sp_mapping(bp, mcast_config);
	int ramrod_flags = WAIT_RAMROD_COMMON;

	/* mark pending before posting; completion handler clears it */
	bp->set_mac_pending = 1;
	smp_wmb();

	/* flip every currently-programmed entry to INVALIDATE */
	for (i = 0; i < config_cmd->hdr.length; i++)
		SET_FLAG(config_cmd->config_table[i].flags,
			MAC_CONFIGURATION_ENTRY_ACTION_TYPE,
			T_ETH_MAC_COMMAND_INVALIDATE);

	bnx2x_sp_post(bp, RAMROD_CMD_ID_COMMON_SET_MAC, 0,
		      U64_HI(config_cmd_map), U64_LO(config_cmd_map), 1);

	/* Wait for a completion */
	bnx2x_wait_ramrod(bp, 0, 0, &bp->set_mac_pending,
			  ramrod_flags);

}
6458
Michael Chan993ac7b2009-10-10 13:46:56 +00006459#ifdef BCM_CNIC
/**
 * Set iSCSI MAC(s) at the next entries in the CAM after the ETH
 * MAC(s). This function will wait until the ramrod completion
 * returns.
 *
 * @param bp driver handle
 * @param set set or clear the CAM entry
 *
 * @return 0 if success, -ENODEV if ramrod doesn't return.
 */
static int bnx2x_set_iscsi_eth_mac_addr(struct bnx2x *bp, int set)
{
	/* E1: fixed per-port range, two lines past the ETH MACs;
	 * otherwise derive the line from the per-function layout */
	u8 cam_offset = (CHIP_IS_E1(bp) ? ((BP_PORT(bp) ? 32 : 0) + 2) :
			 bnx2x_e1h_cam_offset(bp, CAM_ISCSI_ETH_LINE));
	u32 iscsi_l2_cl_id = BNX2X_ISCSI_ETH_CL_ID +
		BP_E1HVN(bp) * NONE_ETH_CONTEXT_USE;
	u32 cl_bit_vec = (1 << iscsi_l2_cl_id);

	/* Send a SET_MAC ramrod */
	bnx2x_set_mac_addr_gen(bp, set, bp->iscsi_mac, cl_bit_vec,
			       cam_offset, 0);

	/* mirror into the NIG LLH for switch-independent MF mode */
	bnx2x_set_mac_in_nig(bp, set, bp->iscsi_mac, LLH_CAM_ISCSI_ETH_LINE);

	return 0;
}
6486
/**
 * Set FCoE L2 MAC(s) at the next entries in the CAM after the
 * ETH MAC(s). This function will wait until the ramrod
 * completion returns.
 *
 * @param bp driver handle
 * @param set set or clear the CAM entry
 *
 * @return 0 if success, -ENODEV if ramrod doesn't return.
 */
int bnx2x_set_fip_eth_mac_addr(struct bnx2x *bp, int set)
{
	u32 cl_bit_vec = (1 << bnx2x_fcoe(bp, cl_id));
	/**
	 * CAM allocation for E1H
	 * eth unicasts: by func number
	 * iscsi: by func number
	 * fip unicast: by func number
	 * fip multicast: by func number
	 */
	bnx2x_set_mac_addr_gen(bp, set, bp->fip_mac,
		cl_bit_vec, bnx2x_e1h_cam_offset(bp, CAM_FIP_ETH_LINE), 0);

	return 0;
}
6512
/*
 * bnx2x_set_all_enode_macs - program (or clear) the FIP "All-ENode-MACs"
 * multicast address for the FCoE L2 client. Waits for the SET_MAC
 * ramrod completion (inside bnx2x_set_mac_addr_gen()).
 *
 * Returns 0.
 */
int bnx2x_set_all_enode_macs(struct bnx2x *bp, int set)
{
	u32 cl_bit_vec = (1 << bnx2x_fcoe(bp, cl_id));

	/**
	 * CAM allocation for E1H
	 * eth unicasts: by func number
	 * iscsi: by func number
	 * fip unicast: by func number
	 * fip multicast: by func number
	 */
	bnx2x_set_mac_addr_gen(bp, set, ALL_ENODE_MACS, cl_bit_vec,
			       bnx2x_e1h_cam_offset(bp, CAM_FIP_MCAST_LINE), 0);

	return 0;
}
6529#endif
6530
/*
 * bnx2x_fill_cl_init_data - translate the driver's client-init parameters
 * into the firmware's client_init_ramrod_data layout.
 *
 * @params:   driver-side queue/ramrod parameters (Rx, Tx, pause)
 * @activate: value for the FW activate_flg (1 = open the client)
 * @data:     FW structure to fill; cleared first, so any field not set
 *            here is 0
 *
 * Pure data translation - no hardware access; multi-byte values are
 * converted to FW (little-endian) byte order.
 */
static void bnx2x_fill_cl_init_data(struct bnx2x *bp,
				    struct bnx2x_client_init_params *params,
				    u8 activate,
				    struct client_init_ramrod_data *data)
{
	/* Clear the buffer */
	memset(data, 0, sizeof(*data));

	/* general */
	data->general.client_id = params->rxq_params.cl_id;
	data->general.statistics_counter_id = params->rxq_params.stat_id;
	data->general.statistics_en_flg =
		(params->rxq_params.flags & QUEUE_FLG_STATS) ? 1 : 0;
	data->general.is_fcoe_flg =
		(params->ramrod_params.flags & CLIENT_IS_FCOE) ? 1 : 0;
	data->general.activate_flg = activate;
	data->general.sp_client_id = params->rxq_params.spcl_id;

	/* Rx data */
	data->rx.tpa_en_flg =
		(params->rxq_params.flags & QUEUE_FLG_TPA) ? 1 : 0;
	data->rx.vmqueue_mode_en_flg = 0;
	data->rx.cache_line_alignment_log_size =
		params->rxq_params.cache_line_log;
	data->rx.enable_dynamic_hc =
		(params->rxq_params.flags & QUEUE_FLG_DHC) ? 1 : 0;
	data->rx.max_sges_for_packet = params->rxq_params.max_sges_pkt;
	data->rx.client_qzone_id = params->rxq_params.cl_qzone_id;
	data->rx.max_agg_size = params->rxq_params.tpa_agg_sz;

	/* We don't set drop flags */
	data->rx.drop_ip_cs_err_flg = 0;
	data->rx.drop_tcp_cs_err_flg = 0;
	data->rx.drop_ttl0_flg = 0;
	data->rx.drop_udp_cs_err_flg = 0;

	data->rx.inner_vlan_removal_enable_flg =
		(params->rxq_params.flags & QUEUE_FLG_VLAN) ? 1 : 0;
	data->rx.outer_vlan_removal_enable_flg =
		(params->rxq_params.flags & QUEUE_FLG_OV) ? 1 : 0;
	data->rx.status_block_id = params->rxq_params.fw_sb_id;
	data->rx.rx_sb_index_number = params->rxq_params.sb_cq_index;
	data->rx.bd_buff_size = cpu_to_le16(params->rxq_params.buf_sz);
	data->rx.sge_buff_size = cpu_to_le16(params->rxq_params.sge_buf_sz);
	data->rx.mtu = cpu_to_le16(params->rxq_params.mtu);
	/* DMA addresses of the Rx BD, SGE and CQE rings, split hi/lo */
	data->rx.bd_page_base.lo =
		cpu_to_le32(U64_LO(params->rxq_params.dscr_map));
	data->rx.bd_page_base.hi =
		cpu_to_le32(U64_HI(params->rxq_params.dscr_map));
	data->rx.sge_page_base.lo =
		cpu_to_le32(U64_LO(params->rxq_params.sge_map));
	data->rx.sge_page_base.hi =
		cpu_to_le32(U64_HI(params->rxq_params.sge_map));
	data->rx.cqe_page_base.lo =
		cpu_to_le32(U64_LO(params->rxq_params.rcq_map));
	data->rx.cqe_page_base.hi =
		cpu_to_le32(U64_HI(params->rxq_params.rcq_map));
	data->rx.is_leading_rss =
		(params->ramrod_params.flags & CLIENT_IS_LEADING_RSS) ? 1 : 0;
	data->rx.is_approx_mcast = data->rx.is_leading_rss;

	/* Tx data */
	data->tx.enforce_security_flg = 0; /* VF specific */
	data->tx.tx_status_block_id = params->txq_params.fw_sb_id;
	data->tx.tx_sb_index_number = params->txq_params.sb_cq_index;
	data->tx.mtu = 0; /* VF specific */
	data->tx.tx_bd_page_base.lo =
		cpu_to_le32(U64_LO(params->txq_params.dscr_map));
	data->tx.tx_bd_page_base.hi =
		cpu_to_le32(U64_HI(params->txq_params.dscr_map));

	/* flow control data */
	data->fc.cqe_pause_thr_low = cpu_to_le16(params->pause.rcq_th_lo);
	data->fc.cqe_pause_thr_high = cpu_to_le16(params->pause.rcq_th_hi);
	data->fc.bd_pause_thr_low = cpu_to_le16(params->pause.bd_th_lo);
	data->fc.bd_pause_thr_high = cpu_to_le16(params->pause.bd_th_hi);
	data->fc.sge_pause_thr_low = cpu_to_le16(params->pause.sge_th_lo);
	data->fc.sge_pause_thr_high = cpu_to_le16(params->pause.sge_th_hi);
	data->fc.rx_cos_mask = cpu_to_le16(params->pause.pri_map);

	data->fc.safc_group_num = params->txq_params.cos;
	data->fc.safc_group_en_flg =
		(params->txq_params.flags & QUEUE_FLG_COS) ? 1 : 0;
	data->fc.traffic_type =
		(params->ramrod_params.flags & CLIENT_IS_FCOE) ?
		LLFC_TRAFFIC_TYPE_FCOE : LLFC_TRAFFIC_TYPE_NW;
}
6618
/*
 * bnx2x_set_ctx_validation - stamp the CDU validation values into an ETH
 * connection context so the chip's Context Distribution Unit accepts it
 * for the given connection id.
 */
static inline void bnx2x_set_ctx_validation(struct eth_context *cxt, u32 cid)
{
	/* ustorm cxt validation */
	cxt->ustorm_ag_context.cdu_usage =
		CDU_RSRVD_VALUE_TYPE_A(cid, CDU_REGION_NUMBER_UCM_AG,
				       ETH_CONNECTION_TYPE);
	/* xcontext validation */
	cxt->xstorm_ag_context.cdu_reserved =
		CDU_RSRVD_VALUE_TYPE_A(cid, CDU_REGION_NUMBER_XCM_AG,
				       ETH_CONNECTION_TYPE);
}
6630
/* Bring up a FW client (queue pair):
 *  - program host-coalescing for the client's Tx and Rx status blocks,
 *  - mark the fastpath state OPENING,
 *  - stamp context validation and zero the client statistics,
 *  - fill the client-init ramrod data and post a CLIENT_SETUP ramrod,
 *  - synchronously wait for the ramrod completion.
 *
 * Returns 0 on success or the bnx2x_wait_ramrod() error on timeout.
 */
static int bnx2x_setup_fw_client(struct bnx2x *bp,
				 struct bnx2x_client_init_params *params,
				 u8 activate,
				 struct client_init_ramrod_data *data,
				 dma_addr_t data_mapping)
{
	u16 hc_usec;
	int ramrod = RAMROD_CMD_ID_ETH_CLIENT_SETUP;
	int ramrod_flags = 0, rc;

	/* HC and context validation values */
	/* hc_rate is in interrupts/sec; convert to usec between updates */
	hc_usec = params->txq_params.hc_rate ?
		1000000 / params->txq_params.hc_rate : 0;
	bnx2x_update_coalesce_sb_index(bp,
			params->txq_params.fw_sb_id,
			params->txq_params.sb_cq_index,
			!(params->txq_params.flags & QUEUE_FLG_HC),
			hc_usec);

	*(params->ramrod_params.pstate) = BNX2X_FP_STATE_OPENING;

	hc_usec = params->rxq_params.hc_rate ?
		1000000 / params->rxq_params.hc_rate : 0;
	bnx2x_update_coalesce_sb_index(bp,
			params->rxq_params.fw_sb_id,
			params->rxq_params.sb_cq_index,
			!(params->rxq_params.flags & QUEUE_FLG_HC),
			hc_usec);

	bnx2x_set_ctx_validation(params->rxq_params.cxt,
				 params->rxq_params.cid);

	/* zero stats */
	if (params->txq_params.flags & QUEUE_FLG_STATS)
		storm_memset_xstats_zero(bp, BP_PORT(bp),
					 params->txq_params.stat_id);

	if (params->rxq_params.flags & QUEUE_FLG_STATS) {
		storm_memset_ustats_zero(bp, BP_PORT(bp),
					 params->rxq_params.stat_id);
		storm_memset_tstats_zero(bp, BP_PORT(bp),
					 params->rxq_params.stat_id);
	}

	/* Fill the ramrod data */
	bnx2x_fill_cl_init_data(bp, params, activate, data);

	/* SETUP ramrod.
	 *
	 * bnx2x_sp_post() takes a spin_lock thus no other explicit memory
	 * barrier except from mmiowb() is needed to impose a
	 * proper ordering of memory operations.
	 */
	mmiowb();


	bnx2x_sp_post(bp, ramrod, params->ramrod_params.cid,
		      U64_HI(data_mapping), U64_LO(data_mapping), 0);

	/* Wait for completion */
	rc = bnx2x_wait_ramrod(bp, params->ramrod_params.state,
			       params->ramrod_params.index,
			       params->ramrod_params.pstate,
			       ramrod_flags);
	return rc;
}
6697
/**
 * Configure interrupt mode according to current configuration.
 * In case of MSI-X it will also try to enable MSI-X.
 *
 * @param bp	driver handle
 *
 * @return int	0, or the bnx2x_enable_msix() error (which is also
 *		handled locally by falling back to MSI/INTx)
 */
static int __devinit bnx2x_set_int_mode(struct bnx2x *bp)
{
	int rc = 0;

	switch (bp->int_mode) {
	case INT_MODE_MSI:
		bnx2x_enable_msi(bp);
		/* falling through... */
	case INT_MODE_INTx:
		/* one ETH queue plus the non-ETH (e.g. CNIC) contexts */
		bp->num_queues = 1 + NONE_ETH_CONTEXT_USE;
		DP(NETIF_MSG_IFUP, "set number of queues to 1\n");
		break;
	default:
		/* Set number of queues according to bp->multi_mode value */
		bnx2x_set_num_queues(bp);

		DP(NETIF_MSG_IFUP, "set number of queues to %d\n",
		   bp->num_queues);

		/* if we can't use MSI-X we only need one fp,
		 * so try to enable MSI-X with the requested number of fp's
		 * and fallback to MSI or legacy INTx with one fp
		 */
		rc = bnx2x_enable_msix(bp);
		if (rc) {
			/* failed to enable MSI-X */
			if (bp->multi_mode)
				DP(NETIF_MSG_IFUP,
					  "Multi requested but failed to "
					  "enable MSI-X (%d), "
					  "set number of queues to %d\n",
					  bp->num_queues,
					  1 + NONE_ETH_CONTEXT_USE);
			bp->num_queues = 1 + NONE_ETH_CONTEXT_USE;

			/* fall back to MSI unless explicitly disabled */
			if (!(bp->flags & DISABLE_MSI_FLAG))
				bnx2x_enable_msi(bp);
		}

		break;
	}

	return rc;
}
6750
/* must be called prior to any HW initializations */
static inline u16 bnx2x_cid_ilt_lines(struct bnx2x *bp)
{
	/* number of ILT lines consumed by the L2 connection IDs */
	return L2_ILT_LINES(bp);
}
6756
/* Lay out this function's ILT (internal lookup table): assign
 * consecutive line ranges to the CDU, QM, SRC and TM clients starting
 * at the function's ILT base.  SRC and TM are populated only when CNIC
 * support is compiled in; otherwise they are flagged to be skipped.
 */
void bnx2x_ilt_set_info(struct bnx2x *bp)
{
	struct ilt_client_info *ilt_client;
	struct bnx2x_ilt *ilt = BP_ILT(bp);
	u16 line = 0;	/* next free line, relative to ilt->start_line */

	ilt->start_line = FUNC_ILT_BASE(BP_FUNC(bp));
	DP(BNX2X_MSG_SP, "ilt starts at line %d\n", ilt->start_line);

	/* CDU: context memory for the L2 (and, if built in, CNIC) cids */
	ilt_client = &ilt->clients[ILT_CLIENT_CDU];
	ilt_client->client_num = ILT_CLIENT_CDU;
	ilt_client->page_size = CDU_ILT_PAGE_SZ;
	ilt_client->flags = ILT_CLIENT_SKIP_MEM;
	ilt_client->start = line;
	line += L2_ILT_LINES(bp);
#ifdef BCM_CNIC
	line += CNIC_ILT_LINES;
#endif
	ilt_client->end = line - 1;

	DP(BNX2X_MSG_SP, "ilt client[CDU]: start %d, end %d, psz 0x%x, "
					 "flags 0x%x, hw psz %d\n",
	   ilt_client->start,
	   ilt_client->end,
	   ilt_client->page_size,
	   ilt_client->flags,
	   ilog2(ilt_client->page_size >> 12));

	/* QM */
	if (QM_INIT(bp->qm_cid_count)) {
		ilt_client = &ilt->clients[ILT_CLIENT_QM];
		ilt_client->client_num = ILT_CLIENT_QM;
		ilt_client->page_size = QM_ILT_PAGE_SZ;
		ilt_client->flags = 0;
		ilt_client->start = line;

		/* 4 bytes for each cid */
		line += DIV_ROUND_UP(bp->qm_cid_count * QM_QUEUES_PER_FUNC * 4,
							 QM_ILT_PAGE_SZ);

		ilt_client->end = line - 1;

		DP(BNX2X_MSG_SP, "ilt client[QM]: start %d, end %d, psz 0x%x, "
						 "flags 0x%x, hw psz %d\n",
		   ilt_client->start,
		   ilt_client->end,
		   ilt_client->page_size,
		   ilt_client->flags,
		   ilog2(ilt_client->page_size >> 12));

	}
	/* SRC */
	ilt_client = &ilt->clients[ILT_CLIENT_SRC];
#ifdef BCM_CNIC
	ilt_client->client_num = ILT_CLIENT_SRC;
	ilt_client->page_size = SRC_ILT_PAGE_SZ;
	ilt_client->flags = 0;
	ilt_client->start = line;
	line += SRC_ILT_LINES;
	ilt_client->end = line - 1;

	DP(BNX2X_MSG_SP, "ilt client[SRC]: start %d, end %d, psz 0x%x, "
					 "flags 0x%x, hw psz %d\n",
	   ilt_client->start,
	   ilt_client->end,
	   ilt_client->page_size,
	   ilt_client->flags,
	   ilog2(ilt_client->page_size >> 12));

#else
	/* without CNIC the SRC client gets no lines at all */
	ilt_client->flags = (ILT_CLIENT_SKIP_INIT | ILT_CLIENT_SKIP_MEM);
#endif

	/* TM */
	ilt_client = &ilt->clients[ILT_CLIENT_TM];
#ifdef BCM_CNIC
	ilt_client->client_num = ILT_CLIENT_TM;
	ilt_client->page_size = TM_ILT_PAGE_SZ;
	ilt_client->flags = 0;
	ilt_client->start = line;
	line += TM_ILT_LINES;
	ilt_client->end = line - 1;

	DP(BNX2X_MSG_SP, "ilt client[TM]: start %d, end %d, psz 0x%x, "
					 "flags 0x%x, hw psz %d\n",
	   ilt_client->start,
	   ilt_client->end,
	   ilt_client->page_size,
	   ilt_client->flags,
	   ilog2(ilt_client->page_size >> 12));

#else
	/* without CNIC the TM client gets no lines at all */
	ilt_client->flags = (ILT_CLIENT_SKIP_INIT | ILT_CLIENT_SKIP_MEM);
#endif
}
Dmitry Kravkovf85582f2010-10-06 03:34:21 +00006853
/* Open an ETH client (queue) via the CLIENT_SETUP ramrod.
 *
 * @bp:		driver handle
 * @fp:		fastpath (queue) to open
 * @is_leading:	non-zero if this is the leading RSS client
 *
 * Builds the client-init parameter set from the fastpath and posts it
 * through bnx2x_setup_fw_client().  Returns its result (0 on success).
 */
int bnx2x_setup_client(struct bnx2x *bp, struct bnx2x_fastpath *fp,
		       int is_leading)
{
	struct bnx2x_client_init_params params = { {0} };
	int rc;

	/* reset IGU state skip FCoE L2 queue */
	if (!IS_FCOE_FP(fp))
		bnx2x_ack_sb(bp, fp->igu_sb_id, USTORM_ID, 0,
			     IGU_INT_ENABLE, 0);

	params.ramrod_params.pstate = &fp->state;
	params.ramrod_params.state = BNX2X_FP_STATE_OPEN;
	params.ramrod_params.index = fp->index;
	params.ramrod_params.cid = fp->cid;

#ifdef BCM_CNIC
	if (IS_FCOE_FP(fp))
		params.ramrod_params.flags |= CLIENT_IS_FCOE;

#endif

	if (is_leading)
		params.ramrod_params.flags |= CLIENT_IS_LEADING_RSS;

	/* fill Rx-side (pause thresholds + rxq) and Tx-side parameters */
	bnx2x_pf_rx_cl_prep(bp, fp, &params.pause, &params.rxq_params);

	bnx2x_pf_tx_cl_prep(bp, fp, &params.txq_params);

	rc = bnx2x_setup_fw_client(bp, &params, 1,
					bnx2x_sp(bp, client_init_data),
					bnx2x_sp_mapping(bp, client_init_data));
	return rc;
}
6888
/* Tear down a FW client with the mandatory three-stage sequence:
 * HALT -> TERMINATE -> CFC_DEL, waiting synchronously for each ramrod
 * completion before posting the next.  Returns 0 on success or the
 * bnx2x_wait_ramrod() error (timeout) of the failing stage.
 */
static int bnx2x_stop_fw_client(struct bnx2x *bp,
				struct bnx2x_client_ramrod_params *p)
{
	int rc;

	int poll_flag = p->poll ? WAIT_RAMROD_POLL : 0;

	/* halt the connection */
	*p->pstate = BNX2X_FP_STATE_HALTING;
	bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_HALT, p->cid, 0,
		      p->cl_id, 0);

	/* Wait for completion */
	rc = bnx2x_wait_ramrod(bp, BNX2X_FP_STATE_HALTED, p->index,
			       p->pstate, poll_flag);
	if (rc) /* timeout */
		return rc;

	/* terminate the connection */
	*p->pstate = BNX2X_FP_STATE_TERMINATING;
	bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_TERMINATE, p->cid, 0,
		      p->cl_id, 0);
	/* Wait for completion */
	rc = bnx2x_wait_ramrod(bp, BNX2X_FP_STATE_TERMINATED, p->index,
			       p->pstate, poll_flag);
	if (rc) /* timeout */
		return rc;


	/* delete cfc entry */
	bnx2x_sp_post(bp, RAMROD_CMD_ID_COMMON_CFC_DEL, p->cid, 0, 0, 1);

	/* Wait for completion */
	rc = bnx2x_wait_ramrod(bp, BNX2X_FP_STATE_CLOSED, p->index,
			       p->pstate, WAIT_RAMROD_COMMON);
	return rc;
}
6925
Dmitry Kravkov523224a2010-10-06 03:23:26 +00006926static int bnx2x_stop_client(struct bnx2x *bp, int index)
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02006927{
Dmitry Kravkov523224a2010-10-06 03:23:26 +00006928 struct bnx2x_client_ramrod_params client_stop = {0};
6929 struct bnx2x_fastpath *fp = &bp->fp[index];
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02006930
Dmitry Kravkov523224a2010-10-06 03:23:26 +00006931 client_stop.index = index;
6932 client_stop.cid = fp->cid;
6933 client_stop.cl_id = fp->cl_id;
6934 client_stop.pstate = &(fp->state);
6935 client_stop.poll = 0;
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02006936
Dmitry Kravkov523224a2010-10-06 03:23:26 +00006937 return bnx2x_stop_fw_client(bp, &client_stop);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02006938}
6939
Dmitry Kravkov523224a2010-10-06 03:23:26 +00006940
/* Disable this PCI function in the FW and HW: mark it disabled in all
 * storm RAMs and status blocks, quiesce the IGU/HC edges, stop the
 * CNIC timer scan (if built in) and clear the function's ILT range.
 */
static void bnx2x_reset_func(struct bnx2x *bp)
{
	int port = BP_PORT(bp);
	int func = BP_FUNC(bp);
	int i;
	/* offset of the pf_id byte inside a fastpath SB data structure;
	 * the layout differs between E2 and E1x chips
	 */
	int pfunc_offset_fp = offsetof(struct hc_sb_data, p_func) +
			(CHIP_IS_E2(bp) ?
			 offsetof(struct hc_status_block_data_e2, common) :
			 offsetof(struct hc_status_block_data_e1x, common));
	int pfunc_offset_sp = offsetof(struct hc_sp_status_block_data, p_func);
	int pfid_offset = offsetof(struct pci_entity, pf_id);

	/* Disable the function in the FW */
	REG_WR8(bp, BAR_XSTRORM_INTMEM + XSTORM_FUNC_EN_OFFSET(func), 0);
	REG_WR8(bp, BAR_CSTRORM_INTMEM + CSTORM_FUNC_EN_OFFSET(func), 0);
	REG_WR8(bp, BAR_TSTRORM_INTMEM + TSTORM_FUNC_EN_OFFSET(func), 0);
	REG_WR8(bp, BAR_USTRORM_INTMEM + USTORM_FUNC_EN_OFFSET(func), 0);

	/* FP SBs */
	for_each_eth_queue(bp, i) {
		struct bnx2x_fastpath *fp = &bp->fp[i];
		REG_WR8(bp,
			BAR_CSTRORM_INTMEM +
			CSTORM_STATUS_BLOCK_DATA_OFFSET(fp->fw_sb_id)
			+ pfunc_offset_fp + pfid_offset,
			HC_FUNCTION_DISABLED);
	}

	/* SP SB */
	REG_WR8(bp,
		BAR_CSTRORM_INTMEM +
		CSTORM_SP_STATUS_BLOCK_DATA_OFFSET(func) +
		pfunc_offset_sp + pfid_offset,
		HC_FUNCTION_DISABLED);


	/* NOTE(review): every iteration writes the same offset — looks
	 * like the target should advance by i*4 to clear the whole SPQ
	 * data area; confirm against the FW interface spec.
	 */
	for (i = 0; i < XSTORM_SPQ_DATA_SIZE / 4; i++)
		REG_WR(bp, BAR_XSTRORM_INTMEM + XSTORM_SPQ_DATA_OFFSET(func),
		       0);

	/* Configure IGU */
	if (bp->common.int_block == INT_BLOCK_HC) {
		REG_WR(bp, HC_REG_LEADING_EDGE_0 + port*8, 0);
		REG_WR(bp, HC_REG_TRAILING_EDGE_0 + port*8, 0);
	} else {
		REG_WR(bp, IGU_REG_LEADING_EDGE_LATCH, 0);
		REG_WR(bp, IGU_REG_TRAILING_EDGE_LATCH, 0);
	}

#ifdef BCM_CNIC
	/* Disable Timer scan */
	REG_WR(bp, TM_REG_EN_LINEAR0_TIMER + port*4, 0);
	/*
	 * Wait for at least 10ms and up to 2 second for the timers scan to
	 * complete
	 */
	for (i = 0; i < 200; i++) {
		msleep(10);
		if (!REG_RD(bp, TM_REG_LIN0_SCAN_ON + port*4))
			break;
	}
#endif
	/* Clear ILT */
	bnx2x_clear_func_ilt(bp, func);

	/* Timers workaround bug for E2: if this is vnic-3,
	 * we need to set the entire ilt range for this timers.
	 */
	if (CHIP_IS_E2(bp) && BP_VN(bp) == 3) {
		struct ilt_client_info ilt_cli;
		/* use dummy TM client */
		memset(&ilt_cli, 0, sizeof(struct ilt_client_info));
		ilt_cli.start = 0;
		ilt_cli.end = ILT_NUM_PAGE_ENTRIES - 1;
		ilt_cli.client_num = ILT_CLIENT_TM;

		bnx2x_ilt_boundry_init_op(bp, &ilt_cli, 0, INITOP_CLEAR);
	}

	/* this assumes that reset_port() called before reset_func()*/
	if (CHIP_IS_E2(bp))
		bnx2x_pf_disable(bp);

	/* DMAE must not be used again until re-initialized */
	bp->dmae_ready = 0;
}
7026
/* Quiesce a port: mask its NIG interrupts and AEU attentions, stop
 * delivering received packets to the BRB, then verify the BRB drained.
 */
static void bnx2x_reset_port(struct bnx2x *bp)
{
	int port = BP_PORT(bp);
	u32 val;

	/* mask NIG interrupts for this port */
	REG_WR(bp, NIG_REG_MASK_INTERRUPT_PORT0 + port*4, 0);

	/* Do not rcv packets to BRB */
	REG_WR(bp, NIG_REG_LLH0_BRB1_DRV_MASK + port*4, 0x0);
	/* Do not direct rcv packets that are not for MCP to the BRB */
	REG_WR(bp, (port ? NIG_REG_LLH1_BRB1_NOT_MCP :
			   NIG_REG_LLH0_BRB1_NOT_MCP), 0x0);

	/* Configure AEU */
	REG_WR(bp, MISC_REG_AEU_MASK_ATTN_FUNC_0 + port*4, 0);

	/* give in-flight packets time to drain out of the BRB */
	msleep(100);
	/* Check for BRB port occupancy */
	val = REG_RD(bp, BRB1_REG_PORT_NUM_OCC_BLOCKS_0 + port*4);
	if (val)
		DP(NETIF_MSG_IFDOWN,
		   "BRB1 is not empty %d blocks are occupied\n", val);

	/* TODO: Close Doorbell port? */
}
7052
Eilon Greenstein34f80b02008-06-23 20:33:01 -07007053static void bnx2x_reset_chip(struct bnx2x *bp, u32 reset_code)
7054{
7055 DP(BNX2X_MSG_MCP, "function %d reset_code %x\n",
Dmitry Kravkovf2e08992010-10-06 03:28:26 +00007056 BP_ABS_FUNC(bp), reset_code);
Eilon Greenstein34f80b02008-06-23 20:33:01 -07007057
7058 switch (reset_code) {
7059 case FW_MSG_CODE_DRV_UNLOAD_COMMON:
7060 bnx2x_reset_port(bp);
7061 bnx2x_reset_func(bp);
7062 bnx2x_reset_common(bp);
7063 break;
7064
7065 case FW_MSG_CODE_DRV_UNLOAD_PORT:
7066 bnx2x_reset_port(bp);
7067 bnx2x_reset_func(bp);
7068 break;
7069
7070 case FW_MSG_CODE_DRV_UNLOAD_FUNCTION:
7071 bnx2x_reset_func(bp);
7072 break;
7073
7074 default:
7075 BNX2X_ERR("Unknown reset_code (0x%x) from MCP\n", reset_code);
7076 break;
7077 }
7078}
7079
#ifdef BCM_CNIC
static inline void bnx2x_del_fcoe_eth_macs(struct bnx2x *bp)
{
	/* Nothing to do unless FCoE MACs were previously configured */
	if (!(bp->flags & FCOE_MACS_SET))
		return;

	/* the FIP MAC is only programmed outside switch-dependent MF mode */
	if (!IS_MF_SD(bp))
		bnx2x_set_fip_eth_mac_addr(bp, 0);

	bnx2x_set_all_enode_macs(bp, 0);

	bp->flags &= ~FCOE_MACS_SET;
}
#endif
7093
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00007094void bnx2x_chip_cleanup(struct bnx2x *bp, int unload_mode)
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02007095{
Vladislav Zolotarovda5a6622008-08-13 15:50:00 -07007096 int port = BP_PORT(bp);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02007097 u32 reset_code = 0;
Vladislav Zolotarovda5a6622008-08-13 15:50:00 -07007098 int i, cnt, rc;
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02007099
Eilon Greenstein555f6c72009-02-12 08:36:11 +00007100 /* Wait until tx fastpath tasks complete */
Vladislav Zolotarovec6ba942010-12-13 05:44:01 +00007101 for_each_tx_queue(bp, i) {
Eliezer Tamir228241e2008-02-28 11:56:57 -08007102 struct bnx2x_fastpath *fp = &bp->fp[i];
7103
Eilon Greenstein34f80b02008-06-23 20:33:01 -07007104 cnt = 1000;
Vladislav Zolotarove8b5fc52009-01-26 12:36:42 -08007105 while (bnx2x_has_tx_work_unload(fp)) {
Vladislav Zolotarovda5a6622008-08-13 15:50:00 -07007106
Eilon Greenstein34f80b02008-06-23 20:33:01 -07007107 if (!cnt) {
7108 BNX2X_ERR("timeout waiting for queue[%d]\n",
7109 i);
7110#ifdef BNX2X_STOP_ON_ERROR
7111 bnx2x_panic();
7112 return -EBUSY;
7113#else
7114 break;
7115#endif
7116 }
7117 cnt--;
Vladislav Zolotarovda5a6622008-08-13 15:50:00 -07007118 msleep(1);
Eilon Greenstein34f80b02008-06-23 20:33:01 -07007119 }
Eliezer Tamir228241e2008-02-28 11:56:57 -08007120 }
Vladislav Zolotarovda5a6622008-08-13 15:50:00 -07007121 /* Give HW time to discard old tx messages */
7122 msleep(1);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02007123
Yitchak Gertner65abd742008-08-25 15:26:24 -07007124 if (CHIP_IS_E1(bp)) {
Dmitry Kravkov523224a2010-10-06 03:23:26 +00007125 /* invalidate mc list,
7126 * wait and poll (interrupts are off)
7127 */
7128 bnx2x_invlidate_e1_mc_list(bp);
7129 bnx2x_set_eth_mac(bp, 0);
Yitchak Gertner65abd742008-08-25 15:26:24 -07007130
Dmitry Kravkov523224a2010-10-06 03:23:26 +00007131 } else {
Yitchak Gertner65abd742008-08-25 15:26:24 -07007132 REG_WR(bp, NIG_REG_LLH0_FUNC_EN + port*8, 0);
7133
Dmitry Kravkov523224a2010-10-06 03:23:26 +00007134 bnx2x_set_eth_mac(bp, 0);
Yitchak Gertner65abd742008-08-25 15:26:24 -07007135
7136 for (i = 0; i < MC_HASH_SIZE; i++)
7137 REG_WR(bp, MC_HASH_OFFSET(bp, i), 0);
7138 }
Dmitry Kravkov523224a2010-10-06 03:23:26 +00007139
Michael Chan993ac7b2009-10-10 13:46:56 +00007140#ifdef BCM_CNIC
Vladislav Zolotarovec6ba942010-12-13 05:44:01 +00007141 bnx2x_del_fcoe_eth_macs(bp);
Michael Chan993ac7b2009-10-10 13:46:56 +00007142#endif
Yitchak Gertner65abd742008-08-25 15:26:24 -07007143
Vladislav Zolotarovda5a6622008-08-13 15:50:00 -07007144 if (unload_mode == UNLOAD_NORMAL)
7145 reset_code = DRV_MSG_CODE_UNLOAD_REQ_WOL_DIS;
Eliezer Tamir228241e2008-02-28 11:56:57 -08007146
Eilon Greenstein7d0446c2009-07-29 00:20:10 +00007147 else if (bp->flags & NO_WOL_FLAG)
Vladislav Zolotarovda5a6622008-08-13 15:50:00 -07007148 reset_code = DRV_MSG_CODE_UNLOAD_REQ_WOL_MCP;
Vladislav Zolotarovda5a6622008-08-13 15:50:00 -07007149
Eilon Greenstein7d0446c2009-07-29 00:20:10 +00007150 else if (bp->wol) {
Vladislav Zolotarovda5a6622008-08-13 15:50:00 -07007151 u32 emac_base = port ? GRCBASE_EMAC1 : GRCBASE_EMAC0;
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02007152 u8 *mac_addr = bp->dev->dev_addr;
Eilon Greenstein34f80b02008-06-23 20:33:01 -07007153 u32 val;
Eilon Greenstein34f80b02008-06-23 20:33:01 -07007154 /* The mac address is written to entries 1-4 to
7155 preserve entry 0 which is used by the PMF */
Vladislav Zolotarovda5a6622008-08-13 15:50:00 -07007156 u8 entry = (BP_E1HVN(bp) + 1)*8;
7157
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02007158 val = (mac_addr[0] << 8) | mac_addr[1];
Eilon Greenstein3196a882008-08-13 15:58:49 -07007159 EMAC_WR(bp, EMAC_REG_EMAC_MAC_MATCH + entry, val);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02007160
7161 val = (mac_addr[2] << 24) | (mac_addr[3] << 16) |
7162 (mac_addr[4] << 8) | mac_addr[5];
Eilon Greenstein3196a882008-08-13 15:58:49 -07007163 EMAC_WR(bp, EMAC_REG_EMAC_MAC_MATCH + entry + 4, val);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02007164
7165 reset_code = DRV_MSG_CODE_UNLOAD_REQ_WOL_EN;
Eliezer Tamir228241e2008-02-28 11:56:57 -08007166
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02007167 } else
7168 reset_code = DRV_MSG_CODE_UNLOAD_REQ_WOL_DIS;
7169
Eilon Greenstein34f80b02008-06-23 20:33:01 -07007170 /* Close multi and leading connections
7171 Completions for ramrods are collected in a synchronous way */
Dmitry Kravkov523224a2010-10-06 03:23:26 +00007172 for_each_queue(bp, i)
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02007173
Dmitry Kravkov523224a2010-10-06 03:23:26 +00007174 if (bnx2x_stop_client(bp, i))
Vladislav Zolotarovda5a6622008-08-13 15:50:00 -07007175#ifdef BNX2X_STOP_ON_ERROR
Dmitry Kravkov523224a2010-10-06 03:23:26 +00007176 return;
7177#else
7178 goto unload_error;
7179#endif
7180
7181 rc = bnx2x_func_stop(bp);
7182 if (rc) {
7183 BNX2X_ERR("Function stop failed!\n");
7184#ifdef BNX2X_STOP_ON_ERROR
7185 return;
Vladislav Zolotarovda5a6622008-08-13 15:50:00 -07007186#else
7187 goto unload_error;
Eilon Greenstein34f80b02008-06-23 20:33:01 -07007188#endif
Eliezer Tamir228241e2008-02-28 11:56:57 -08007189 }
Dmitry Kravkov523224a2010-10-06 03:23:26 +00007190#ifndef BNX2X_STOP_ON_ERROR
Eliezer Tamir228241e2008-02-28 11:56:57 -08007191unload_error:
Dmitry Kravkov523224a2010-10-06 03:23:26 +00007192#endif
Eilon Greenstein34f80b02008-06-23 20:33:01 -07007193 if (!BP_NOMCP(bp))
Yaniv Rosnera22f0782010-09-07 11:41:20 +00007194 reset_code = bnx2x_fw_command(bp, reset_code, 0);
Eilon Greenstein34f80b02008-06-23 20:33:01 -07007195 else {
Dmitry Kravkovf2e08992010-10-06 03:28:26 +00007196 DP(NETIF_MSG_IFDOWN, "NO MCP - load counts[%d] "
7197 "%d, %d, %d\n", BP_PATH(bp),
7198 load_count[BP_PATH(bp)][0],
7199 load_count[BP_PATH(bp)][1],
7200 load_count[BP_PATH(bp)][2]);
7201 load_count[BP_PATH(bp)][0]--;
7202 load_count[BP_PATH(bp)][1 + port]--;
7203 DP(NETIF_MSG_IFDOWN, "NO MCP - new load counts[%d] "
7204 "%d, %d, %d\n", BP_PATH(bp),
7205 load_count[BP_PATH(bp)][0], load_count[BP_PATH(bp)][1],
7206 load_count[BP_PATH(bp)][2]);
7207 if (load_count[BP_PATH(bp)][0] == 0)
Eilon Greenstein34f80b02008-06-23 20:33:01 -07007208 reset_code = FW_MSG_CODE_DRV_UNLOAD_COMMON;
Dmitry Kravkovf2e08992010-10-06 03:28:26 +00007209 else if (load_count[BP_PATH(bp)][1 + port] == 0)
Eilon Greenstein34f80b02008-06-23 20:33:01 -07007210 reset_code = FW_MSG_CODE_DRV_UNLOAD_PORT;
7211 else
7212 reset_code = FW_MSG_CODE_DRV_UNLOAD_FUNCTION;
7213 }
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02007214
Eilon Greenstein34f80b02008-06-23 20:33:01 -07007215 if ((reset_code == FW_MSG_CODE_DRV_UNLOAD_COMMON) ||
7216 (reset_code == FW_MSG_CODE_DRV_UNLOAD_PORT))
7217 bnx2x__link_reset(bp);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02007218
Dmitry Kravkov523224a2010-10-06 03:23:26 +00007219 /* Disable HW interrupts, NAPI */
7220 bnx2x_netif_stop(bp, 1);
7221
7222 /* Release IRQs */
Dmitry Kravkovd6214d72010-10-06 03:32:10 +00007223 bnx2x_free_irq(bp);
Dmitry Kravkov523224a2010-10-06 03:23:26 +00007224
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02007225 /* Reset the chip */
Eliezer Tamir228241e2008-02-28 11:56:57 -08007226 bnx2x_reset_chip(bp, reset_code);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02007227
7228 /* Report UNLOAD_DONE to MCP */
Eilon Greenstein34f80b02008-06-23 20:33:01 -07007229 if (!BP_NOMCP(bp))
Yaniv Rosnera22f0782010-09-07 11:41:20 +00007230 bnx2x_fw_command(bp, DRV_MSG_CODE_UNLOAD_DONE, 0);
Eilon Greenstein356e2382009-02-12 08:38:32 +00007231
Vladislav Zolotarov72fd0712010-04-19 01:13:12 +00007232}
7233
/* Disable the "close the gates" mechanism (part of the parity/recovery
 * flow) by clearing the relevant AEU mask bits; the register layout
 * differs between E1 and E1H chips.
 */
void bnx2x_disable_close_the_gate(struct bnx2x *bp)
{
	u32 val;

	DP(NETIF_MSG_HW, "Disabling \"close the gates\"\n");

	if (CHIP_IS_E1(bp)) {
		int port = BP_PORT(bp);
		u32 addr = port ? MISC_REG_AEU_MASK_ATTN_FUNC_1 :
			MISC_REG_AEU_MASK_ATTN_FUNC_0;

		/* clear bits 8-9 of the per-function AEU attention mask */
		val = REG_RD(bp, addr);
		val &= ~(0x300);
		REG_WR(bp, addr, val);
	} else if (CHIP_IS_E1H(bp)) {
		val = REG_RD(bp, MISC_REG_AEU_GENERAL_MASK);
		val &= ~(MISC_AEU_GENERAL_MASK_REG_AEU_PXP_CLOSE_MASK |
			 MISC_AEU_GENERAL_MASK_REG_AEU_NIG_CLOSE_MASK);
		REG_WR(bp, MISC_REG_AEU_GENERAL_MASK, val);
	}
}
7255
/* Close gates #2, #3 and #4: */
static void bnx2x_set_234_gates(struct bnx2x *bp, bool close)
{
	u32 val, addr;

	/* Gates #2 and #4a are closed/opened for "not E1" only */
	if (!CHIP_IS_E1(bp)) {
		/* #4 */
		val = REG_RD(bp, PXP_REG_HST_DISCARD_DOORBELLS);
		REG_WR(bp, PXP_REG_HST_DISCARD_DOORBELLS,
		       close ? (val | 0x1) : (val & (~(u32)1)));
		/* #2 */
		val = REG_RD(bp, PXP_REG_HST_DISCARD_INTERNAL_WRITES);
		REG_WR(bp, PXP_REG_HST_DISCARD_INTERNAL_WRITES,
		       close ? (val | 0x1) : (val & (~(u32)1)));
	}

	/* #3 */
	/* note: opposite polarity to the PXP gates above — here the bit
	 * is set to OPEN the gate (!close) and cleared to close it
	 */
	addr = BP_PORT(bp) ? HC_REG_CONFIG_1 : HC_REG_CONFIG_0;
	val = REG_RD(bp, addr);
	REG_WR(bp, addr, (!close) ? (val | 0x1) : (val & (~(u32)1)));

	DP(NETIF_MSG_HW, "%s gates #2, #3 and #4\n",
	   close ? "closing" : "opening");
	mmiowb();
}
7282
7283#define SHARED_MF_CLP_MAGIC 0x80000000 /* `magic' bit */
7284
7285static void bnx2x_clp_reset_prep(struct bnx2x *bp, u32 *magic_val)
7286{
7287 /* Do some magic... */
7288 u32 val = MF_CFG_RD(bp, shared_mf_config.clp_mb);
7289 *magic_val = val & SHARED_MF_CLP_MAGIC;
7290 MF_CFG_WR(bp, shared_mf_config.clp_mb, val | SHARED_MF_CLP_MAGIC);
7291}
7292
7293/* Restore the value of the `magic' bit.
7294 *
7295 * @param pdev Device handle.
7296 * @param magic_val Old value of the `magic' bit.
7297 */
7298static void bnx2x_clp_reset_done(struct bnx2x *bp, u32 magic_val)
7299{
7300 /* Restore the `magic' bit value... */
Vladislav Zolotarov72fd0712010-04-19 01:13:12 +00007301 u32 val = MF_CFG_RD(bp, shared_mf_config.clp_mb);
7302 MF_CFG_WR(bp, shared_mf_config.clp_mb,
7303 (val & (~SHARED_MF_CLP_MAGIC)) | magic_val);
7304}
7305
/**
 * Prepares for MCP reset: takes care of CLP configurations.
 *
 * @param bp
 * @param magic_val	Old value of 'magic' bit, for later restore by
 *			bnx2x_clp_reset_done().
 */
static void bnx2x_reset_mcp_prep(struct bnx2x *bp, u32 *magic_val)
{
	u32 shmem;
	u32 validity_offset;

	DP(NETIF_MSG_HW, "Starting\n");

	/* Set `magic' bit in order to save MF config */
	if (!CHIP_IS_E1(bp))
		bnx2x_clp_reset_prep(bp, magic_val);

	/* Get shmem offset */
	shmem = REG_RD(bp, MISC_REG_SHARED_MEM_ADDR);
	validity_offset = offsetof(struct shmem_region, validity_map[0]);

	/* Clear validity map flags */
	if (shmem > 0)
		REG_WR(bp, shmem + validity_offset, 0);
}
7331
7332#define MCP_TIMEOUT 5000 /* 5 seconds (in ms) */
7333#define MCP_ONE_TIMEOUT 100 /* 100 ms */
7334
7335/* Waits for MCP_ONE_TIMEOUT or MCP_ONE_TIMEOUT*10,
7336 * depending on the HW type.
7337 *
7338 * @param bp
7339 */
7340static inline void bnx2x_mcp_wait_one(struct bnx2x *bp)
7341{
7342 /* special handling for emulation and FPGA,
7343 wait 10 times longer */
7344 if (CHIP_REV_IS_SLOW(bp))
7345 msleep(MCP_ONE_TIMEOUT*10);
7346 else
7347 msleep(MCP_ONE_TIMEOUT);
7348}
7349
/* Poll for up to MCP_TIMEOUT ms for the MCP to come back after reset by
 * watching the shared-memory validity signature; restore the saved CLP
 * `magic' bit on exit.  Returns 0 when the MCP is up, -ENOTTY otherwise.
 */
static int bnx2x_reset_mcp_comp(struct bnx2x *bp, u32 magic_val)
{
	u32 shmem, cnt, validity_offset, val;
	int rc = 0;

	/* give the MCP a moment before the first read */
	msleep(100);

	/* Get shmem offset */
	shmem = REG_RD(bp, MISC_REG_SHARED_MEM_ADDR);
	if (shmem == 0) {
		BNX2X_ERR("Shmem 0 return failure\n");
		rc = -ENOTTY;
		goto exit_lbl;
	}

	validity_offset = offsetof(struct shmem_region, validity_map[0]);

	/* Wait for MCP to come up */
	for (cnt = 0; cnt < (MCP_TIMEOUT / MCP_ONE_TIMEOUT); cnt++) {
		/* TBD: its best to check validity map of last port.
		 * currently checks on port 0.
		 */
		val = REG_RD(bp, shmem + validity_offset);
		DP(NETIF_MSG_HW, "shmem 0x%x validity map(0x%x)=0x%x\n", shmem,
		   shmem + validity_offset, val);

		/* check that shared memory is valid. */
		if ((val & (SHR_MEM_VALIDITY_DEV_INFO | SHR_MEM_VALIDITY_MB))
		    == (SHR_MEM_VALIDITY_DEV_INFO | SHR_MEM_VALIDITY_MB))
			break;

		bnx2x_mcp_wait_one(bp);
	}

	DP(NETIF_MSG_HW, "Cnt=%d Shmem validity map 0x%x\n", cnt, val);

	/* Check that shared memory is valid. This indicates that MCP is up. */
	if ((val & (SHR_MEM_VALIDITY_DEV_INFO | SHR_MEM_VALIDITY_MB)) !=
	    (SHR_MEM_VALIDITY_DEV_INFO | SHR_MEM_VALIDITY_MB)) {
		BNX2X_ERR("Shmem signature not present. MCP is not up !!\n");
		rc = -ENOTTY;
		goto exit_lbl;
	}

exit_lbl:
	/* Restore the `magic' bit value */
	if (!CHIP_IS_E1(bp))
		bnx2x_clp_reset_done(bp, magic_val);

	return rc;
}
7401
7402static void bnx2x_pxp_prep(struct bnx2x *bp)
7403{
7404 if (!CHIP_IS_E1(bp)) {
7405 REG_WR(bp, PXP2_REG_RD_START_INIT, 0);
7406 REG_WR(bp, PXP2_REG_RQ_RBC_DONE, 0);
7407 REG_WR(bp, PXP2_REG_RQ_CFG_DONE, 0);
7408 mmiowb();
7409 }
7410}
7411
/*
 * Reset the whole chip except for:
 *      - PCIE core
 *      - PCI Glue, PSWHST, PXP/PXP2 RF (all controlled by
 *        one reset bit)
 *      - IGU
 *      - MISC (including AEU)
 *      - GRC
 *      - RBCN, RBCP
 */
static void bnx2x_process_kill_chip_reset(struct bnx2x *bp)
{
	u32 not_reset_mask1, reset_mask1, not_reset_mask2, reset_mask2;

	/* bits in RESET_REG_1 that must be left untouched */
	not_reset_mask1 =
		MISC_REGISTERS_RESET_REG_1_RST_HC |
		MISC_REGISTERS_RESET_REG_1_RST_PXPV |
		MISC_REGISTERS_RESET_REG_1_RST_PXP;

	/* bits in RESET_REG_2 that must be left untouched */
	not_reset_mask2 =
		MISC_REGISTERS_RESET_REG_2_RST_MDIO |
		MISC_REGISTERS_RESET_REG_2_RST_EMAC0_HARD_CORE |
		MISC_REGISTERS_RESET_REG_2_RST_EMAC1_HARD_CORE |
		MISC_REGISTERS_RESET_REG_2_RST_MISC_CORE |
		MISC_REGISTERS_RESET_REG_2_RST_RBCN |
		MISC_REGISTERS_RESET_REG_2_RST_GRC |
		MISC_REGISTERS_RESET_REG_2_RST_MCP_N_RESET_REG_HARD_CORE |
		MISC_REGISTERS_RESET_REG_2_RST_MCP_N_HARD_CORE_RST_B;

	reset_mask1 = 0xffffffff;

	/* E1 has a narrower RESET_REG_2 than later chips */
	if (CHIP_IS_E1(bp))
		reset_mask2 = 0xffff;
	else
		reset_mask2 = 0x1ffff;

	/* NOTE(review): CLEAR appears to put blocks into reset and SET to
	 * release them — confirm against the MISC register spec.
	 */
	REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_CLEAR,
	       reset_mask1 & (~not_reset_mask1));
	REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_2_CLEAR,
	       reset_mask2 & (~not_reset_mask2));

	barrier();
	mmiowb();

	REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_SET, reset_mask1);
	REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_2_SET, reset_mask2);
	mmiowb();
}
7460
7461static int bnx2x_process_kill(struct bnx2x *bp)
7462{
7463 int cnt = 1000;
7464 u32 val = 0;
7465 u32 sr_cnt, blk_cnt, port_is_idle_0, port_is_idle_1, pgl_exp_rom2;
7466
7467
7468 /* Empty the Tetris buffer, wait for 1s */
7469 do {
7470 sr_cnt = REG_RD(bp, PXP2_REG_RD_SR_CNT);
7471 blk_cnt = REG_RD(bp, PXP2_REG_RD_BLK_CNT);
7472 port_is_idle_0 = REG_RD(bp, PXP2_REG_RD_PORT_IS_IDLE_0);
7473 port_is_idle_1 = REG_RD(bp, PXP2_REG_RD_PORT_IS_IDLE_1);
7474 pgl_exp_rom2 = REG_RD(bp, PXP2_REG_PGL_EXP_ROM2);
7475 if ((sr_cnt == 0x7e) && (blk_cnt == 0xa0) &&
7476 ((port_is_idle_0 & 0x1) == 0x1) &&
7477 ((port_is_idle_1 & 0x1) == 0x1) &&
7478 (pgl_exp_rom2 == 0xffffffff))
7479 break;
7480 msleep(1);
7481 } while (cnt-- > 0);
7482
7483 if (cnt <= 0) {
7484 DP(NETIF_MSG_HW, "Tetris buffer didn't get empty or there"
7485 " are still"
7486 " outstanding read requests after 1s!\n");
7487 DP(NETIF_MSG_HW, "sr_cnt=0x%08x, blk_cnt=0x%08x,"
7488 " port_is_idle_0=0x%08x,"
7489 " port_is_idle_1=0x%08x, pgl_exp_rom2=0x%08x\n",
7490 sr_cnt, blk_cnt, port_is_idle_0, port_is_idle_1,
7491 pgl_exp_rom2);
7492 return -EAGAIN;
7493 }
7494
7495 barrier();
7496
7497 /* Close gates #2, #3 and #4 */
7498 bnx2x_set_234_gates(bp, true);
7499
7500 /* TBD: Indicate that "process kill" is in progress to MCP */
7501
7502 /* Clear "unprepared" bit */
7503 REG_WR(bp, MISC_REG_UNPREPARED, 0);
7504 barrier();
7505
7506 /* Make sure all is written to the chip before the reset */
7507 mmiowb();
7508
7509 /* Wait for 1ms to empty GLUE and PCI-E core queues,
7510 * PSWHST, GRC and PSWRD Tetris buffer.
7511 */
7512 msleep(1);
7513
7514 /* Prepare to chip reset: */
7515 /* MCP */
7516 bnx2x_reset_mcp_prep(bp, &val);
7517
7518 /* PXP */
7519 bnx2x_pxp_prep(bp);
7520 barrier();
7521
7522 /* reset the chip */
7523 bnx2x_process_kill_chip_reset(bp);
7524 barrier();
7525
7526 /* Recover after reset: */
7527 /* MCP */
7528 if (bnx2x_reset_mcp_comp(bp, val))
7529 return -EAGAIN;
7530
7531 /* PXP */
7532 bnx2x_pxp_prep(bp);
7533
7534 /* Open the gates #2, #3 and #4 */
7535 bnx2x_set_234_gates(bp, false);
7536
7537 /* TBD: IGU/AEU preparation bring back the AEU/IGU to a
7538 * reset state, re-enable attentions. */
7539
7540 return 0;
7541}
7542
7543static int bnx2x_leader_reset(struct bnx2x *bp)
7544{
7545 int rc = 0;
7546 /* Try to recover after the failure */
7547 if (bnx2x_process_kill(bp)) {
7548 printk(KERN_ERR "%s: Something bad had happen! Aii!\n",
7549 bp->dev->name);
7550 rc = -EAGAIN;
7551 goto exit_leader_reset;
7552 }
7553
7554 /* Clear "reset is in progress" bit and update the driver state */
7555 bnx2x_set_reset_done(bp);
7556 bp->recovery_state = BNX2X_RECOVERY_DONE;
7557
7558exit_leader_reset:
7559 bp->is_leader = 0;
7560 bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_RESERVED_08);
7561 smp_wmb();
7562 return rc;
7563}
7564
Vladislav Zolotarov72fd0712010-04-19 01:13:12 +00007565/* Assumption: runs under rtnl lock. This together with the fact
7566 * that it's called only from bnx2x_reset_task() ensure that it
7567 * will never be called when netif_running(bp->dev) is false.
7568 */
/* Parity-error recovery state machine, driven from the reset task.
 * One function becomes the "leader" (holder of the RESERVED_08 HW lock) and
 * performs the chip-wide "process kill"; all other functions ("non-leaders")
 * just unload and wait until the leader has finished, then reload.
 * The while(1)/switch structure lets a state transition re-run the switch
 * in the same invocation (via break), while "return" exits and either ends
 * recovery or relies on the rescheduled reset_task to continue it later.
 */
7569static void bnx2x_parity_recover(struct bnx2x *bp)
7570{
7571	DP(NETIF_MSG_HW, "Handling parity\n");
7572	while (1) {
7573		switch (bp->recovery_state) {
7574		case BNX2X_RECOVERY_INIT:
7575			DP(NETIF_MSG_HW, "State is BNX2X_RECOVERY_INIT\n");
7576			/* Try to get a LEADER_LOCK HW lock */
7577			if (bnx2x_trylock_hw_lock(bp,
7578					HW_LOCK_RESOURCE_RESERVED_08))
7579				bp->is_leader = 1;
7580
7581			/* Stop the driver */
7582			/* If interface has been removed - break */
7583			if (bnx2x_nic_unload(bp, UNLOAD_RECOVERY))
7584				return;
7585
7586			bp->recovery_state = BNX2X_RECOVERY_WAIT;
7587			/* Ensure "is_leader" and "recovery_state"
7588			 * update values are seen on other CPUs
7589			 */
7590			smp_wmb();
			/* re-enter the switch in the WAIT state */
7591			break;
7592
7593		case BNX2X_RECOVERY_WAIT:
7594			DP(NETIF_MSG_HW, "State is BNX2X_RECOVERY_WAIT\n");
7595			if (bp->is_leader) {
7596				u32 load_counter = bnx2x_get_load_cnt(bp);
7597				if (load_counter) {
7598					/* Wait until all other functions get
7599					 * down.
7600					 */
7601					schedule_delayed_work(&bp->reset_task,
7602								HZ/10);
7603					return;
7604				} else {
7605					/* If all other functions got down -
7606					 * try to bring the chip back to
7607					 * normal. In any case it's an exit
7608					 * point for a leader.
7609					 */
7610					if (bnx2x_leader_reset(bp) ||
7611					    bnx2x_nic_load(bp, LOAD_NORMAL)) {
7612						printk(KERN_ERR"%s: Recovery "
7613						"has failed. Power cycle is "
7614						"needed.\n", bp->dev->name);
7615						/* Disconnect this device */
7616						netif_device_detach(bp->dev);
7617						/* Block ifup for all function
7618						 * of this ASIC until
7619						 * "process kill" or power
7620						 * cycle.
7621						 */
7622						bnx2x_set_reset_in_progress(bp);
7623						/* Shut down the power */
7624						bnx2x_set_power_state(bp,
7625								PCI_D3hot);
7626						return;
7627					}
7628
7629					return;
7630				}
7631			} else { /* non-leader */
7632				if (!bnx2x_reset_is_done(bp)) {
7633					/* Try to get a LEADER_LOCK HW lock as
7634					 * long as a former leader may have
7635					 * been unloaded by the user or
7636					 * released a leadership by another
7637					 * reason.
7638					 */
7639					if (bnx2x_trylock_hw_lock(bp,
7640					    HW_LOCK_RESOURCE_RESERVED_08)) {
7641						/* I'm a leader now! Restart a
7642						 * switch case.
7643						 */
7644						bp->is_leader = 1;
7645						break;
7646					}
7647
					/* still a non-leader: poll again in
					 * 100ms via the reset task */
7648					schedule_delayed_work(&bp->reset_task,
7649							      HZ/10);
7650					return;
7651
7652				} else { /* A leader has completed
7653					  * the "process kill". It's an exit
7654					  * point for a non-leader.
7655					  */
7656					bnx2x_nic_load(bp, LOAD_NORMAL);
7657					bp->recovery_state =
7658						BNX2X_RECOVERY_DONE;
7659					smp_wmb();
7660					return;
7661				}
7662			}
		/* all WAIT paths above return or break; default covers
		 * any other recovery_state value */
7663		default:
7664			return;
7665		}
7666	}
7667}
7668
7669/* bnx2x_nic_unload() flushes the bnx2x_wq, thus reset task is
7670 * scheduled on a general queue in order to prevent a dead lock.
7671 */
/* Delayed-work handler that resets the NIC. Under BNX2X_STOP_ON_ERROR the
 * reset is skipped so the error state stays available for a debug dump (the
 * embedded KERN_ERR sets the log level of the continuation line - the
 * multi-line printk idiom of this kernel generation). Otherwise, under rtnl:
 * run parity recovery if one is pending, else do a plain unload/reload.
 */
Eilon Greenstein34f80b02008-06-23 20:33:01 -07007672static void bnx2x_reset_task(struct work_struct *work)
7673{
Vladislav Zolotarov72fd0712010-04-19 01:13:12 +00007674	struct bnx2x *bp = container_of(work, struct bnx2x, reset_task.work);
Eilon Greenstein34f80b02008-06-23 20:33:01 -07007675
7676#ifdef BNX2X_STOP_ON_ERROR
7677	BNX2X_ERR("reset task called but STOP_ON_ERROR defined"
7678		  " so reset not done to allow debug dump,\n"
Vladislav Zolotarov72fd0712010-04-19 01:13:12 +00007679	 KERN_ERR " you will need to reboot when done\n");
Eilon Greenstein34f80b02008-06-23 20:33:01 -07007680	return;
7681#endif
7682
7683	rtnl_lock();
7684
	/* Device was closed in the meantime - nothing to reset */
7685	if (!netif_running(bp->dev))
7686		goto reset_task_exit;
7687
Vladislav Zolotarov72fd0712010-04-19 01:13:12 +00007688	if (unlikely(bp->recovery_state != BNX2X_RECOVERY_DONE))
7689		bnx2x_parity_recover(bp);
7690	else {
7691		bnx2x_nic_unload(bp, UNLOAD_NORMAL);
7692		bnx2x_nic_load(bp, LOAD_NORMAL);
7693	}
Eilon Greenstein34f80b02008-06-23 20:33:01 -07007694
7695reset_task_exit:
7696	rtnl_unlock();
7697}
7698
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02007699/* end of nic load/unload */
7700
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02007701/*
7702 * Init service functions
7703 */
7704
stephen hemminger8d962862010-10-21 07:50:56 +00007705static u32 bnx2x_get_pretend_reg(struct bnx2x *bp)
Eilon Greensteinf1ef27e2009-02-12 08:36:23 +00007706{
Dmitry Kravkovf2e08992010-10-06 03:28:26 +00007707 u32 base = PXP2_REG_PGL_PRETEND_FUNC_F0;
7708 u32 stride = PXP2_REG_PGL_PRETEND_FUNC_F1 - base;
7709 return base + (BP_ABS_FUNC(bp)) * stride;
Eilon Greensteinf1ef27e2009-02-12 08:36:23 +00007710}
7711
/* Disable interrupts on non-E1 chips by temporarily "pretending" to be
 * function 0 via the PGL pretend register, running the common E1-style
 * interrupt disable, and then restoring the real function number. Each
 * pretend write is flushed (REG_RD read-back / mmiowb) before proceeding
 * so the chip sees the steps in order.
 */
Dmitry Kravkovf2e08992010-10-06 03:28:26 +00007712static void bnx2x_undi_int_disable_e1h(struct bnx2x *bp)
Eilon Greensteinf1ef27e2009-02-12 08:36:23 +00007713{
Dmitry Kravkovf2e08992010-10-06 03:28:26 +00007714	u32 reg = bnx2x_get_pretend_reg(bp);
Eilon Greensteinf1ef27e2009-02-12 08:36:23 +00007715
7716	/* Flush all outstanding writes */
7717	mmiowb();
7718
7719	/* Pretend to be function 0 */
7720	REG_WR(bp, reg, 0);
Dmitry Kravkovf2e08992010-10-06 03:28:26 +00007721	REG_RD(bp, reg);	/* Flush the GRC transaction (in the chip) */
Eilon Greensteinf1ef27e2009-02-12 08:36:23 +00007722
7723	/* From now we are in the "like-E1" mode */
7724	bnx2x_int_disable(bp);
7725
7726	/* Flush all outstanding writes */
7727	mmiowb();
7728
Dmitry Kravkovf2e08992010-10-06 03:28:26 +00007729	/* Restore the original function */
7730	REG_WR(bp, reg, BP_ABS_FUNC(bp));
7731	REG_RD(bp, reg);
Eilon Greensteinf1ef27e2009-02-12 08:36:23 +00007732}
7733
/* Disable chip interrupts during the UNDI unload flow, choosing the
 * chip-appropriate method: the plain disable on E1, or the function-0
 * "pretend" variant on later chips.
 */
static inline void bnx2x_undi_int_disable(struct bnx2x *bp)
{
	if (!CHIP_IS_E1(bp))
		bnx2x_undi_int_disable_e1h(bp);
	else
		bnx2x_int_disable(bp);
}
7741
/* Detect and unload a pre-boot UNDI driver left active by the BIOS/PXE.
 * UNDI presence is recognized by MISC_REG_UNPREPARED == 1 together with the
 * UNDI-specific doorbell CID offset (0x7). If found: perform the firmware
 * unload handshake for both ports, quiesce traffic, reset the device while
 * preserving NIG port-swap straps, and restore our own pf_num/fw_seq.
 * The whole probe/unload is serialized via the UNDI HW lock; note that both
 * branches release the lock (the success path at "now it's safe...").
 */
Eilon Greenstein34f80b02008-06-23 20:33:01 -07007742static void __devinit bnx2x_undi_unload(struct bnx2x *bp)
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02007743{
Eilon Greenstein34f80b02008-06-23 20:33:01 -07007744	u32 val;
7745
7746	/* Check if there is any driver already loaded */
7747	val = REG_RD(bp, MISC_REG_UNPREPARED);
7748	if (val == 0x1) {
7749		/* Check if it is the UNDI driver
7750		 * UNDI driver initializes CID offset for normal bell to 0x7
7751		 */
Yitchak Gertner4a37fb62008-08-13 15:50:23 -07007752		bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_UNDI);
Eilon Greenstein34f80b02008-06-23 20:33:01 -07007753		val = REG_RD(bp, DORQ_REG_NORM_CID_OFST);
7754		if (val == 0x7) {
7755			u32 reset_code = DRV_MSG_CODE_UNLOAD_REQ_WOL_DIS;
Dmitry Kravkovf2e08992010-10-06 03:28:26 +00007756			/* save our pf_num */
7757			int orig_pf_num = bp->pf_num;
Vladislav Zolotarovda5a6622008-08-13 15:50:00 -07007758			u32 swap_en;
7759			u32 swap_val;
Eilon Greenstein34f80b02008-06-23 20:33:01 -07007760
Eilon Greensteinb4661732009-01-14 06:43:56 +00007761			/* clear the UNDI indication */
7762			REG_WR(bp, DORQ_REG_NORM_CID_OFST, 0);
7763
Eilon Greenstein34f80b02008-06-23 20:33:01 -07007764			BNX2X_DEV_INFO("UNDI is active! reset device\n");
7765
7766			/* try unload UNDI on port 0 */
Dmitry Kravkovf2e08992010-10-06 03:28:26 +00007767			bp->pf_num = 0;
Vladislav Zolotarovda5a6622008-08-13 15:50:00 -07007768			bp->fw_seq =
Dmitry Kravkovf2e08992010-10-06 03:28:26 +00007769			      (SHMEM_RD(bp, func_mb[bp->pf_num].drv_mb_header) &
Vladislav Zolotarovda5a6622008-08-13 15:50:00 -07007770				DRV_MSG_SEQ_NUMBER_MASK);
Yaniv Rosnera22f0782010-09-07 11:41:20 +00007771			reset_code = bnx2x_fw_command(bp, reset_code, 0);
Eilon Greenstein34f80b02008-06-23 20:33:01 -07007772
7773			/* if UNDI is loaded on the other port */
7774			if (reset_code != FW_MSG_CODE_DRV_UNLOAD_COMMON) {
7775
Vladislav Zolotarovda5a6622008-08-13 15:50:00 -07007776				/* send "DONE" for previous unload */
Yaniv Rosnera22f0782010-09-07 11:41:20 +00007777				bnx2x_fw_command(bp,
7778						 DRV_MSG_CODE_UNLOAD_DONE, 0);
Vladislav Zolotarovda5a6622008-08-13 15:50:00 -07007779
7780				/* unload UNDI on port 1 */
Dmitry Kravkovf2e08992010-10-06 03:28:26 +00007781				bp->pf_num = 1;
Vladislav Zolotarovda5a6622008-08-13 15:50:00 -07007782				bp->fw_seq =
Dmitry Kravkovf2e08992010-10-06 03:28:26 +00007783			      (SHMEM_RD(bp, func_mb[bp->pf_num].drv_mb_header) &
Vladislav Zolotarovda5a6622008-08-13 15:50:00 -07007784					DRV_MSG_SEQ_NUMBER_MASK);
7785				reset_code = DRV_MSG_CODE_UNLOAD_REQ_WOL_DIS;
Eilon Greenstein34f80b02008-06-23 20:33:01 -07007786
Yaniv Rosnera22f0782010-09-07 11:41:20 +00007787				bnx2x_fw_command(bp, reset_code, 0);
Eilon Greenstein34f80b02008-06-23 20:33:01 -07007788			}
7789
Eilon Greensteinb4661732009-01-14 06:43:56 +00007790			/* now it's safe to release the lock */
7791			bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_UNDI);
7792
Dmitry Kravkovf2e08992010-10-06 03:28:26 +00007793			bnx2x_undi_int_disable(bp);
Vladislav Zolotarovda5a6622008-08-13 15:50:00 -07007794
7795			/* close input traffic and wait for it */
7796			/* Do not rcv packets to BRB */
7797			REG_WR(bp,
7798			      (BP_PORT(bp) ? NIG_REG_LLH1_BRB1_DRV_MASK :
7799			       NIG_REG_LLH0_BRB1_DRV_MASK), 0x0);
7800			/* Do not direct rcv packets that are not for MCP to
7801			 * the BRB */
7802			REG_WR(bp,
7803			      (BP_PORT(bp) ? NIG_REG_LLH1_BRB1_NOT_MCP :
7804			       NIG_REG_LLH0_BRB1_NOT_MCP), 0x0);
7805			/* clear AEU */
7806			REG_WR(bp,
7807			      (BP_PORT(bp) ? MISC_REG_AEU_MASK_ATTN_FUNC_1 :
7808			       MISC_REG_AEU_MASK_ATTN_FUNC_0), 0);
7809			msleep(10);
7810
7811			/* save NIG port swap info */
7812			swap_val = REG_RD(bp, NIG_REG_PORT_SWAP);
7813			swap_en = REG_RD(bp, NIG_REG_STRAP_OVERRIDE);
Eilon Greenstein34f80b02008-06-23 20:33:01 -07007814			/* reset device */
7815			REG_WR(bp,
7816			       GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_CLEAR,
Vladislav Zolotarovda5a6622008-08-13 15:50:00 -07007817			       0xd3ffffff);
Eilon Greenstein34f80b02008-06-23 20:33:01 -07007818			REG_WR(bp,
7819			       GRCBASE_MISC + MISC_REGISTERS_RESET_REG_2_CLEAR,
7820			       0x1403);
Vladislav Zolotarovda5a6622008-08-13 15:50:00 -07007821			/* take the NIG out of reset and restore swap values */
7822			REG_WR(bp,
7823			       GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_SET,
7824			       MISC_REGISTERS_RESET_REG_1_RST_NIG);
7825			REG_WR(bp, NIG_REG_PORT_SWAP, swap_val);
7826			REG_WR(bp, NIG_REG_STRAP_OVERRIDE, swap_en);
7827
7828			/* send unload done to the MCP */
Yaniv Rosnera22f0782010-09-07 11:41:20 +00007829			bnx2x_fw_command(bp, DRV_MSG_CODE_UNLOAD_DONE, 0);
Vladislav Zolotarovda5a6622008-08-13 15:50:00 -07007830
7831			/* restore our func and fw_seq */
Dmitry Kravkovf2e08992010-10-06 03:28:26 +00007832			bp->pf_num = orig_pf_num;
Vladislav Zolotarovda5a6622008-08-13 15:50:00 -07007833			bp->fw_seq =
Dmitry Kravkovf2e08992010-10-06 03:28:26 +00007834			      (SHMEM_RD(bp, func_mb[bp->pf_num].drv_mb_header) &
Vladislav Zolotarovda5a6622008-08-13 15:50:00 -07007835				DRV_MSG_SEQ_NUMBER_MASK);
Eilon Greensteinb4661732009-01-14 06:43:56 +00007836		} else
7837			bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_UNDI);
Eilon Greenstein34f80b02008-06-23 20:33:01 -07007838	}
7839}
7840
/* Probe common (port-independent) hardware info at device init:
 * chip id/revision, port mode and pfid (E2), base FW status-block id,
 * flash size, shmem bases, MCP presence/validity, bootcode version and the
 * feature/WoL flags derived from it, and the board part number.
 * Results are stored in bp->common / bp->link_params / bp->flags.
 */
7841static void __devinit bnx2x_get_common_hwinfo(struct bnx2x *bp)
7842{
7843	u32 val, val2, val3, val4, id;
Eilon Greenstein72ce58c2008-08-13 15:52:46 -07007844	u16 pmc;
Eilon Greenstein34f80b02008-06-23 20:33:01 -07007845
7846	/* Get the chip revision id and number. */
7847	/* chip num:16-31, rev:12-15, metal:4-11, bond_id:0-3 */
7848	val = REG_RD(bp, MISC_REG_CHIP_NUM);
7849	id = ((val & 0xffff) << 16);
7850	val = REG_RD(bp, MISC_REG_CHIP_REV);
7851	id |= ((val & 0xf) << 12);
7852	val = REG_RD(bp, MISC_REG_CHIP_METAL);
7853	id |= ((val & 0xff) << 4);
Eilon Greenstein5a40e082009-01-14 06:44:04 +00007854	val = REG_RD(bp, MISC_REG_BOND_ID);
Eilon Greenstein34f80b02008-06-23 20:33:01 -07007855	id |= (val & 0xf);
7856	bp->common.chip_id = id;
Dmitry Kravkov523224a2010-10-06 03:23:26 +00007857
7858	/* Set doorbell size */
7859	bp->db_size = (1 << BNX2X_DB_SHIFT);
7860
	/* E2: determine 2/4-port mode (override register takes precedence)
	 * and derive pfid from pf_num accordingly */
Dmitry Kravkovf2e08992010-10-06 03:28:26 +00007861	if (CHIP_IS_E2(bp)) {
7862		val = REG_RD(bp, MISC_REG_PORT4MODE_EN_OVWR);
7863		if ((val & 1) == 0)
7864			val = REG_RD(bp, MISC_REG_PORT4MODE_EN);
7865		else
7866			val = (val >> 1) & 1;
7867		BNX2X_DEV_INFO("chip is in %s\n", val ? "4_PORT_MODE" :
7868						       "2_PORT_MODE");
7869		bp->common.chip_port_mode = val ? CHIP_4_PORT_MODE :
7870						 CHIP_2_PORT_MODE;
7871
7872		if (CHIP_MODE_IS_4_PORT(bp))
7873			bp->pfid = (bp->pf_num >> 1);	/* 0..3 */
7874		else
7875			bp->pfid = (bp->pf_num & 0x6);	/* 0, 2, 4, 6 */
7876	} else {
7877		bp->common.chip_port_mode = CHIP_PORT_MODE_NONE; /* N/A */
7878		bp->pfid = bp->pf_num;			/* 0..7 */
7879	}
7880
Dmitry Kravkov523224a2010-10-06 03:23:26 +00007881	/*
7882	 * set base FW non-default (fast path) status block id, this value is
7883	 * used to initialize the fw_sb_id saved on the fp/queue structure to
7884	 * determine the id used by the FW.
7885	 */
Dmitry Kravkovf2e08992010-10-06 03:28:26 +00007886	if (CHIP_IS_E1x(bp))
7887		bp->base_fw_ndsb = BP_PORT(bp) * FP_SB_MAX_E1x;
7888	else /* E2 */
7889		bp->base_fw_ndsb = BP_PORT(bp) * FP_SB_MAX_E2;
7890
7891	bp->link_params.chip_id = bp->common.chip_id;
7892	BNX2X_DEV_INFO("chip ID is 0x%x\n", id);
Dmitry Kravkov523224a2010-10-06 03:23:26 +00007893
	/* single-port device detection (strap bits; exact semantics per
	 * chip spec - register 0x2874 is undocumented here) */
Eilon Greenstein1c063282009-02-12 08:36:43 +00007894	val = (REG_RD(bp, 0x2874) & 0x55);
7895	if ((bp->common.chip_id & 0x1) ||
7896	    (CHIP_IS_E1(bp) && val) || (CHIP_IS_E1H(bp) && (val == 0x55))) {
7897		bp->flags |= ONE_PORT_FLAG;
7898		BNX2X_DEV_INFO("single port device\n");
7899	}
7900
Eilon Greenstein34f80b02008-06-23 20:33:01 -07007901	val = REG_RD(bp, MCP_REG_MCPR_NVM_CFG4);
7902	bp->common.flash_size = (NVRAM_1MB_SIZE <<
7903				 (val & MCPR_NVM_CFG4_FLASH_SIZE));
7904	BNX2X_DEV_INFO("flash_size 0x%x (%d)\n",
7905		       bp->common.flash_size, bp->common.flash_size);
7906
7907	bp->common.shmem_base = REG_RD(bp, MISC_REG_SHARED_MEM_ADDR);
Dmitry Kravkovf2e08992010-10-06 03:28:26 +00007908	bp->common.shmem2_base = REG_RD(bp, (BP_PATH(bp) ?
7909					MISC_REG_GENERIC_CR_1 :
7910					MISC_REG_GENERIC_CR_0));
Eilon Greenstein34f80b02008-06-23 20:33:01 -07007911	bp->link_params.shmem_base = bp->common.shmem_base;
Yaniv Rosnera22f0782010-09-07 11:41:20 +00007912	bp->link_params.shmem2_base = bp->common.shmem2_base;
Eilon Greenstein2691d512009-08-12 08:22:08 +00007913	BNX2X_DEV_INFO("shmem offset 0x%x  shmem2 offset 0x%x\n",
7914		       bp->common.shmem_base, bp->common.shmem2_base);
Eilon Greenstein34f80b02008-06-23 20:33:01 -07007915
	/* no shmem => no management firmware; run without MCP */
Dmitry Kravkovf2e08992010-10-06 03:28:26 +00007916	if (!bp->common.shmem_base) {
Eilon Greenstein34f80b02008-06-23 20:33:01 -07007917		BNX2X_DEV_INFO("MCP not active\n");
7918		bp->flags |= NO_MCP_FLAG;
7919		return;
7920	}
7921
7922	val = SHMEM_RD(bp, validity_map[BP_PORT(bp)]);
7923	if ((val & (SHR_MEM_VALIDITY_DEV_INFO | SHR_MEM_VALIDITY_MB))
7924		!= (SHR_MEM_VALIDITY_DEV_INFO | SHR_MEM_VALIDITY_MB))
Dmitry Kravkovf2e08992010-10-06 03:28:26 +00007925		BNX2X_ERR("BAD MCP validity signature\n");
Eilon Greenstein34f80b02008-06-23 20:33:01 -07007926
7927	bp->common.hw_config = SHMEM_RD(bp, dev_info.shared_hw_config.config);
Eilon Greenstein35b19ba2009-02-12 08:36:47 +00007928	BNX2X_DEV_INFO("hw_config 0x%08x\n", bp->common.hw_config);
Eilon Greenstein34f80b02008-06-23 20:33:01 -07007929
7930	bp->link_params.hw_led_mode = ((bp->common.hw_config &
7931					SHARED_HW_CFG_LED_MODE_MASK) >>
7932				       SHARED_HW_CFG_LED_MODE_SHIFT);
7933
Eilon Greensteinc2c8b032009-02-12 08:37:14 +00007934	bp->link_params.feature_config_flags = 0;
7935	val = SHMEM_RD(bp, dev_info.shared_feature_config.config);
7936	if (val & SHARED_FEAT_CFG_OVERRIDE_PREEMPHASIS_CFG_ENABLED)
7937		bp->link_params.feature_config_flags |=
7938				FEATURE_CONFIG_OVERRIDE_PREEMPHASIS_ENABLED;
7939	else
7940		bp->link_params.feature_config_flags &=
7941				~FEATURE_CONFIG_OVERRIDE_PREEMPHASIS_ENABLED;
7942
7943	val = SHMEM_RD(bp, dev_info.bc_rev) >> 8;
7944	bp->common.bc_ver = val;
7945	BNX2X_DEV_INFO("bc_ver %X\n", val);
7946	if (val < BNX2X_BC_VER) {
7947		/* for now only warn
7948		 * later we might need to enforce this */
Dmitry Kravkovf2e08992010-10-06 03:28:26 +00007949		BNX2X_ERR("This driver needs bc_ver %X but found %X, "
7950			  "please upgrade BC\n", BNX2X_BC_VER, val);
Eilon Greenstein34f80b02008-06-23 20:33:01 -07007951	}
Eilon Greenstein4d295db2009-07-21 05:47:47 +00007952	bp->link_params.feature_config_flags |=
Yaniv Rosnera22f0782010-09-07 11:41:20 +00007953				(val >= REQ_BC_VER_4_VRFY_FIRST_PHY_OPT_MDL) ?
Dmitry Kravkovf85582f2010-10-06 03:34:21 +00007954		FEATURE_CONFIG_BC_SUPPORTS_OPT_MDL_VRFY : 0;
7955
Yaniv Rosnera22f0782010-09-07 11:41:20 +00007956	bp->link_params.feature_config_flags |=
7957			(val >= REQ_BC_VER_4_VRFY_SPECIFIC_PHY_OPT_MDL) ?
7958	FEATURE_CONFIG_BC_SUPPORTS_DUAL_PHY_OPT_MDL_VRFY : 0;
Eilon Greenstein72ce58c2008-08-13 15:52:46 -07007959
	/* WoL is only supported on vn 0 of each port */
7960	if (BP_E1HVN(bp) == 0) {
7961		pci_read_config_word(bp->pdev, bp->pm_cap + PCI_PM_PMC, &pmc);
7962		bp->flags |= (pmc & PCI_PM_CAP_PME_D3cold) ? 0 : NO_WOL_FLAG;
7963	} else {
7964		/* no WOL capability for E1HVN != 0 */
7965		bp->flags |= NO_WOL_FLAG;
7966	}
7967	BNX2X_DEV_INFO("%sWoL capable\n",
Eilon Greensteinf5372252009-02-12 08:38:30 +00007968		       (bp->flags & NO_WOL_FLAG) ? "not " : "");
Eilon Greenstein34f80b02008-06-23 20:33:01 -07007969
7970	val = SHMEM_RD(bp, dev_info.shared_hw_config.part_num);
7971	val2 = SHMEM_RD(bp, dev_info.shared_hw_config.part_num[4]);
7972	val3 = SHMEM_RD(bp, dev_info.shared_hw_config.part_num[8]);
7973	val4 = SHMEM_RD(bp, dev_info.shared_hw_config.part_num[12]);
7974
Vladislav Zolotarovcdaa7cb2010-04-19 01:13:57 +00007975	dev_info(&bp->pdev->dev, "part number %X-%X-%X-%X\n",
7976		 val, val2, val3, val4);
Eilon Greenstein34f80b02008-06-23 20:33:01 -07007977}
7978
Dmitry Kravkovf2e08992010-10-06 03:28:26 +00007979#define IGU_FID(val) GET_FIELD((val), IGU_REG_MAPPING_MEMORY_FID)
7980#define IGU_VEC(val) GET_FIELD((val), IGU_REG_MAPPING_MEMORY_VECTOR)
7981
/* Discover this PF's IGU status-block allocation.
 * In backward-compatible interrupt mode the layout is fixed and computed
 * from pfid/vn; otherwise (IGU normal mode) the IGU mapping CAM is scanned
 * for entries belonging to this PF: vector 0 is the default status block,
 * the remaining vectors form a contiguous fastpath range starting at
 * igu_base_sb. Fills bp->igu_base_sb, bp->igu_dsb_id and bp->igu_sb_cnt.
 */
7982static void __devinit bnx2x_get_igu_cam_info(struct bnx2x *bp)
7983{
7984	int pfid = BP_FUNC(bp);
7985	int vn = BP_E1HVN(bp);
7986	int igu_sb_id;
7987	u32 val;
7988	u8 fid;
7989
7990	bp->igu_base_sb = 0xff;
7991	bp->igu_sb_cnt = 0;
7992	if (CHIP_INT_MODE_IS_BC(bp)) {
		/* BC mode: fixed layout, no CAM scan needed */
7993		bp->igu_sb_cnt = min_t(u8, FP_SB_MAX_E1x,
Vladislav Zolotarovec6ba942010-12-13 05:44:01 +00007994				       NUM_IGU_SB_REQUIRED(bp->l2_cid_count));
Dmitry Kravkovf2e08992010-10-06 03:28:26 +00007995
7996		bp->igu_base_sb = (CHIP_MODE_IS_4_PORT(bp) ? pfid : vn) *
7997			FP_SB_MAX_E1x;
7998
7999		bp->igu_dsb_id = E1HVN_MAX * FP_SB_MAX_E1x +
8000			(CHIP_MODE_IS_4_PORT(bp) ? pfid : vn);
8001
8002		return;
8003	}
8004
8005	/* IGU in normal mode - read CAM */
8006	for (igu_sb_id = 0; igu_sb_id < IGU_REG_MAPPING_MEMORY_SIZE;
8007	     igu_sb_id++) {
8008		val = REG_RD(bp, IGU_REG_MAPPING_MEMORY + igu_sb_id * 4);
8009		if (!(val & IGU_REG_MAPPING_MEMORY_VALID))
8010			continue;
8011		fid = IGU_FID(val);
8012		if ((fid & IGU_FID_ENCODE_IS_PF)) {
			/* skip entries of other PFs */
8013			if ((fid & IGU_FID_PF_NUM_MASK) != pfid)
8014				continue;
8015			if (IGU_VEC(val) == 0)
8016				/* default status block */
8017				bp->igu_dsb_id = igu_sb_id;
8018			else {
8019				if (bp->igu_base_sb == 0xff)
8020					bp->igu_base_sb = igu_sb_id;
8021				bp->igu_sb_cnt++;
8022			}
8023		}
8024	}
	/* clamp to what the L2 CIDs actually need */
Vladislav Zolotarovec6ba942010-12-13 05:44:01 +00008025	bp->igu_sb_cnt = min_t(u8, bp->igu_sb_cnt,
8026			       NUM_IGU_SB_REQUIRED(bp->l2_cid_count));
Dmitry Kravkovf2e08992010-10-06 03:28:26 +00008027	if (bp->igu_sb_cnt == 0)
8028		BNX2X_ERR("CAM configuration error\n");
8029}
8030
/* Build bp->port.supported[0..1] - the aggregated link capabilities of the
 * external PHY configuration(s) - then read the PHY address for the given
 * switch configuration and mask out speeds disabled by the NVRAM
 * speed_cap_mask. With 3 PHYs (dual external) two configurations exist and
 * PHY swapping selects which external PHY backs which configuration.
 */
Eilon Greenstein34f80b02008-06-23 20:33:01 -07008031static void __devinit bnx2x_link_settings_supported(struct bnx2x *bp,
8032						    u32 switch_cfg)
8033{
Yaniv Rosnera22f0782010-09-07 11:41:20 +00008034	int cfg_size = 0, idx, port = BP_PORT(bp);
8035
8036	/* Aggregation of supported attributes of all external phys */
8037	bp->port.supported[0] = 0;
8038	bp->port.supported[1] = 0;
Yaniv Rosnerb7737c92010-09-07 11:40:54 +00008039	switch (bp->link_params.num_phys) {
8040	case 1:
Yaniv Rosnera22f0782010-09-07 11:41:20 +00008041		bp->port.supported[0] = bp->link_params.phy[INT_PHY].supported;
8042		cfg_size = 1;
8043		break;
Yaniv Rosnerb7737c92010-09-07 11:40:54 +00008044	case 2:
Yaniv Rosnera22f0782010-09-07 11:41:20 +00008045		bp->port.supported[0] = bp->link_params.phy[EXT_PHY1].supported;
8046		cfg_size = 1;
8047		break;
8048	case 3:
		/* dual external PHYs: map them to the two configurations,
		 * honoring the NVRAM PHY-swap setting */
8049		if (bp->link_params.multi_phy_config &
8050		    PORT_HW_CFG_PHY_SWAPPED_ENABLED) {
8051			bp->port.supported[1] =
8052				bp->link_params.phy[EXT_PHY1].supported;
8053			bp->port.supported[0] =
8054				bp->link_params.phy[EXT_PHY2].supported;
8055		} else {
8056			bp->port.supported[0] =
8057				bp->link_params.phy[EXT_PHY1].supported;
8058			bp->port.supported[1] =
8059				bp->link_params.phy[EXT_PHY2].supported;
8060		}
8061		cfg_size = 2;
8062		break;
Yaniv Rosnerb7737c92010-09-07 11:40:54 +00008063	}
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02008064
	/* neither configuration reports any capability - bad NVRAM */
Yaniv Rosnera22f0782010-09-07 11:41:20 +00008065	if (!(bp->port.supported[0] || bp->port.supported[1])) {
Yaniv Rosnerb7737c92010-09-07 11:40:54 +00008066		BNX2X_ERR("NVRAM config error. BAD phy config."
Yaniv Rosnera22f0782010-09-07 11:41:20 +00008067			  "PHY1 config 0x%x, PHY2 config 0x%x\n",
Yaniv Rosnerb7737c92010-09-07 11:40:54 +00008068			   SHMEM_RD(bp,
Yaniv Rosnera22f0782010-09-07 11:41:20 +00008069			   dev_info.port_hw_config[port].external_phy_config),
8070			   SHMEM_RD(bp,
8071			   dev_info.port_hw_config[port].external_phy_config2));
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02008072			return;
Dmitry Kravkovf85582f2010-10-06 03:34:21 +00008073	}
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02008074
Yaniv Rosnerb7737c92010-09-07 11:40:54 +00008075	switch (switch_cfg) {
8076	case SWITCH_CFG_1G:
Eilon Greenstein34f80b02008-06-23 20:33:01 -07008077		bp->port.phy_addr = REG_RD(bp, NIG_REG_SERDES0_CTRL_PHY_ADDR +
8078					   port*0x10);
8079		BNX2X_DEV_INFO("phy_addr 0x%x\n", bp->port.phy_addr);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02008080		break;
8081
8082	case SWITCH_CFG_10G:
Eilon Greenstein34f80b02008-06-23 20:33:01 -07008083		bp->port.phy_addr = REG_RD(bp, NIG_REG_XGXS0_CTRL_PHY_ADDR +
8084					   port*0x18);
8085		BNX2X_DEV_INFO("phy_addr 0x%x\n", bp->port.phy_addr);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02008086		break;
8087
8088	default:
8089		BNX2X_ERR("BAD switch_cfg link_config 0x%x\n",
Yaniv Rosnera22f0782010-09-07 11:41:20 +00008090			  bp->port.link_config[0]);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02008091		return;
8092	}
Yaniv Rosnera22f0782010-09-07 11:41:20 +00008093	/* mask what we support according to speed_cap_mask per configuration */
8094	for (idx = 0; idx < cfg_size; idx++) {
8095		if (!(bp->link_params.speed_cap_mask[idx] &
Yaniv Rosnerc18487e2008-06-23 20:27:52 -07008096				PORT_HW_CFG_SPEED_CAPABILITY_D0_10M_HALF))
Yaniv Rosnera22f0782010-09-07 11:41:20 +00008097			bp->port.supported[idx] &= ~SUPPORTED_10baseT_Half;
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02008098
Yaniv Rosnera22f0782010-09-07 11:41:20 +00008099		if (!(bp->link_params.speed_cap_mask[idx] &
Yaniv Rosnerc18487e2008-06-23 20:27:52 -07008100				PORT_HW_CFG_SPEED_CAPABILITY_D0_10M_FULL))
Yaniv Rosnera22f0782010-09-07 11:41:20 +00008101			bp->port.supported[idx] &= ~SUPPORTED_10baseT_Full;
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02008102

Yaniv Rosnera22f0782010-09-07 11:41:20 +00008103		if (!(bp->link_params.speed_cap_mask[idx] &
Yaniv Rosnerc18487e2008-06-23 20:27:52 -07008104				PORT_HW_CFG_SPEED_CAPABILITY_D0_100M_HALF))
Yaniv Rosnera22f0782010-09-07 11:41:20 +00008105			bp->port.supported[idx] &= ~SUPPORTED_100baseT_Half;
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02008106
Yaniv Rosnera22f0782010-09-07 11:41:20 +00008107		if (!(bp->link_params.speed_cap_mask[idx] &
Yaniv Rosnerc18487e2008-06-23 20:27:52 -07008108				PORT_HW_CFG_SPEED_CAPABILITY_D0_100M_FULL))
Yaniv Rosnera22f0782010-09-07 11:41:20 +00008109			bp->port.supported[idx] &= ~SUPPORTED_100baseT_Full;
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02008110
Yaniv Rosnera22f0782010-09-07 11:41:20 +00008111		if (!(bp->link_params.speed_cap_mask[idx] &
Yaniv Rosnerc18487e2008-06-23 20:27:52 -07008112					PORT_HW_CFG_SPEED_CAPABILITY_D0_1G))
Yaniv Rosnera22f0782010-09-07 11:41:20 +00008113			bp->port.supported[idx] &= ~(SUPPORTED_1000baseT_Half |
Dmitry Kravkovf85582f2010-10-06 03:34:21 +00008114						     SUPPORTED_1000baseT_Full);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02008115
Yaniv Rosnera22f0782010-09-07 11:41:20 +00008116		if (!(bp->link_params.speed_cap_mask[idx] &
Yaniv Rosnerc18487e2008-06-23 20:27:52 -07008117					PORT_HW_CFG_SPEED_CAPABILITY_D0_2_5G))
Yaniv Rosnera22f0782010-09-07 11:41:20 +00008118			bp->port.supported[idx] &= ~SUPPORTED_2500baseX_Full;
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02008119
Yaniv Rosnera22f0782010-09-07 11:41:20 +00008120		if (!(bp->link_params.speed_cap_mask[idx] &
Yaniv Rosnerc18487e2008-06-23 20:27:52 -07008121					PORT_HW_CFG_SPEED_CAPABILITY_D0_10G))
Yaniv Rosnera22f0782010-09-07 11:41:20 +00008122			bp->port.supported[idx] &= ~SUPPORTED_10000baseT_Full;
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02008123
Yaniv Rosnera22f0782010-09-07 11:41:20 +00008124	}
8125
8126	BNX2X_DEV_INFO("supported 0x%x 0x%x\n", bp->port.supported[0],
8127		       bp->port.supported[1]);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02008128}
8129
Eilon Greenstein34f80b02008-06-23 20:33:01 -07008130static void __devinit bnx2x_link_settings_requested(struct bnx2x *bp)
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02008131{
Yaniv Rosnera22f0782010-09-07 11:41:20 +00008132 u32 link_config, idx, cfg_size = 0;
8133 bp->port.advertising[0] = 0;
8134 bp->port.advertising[1] = 0;
8135 switch (bp->link_params.num_phys) {
8136 case 1:
8137 case 2:
8138 cfg_size = 1;
8139 break;
8140 case 3:
8141 cfg_size = 2;
8142 break;
8143 }
8144 for (idx = 0; idx < cfg_size; idx++) {
8145 bp->link_params.req_duplex[idx] = DUPLEX_FULL;
8146 link_config = bp->port.link_config[idx];
8147 switch (link_config & PORT_FEATURE_LINK_SPEED_MASK) {
Dmitry Kravkovf85582f2010-10-06 03:34:21 +00008148 case PORT_FEATURE_LINK_SPEED_AUTO:
Yaniv Rosnera22f0782010-09-07 11:41:20 +00008149 if (bp->port.supported[idx] & SUPPORTED_Autoneg) {
8150 bp->link_params.req_line_speed[idx] =
8151 SPEED_AUTO_NEG;
8152 bp->port.advertising[idx] |=
8153 bp->port.supported[idx];
Dmitry Kravkovf85582f2010-10-06 03:34:21 +00008154 } else {
8155 /* force 10G, no AN */
Yaniv Rosnera22f0782010-09-07 11:41:20 +00008156 bp->link_params.req_line_speed[idx] =
8157 SPEED_10000;
8158 bp->port.advertising[idx] |=
8159 (ADVERTISED_10000baseT_Full |
Dmitry Kravkovf85582f2010-10-06 03:34:21 +00008160 ADVERTISED_FIBRE);
Yaniv Rosnera22f0782010-09-07 11:41:20 +00008161 continue;
Dmitry Kravkovf85582f2010-10-06 03:34:21 +00008162 }
8163 break;
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02008164
Dmitry Kravkovf85582f2010-10-06 03:34:21 +00008165 case PORT_FEATURE_LINK_SPEED_10M_FULL:
Yaniv Rosnera22f0782010-09-07 11:41:20 +00008166 if (bp->port.supported[idx] & SUPPORTED_10baseT_Full) {
8167 bp->link_params.req_line_speed[idx] =
8168 SPEED_10;
8169 bp->port.advertising[idx] |=
8170 (ADVERTISED_10baseT_Full |
Dmitry Kravkovf85582f2010-10-06 03:34:21 +00008171 ADVERTISED_TP);
8172 } else {
8173 BNX2X_ERROR("NVRAM config error. "
8174 "Invalid link_config 0x%x"
8175 " speed_cap_mask 0x%x\n",
8176 link_config,
Yaniv Rosnera22f0782010-09-07 11:41:20 +00008177 bp->link_params.speed_cap_mask[idx]);
Dmitry Kravkovf85582f2010-10-06 03:34:21 +00008178 return;
8179 }
8180 break;
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02008181
Dmitry Kravkovf85582f2010-10-06 03:34:21 +00008182 case PORT_FEATURE_LINK_SPEED_10M_HALF:
Yaniv Rosnera22f0782010-09-07 11:41:20 +00008183 if (bp->port.supported[idx] & SUPPORTED_10baseT_Half) {
8184 bp->link_params.req_line_speed[idx] =
8185 SPEED_10;
8186 bp->link_params.req_duplex[idx] =
8187 DUPLEX_HALF;
8188 bp->port.advertising[idx] |=
8189 (ADVERTISED_10baseT_Half |
Dmitry Kravkovf85582f2010-10-06 03:34:21 +00008190 ADVERTISED_TP);
8191 } else {
8192 BNX2X_ERROR("NVRAM config error. "
8193 "Invalid link_config 0x%x"
8194 " speed_cap_mask 0x%x\n",
8195 link_config,
8196 bp->link_params.speed_cap_mask[idx]);
8197 return;
8198 }
8199 break;
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02008200
Dmitry Kravkovf85582f2010-10-06 03:34:21 +00008201 case PORT_FEATURE_LINK_SPEED_100M_FULL:
8202 if (bp->port.supported[idx] &
8203 SUPPORTED_100baseT_Full) {
Yaniv Rosnera22f0782010-09-07 11:41:20 +00008204 bp->link_params.req_line_speed[idx] =
8205 SPEED_100;
8206 bp->port.advertising[idx] |=
8207 (ADVERTISED_100baseT_Full |
Dmitry Kravkovf85582f2010-10-06 03:34:21 +00008208 ADVERTISED_TP);
8209 } else {
8210 BNX2X_ERROR("NVRAM config error. "
8211 "Invalid link_config 0x%x"
8212 " speed_cap_mask 0x%x\n",
8213 link_config,
8214 bp->link_params.speed_cap_mask[idx]);
8215 return;
8216 }
8217 break;
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02008218
Dmitry Kravkovf85582f2010-10-06 03:34:21 +00008219 case PORT_FEATURE_LINK_SPEED_100M_HALF:
8220 if (bp->port.supported[idx] &
8221 SUPPORTED_100baseT_Half) {
8222 bp->link_params.req_line_speed[idx] =
8223 SPEED_100;
8224 bp->link_params.req_duplex[idx] =
8225 DUPLEX_HALF;
Yaniv Rosnera22f0782010-09-07 11:41:20 +00008226 bp->port.advertising[idx] |=
8227 (ADVERTISED_100baseT_Half |
Dmitry Kravkovf85582f2010-10-06 03:34:21 +00008228 ADVERTISED_TP);
8229 } else {
8230 BNX2X_ERROR("NVRAM config error. "
Vladislav Zolotarovcdaa7cb2010-04-19 01:13:57 +00008231 "Invalid link_config 0x%x"
8232 " speed_cap_mask 0x%x\n",
Yaniv Rosnera22f0782010-09-07 11:41:20 +00008233 link_config,
8234 bp->link_params.speed_cap_mask[idx]);
Dmitry Kravkovf85582f2010-10-06 03:34:21 +00008235 return;
8236 }
8237 break;
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02008238
Dmitry Kravkovf85582f2010-10-06 03:34:21 +00008239 case PORT_FEATURE_LINK_SPEED_1G:
Yaniv Rosnera22f0782010-09-07 11:41:20 +00008240 if (bp->port.supported[idx] &
8241 SUPPORTED_1000baseT_Full) {
8242 bp->link_params.req_line_speed[idx] =
8243 SPEED_1000;
8244 bp->port.advertising[idx] |=
8245 (ADVERTISED_1000baseT_Full |
Dmitry Kravkovf85582f2010-10-06 03:34:21 +00008246 ADVERTISED_TP);
8247 } else {
8248 BNX2X_ERROR("NVRAM config error. "
Vladislav Zolotarovcdaa7cb2010-04-19 01:13:57 +00008249 "Invalid link_config 0x%x"
8250 " speed_cap_mask 0x%x\n",
Yaniv Rosnera22f0782010-09-07 11:41:20 +00008251 link_config,
8252 bp->link_params.speed_cap_mask[idx]);
Dmitry Kravkovf85582f2010-10-06 03:34:21 +00008253 return;
8254 }
8255 break;
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02008256
Dmitry Kravkovf85582f2010-10-06 03:34:21 +00008257 case PORT_FEATURE_LINK_SPEED_2_5G:
Yaniv Rosnera22f0782010-09-07 11:41:20 +00008258 if (bp->port.supported[idx] &
8259 SUPPORTED_2500baseX_Full) {
8260 bp->link_params.req_line_speed[idx] =
8261 SPEED_2500;
8262 bp->port.advertising[idx] |=
8263 (ADVERTISED_2500baseX_Full |
Eilon Greenstein34f80b02008-06-23 20:33:01 -07008264 ADVERTISED_TP);
Dmitry Kravkovf85582f2010-10-06 03:34:21 +00008265 } else {
8266 BNX2X_ERROR("NVRAM config error. "
Vladislav Zolotarovcdaa7cb2010-04-19 01:13:57 +00008267 "Invalid link_config 0x%x"
8268 " speed_cap_mask 0x%x\n",
Yaniv Rosnera22f0782010-09-07 11:41:20 +00008269 link_config,
Dmitry Kravkovf85582f2010-10-06 03:34:21 +00008270 bp->link_params.speed_cap_mask[idx]);
8271 return;
8272 }
8273 break;
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02008274
Dmitry Kravkovf85582f2010-10-06 03:34:21 +00008275 case PORT_FEATURE_LINK_SPEED_10G_CX4:
8276 case PORT_FEATURE_LINK_SPEED_10G_KX4:
8277 case PORT_FEATURE_LINK_SPEED_10G_KR:
Yaniv Rosnera22f0782010-09-07 11:41:20 +00008278 if (bp->port.supported[idx] &
8279 SUPPORTED_10000baseT_Full) {
8280 bp->link_params.req_line_speed[idx] =
8281 SPEED_10000;
8282 bp->port.advertising[idx] |=
8283 (ADVERTISED_10000baseT_Full |
Eilon Greenstein34f80b02008-06-23 20:33:01 -07008284 ADVERTISED_FIBRE);
Dmitry Kravkovf85582f2010-10-06 03:34:21 +00008285 } else {
8286 BNX2X_ERROR("NVRAM config error. "
Vladislav Zolotarovcdaa7cb2010-04-19 01:13:57 +00008287 "Invalid link_config 0x%x"
8288 " speed_cap_mask 0x%x\n",
Yaniv Rosnera22f0782010-09-07 11:41:20 +00008289 link_config,
Dmitry Kravkovf85582f2010-10-06 03:34:21 +00008290 bp->link_params.speed_cap_mask[idx]);
8291 return;
8292 }
8293 break;
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02008294
Dmitry Kravkovf85582f2010-10-06 03:34:21 +00008295 default:
8296 BNX2X_ERROR("NVRAM config error. "
8297 "BAD link speed link_config 0x%x\n",
8298 link_config);
8299 bp->link_params.req_line_speed[idx] =
8300 SPEED_AUTO_NEG;
8301 bp->port.advertising[idx] =
8302 bp->port.supported[idx];
8303 break;
8304 }
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02008305
Yaniv Rosnera22f0782010-09-07 11:41:20 +00008306 bp->link_params.req_flow_ctrl[idx] = (link_config &
Eilon Greenstein34f80b02008-06-23 20:33:01 -07008307 PORT_FEATURE_FLOW_CONTROL_MASK);
Yaniv Rosnera22f0782010-09-07 11:41:20 +00008308 if ((bp->link_params.req_flow_ctrl[idx] ==
8309 BNX2X_FLOW_CTRL_AUTO) &&
8310 !(bp->port.supported[idx] & SUPPORTED_Autoneg)) {
8311 bp->link_params.req_flow_ctrl[idx] =
8312 BNX2X_FLOW_CTRL_NONE;
8313 }
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02008314
Yaniv Rosnera22f0782010-09-07 11:41:20 +00008315 BNX2X_DEV_INFO("req_line_speed %d req_duplex %d req_flow_ctrl"
8316 " 0x%x advertising 0x%x\n",
8317 bp->link_params.req_line_speed[idx],
8318 bp->link_params.req_duplex[idx],
8319 bp->link_params.req_flow_ctrl[idx],
8320 bp->port.advertising[idx]);
8321 }
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02008322}
8323
Michael Chane665bfd2009-10-10 13:46:54 +00008324static void __devinit bnx2x_set_mac_buf(u8 *mac_buf, u32 mac_lo, u16 mac_hi)
8325{
8326 mac_hi = cpu_to_be16(mac_hi);
8327 mac_lo = cpu_to_be32(mac_lo);
8328 memcpy(mac_buf, &mac_hi, sizeof(mac_hi));
8329 memcpy(mac_buf + sizeof(mac_hi), &mac_lo, sizeof(mac_lo));
8330}
8331
/* Read the per-port link configuration that the boot firmware left in
 * shared memory (SHMEM): lane config, speed capability masks and the two
 * link_config words (one per PHY), the multi-PHY config, and the WoL
 * default.  Then probe the PHY(s), derive the supported/requested link
 * settings, and select the MDIO PHY address used by the mdio ioctl layer.
 */
static void __devinit bnx2x_get_port_hwinfo(struct bnx2x *bp)
{
	int port = BP_PORT(bp);
	u32 config;
	u32 ext_phy_type, ext_phy_config;

	bp->link_params.bp = bp;
	bp->link_params.port = port;

	bp->link_params.lane_config =
		SHMEM_RD(bp, dev_info.port_hw_config[port].lane_config);

	/* Speed capability mask and link config, one entry per PHY */
	bp->link_params.speed_cap_mask[0] =
		SHMEM_RD(bp,
			 dev_info.port_hw_config[port].speed_capability_mask);
	bp->link_params.speed_cap_mask[1] =
		SHMEM_RD(bp,
			 dev_info.port_hw_config[port].speed_capability_mask2);
	bp->port.link_config[0] =
		SHMEM_RD(bp, dev_info.port_feature_config[port].link_config);

	bp->port.link_config[1] =
		SHMEM_RD(bp, dev_info.port_feature_config[port].link_config2);

	bp->link_params.multi_phy_config =
		SHMEM_RD(bp, dev_info.port_hw_config[port].multi_phy_config);
	/* If the device is capable of WoL, set the default state according
	 * to the HW
	 */
	config = SHMEM_RD(bp, dev_info.port_feature_config[port].config);
	bp->wol = (!(bp->flags & NO_WOL_FLAG) &&
		   (config & PORT_FEATURE_WOL_ENABLED));

	BNX2X_DEV_INFO("lane_config 0x%08x "
		       "speed_cap_mask0 0x%08x link_config0 0x%08x\n",
		       bp->link_params.lane_config,
		       bp->link_params.speed_cap_mask[0],
		       bp->port.link_config[0]);

	/* Derive switch topology from link_config, then probe the PHY(s)
	 * and compute supported/requested settings from the masks read above.
	 */
	bp->link_params.switch_cfg = (bp->port.link_config[0] &
				      PORT_FEATURE_CONNECTED_SWITCH_MASK);
	bnx2x_phy_probe(&bp->link_params);
	bnx2x_link_settings_supported(bp, bp->link_params.switch_cfg);

	bnx2x_link_settings_requested(bp);

	/*
	 * If connected directly, work with the internal PHY, otherwise, work
	 * with the external PHY
	 */
	ext_phy_config =
		SHMEM_RD(bp,
			 dev_info.port_hw_config[port].external_phy_config);
	ext_phy_type = XGXS_EXT_PHY_TYPE(ext_phy_config);
	if (ext_phy_type == PORT_HW_CFG_XGXS_EXT_PHY_TYPE_DIRECT)
		bp->mdio.prtad = bp->port.phy_addr;

	else if ((ext_phy_type != PORT_HW_CFG_XGXS_EXT_PHY_TYPE_FAILURE) &&
		 (ext_phy_type != PORT_HW_CFG_XGXS_EXT_PHY_TYPE_NOT_CONN))
		bp->mdio.prtad =
			XGXS_EXT_PHY_ADDR(ext_phy_config);
	/* else (failed / not-connected external PHY): bp->mdio.prtad is
	 * deliberately left unchanged
	 */

	/*
	 * Check if hw lock is required to access MDC/MDIO bus to the PHY(s)
	 * In MF mode, it is set to cover self test cases
	 */
	if (IS_MF(bp))
		bp->port.need_hw_lock = 1;
	else
		bp->port.need_hw_lock = bnx2x_hw_lock_required(bp,
					bp->common.shmem_base,
					bp->common.shmem2_base);
}
Eilon Greenstein01cd4522009-08-12 08:23:08 +00008405
/* Populate the netdev MAC address (and, under BCM_CNIC, the iSCSI and FCoE
 * MACs) from the management firmware configuration.  Source depends on mode:
 * no MCP -> random MAC workaround; multi-function -> per-function MF config;
 * single-function -> per-port SHMEM config.
 */
static void __devinit bnx2x_get_mac_hwinfo(struct bnx2x *bp)
{
	u32 val, val2;
	int func = BP_ABS_FUNC(bp);
	int port = BP_PORT(bp);

	if (BP_NOMCP(bp)) {
		/* No management firmware - cannot read a real MAC */
		BNX2X_ERROR("warning: random MAC workaround active\n");
		random_ether_addr(bp->dev->dev_addr);
	} else if (IS_MF(bp)) {
		/* Multi-function: MAC comes from this function's MF config,
		 * but only if both halves differ from the "unset" defaults.
		 */
		val2 = MF_CFG_RD(bp, func_mf_config[func].mac_upper);
		val = MF_CFG_RD(bp, func_mf_config[func].mac_lower);
		if ((val2 != FUNC_MF_CFG_UPPERMAC_DEFAULT) &&
		    (val != FUNC_MF_CFG_LOWERMAC_DEFAULT))
			bnx2x_set_mac_buf(bp->dev->dev_addr, val, val2);

#ifdef BCM_CNIC
		/* iSCSI NPAR MAC */
		if (IS_MF_SI(bp)) {
			u32 cfg = MF_CFG_RD(bp, func_ext_config[func].func_cfg);
			if (cfg & MACP_FUNC_CFG_FLAGS_ISCSI_OFFLOAD) {
				val2 = MF_CFG_RD(bp, func_ext_config[func].
						 iscsi_mac_addr_upper);
				val = MF_CFG_RD(bp, func_ext_config[func].
						 iscsi_mac_addr_lower);
				bnx2x_set_mac_buf(bp->iscsi_mac, val, val2);
			}
		}
#endif
	} else {
		/* in SF read MACs from port configuration */
		val2 = SHMEM_RD(bp, dev_info.port_hw_config[port].mac_upper);
		val = SHMEM_RD(bp, dev_info.port_hw_config[port].mac_lower);
		bnx2x_set_mac_buf(bp->dev->dev_addr, val, val2);

#ifdef BCM_CNIC
		val2 = SHMEM_RD(bp, dev_info.port_hw_config[port].
				iscsi_mac_upper);
		val = SHMEM_RD(bp, dev_info.port_hw_config[port].
				iscsi_mac_lower);
		bnx2x_set_mac_buf(bp->iscsi_mac, val, val2);
#endif
	}

	/* Mirror the chosen address into the link layer and permanent addr */
	memcpy(bp->link_params.mac_addr, bp->dev->dev_addr, ETH_ALEN);
	memcpy(bp->dev->perm_addr, bp->dev->dev_addr, ETH_ALEN);

#ifdef BCM_CNIC
	/* Inform the upper layers about FCoE MAC */
	if (!CHIP_IS_E1x(bp)) {
		if (IS_MF_SD(bp))
			memcpy(bp->fip_mac, bp->dev->dev_addr,
			       sizeof(bp->fip_mac));
		else
			memcpy(bp->fip_mac, bp->iscsi_mac,
			       sizeof(bp->fip_mac));
	}
#endif
}
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02008465
Eilon Greenstein34f80b02008-06-23 20:33:01 -07008466static int __devinit bnx2x_get_hwinfo(struct bnx2x *bp)
8467{
Dmitry Kravkov0793f83f2010-12-01 12:39:28 -08008468 int /*abs*/func = BP_ABS_FUNC(bp);
8469 int vn, port;
8470 u32 val = 0;
Eilon Greenstein34f80b02008-06-23 20:33:01 -07008471 int rc = 0;
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02008472
Eilon Greenstein34f80b02008-06-23 20:33:01 -07008473 bnx2x_get_common_hwinfo(bp);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02008474
Dmitry Kravkovf2e08992010-10-06 03:28:26 +00008475 if (CHIP_IS_E1x(bp)) {
8476 bp->common.int_block = INT_BLOCK_HC;
Dmitry Kravkov523224a2010-10-06 03:23:26 +00008477
Dmitry Kravkovf2e08992010-10-06 03:28:26 +00008478 bp->igu_dsb_id = DEF_SB_IGU_ID;
8479 bp->igu_base_sb = 0;
Vladislav Zolotarovec6ba942010-12-13 05:44:01 +00008480 bp->igu_sb_cnt = min_t(u8, FP_SB_MAX_E1x,
8481 NUM_IGU_SB_REQUIRED(bp->l2_cid_count));
Dmitry Kravkovf2e08992010-10-06 03:28:26 +00008482 } else {
8483 bp->common.int_block = INT_BLOCK_IGU;
8484 val = REG_RD(bp, IGU_REG_BLOCK_CONFIGURATION);
8485 if (val & IGU_BLOCK_CONFIGURATION_REG_BACKWARD_COMP_EN) {
8486 DP(NETIF_MSG_PROBE, "IGU Backward Compatible Mode\n");
8487 bp->common.int_block |= INT_BLOCK_MODE_BW_COMP;
8488 } else
8489 DP(NETIF_MSG_PROBE, "IGU Normal Mode\n");
8490
8491 bnx2x_get_igu_cam_info(bp);
8492
8493 }
8494 DP(NETIF_MSG_PROBE, "igu_dsb_id %d igu_base_sb %d igu_sb_cnt %d\n",
8495 bp->igu_dsb_id, bp->igu_base_sb, bp->igu_sb_cnt);
8496
8497 /*
8498 * Initialize MF configuration
8499 */
Dmitry Kravkov523224a2010-10-06 03:23:26 +00008500
Dmitry Kravkovfb3bff12010-10-06 03:26:40 +00008501 bp->mf_ov = 0;
8502 bp->mf_mode = 0;
Dmitry Kravkovf2e08992010-10-06 03:28:26 +00008503 vn = BP_E1HVN(bp);
Dmitry Kravkov0793f83f2010-12-01 12:39:28 -08008504 port = BP_PORT(bp);
8505
Dmitry Kravkovf2e08992010-10-06 03:28:26 +00008506 if (!CHIP_IS_E1(bp) && !BP_NOMCP(bp)) {
Dmitry Kravkov0793f83f2010-12-01 12:39:28 -08008507 DP(NETIF_MSG_PROBE,
8508 "shmem2base 0x%x, size %d, mfcfg offset %d\n",
8509 bp->common.shmem2_base, SHMEM2_RD(bp, size),
8510 (u32)offsetof(struct shmem2_region, mf_cfg_addr));
Dmitry Kravkovf2e08992010-10-06 03:28:26 +00008511 if (SHMEM2_HAS(bp, mf_cfg_addr))
8512 bp->common.mf_cfg_base = SHMEM2_RD(bp, mf_cfg_addr);
8513 else
8514 bp->common.mf_cfg_base = bp->common.shmem_base +
Dmitry Kravkov523224a2010-10-06 03:23:26 +00008515 offsetof(struct shmem_region, func_mb) +
8516 E1H_FUNC_MAX * sizeof(struct drv_func_mb);
Dmitry Kravkov0793f83f2010-12-01 12:39:28 -08008517 /*
8518 * get mf configuration:
8519 * 1. existance of MF configuration
8520 * 2. MAC address must be legal (check only upper bytes)
8521 * for Switch-Independent mode;
8522 * OVLAN must be legal for Switch-Dependent mode
8523 * 3. SF_MODE configures specific MF mode
8524 */
8525 if (bp->common.mf_cfg_base != SHMEM_MF_CFG_ADDR_NONE) {
8526 /* get mf configuration */
8527 val = SHMEM_RD(bp,
8528 dev_info.shared_feature_config.config);
8529 val &= SHARED_FEAT_CFG_FORCE_SF_MODE_MASK;
Dmitry Kravkov523224a2010-10-06 03:23:26 +00008530
Dmitry Kravkov0793f83f2010-12-01 12:39:28 -08008531 switch (val) {
8532 case SHARED_FEAT_CFG_FORCE_SF_MODE_SWITCH_INDEPT:
8533 val = MF_CFG_RD(bp, func_mf_config[func].
8534 mac_upper);
8535 /* check for legal mac (upper bytes)*/
8536 if (val != 0xffff) {
8537 bp->mf_mode = MULTI_FUNCTION_SI;
8538 bp->mf_config[vn] = MF_CFG_RD(bp,
8539 func_mf_config[func].config);
8540 } else
8541 DP(NETIF_MSG_PROBE, "illegal MAC "
8542 "address for SI\n");
8543 break;
8544 case SHARED_FEAT_CFG_FORCE_SF_MODE_MF_ALLOWED:
8545 /* get OV configuration */
8546 val = MF_CFG_RD(bp,
8547 func_mf_config[FUNC_0].e1hov_tag);
8548 val &= FUNC_MF_CFG_E1HOV_TAG_MASK;
8549
8550 if (val != FUNC_MF_CFG_E1HOV_TAG_DEFAULT) {
8551 bp->mf_mode = MULTI_FUNCTION_SD;
8552 bp->mf_config[vn] = MF_CFG_RD(bp,
8553 func_mf_config[func].config);
8554 } else
8555 DP(NETIF_MSG_PROBE, "illegal OV for "
8556 "SD\n");
8557 break;
8558 default:
8559 /* Unknown configuration: reset mf_config */
8560 bp->mf_config[vn] = 0;
8561 DP(NETIF_MSG_PROBE, "Unkown MF mode 0x%x\n",
8562 val);
8563 }
8564 }
8565
Eilon Greenstein2691d512009-08-12 08:22:08 +00008566 BNX2X_DEV_INFO("%s function mode\n",
Dmitry Kravkovfb3bff12010-10-06 03:26:40 +00008567 IS_MF(bp) ? "multi" : "single");
Eilon Greenstein2691d512009-08-12 08:22:08 +00008568
Dmitry Kravkov0793f83f2010-12-01 12:39:28 -08008569 switch (bp->mf_mode) {
8570 case MULTI_FUNCTION_SD:
8571 val = MF_CFG_RD(bp, func_mf_config[func].e1hov_tag) &
8572 FUNC_MF_CFG_E1HOV_TAG_MASK;
Eilon Greenstein2691d512009-08-12 08:22:08 +00008573 if (val != FUNC_MF_CFG_E1HOV_TAG_DEFAULT) {
Dmitry Kravkovfb3bff12010-10-06 03:26:40 +00008574 bp->mf_ov = val;
Dmitry Kravkov0793f83f2010-12-01 12:39:28 -08008575 BNX2X_DEV_INFO("MF OV for func %d is %d"
8576 " (0x%04x)\n", func,
8577 bp->mf_ov, bp->mf_ov);
Eilon Greenstein2691d512009-08-12 08:22:08 +00008578 } else {
Dmitry Kravkov0793f83f2010-12-01 12:39:28 -08008579 BNX2X_ERR("No valid MF OV for func %d,"
8580 " aborting\n", func);
Eilon Greenstein34f80b02008-06-23 20:33:01 -07008581 rc = -EPERM;
8582 }
Dmitry Kravkov0793f83f2010-12-01 12:39:28 -08008583 break;
8584 case MULTI_FUNCTION_SI:
8585 BNX2X_DEV_INFO("func %d is in MF "
8586 "switch-independent mode\n", func);
8587 break;
8588 default:
8589 if (vn) {
8590 BNX2X_ERR("VN %d in single function mode,"
8591 " aborting\n", vn);
Eilon Greenstein2691d512009-08-12 08:22:08 +00008592 rc = -EPERM;
8593 }
Dmitry Kravkov0793f83f2010-12-01 12:39:28 -08008594 break;
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02008595 }
Dmitry Kravkov0793f83f2010-12-01 12:39:28 -08008596
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02008597 }
8598
Dmitry Kravkovf2e08992010-10-06 03:28:26 +00008599 /* adjust igu_sb_cnt to MF for E1x */
8600 if (CHIP_IS_E1x(bp) && IS_MF(bp))
Dmitry Kravkov523224a2010-10-06 03:23:26 +00008601 bp->igu_sb_cnt /= E1HVN_MAX;
8602
Dmitry Kravkovf2e08992010-10-06 03:28:26 +00008603 /*
8604 * adjust E2 sb count: to be removed when FW will support
8605 * more then 16 L2 clients
8606 */
8607#define MAX_L2_CLIENTS 16
8608 if (CHIP_IS_E2(bp))
8609 bp->igu_sb_cnt = min_t(u8, bp->igu_sb_cnt,
8610 MAX_L2_CLIENTS / (IS_MF(bp) ? 4 : 1));
8611
Eilon Greenstein34f80b02008-06-23 20:33:01 -07008612 if (!BP_NOMCP(bp)) {
8613 bnx2x_get_port_hwinfo(bp);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02008614
Dmitry Kravkovf2e08992010-10-06 03:28:26 +00008615 bp->fw_seq =
8616 (SHMEM_RD(bp, func_mb[BP_FW_MB_IDX(bp)].drv_mb_header) &
8617 DRV_MSG_SEQ_NUMBER_MASK);
Eilon Greenstein34f80b02008-06-23 20:33:01 -07008618 BNX2X_DEV_INFO("fw_seq 0x%08x\n", bp->fw_seq);
8619 }
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02008620
Dmitry Kravkov0793f83f2010-12-01 12:39:28 -08008621 /* Get MAC addresses */
8622 bnx2x_get_mac_hwinfo(bp);
Eilon Greenstein34f80b02008-06-23 20:33:01 -07008623
8624 return rc;
8625}
8626
Vladislav Zolotarov34f24c72010-04-19 01:13:23 +00008627static void __devinit bnx2x_read_fwinfo(struct bnx2x *bp)
8628{
8629 int cnt, i, block_end, rodi;
8630 char vpd_data[BNX2X_VPD_LEN+1];
8631 char str_id_reg[VENDOR_ID_LEN+1];
8632 char str_id_cap[VENDOR_ID_LEN+1];
8633 u8 len;
8634
8635 cnt = pci_read_vpd(bp->pdev, 0, BNX2X_VPD_LEN, vpd_data);
8636 memset(bp->fw_ver, 0, sizeof(bp->fw_ver));
8637
8638 if (cnt < BNX2X_VPD_LEN)
8639 goto out_not_found;
8640
8641 i = pci_vpd_find_tag(vpd_data, 0, BNX2X_VPD_LEN,
8642 PCI_VPD_LRDT_RO_DATA);
8643 if (i < 0)
8644 goto out_not_found;
8645
8646
8647 block_end = i + PCI_VPD_LRDT_TAG_SIZE +
8648 pci_vpd_lrdt_size(&vpd_data[i]);
8649
8650 i += PCI_VPD_LRDT_TAG_SIZE;
8651
8652 if (block_end > BNX2X_VPD_LEN)
8653 goto out_not_found;
8654
8655 rodi = pci_vpd_find_info_keyword(vpd_data, i, block_end,
8656 PCI_VPD_RO_KEYWORD_MFR_ID);
8657 if (rodi < 0)
8658 goto out_not_found;
8659
8660 len = pci_vpd_info_field_size(&vpd_data[rodi]);
8661
8662 if (len != VENDOR_ID_LEN)
8663 goto out_not_found;
8664
8665 rodi += PCI_VPD_INFO_FLD_HDR_SIZE;
8666
8667 /* vendor specific info */
8668 snprintf(str_id_reg, VENDOR_ID_LEN + 1, "%04x", PCI_VENDOR_ID_DELL);
8669 snprintf(str_id_cap, VENDOR_ID_LEN + 1, "%04X", PCI_VENDOR_ID_DELL);
8670 if (!strncmp(str_id_reg, &vpd_data[rodi], VENDOR_ID_LEN) ||
8671 !strncmp(str_id_cap, &vpd_data[rodi], VENDOR_ID_LEN)) {
8672
8673 rodi = pci_vpd_find_info_keyword(vpd_data, i, block_end,
8674 PCI_VPD_RO_KEYWORD_VENDOR0);
8675 if (rodi >= 0) {
8676 len = pci_vpd_info_field_size(&vpd_data[rodi]);
8677
8678 rodi += PCI_VPD_INFO_FLD_HDR_SIZE;
8679
8680 if (len < 32 && (len + rodi) <= BNX2X_VPD_LEN) {
8681 memcpy(bp->fw_ver, &vpd_data[rodi], len);
8682 bp->fw_ver[len] = ' ';
8683 }
8684 }
8685 return;
8686 }
8687out_not_found:
8688 return;
8689}
8690
/* One-time driver-private initialization at probe time: synchronization
 * primitives, deferred work items, HW/firmware info retrieval, settings
 * derived from module parameters (multi_mode, int_mode, disable_tpa,
 * dropless_fc, mrrs, poll), coalescing granularity and the periodic timer.
 * Returns 0 or a negative error propagated from bnx2x_get_hwinfo() /
 * bnx2x_alloc_mem_bp().
 */
static int __devinit bnx2x_init_bp(struct bnx2x *bp)
{
	int func;
	int timer_interval;
	int rc;

	/* Disable interrupt handling until HW is initialized */
	atomic_set(&bp->intr_sem, 1);
	smp_wmb(); /* Ensure that bp->intr_sem update is SMP-safe */

	mutex_init(&bp->port.phy_mutex);
	mutex_init(&bp->fw_mb_mutex);
	spin_lock_init(&bp->stats_lock);
#ifdef BCM_CNIC
	mutex_init(&bp->cnic_mutex);
#endif

	INIT_DELAYED_WORK(&bp->sp_task, bnx2x_sp_task);
	INIT_DELAYED_WORK(&bp->reset_task, bnx2x_reset_task);

	rc = bnx2x_get_hwinfo(bp);

	if (!rc)
		rc = bnx2x_alloc_mem_bp(bp);

	bnx2x_read_fwinfo(bp);

	func = BP_FUNC(bp);

	/* need to reset chip if undi was active */
	if (!BP_NOMCP(bp))
		bnx2x_undi_unload(bp);

	if (CHIP_REV_IS_FPGA(bp))
		dev_err(&bp->pdev->dev, "FPGA detected\n");

	if (BP_NOMCP(bp) && (func == 0))
		dev_err(&bp->pdev->dev, "MCP disabled, "
					"must load devices in order!\n");

	bp->multi_mode = multi_mode;
	bp->int_mode = int_mode;

	bp->dev->features |= NETIF_F_GRO;

	/* Set TPA flags */
	if (disable_tpa) {
		bp->flags &= ~TPA_ENABLE_FLAG;
		bp->dev->features &= ~NETIF_F_LRO;
	} else {
		bp->flags |= TPA_ENABLE_FLAG;
		bp->dev->features |= NETIF_F_LRO;
	}
	bp->disable_tpa = disable_tpa;

	/* E1 does not support dropless flow control */
	if (CHIP_IS_E1(bp))
		bp->dropless_fc = 0;
	else
		bp->dropless_fc = dropless_fc;

	bp->mrrs = mrrs;

	bp->tx_ring_size = MAX_TX_AVAIL;

	bp->rx_csum = 1;

	/* make sure that the numbers are in the right granularity */
	bp->tx_ticks = (50 / BNX2X_BTR) * BNX2X_BTR;
	bp->rx_ticks = (25 / BNX2X_BTR) * BNX2X_BTR;

	/* 'poll' module parameter, when set, overrides the timer interval */
	timer_interval = (CHIP_REV_IS_SLOW(bp) ? 5*HZ : HZ);
	bp->current_interval = (poll ? poll : timer_interval);

	init_timer(&bp->timer);
	bp->timer.expires = jiffies + bp->current_interval;
	bp->timer.data = (unsigned long) bp;
	bp->timer.function = bnx2x_timer;

	bnx2x_dcbx_set_state(bp, true, BNX2X_DCBX_ENABLED_ON_NEG_ON);
	bnx2x_dcbx_init_params(bp);

	return rc;
}
8774
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02008775
Dmitry Kravkovde0c62d2010-07-27 12:35:24 +00008776/****************************************************************************
8777* General service functions
8778****************************************************************************/
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02008779
Yitchak Gertnerbb2a0f72008-06-23 20:33:36 -07008780/* called with rtnl_lock */
/* ndo_open callback - called with rtnl_lock held.
 * Powers the device up; if a previous error-recovery ("process kill") flow
 * has not completed, this function tries to finish it first, and fails with
 * -EAGAIN if it cannot.  On success returns the bnx2x_nic_load() result.
 */
static int bnx2x_open(struct net_device *dev)
{
	struct bnx2x *bp = netdev_priv(dev);

	netif_carrier_off(dev);

	bnx2x_set_power_state(bp, PCI_D0);

	if (!bnx2x_reset_is_done(bp)) {
		/* do { } while (0) so the failure path below can be skipped
		 * with a single 'break' once recovery succeeds */
		do {
			/* Reset MCP mail box sequence if there is an ongoing
			 * recovery
			 */
			bp->fw_seq = 0;

			/* If this is the first function to load while reset
			 * is still not cleared, attempt the recovery here.
			 * We don't check the attention state because it may
			 * have already been cleared by a "common" reset, but
			 * we shall proceed with "process kill" anyway.
			 */
			if ((bnx2x_get_load_cnt(bp) == 0) &&
			    bnx2x_trylock_hw_lock(bp,
			    HW_LOCK_RESOURCE_RESERVED_08) &&
			    (!bnx2x_leader_reset(bp))) {
				DP(NETIF_MSG_HW, "Recovered in open\n");
				break;
			}

			/* Recovery failed - power back down and bail out */
			bnx2x_set_power_state(bp, PCI_D3hot);

			printk(KERN_ERR"%s: Recovery flow hasn't been properly"
			" completed yet. Try again later. If u still see this"
			" message after a few retries then power cycle is"
			" required.\n", bp->dev->name);

			return -EAGAIN;
		} while (0);
	}

	bp->recovery_state = BNX2X_RECOVERY_DONE;

	return bnx2x_nic_load(bp, LOAD_OPEN);
}
8825
Yitchak Gertnerbb2a0f72008-06-23 20:33:36 -07008826/* called with rtnl_lock */
/* ndo_stop callback - called with rtnl_lock held.
 * Unloads the NIC (releasing IRQs) and powers the device down.
 */
static int bnx2x_close(struct net_device *dev)
{
	struct bnx2x *bp = netdev_priv(dev);

	/* Unload the driver, release IRQs */
	bnx2x_nic_unload(bp, UNLOAD_CLOSE);
	/* Put the device into low-power state until the next open */
	bnx2x_set_power_state(bp, PCI_D3hot);

	return 0;
}
8837
Eilon Greensteinf5372252009-02-12 08:38:30 +00008838/* called with netif_tx_lock from dev_mcast.c */
/* called with netif_tx_lock from dev_mcast.c */
/* Program the RX filtering mode from dev->flags and the multicast list:
 * promiscuous, all-multicast, or a specific multicast configuration.
 * For the specific case, E1 sends the MC list to firmware while E1H-style
 * chips program a 256-bit CRC32c hash filter directly into MC_HASH
 * registers.  Must not sleep - may be invoked from non-sleepable context.
 */
void bnx2x_set_rx_mode(struct net_device *dev)
{
	struct bnx2x *bp = netdev_priv(dev);
	u32 rx_mode = BNX2X_RX_MODE_NORMAL;
	int port = BP_PORT(bp);

	/* RX filtering can only be configured while the device is open */
	if (bp->state != BNX2X_STATE_OPEN) {
		DP(NETIF_MSG_IFUP, "state is %x, returning\n", bp->state);
		return;
	}

	DP(NETIF_MSG_IFUP, "dev->flags = %x\n", dev->flags);

	if (dev->flags & IFF_PROMISC)
		rx_mode = BNX2X_RX_MODE_PROMISC;
	else if ((dev->flags & IFF_ALLMULTI) ||
		 ((netdev_mc_count(dev) > BNX2X_MAX_MULTICAST) &&
		  CHIP_IS_E1(bp)))
		/* E1 falls back to all-multi when the MC list is too long */
		rx_mode = BNX2X_RX_MODE_ALLMULTI;
	else { /* some multicasts */
		if (CHIP_IS_E1(bp)) {
			/*
			 * set mc list, do not wait as wait implies sleep
			 * and set_rx_mode can be invoked from non-sleepable
			 * context
			 */
			u8 offset = (CHIP_REV_IS_SLOW(bp) ?
				     BNX2X_MAX_EMUL_MULTI*(1 + port) :
				     BNX2X_MAX_MULTICAST*(1 + port));

			bnx2x_set_e1_mc_list(bp, offset);
		} else { /* E1H */
			/* Accept one or more multicasts */
			struct netdev_hw_addr *ha;
			u32 mc_filter[MC_HASH_SIZE];
			u32 crc, bit, regidx;
			int i;

			memset(mc_filter, 0, 4 * MC_HASH_SIZE);

			/* Hash each MC address: top 8 bits of the CRC select
			 * one of 256 filter bits (regidx = word, bit = bit
			 * within word).
			 */
			netdev_for_each_mc_addr(ha, dev) {
				DP(NETIF_MSG_IFUP, "Adding mcast MAC: %pM\n",
				   bnx2x_mc_addr(ha));

				crc = crc32c_le(0, bnx2x_mc_addr(ha),
						ETH_ALEN);
				bit = (crc >> 24) & 0xff;
				regidx = bit >> 5;
				bit &= 0x1f;
				mc_filter[regidx] |= (1 << bit);
			}

			/* Write the assembled hash table to the chip */
			for (i = 0; i < MC_HASH_SIZE; i++)
				REG_WR(bp, MC_HASH_OFFSET(bp, i),
				       mc_filter[i]);
		}
	}

	bp->rx_mode = rx_mode;
	bnx2x_set_storm_rx_mode(bp);
}
8900
Yaniv Rosnerc18487e2008-06-23 20:27:52 -07008901/* called with rtnl_lock */
Eilon Greenstein01cd4522009-08-12 08:23:08 +00008902static int bnx2x_mdio_read(struct net_device *netdev, int prtad,
8903 int devad, u16 addr)
8904{
8905 struct bnx2x *bp = netdev_priv(netdev);
8906 u16 value;
8907 int rc;
Eilon Greenstein01cd4522009-08-12 08:23:08 +00008908
8909 DP(NETIF_MSG_LINK, "mdio_read: prtad 0x%x, devad 0x%x, addr 0x%x\n",
8910 prtad, devad, addr);
8911
Eilon Greenstein01cd4522009-08-12 08:23:08 +00008912 /* The HW expects different devad if CL22 is used */
8913 devad = (devad == MDIO_DEVAD_NONE) ? DEFAULT_PHY_DEV_ADDR : devad;
8914
8915 bnx2x_acquire_phy_lock(bp);
Yaniv Rosnere10bc842010-09-07 11:40:50 +00008916 rc = bnx2x_phy_read(&bp->link_params, prtad, devad, addr, &value);
Eilon Greenstein01cd4522009-08-12 08:23:08 +00008917 bnx2x_release_phy_lock(bp);
8918 DP(NETIF_MSG_LINK, "mdio_read_val 0x%x rc = 0x%x\n", value, rc);
8919
8920 if (!rc)
8921 rc = value;
8922 return rc;
8923}
8924
8925/* called with rtnl_lock */
8926static int bnx2x_mdio_write(struct net_device *netdev, int prtad, int devad,
8927 u16 addr, u16 value)
8928{
8929 struct bnx2x *bp = netdev_priv(netdev);
Eilon Greenstein01cd4522009-08-12 08:23:08 +00008930 int rc;
8931
8932 DP(NETIF_MSG_LINK, "mdio_write: prtad 0x%x, devad 0x%x, addr 0x%x,"
8933 " value 0x%x\n", prtad, devad, addr, value);
8934
Eilon Greenstein01cd4522009-08-12 08:23:08 +00008935 /* The HW expects different devad if CL22 is used */
8936 devad = (devad == MDIO_DEVAD_NONE) ? DEFAULT_PHY_DEV_ADDR : devad;
8937
8938 bnx2x_acquire_phy_lock(bp);
Yaniv Rosnere10bc842010-09-07 11:40:50 +00008939 rc = bnx2x_phy_write(&bp->link_params, prtad, devad, addr, value);
Eilon Greenstein01cd4522009-08-12 08:23:08 +00008940 bnx2x_release_phy_lock(bp);
8941 return rc;
8942}
8943
8944/* called with rtnl_lock */
/* ndo_do_ioctl callback - called with rtnl_lock held.
 * Delegates MII ioctls (SIOCGMIIPHY/SIOCGMIIREG/SIOCSMIIREG) to the
 * generic mdio layer, which routes them through bnx2x_mdio_read/write.
 * Refused with -EAGAIN while the interface is down.
 */
static int bnx2x_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
{
	struct bnx2x *bp = netdev_priv(dev);
	struct mii_ioctl_data *mdio = if_mii(ifr);

	DP(NETIF_MSG_LINK, "ioctl: phy id 0x%x, reg 0x%x, val_in 0x%x\n",
	   mdio->phy_id, mdio->reg_num, mdio->val_in);

	if (!netif_running(dev))
		return -EAGAIN;

	return mdio_mii_ioctl(&bp->mdio, mdio, cmd);
}
8958
#ifdef CONFIG_NET_POLL_CONTROLLER
/* netpoll hook: run the interrupt handler with the IRQ line masked */
static void poll_bnx2x(struct net_device *dev)
{
	struct bnx2x *bp = netdev_priv(dev);
	int irq = bp->pdev->irq;

	disable_irq(irq);
	bnx2x_interrupt(irq, dev);
	enable_irq(irq);
}
#endif
8969
/* net_device callbacks wired into each bnx2x netdev by bnx2x_init_dev();
 * the networking core invokes these under its usual locking rules. */
static const struct net_device_ops bnx2x_netdev_ops = {
	.ndo_open		= bnx2x_open,
	.ndo_stop		= bnx2x_close,
	.ndo_start_xmit		= bnx2x_start_xmit,
	.ndo_select_queue	= bnx2x_select_queue,
	.ndo_set_multicast_list	= bnx2x_set_rx_mode,
	.ndo_set_mac_address	= bnx2x_change_mac_addr,
	.ndo_validate_addr	= eth_validate_addr,
	.ndo_do_ioctl		= bnx2x_ioctl,
	.ndo_change_mtu		= bnx2x_change_mtu,
	.ndo_tx_timeout		= bnx2x_tx_timeout,
#ifdef CONFIG_NET_POLL_CONTROLLER
	.ndo_poll_controller	= poll_bnx2x,
#endif
};
8985
/* bnx2x_init_dev - set up PCI, BARs, DMA masks and netdev plumbing.
 *
 * Enables the PCI device, validates both memory BARs, claims the
 * regions (only on the first enable of a shared device), programs
 * 64/32-bit DMA masks, maps the register and doorbell BARs, clears
 * the chip's indirect-address registers, and fills in the netdev
 * feature flags, ethtool ops and MDIO accessors.
 *
 * Returns 0 on success or a negative errno; on failure everything
 * done so far is unwound through the err_out_* labels.
 */
static int __devinit bnx2x_init_dev(struct pci_dev *pdev,
				    struct net_device *dev)
{
	struct bnx2x *bp;
	int rc;

	SET_NETDEV_DEV(dev, &pdev->dev);
	bp = netdev_priv(dev);

	bp->dev = dev;
	bp->pdev = pdev;
	bp->flags = 0;
	/* PCI function number identifies this PF on multi-function devices */
	bp->pf_num = PCI_FUNC(pdev->devfn);

	rc = pci_enable_device(pdev);
	if (rc) {
		dev_err(&bp->pdev->dev,
			"Cannot enable PCI device, aborting\n");
		goto err_out;
	}

	/* BAR 0 holds the register space, BAR 2 the doorbells; both must
	 * be memory-mapped resources */
	if (!(pci_resource_flags(pdev, 0) & IORESOURCE_MEM)) {
		dev_err(&bp->pdev->dev,
			"Cannot find PCI device base address, aborting\n");
		rc = -ENODEV;
		goto err_out_disable;
	}

	if (!(pci_resource_flags(pdev, 2) & IORESOURCE_MEM)) {
		dev_err(&bp->pdev->dev, "Cannot find second PCI device"
			" base address, aborting\n");
		rc = -ENODEV;
		goto err_out_disable;
	}

	/* Regions/bus-mastering are per-device state: only claim them on
	 * the first enable (enable_cnt == 1) so a second function sharing
	 * the device does not double-request them */
	if (atomic_read(&pdev->enable_cnt) == 1) {
		rc = pci_request_regions(pdev, DRV_MODULE_NAME);
		if (rc) {
			dev_err(&bp->pdev->dev,
				"Cannot obtain PCI resources, aborting\n");
			goto err_out_disable;
		}

		pci_set_master(pdev);
		pci_save_state(pdev);
	}

	bp->pm_cap = pci_find_capability(pdev, PCI_CAP_ID_PM);
	if (bp->pm_cap == 0) {
		dev_err(&bp->pdev->dev,
			"Cannot find power management capability, aborting\n");
		rc = -EIO;
		goto err_out_release;
	}

	bp->pcie_cap = pci_find_capability(pdev, PCI_CAP_ID_EXP);
	if (bp->pcie_cap == 0) {
		dev_err(&bp->pdev->dev,
			"Cannot find PCI Express capability, aborting\n");
		rc = -EIO;
		goto err_out_release;
	}

	/* Prefer 64-bit DMA (and record it so SKB handling can use high
	 * memory); fall back to a 32-bit mask otherwise */
	if (dma_set_mask(&pdev->dev, DMA_BIT_MASK(64)) == 0) {
		bp->flags |= USING_DAC_FLAG;
		if (dma_set_coherent_mask(&pdev->dev, DMA_BIT_MASK(64)) != 0) {
			dev_err(&bp->pdev->dev, "dma_set_coherent_mask"
				" failed, aborting\n");
			rc = -EIO;
			goto err_out_release;
		}

	} else if (dma_set_mask(&pdev->dev, DMA_BIT_MASK(32)) != 0) {
		dev_err(&bp->pdev->dev,
			"System does not support DMA, aborting\n");
		rc = -EIO;
		goto err_out_release;
	}

	dev->mem_start = pci_resource_start(pdev, 0);
	dev->base_addr = dev->mem_start;
	dev->mem_end = pci_resource_end(pdev, 0);

	dev->irq = pdev->irq;

	bp->regview = pci_ioremap_bar(pdev, 0);
	if (!bp->regview) {
		dev_err(&bp->pdev->dev,
			"Cannot map register space, aborting\n");
		rc = -ENOMEM;
		goto err_out_unmap;
	}

	/* Map no more doorbell space than the BAR actually provides */
	bp->doorbells = ioremap_nocache(pci_resource_start(pdev, 2),
					min_t(u64, BNX2X_DB_SIZE(bp),
					      pci_resource_len(pdev, 2)));
	if (!bp->doorbells) {
		dev_err(&bp->pdev->dev,
			"Cannot map doorbell space, aborting\n");
		rc = -ENOMEM;
		goto err_out_unmap;
	}

	bnx2x_set_power_state(bp, PCI_D0);

	/* clean indirect addresses */
	pci_write_config_dword(bp->pdev, PCICFG_GRC_ADDRESS,
			       PCICFG_VENDOR_ID_OFFSET);
	REG_WR(bp, PXP2_REG_PGL_ADDR_88_F0 + BP_PORT(bp)*16, 0);
	REG_WR(bp, PXP2_REG_PGL_ADDR_8C_F0 + BP_PORT(bp)*16, 0);
	REG_WR(bp, PXP2_REG_PGL_ADDR_90_F0 + BP_PORT(bp)*16, 0);
	REG_WR(bp, PXP2_REG_PGL_ADDR_94_F0 + BP_PORT(bp)*16, 0);

	/* Reset the load counter */
	bnx2x_clear_load_cnt(bp);

	dev->watchdog_timeo = TX_TIMEOUT;

	dev->netdev_ops = &bnx2x_netdev_ops;
	bnx2x_set_ethtool_ops(dev);
	dev->features |= NETIF_F_SG;
	dev->features |= NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM;
	if (bp->flags & USING_DAC_FLAG)
		dev->features |= NETIF_F_HIGHDMA;
	dev->features |= (NETIF_F_TSO | NETIF_F_TSO_ECN);
	dev->features |= NETIF_F_TSO6;
	dev->features |= (NETIF_F_HW_VLAN_TX | NETIF_F_HW_VLAN_RX);

	/* Mirror the feature set for VLAN devices stacked on top of us */
	dev->vlan_features |= NETIF_F_SG;
	dev->vlan_features |= NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM;
	if (bp->flags & USING_DAC_FLAG)
		dev->vlan_features |= NETIF_F_HIGHDMA;
	dev->vlan_features |= (NETIF_F_TSO | NETIF_F_TSO_ECN);
	dev->vlan_features |= NETIF_F_TSO6;

#ifdef BCM_DCB
	dev->dcbnl_ops = &bnx2x_dcbnl_ops;
#endif

	/* get_port_hwinfo() will set prtad and mmds properly */
	bp->mdio.prtad = MDIO_PRTAD_NONE;
	bp->mdio.mmds = 0;
	bp->mdio.mode_support = MDIO_SUPPORTS_C45 | MDIO_EMULATE_C22;
	bp->mdio.dev = dev;
	bp->mdio.mdio_read = bnx2x_mdio_read;
	bp->mdio.mdio_write = bnx2x_mdio_write;

	return 0;

err_out_unmap:
	if (bp->regview) {
		iounmap(bp->regview);
		bp->regview = NULL;
	}
	if (bp->doorbells) {
		iounmap(bp->doorbells);
		bp->doorbells = NULL;
	}

err_out_release:
	if (atomic_read(&pdev->enable_cnt) == 1)
		pci_release_regions(pdev);

err_out_disable:
	pci_disable_device(pdev);
	pci_set_drvdata(pdev, NULL);

err_out:
	return rc;
}
9156
Eilon Greenstein37f9ce62009-08-12 08:23:34 +00009157static void __devinit bnx2x_get_pcie_width_speed(struct bnx2x *bp,
9158 int *width, int *speed)
Eliezer Tamir25047952008-02-28 11:50:16 -08009159{
9160 u32 val = REG_RD(bp, PCICFG_OFFSET + PCICFG_LINK_CONTROL);
9161
Eilon Greenstein37f9ce62009-08-12 08:23:34 +00009162 *width = (val & PCICFG_LINK_WIDTH) >> PCICFG_LINK_WIDTH_SHIFT;
9163
9164 /* return value of 1=2.5GHz 2=5GHz */
9165 *speed = (val & PCICFG_LINK_SPEED) >> PCICFG_LINK_SPEED_SHIFT;
Eliezer Tamir25047952008-02-28 11:50:16 -08009166}
9167
Dmitry Kravkov6891dd22010-08-03 21:49:40 +00009168static int bnx2x_check_firmware(struct bnx2x *bp)
Vladislav Zolotarov94a78b72009-04-27 03:27:43 -07009169{
Eilon Greenstein37f9ce62009-08-12 08:23:34 +00009170 const struct firmware *firmware = bp->firmware;
Vladislav Zolotarov94a78b72009-04-27 03:27:43 -07009171 struct bnx2x_fw_file_hdr *fw_hdr;
9172 struct bnx2x_fw_file_section *sections;
Vladislav Zolotarov94a78b72009-04-27 03:27:43 -07009173 u32 offset, len, num_ops;
Eilon Greenstein37f9ce62009-08-12 08:23:34 +00009174 u16 *ops_offsets;
Vladislav Zolotarov94a78b72009-04-27 03:27:43 -07009175 int i;
Eilon Greenstein37f9ce62009-08-12 08:23:34 +00009176 const u8 *fw_ver;
Vladislav Zolotarov94a78b72009-04-27 03:27:43 -07009177
9178 if (firmware->size < sizeof(struct bnx2x_fw_file_hdr))
9179 return -EINVAL;
9180
9181 fw_hdr = (struct bnx2x_fw_file_hdr *)firmware->data;
9182 sections = (struct bnx2x_fw_file_section *)fw_hdr;
9183
9184 /* Make sure none of the offsets and sizes make us read beyond
9185 * the end of the firmware data */
9186 for (i = 0; i < sizeof(*fw_hdr) / sizeof(*sections); i++) {
9187 offset = be32_to_cpu(sections[i].offset);
9188 len = be32_to_cpu(sections[i].len);
9189 if (offset + len > firmware->size) {
Vladislav Zolotarovcdaa7cb2010-04-19 01:13:57 +00009190 dev_err(&bp->pdev->dev,
9191 "Section %d length is out of bounds\n", i);
Vladislav Zolotarov94a78b72009-04-27 03:27:43 -07009192 return -EINVAL;
9193 }
9194 }
9195
9196 /* Likewise for the init_ops offsets */
9197 offset = be32_to_cpu(fw_hdr->init_ops_offsets.offset);
9198 ops_offsets = (u16 *)(firmware->data + offset);
9199 num_ops = be32_to_cpu(fw_hdr->init_ops.len) / sizeof(struct raw_op);
9200
9201 for (i = 0; i < be32_to_cpu(fw_hdr->init_ops_offsets.len) / 2; i++) {
9202 if (be16_to_cpu(ops_offsets[i]) > num_ops) {
Vladislav Zolotarovcdaa7cb2010-04-19 01:13:57 +00009203 dev_err(&bp->pdev->dev,
9204 "Section offset %d is out of bounds\n", i);
Vladislav Zolotarov94a78b72009-04-27 03:27:43 -07009205 return -EINVAL;
9206 }
9207 }
9208
9209 /* Check FW version */
9210 offset = be32_to_cpu(fw_hdr->fw_version.offset);
9211 fw_ver = firmware->data + offset;
9212 if ((fw_ver[0] != BCM_5710_FW_MAJOR_VERSION) ||
9213 (fw_ver[1] != BCM_5710_FW_MINOR_VERSION) ||
9214 (fw_ver[2] != BCM_5710_FW_REVISION_VERSION) ||
9215 (fw_ver[3] != BCM_5710_FW_ENGINEERING_VERSION)) {
Vladislav Zolotarovcdaa7cb2010-04-19 01:13:57 +00009216 dev_err(&bp->pdev->dev,
9217 "Bad FW version:%d.%d.%d.%d. Should be %d.%d.%d.%d\n",
Vladislav Zolotarov94a78b72009-04-27 03:27:43 -07009218 fw_ver[0], fw_ver[1], fw_ver[2],
9219 fw_ver[3], BCM_5710_FW_MAJOR_VERSION,
9220 BCM_5710_FW_MINOR_VERSION,
9221 BCM_5710_FW_REVISION_VERSION,
9222 BCM_5710_FW_ENGINEERING_VERSION);
Eilon Greensteinab6ad5a2009-08-12 08:24:29 +00009223 return -EINVAL;
Vladislav Zolotarov94a78b72009-04-27 03:27:43 -07009224 }
9225
9226 return 0;
9227}
9228
Eilon Greensteinab6ad5a2009-08-12 08:24:29 +00009229static inline void be32_to_cpu_n(const u8 *_source, u8 *_target, u32 n)
Vladislav Zolotarov94a78b72009-04-27 03:27:43 -07009230{
Eilon Greensteinab6ad5a2009-08-12 08:24:29 +00009231 const __be32 *source = (const __be32 *)_source;
9232 u32 *target = (u32 *)_target;
Vladislav Zolotarov94a78b72009-04-27 03:27:43 -07009233 u32 i;
Vladislav Zolotarov94a78b72009-04-27 03:27:43 -07009234
9235 for (i = 0; i < n/4; i++)
9236 target[i] = be32_to_cpu(source[i]);
9237}
9238
9239/*
9240 Ops array is stored in the following format:
9241 {op(8bit), offset(24bit, big endian), data(32bit, big endian)}
9242 */
Eilon Greensteinab6ad5a2009-08-12 08:24:29 +00009243static inline void bnx2x_prep_ops(const u8 *_source, u8 *_target, u32 n)
Vladislav Zolotarov94a78b72009-04-27 03:27:43 -07009244{
Eilon Greensteinab6ad5a2009-08-12 08:24:29 +00009245 const __be32 *source = (const __be32 *)_source;
9246 struct raw_op *target = (struct raw_op *)_target;
Vladislav Zolotarov94a78b72009-04-27 03:27:43 -07009247 u32 i, j, tmp;
Vladislav Zolotarov94a78b72009-04-27 03:27:43 -07009248
Eilon Greensteinab6ad5a2009-08-12 08:24:29 +00009249 for (i = 0, j = 0; i < n/8; i++, j += 2) {
Vladislav Zolotarov94a78b72009-04-27 03:27:43 -07009250 tmp = be32_to_cpu(source[j]);
9251 target[i].op = (tmp >> 24) & 0xff;
Vladislav Zolotarovcdaa7cb2010-04-19 01:13:57 +00009252 target[i].offset = tmp & 0xffffff;
9253 target[i].raw_data = be32_to_cpu(source[j + 1]);
Vladislav Zolotarov94a78b72009-04-27 03:27:43 -07009254 }
9255}
Eilon Greensteinab6ad5a2009-08-12 08:24:29 +00009256
Dmitry Kravkov523224a2010-10-06 03:23:26 +00009257/**
9258 * IRO array is stored in the following format:
9259 * {base(24bit), m1(16bit), m2(16bit), m3(16bit), size(16bit) }
9260 */
9261static inline void bnx2x_prep_iro(const u8 *_source, u8 *_target, u32 n)
9262{
9263 const __be32 *source = (const __be32 *)_source;
9264 struct iro *target = (struct iro *)_target;
9265 u32 i, j, tmp;
9266
9267 for (i = 0, j = 0; i < n/sizeof(struct iro); i++) {
9268 target[i].base = be32_to_cpu(source[j]);
9269 j++;
9270 tmp = be32_to_cpu(source[j]);
9271 target[i].m1 = (tmp >> 16) & 0xffff;
9272 target[i].m2 = tmp & 0xffff;
9273 j++;
9274 tmp = be32_to_cpu(source[j]);
9275 target[i].m3 = (tmp >> 16) & 0xffff;
9276 target[i].size = tmp & 0xffff;
9277 j++;
9278 }
9279}
9280
Eilon Greensteinab6ad5a2009-08-12 08:24:29 +00009281static inline void be16_to_cpu_n(const u8 *_source, u8 *_target, u32 n)
Vladislav Zolotarov94a78b72009-04-27 03:27:43 -07009282{
Eilon Greensteinab6ad5a2009-08-12 08:24:29 +00009283 const __be16 *source = (const __be16 *)_source;
9284 u16 *target = (u16 *)_target;
Vladislav Zolotarov94a78b72009-04-27 03:27:43 -07009285 u32 i;
Vladislav Zolotarov94a78b72009-04-27 03:27:43 -07009286
9287 for (i = 0; i < n/2; i++)
9288 target[i] = be16_to_cpu(source[i]);
9289}
9290
/* Allocate bp->arr large enough for the firmware section described by
 * fw_hdr->arr, then decode that section into it with 'func'; jumps to
 * 'lbl' on allocation failure.  Relies on 'bp' and 'fw_hdr' being in
 * scope at the call site (see bnx2x_init_firmware()). */
#define BNX2X_ALLOC_AND_SET(arr, lbl, func) \
do { \
	u32 len = be32_to_cpu(fw_hdr->arr.len); \
	bp->arr = kmalloc(len, GFP_KERNEL); \
	if (!bp->arr) { \
		pr_err("Failed to allocate %d bytes for "#arr"\n", len); \
		goto lbl; \
	} \
	func(bp->firmware->data + be32_to_cpu(fw_hdr->arr.offset), \
	     (u8 *)bp->arr, len); \
} while (0)
Vladislav Zolotarov94a78b72009-04-27 03:27:43 -07009302
/* bnx2x_init_firmware - load and unpack the chip firmware image.
 *
 * Picks the firmware file matching the chip revision, requests it from
 * userspace, validates it with bnx2x_check_firmware(), then decodes the
 * init data/ops/offsets/IRO sections into freshly allocated bp->* arrays
 * and points the STORM init pointers at their in-blob data.
 *
 * Returns 0 on success or a negative errno.  On failure the label chain
 * at the bottom frees exactly the arrays allocated so far (labels are
 * ordered inverse to the allocations above) and releases the firmware.
 */
int bnx2x_init_firmware(struct bnx2x *bp)
{
	const char *fw_file_name;
	struct bnx2x_fw_file_hdr *fw_hdr;
	int rc;

	/* one firmware image per chip family */
	if (CHIP_IS_E1(bp))
		fw_file_name = FW_FILE_NAME_E1;
	else if (CHIP_IS_E1H(bp))
		fw_file_name = FW_FILE_NAME_E1H;
	else if (CHIP_IS_E2(bp))
		fw_file_name = FW_FILE_NAME_E2;
	else {
		BNX2X_ERR("Unsupported chip revision\n");
		return -EINVAL;
	}

	BNX2X_DEV_INFO("Loading %s\n", fw_file_name);

	rc = request_firmware(&bp->firmware, fw_file_name, &bp->pdev->dev);
	if (rc) {
		BNX2X_ERR("Can't load firmware file %s\n", fw_file_name);
		goto request_firmware_exit;
	}

	rc = bnx2x_check_firmware(bp);
	if (rc) {
		BNX2X_ERR("Corrupt firmware file %s\n", fw_file_name);
		goto request_firmware_exit;
	}

	fw_hdr = (struct bnx2x_fw_file_hdr *)bp->firmware->data;

	/* Initialize the pointers to the init arrays */
	/* Blob */
	BNX2X_ALLOC_AND_SET(init_data, request_firmware_exit, be32_to_cpu_n);

	/* Opcodes */
	BNX2X_ALLOC_AND_SET(init_ops, init_ops_alloc_err, bnx2x_prep_ops);

	/* Offsets */
	BNX2X_ALLOC_AND_SET(init_ops_offsets, init_offsets_alloc_err,
			    be16_to_cpu_n);

	/* STORMs firmware */
	INIT_TSEM_INT_TABLE_DATA(bp) = bp->firmware->data +
			be32_to_cpu(fw_hdr->tsem_int_table_data.offset);
	INIT_TSEM_PRAM_DATA(bp)      = bp->firmware->data +
			be32_to_cpu(fw_hdr->tsem_pram_data.offset);
	INIT_USEM_INT_TABLE_DATA(bp) = bp->firmware->data +
			be32_to_cpu(fw_hdr->usem_int_table_data.offset);
	INIT_USEM_PRAM_DATA(bp)      = bp->firmware->data +
			be32_to_cpu(fw_hdr->usem_pram_data.offset);
	INIT_XSEM_INT_TABLE_DATA(bp) = bp->firmware->data +
			be32_to_cpu(fw_hdr->xsem_int_table_data.offset);
	INIT_XSEM_PRAM_DATA(bp)      = bp->firmware->data +
			be32_to_cpu(fw_hdr->xsem_pram_data.offset);
	INIT_CSEM_INT_TABLE_DATA(bp) = bp->firmware->data +
			be32_to_cpu(fw_hdr->csem_int_table_data.offset);
	INIT_CSEM_PRAM_DATA(bp)      = bp->firmware->data +
			be32_to_cpu(fw_hdr->csem_pram_data.offset);
	/* IRO */
	BNX2X_ALLOC_AND_SET(iro_arr, iro_alloc_err, bnx2x_prep_iro);

	return 0;

iro_alloc_err:
	kfree(bp->init_ops_offsets);
init_offsets_alloc_err:
	kfree(bp->init_ops);
init_ops_alloc_err:
	kfree(bp->init_data);
request_firmware_exit:
	release_firmware(bp->firmware);

	return rc;
}
9380
/* Compute the QM connection-ID count for l2_cid_count L2 CIDs: add the
 * CNIC CIDs when CNIC support is built in, then round up to the QM's
 * granularity. */
static inline int bnx2x_set_qm_cid_count(struct bnx2x *bp, int l2_cid_count)
{
	int cid_count = L2_FP_COUNT(l2_cid_count);

#ifdef BCM_CNIC
	cid_count += CNIC_CID_MAX;
#endif
	return roundup(cid_count, QM_CID_ROUND);
}
Dmitry Kravkovf85582f2010-10-06 03:34:21 +00009390
/* bnx2x_init_one - PCI probe entry point.
 *
 * Sizes the CID/status-block count from the board type, allocates the
 * multiqueue netdev, runs the PCI/BAR setup (bnx2x_init_dev) and driver
 * state init (bnx2x_init_bp), configures the interrupt mode and NAPI,
 * registers the netdev and prints a one-line device summary.
 *
 * Returns 0 on success or a negative errno, unwinding partial setup
 * via init_one_exit on failure.
 */
static int __devinit bnx2x_init_one(struct pci_dev *pdev,
				    const struct pci_device_id *ent)
{
	struct net_device *dev = NULL;
	struct bnx2x *bp;
	int pcie_width, pcie_speed;
	int rc, cid_count;

	/* fastpath status-block budget depends on the chip family */
	switch (ent->driver_data) {
	case BCM57710:
	case BCM57711:
	case BCM57711E:
		cid_count = FP_SB_MAX_E1x;
		break;

	case BCM57712:
	case BCM57712E:
		cid_count = FP_SB_MAX_E2;
		break;

	default:
		pr_err("Unknown board_type (%ld), aborting\n",
			   ent->driver_data);
		return -ENODEV;
	}

	cid_count += NONE_ETH_CONTEXT_USE + CNIC_CONTEXT_USE;

	/* dev zeroed in init_etherdev */
	dev = alloc_etherdev_mq(sizeof(*bp), cid_count);
	if (!dev) {
		dev_err(&pdev->dev, "Cannot allocate net device\n");
		return -ENOMEM;
	}

	bp = netdev_priv(dev);
	bp->msg_enable = debug;

	pci_set_drvdata(pdev, dev);

	bp->l2_cid_count = cid_count;

	rc = bnx2x_init_dev(pdev, dev);
	if (rc < 0) {
		/* init_dev already unwound its own work; just drop the netdev */
		free_netdev(dev);
		return rc;
	}

	rc = bnx2x_init_bp(bp);
	if (rc)
		goto init_one_exit;

	/* calc qm_cid_count */
	bp->qm_cid_count = bnx2x_set_qm_cid_count(bp, cid_count);

#ifdef BCM_CNIC
	/* disable FCOE L2 queue for E1x*/
	if (CHIP_IS_E1x(bp))
		bp->flags |= NO_FCOE_FLAG;

#endif

	/* Configure interrupt mode: try to enable MSI-X/MSI if
	 * needed, set bp->num_queues appropriately.
	 */
	bnx2x_set_int_mode(bp);

	/* Add all NAPI objects */
	bnx2x_add_all_napi(bp);

	/* NOTE(review): init_one_exit below does not undo
	 * bnx2x_set_int_mode()/bnx2x_add_all_napi(); verify no MSI/NAPI
	 * teardown is needed when register_netdev() fails */
	rc = register_netdev(dev);
	if (rc) {
		dev_err(&pdev->dev, "Cannot register net device\n");
		goto init_one_exit;
	}

#ifdef BCM_CNIC
	if (!NO_FCOE(bp)) {
		/* Add storage MAC address */
		rtnl_lock();
		dev_addr_add(bp->dev, bp->fip_mac, NETDEV_HW_ADDR_T_SAN);
		rtnl_unlock();
	}
#endif

	bnx2x_get_pcie_width_speed(bp, &pcie_width, &pcie_speed);

	netdev_info(dev, "%s (%c%d) PCI-E x%d %s found at mem %lx,"
	       " IRQ %d, ", board_info[ent->driver_data].name,
	       (CHIP_REV(bp) >> 12) + 'A', (CHIP_METAL(bp) >> 4),
	       pcie_width,
	       ((!CHIP_IS_E2(bp) && pcie_speed == 2) ||
		 (CHIP_IS_E2(bp) && pcie_speed == 1)) ?
	       "5GHz (Gen2)" : "2.5GHz",
	       dev->base_addr, bp->pdev->irq);
	pr_cont("node addr %pM\n", dev->dev_addr);

	return 0;

init_one_exit:
	if (bp->regview)
		iounmap(bp->regview);

	if (bp->doorbells)
		iounmap(bp->doorbells);

	free_netdev(dev);

	if (atomic_read(&pdev->enable_cnt) == 1)
		pci_release_regions(pdev);

	pci_disable_device(pdev);
	pci_set_drvdata(pdev, NULL);

	return rc;
}
9507
/* bnx2x_remove_one - PCI remove entry point.
 *
 * Tears down everything bnx2x_init_one() set up, in reverse order:
 * drop the FCoE SAN MAC, unregister the netdev, free NAPI objects,
 * disable MSI/MSI-X (briefly forcing D0 so config writes reach us),
 * cancel the reset work, unmap BARs and release PCI resources.
 */
static void __devexit bnx2x_remove_one(struct pci_dev *pdev)
{
	struct net_device *dev = pci_get_drvdata(pdev);
	struct bnx2x *bp;

	if (!dev) {
		dev_err(&pdev->dev, "BAD net device from bnx2x_init_one\n");
		return;
	}
	bp = netdev_priv(dev);

#ifdef BCM_CNIC
	/* Delete storage MAC address */
	if (!NO_FCOE(bp)) {
		rtnl_lock();
		dev_addr_del(bp->dev, bp->fip_mac, NETDEV_HW_ADDR_T_SAN);
		rtnl_unlock();
	}
#endif

	unregister_netdev(dev);

	/* Delete all NAPI objects */
	bnx2x_del_all_napi(bp);

	/* Power on: we can't let PCI layer write to us while we are in D3 */
	bnx2x_set_power_state(bp, PCI_D0);

	/* Disable MSI/MSI-X */
	bnx2x_disable_msi(bp);

	/* Power off */
	bnx2x_set_power_state(bp, PCI_D3hot);

	/* Make sure RESET task is not scheduled before continuing */
	cancel_delayed_work_sync(&bp->reset_task);

	if (bp->regview)
		iounmap(bp->regview);

	if (bp->doorbells)
		iounmap(bp->doorbells);

	bnx2x_free_mem_bp(bp);

	free_netdev(dev);

	if (atomic_read(&pdev->enable_cnt) == 1)
		pci_release_regions(pdev);

	pci_disable_device(pdev);
	pci_set_drvdata(pdev, NULL);
}
9561
/* bnx2x_eeh_nic_unload - minimal NIC shutdown after a PCI bus error.
 *
 * Unlike the normal unload path, the hardware may be unreachable here,
 * so this only quiesces the software side: stop the netif/NAPI layer,
 * kill the timer and statistics, release IRQs and free all rings and
 * driver memory.  Leaves bp->state at BNX2X_STATE_CLOSED.
 * Always returns 0.
 */
static int bnx2x_eeh_nic_unload(struct bnx2x *bp)
{
	int i;

	bp->state = BNX2X_STATE_ERROR;

	bp->rx_mode = BNX2X_RX_MODE_NONE;

	bnx2x_netif_stop(bp, 0);
	netif_carrier_off(bp->dev);

	del_timer_sync(&bp->timer);
	bp->stats_state = STATS_STATE_DISABLED;
	DP(BNX2X_MSG_STATS, "stats_state - DISABLED\n");

	/* Release IRQs */
	bnx2x_free_irq(bp);

	/* Free SKBs, SGEs, TPA pool and driver internals */
	bnx2x_free_skbs(bp);

	for_each_rx_queue(bp, i)
		bnx2x_free_rx_sge_range(bp, bp->fp + i, NUM_RX_SGE);

	bnx2x_free_mem(bp);

	bp->state = BNX2X_STATE_CLOSED;

	return 0;
}
9592
/* bnx2x_eeh_recover - re-establish MCP contact after a PCI error reset.
 *
 * Re-reads the shared-memory base from the chip and sanity-checks it;
 * if it is outside the expected window the MCP is treated as inactive
 * (NO_MCP_FLAG).  Otherwise validates the shmem signature and refreshes
 * the firmware mailbox sequence number.
 */
static void bnx2x_eeh_recover(struct bnx2x *bp)
{
	u32 val;

	mutex_init(&bp->port.phy_mutex);

	bp->common.shmem_base = REG_RD(bp, MISC_REG_SHARED_MEM_ADDR);
	bp->link_params.shmem_base = bp->common.shmem_base;
	BNX2X_DEV_INFO("shmem offset is 0x%x\n", bp->common.shmem_base);

	/* shmem base outside [0xA0000, 0xC0000) means no working MCP */
	if (!bp->common.shmem_base ||
	    (bp->common.shmem_base < 0xA0000) ||
	    (bp->common.shmem_base >= 0xC0000)) {
		BNX2X_DEV_INFO("MCP not active\n");
		bp->flags |= NO_MCP_FLAG;
		return;
	}

	val = SHMEM_RD(bp, validity_map[BP_PORT(bp)]);
	if ((val & (SHR_MEM_VALIDITY_DEV_INFO | SHR_MEM_VALIDITY_MB))
		!= (SHR_MEM_VALIDITY_DEV_INFO | SHR_MEM_VALIDITY_MB))
		BNX2X_ERR("BAD MCP validity signature\n");

	if (!BP_NOMCP(bp)) {
		bp->fw_seq =
		    (SHMEM_RD(bp, func_mb[BP_FW_MB_IDX(bp)].drv_mb_header) &
		     DRV_MSG_SEQ_NUMBER_MASK);
		BNX2X_DEV_INFO("fw_seq 0x%08x\n", bp->fw_seq);
	}
}
9623
Wendy Xiong493adb12008-06-23 20:36:22 -07009624/**
9625 * bnx2x_io_error_detected - called when PCI error is detected
9626 * @pdev: Pointer to PCI device
9627 * @state: The current pci connection state
9628 *
9629 * This function is called after a PCI bus error affecting
9630 * this device has been detected.
9631 */
9632static pci_ers_result_t bnx2x_io_error_detected(struct pci_dev *pdev,
9633 pci_channel_state_t state)
9634{
9635 struct net_device *dev = pci_get_drvdata(pdev);
9636 struct bnx2x *bp = netdev_priv(dev);
9637
9638 rtnl_lock();
9639
9640 netif_device_detach(dev);
9641
Dean Nelson07ce50e2009-07-31 09:13:25 +00009642 if (state == pci_channel_io_perm_failure) {
9643 rtnl_unlock();
9644 return PCI_ERS_RESULT_DISCONNECT;
9645 }
9646
Wendy Xiong493adb12008-06-23 20:36:22 -07009647 if (netif_running(dev))
Yitchak Gertnerf8ef6e42008-09-09 05:07:25 -07009648 bnx2x_eeh_nic_unload(bp);
Wendy Xiong493adb12008-06-23 20:36:22 -07009649
9650 pci_disable_device(pdev);
9651
9652 rtnl_unlock();
9653
9654 /* Request a slot reset */
9655 return PCI_ERS_RESULT_NEED_RESET;
9656}
9657
9658/**
9659 * bnx2x_io_slot_reset - called after the PCI bus has been reset
9660 * @pdev: Pointer to PCI device
9661 *
9662 * Restart the card from scratch, as if from a cold-boot.
9663 */
9664static pci_ers_result_t bnx2x_io_slot_reset(struct pci_dev *pdev)
9665{
9666 struct net_device *dev = pci_get_drvdata(pdev);
9667 struct bnx2x *bp = netdev_priv(dev);
9668
9669 rtnl_lock();
9670
9671 if (pci_enable_device(pdev)) {
9672 dev_err(&pdev->dev,
9673 "Cannot re-enable PCI device after reset\n");
9674 rtnl_unlock();
9675 return PCI_ERS_RESULT_DISCONNECT;
9676 }
9677
9678 pci_set_master(pdev);
9679 pci_restore_state(pdev);
9680
9681 if (netif_running(dev))
9682 bnx2x_set_power_state(bp, PCI_D0);
9683
9684 rtnl_unlock();
9685
9686 return PCI_ERS_RESULT_RECOVERED;
9687}
9688
/**
 * bnx2x_io_resume - called when traffic can start flowing again
 * @pdev: Pointer to PCI device
 *
 * This callback is called when the error recovery driver tells us that
 * its OK to resume normal operation.
 */
static void bnx2x_io_resume(struct pci_dev *pdev)
{
	struct net_device *dev = pci_get_drvdata(pdev);
	struct bnx2x *bp = netdev_priv(dev);

	/* A parity-error recovery is still in flight; resuming now would
	 * interfere with it, so bail out and let the caller retry */
	if (bp->recovery_state != BNX2X_RECOVERY_DONE) {
		printk(KERN_ERR "Handling parity error recovery. "
				"Try again later\n");
		return;
	}

	rtnl_lock();

	/* re-sync with the MCP before reloading the NIC */
	bnx2x_eeh_recover(bp);

	if (netif_running(dev))
		bnx2x_nic_load(bp, LOAD_NORMAL);

	netif_device_attach(dev);

	rtnl_unlock();
}
9718
/* PCI error recovery (AER/EEH) entry points */
static struct pci_error_handlers bnx2x_err_handler = {
	.error_detected = bnx2x_io_error_detected,
	.slot_reset     = bnx2x_io_slot_reset,
	.resume         = bnx2x_io_resume,
};
9724
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02009725static struct pci_driver bnx2x_pci_driver = {
Wendy Xiong493adb12008-06-23 20:36:22 -07009726 .name = DRV_MODULE_NAME,
9727 .id_table = bnx2x_pci_tbl,
9728 .probe = bnx2x_init_one,
9729 .remove = __devexit_p(bnx2x_remove_one),
9730 .suspend = bnx2x_suspend,
9731 .resume = bnx2x_resume,
9732 .err_handler = &bnx2x_err_handler,
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02009733};
9734
9735static int __init bnx2x_init(void)
9736{
Stanislaw Gruszkadd21ca62009-05-05 23:22:01 +00009737 int ret;
9738
Joe Perches7995c642010-02-17 15:01:52 +00009739 pr_info("%s", version);
Eilon Greenstein938cf542009-08-12 08:23:37 +00009740
Eilon Greenstein1cf167f2009-01-14 21:22:18 -08009741 bnx2x_wq = create_singlethread_workqueue("bnx2x");
9742 if (bnx2x_wq == NULL) {
Joe Perches7995c642010-02-17 15:01:52 +00009743 pr_err("Cannot create workqueue\n");
Eilon Greenstein1cf167f2009-01-14 21:22:18 -08009744 return -ENOMEM;
9745 }
9746
Stanislaw Gruszkadd21ca62009-05-05 23:22:01 +00009747 ret = pci_register_driver(&bnx2x_pci_driver);
9748 if (ret) {
Joe Perches7995c642010-02-17 15:01:52 +00009749 pr_err("Cannot register driver\n");
Stanislaw Gruszkadd21ca62009-05-05 23:22:01 +00009750 destroy_workqueue(bnx2x_wq);
9751 }
9752 return ret;
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02009753}
9754
/* Module exit: mirror of bnx2x_init() */
static void __exit bnx2x_cleanup(void)
{
	pci_unregister_driver(&bnx2x_pci_driver);

	/* Destroy the workqueue only after the driver is gone, so no
	 * per-device work can still be queued on it.
	 */
	destroy_workqueue(bnx2x_wq);
}
9761
/* Module entry and exit points */
module_init(bnx2x_init);
module_exit(bnx2x_cleanup);
9764
Michael Chan993ac7b2009-10-10 13:46:56 +00009765#ifdef BCM_CNIC
9766
/* count denotes the number of new completions we have seen */
static void bnx2x_cnic_sp_post(struct bnx2x *bp, int count)
{
	struct eth_spe *spe;

#ifdef BNX2X_STOP_ON_ERROR
	if (unlikely(bp->panic))
		return;
#endif

	spin_lock_bh(&bp->spq_lock);
	/* Each completion returns one L5 SPQ credit */
	BUG_ON(bp->cnic_spq_pending < count);
	bp->cnic_spq_pending -= count;


	/* Drain the CNIC kernel work queue into the slow-path queue for as
	 * long as credits allow.
	 */
	for (; bp->cnic_kwq_pending; bp->cnic_kwq_pending--) {
		u16 type = (le16_to_cpu(bp->cnic_kwq_cons->hdr.type)
				& SPE_HDR_CONN_TYPE) >>
				SPE_HDR_CONN_TYPE_SHIFT;

		/* Set validation for iSCSI L2 client before sending SETUP
		 * ramrod
		 */
		if (type == ETH_CONNECTION_TYPE) {
			u8 cmd = (le32_to_cpu(bp->cnic_kwq_cons->
					      hdr.conn_and_cmd_data) >>
						SPE_HDR_CMD_ID_SHIFT) & 0xff;

			if (cmd == RAMROD_CMD_ID_ETH_CLIENT_SETUP)
				bnx2x_set_ctx_validation(&bp->context.
						vcxt[BNX2X_ISCSI_ETH_CID].eth,
					HW_CID(bp, BNX2X_ISCSI_ETH_CID));
		}

		/* There may be not more than 8 L2 and COMMON SPEs and not more
		 * than 8 L5 SPEs in the air.
		 */
		if ((type == NONE_CONNECTION_TYPE) ||
		    (type == ETH_CONNECTION_TYPE)) {
			/* L2/COMMON entries consume the shared spq_left pool */
			if (!atomic_read(&bp->spq_left))
				break;
			else
				atomic_dec(&bp->spq_left);
		} else if ((type == ISCSI_CONNECTION_TYPE) ||
			   (type == FCOE_CONNECTION_TYPE)) {
			/* L5 entries are bounded by max_kwqe_pending */
			if (bp->cnic_spq_pending >=
			    bp->cnic_eth_dev.max_kwqe_pending)
				break;
			else
				bp->cnic_spq_pending++;
		} else {
			BNX2X_ERR("Unknown SPE type: %d\n", type);
			bnx2x_panic();
			break;
		}

		/* Copy the KWQE into the next slow-path queue slot */
		spe = bnx2x_sp_get_next(bp);
		*spe = *bp->cnic_kwq_cons;

		DP(NETIF_MSG_TIMER, "pending on SPQ %d, on KWQ %d count %d\n",
		   bp->cnic_spq_pending, bp->cnic_kwq_pending, count);

		/* Advance the KWQ consumer, wrapping at the end of the ring */
		if (bp->cnic_kwq_cons == bp->cnic_kwq_last)
			bp->cnic_kwq_cons = bp->cnic_kwq;
		else
			bp->cnic_kwq_cons++;
	}
	bnx2x_sp_prod_update(bp);
	spin_unlock_bh(&bp->spq_lock);
}
9837
/* CNIC entry point for submitting 16-byte KWQEs.  Queues up to @count
 * entries on the CNIC kernel work queue and kicks the slow-path post
 * if credits are available.  Returns the number of entries accepted,
 * which may be less than @count if the KWQ ring fills up.
 */
static int bnx2x_cnic_sp_queue(struct net_device *dev,
			       struct kwqe_16 *kwqes[], u32 count)
{
	struct bnx2x *bp = netdev_priv(dev);
	int i;

#ifdef BNX2X_STOP_ON_ERROR
	if (unlikely(bp->panic))
		return -EIO;
#endif

	spin_lock_bh(&bp->spq_lock);

	for (i = 0; i < count; i++) {
		struct eth_spe *spe = (struct eth_spe *)kwqes[i];

		/* Ring full - accept only what fit so far */
		if (bp->cnic_kwq_pending == MAX_SP_DESC_CNT)
			break;

		*bp->cnic_kwq_prod = *spe;

		bp->cnic_kwq_pending++;

		DP(NETIF_MSG_TIMER, "L5 SPQE %x %x %x:%x pos %d\n",
		   spe->hdr.conn_and_cmd_data, spe->hdr.type,
		   spe->data.update_data_addr.hi,
		   spe->data.update_data_addr.lo,
		   bp->cnic_kwq_pending);

		/* Advance the producer, wrapping at the end of the ring */
		if (bp->cnic_kwq_prod == bp->cnic_kwq_last)
			bp->cnic_kwq_prod = bp->cnic_kwq;
		else
			bp->cnic_kwq_prod++;
	}

	spin_unlock_bh(&bp->spq_lock);

	/* Try to push the queued entries out right away (count 0: no new
	 * completions, just drain the KWQ)
	 */
	if (bp->cnic_spq_pending < bp->cnic_eth_dev.max_kwqe_pending)
		bnx2x_cnic_sp_post(bp, 0);

	return i;
}
9880
9881static int bnx2x_cnic_ctl_send(struct bnx2x *bp, struct cnic_ctl_info *ctl)
9882{
9883 struct cnic_ops *c_ops;
9884 int rc = 0;
9885
9886 mutex_lock(&bp->cnic_mutex);
9887 c_ops = bp->cnic_ops;
9888 if (c_ops)
9889 rc = c_ops->cnic_ctl(bp->cnic_data, ctl);
9890 mutex_unlock(&bp->cnic_mutex);
9891
9892 return rc;
9893}
9894
9895static int bnx2x_cnic_ctl_send_bh(struct bnx2x *bp, struct cnic_ctl_info *ctl)
9896{
9897 struct cnic_ops *c_ops;
9898 int rc = 0;
9899
9900 rcu_read_lock();
9901 c_ops = rcu_dereference(bp->cnic_ops);
9902 if (c_ops)
9903 rc = c_ops->cnic_ctl(bp->cnic_data, ctl);
9904 rcu_read_unlock();
9905
9906 return rc;
9907}
9908
9909/*
9910 * for commands that have no data
9911 */
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00009912int bnx2x_cnic_notify(struct bnx2x *bp, int cmd)
Michael Chan993ac7b2009-10-10 13:46:56 +00009913{
9914 struct cnic_ctl_info ctl = {0};
9915
9916 ctl.cmd = cmd;
9917
9918 return bnx2x_cnic_ctl_send(bp, &ctl);
9919}
9920
9921static void bnx2x_cnic_cfc_comp(struct bnx2x *bp, int cid)
9922{
9923 struct cnic_ctl_info ctl;
9924
9925 /* first we tell CNIC and only then we count this as a completion */
9926 ctl.cmd = CNIC_CTL_COMPLETION_CMD;
9927 ctl.data.comp.cid = cid;
9928
9929 bnx2x_cnic_ctl_send_bh(bp, &ctl);
Dmitry Kravkovc2bff632010-10-06 03:33:18 +00009930 bnx2x_cnic_sp_post(bp, 0);
Michael Chan993ac7b2009-10-10 13:46:56 +00009931}
9932
/* Control hook exported to the CNIC driver: dispatch on ctl->cmd to
 * program context-table entries, return SPQ credits, and start/stop the
 * iSCSI L2 ring.  Returns 0 on success or -EINVAL for an unknown command.
 */
static int bnx2x_drv_ctl(struct net_device *dev, struct drv_ctl_info *ctl)
{
	struct bnx2x *bp = netdev_priv(dev);
	int rc = 0;

	switch (ctl->cmd) {
	case DRV_CTL_CTXTBL_WR_CMD: {
		u32 index = ctl->data.io.offset;
		dma_addr_t addr = ctl->data.io.dma_addr;

		bnx2x_ilt_wr(bp, index, addr);
		break;
	}

	/* CNIC returns L5 slow-path-queue credits it has consumed */
	case DRV_CTL_RET_L5_SPQ_CREDIT_CMD: {
		int count = ctl->data.credit.credit_count;

		bnx2x_cnic_sp_post(bp, count);
		break;
	}

	/* rtnl_lock is held.  */
	case DRV_CTL_START_L2_CMD: {
		u32 cli = ctl->data.ring.client_id;

		/* Clear FCoE FIP and ALL ENODE MACs addresses first */
		bnx2x_del_fcoe_eth_macs(bp);

		/* Set iSCSI MAC address */
		bnx2x_set_iscsi_eth_mac_addr(bp, 1);

		/* Make sure the MAC programming reached the chip before
		 * opening the RX filters below.
		 */
		mmiowb();
		barrier();

		/* Start accepting on iSCSI L2 ring. Accept all multicasts
		 * because it's the only way for UIO Client to accept
		 * multicasts (in non-promiscuous mode only one Client per
		 * function will receive multicast packets (leading in our
		 * case).
		 */
		bnx2x_rxq_set_mac_filters(bp, cli,
			BNX2X_ACCEPT_UNICAST |
			BNX2X_ACCEPT_BROADCAST |
			BNX2X_ACCEPT_ALL_MULTICAST);
		storm_memset_mac_filters(bp, &bp->mac_filters, BP_FUNC(bp));

		break;
	}

	/* rtnl_lock is held.  */
	case DRV_CTL_STOP_L2_CMD: {
		u32 cli = ctl->data.ring.client_id;

		/* Stop accepting on iSCSI L2 ring */
		bnx2x_rxq_set_mac_filters(bp, cli, BNX2X_ACCEPT_NONE);
		storm_memset_mac_filters(bp, &bp->mac_filters, BP_FUNC(bp));

		/* Filters must be closed before the MAC is removed */
		mmiowb();
		barrier();

		/* Unset iSCSI L2 MAC */
		bnx2x_set_iscsi_eth_mac_addr(bp, 0);
		break;
	}
	/* CNIC returns L2 credits into the shared spq_left pool */
	case DRV_CTL_RET_L2_SPQ_CREDIT_CMD: {
		int count = ctl->data.credit.credit_count;

		smp_mb__before_atomic_inc();
		atomic_add(count, &bp->spq_left);
		smp_mb__after_atomic_inc();
		break;
	}

	default:
		BNX2X_ERR("unknown command %x\n", ctl->cmd);
		rc = -EINVAL;
	}

	return rc;
}
10013
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +000010014void bnx2x_setup_cnic_irq_info(struct bnx2x *bp)
Michael Chan993ac7b2009-10-10 13:46:56 +000010015{
10016 struct cnic_eth_dev *cp = &bp->cnic_eth_dev;
10017
10018 if (bp->flags & USING_MSIX_FLAG) {
10019 cp->drv_state |= CNIC_DRV_STATE_USING_MSIX;
10020 cp->irq_arr[0].irq_flags |= CNIC_IRQ_FL_MSIX;
10021 cp->irq_arr[0].vector = bp->msix_table[1].vector;
10022 } else {
10023 cp->drv_state &= ~CNIC_DRV_STATE_USING_MSIX;
10024 cp->irq_arr[0].irq_flags &= ~CNIC_IRQ_FL_MSIX;
10025 }
Dmitry Kravkovf2e08992010-10-06 03:28:26 +000010026 if (CHIP_IS_E2(bp))
10027 cp->irq_arr[0].status_blk = (void *)bp->cnic_sb.e2_sb;
10028 else
10029 cp->irq_arr[0].status_blk = (void *)bp->cnic_sb.e1x_sb;
10030
Michael Chan993ac7b2009-10-10 13:46:56 +000010031 cp->irq_arr[0].status_blk_num = CNIC_SB_ID(bp);
Dmitry Kravkov523224a2010-10-06 03:23:26 +000010032 cp->irq_arr[0].status_blk_num2 = CNIC_IGU_SB_ID(bp);
Michael Chan993ac7b2009-10-10 13:46:56 +000010033 cp->irq_arr[1].status_blk = bp->def_status_blk;
10034 cp->irq_arr[1].status_blk_num = DEF_SB_ID;
Dmitry Kravkov523224a2010-10-06 03:23:26 +000010035 cp->irq_arr[1].status_blk_num2 = DEF_SB_IGU_ID;
Michael Chan993ac7b2009-10-10 13:46:56 +000010036
10037 cp->num_irq = 2;
10038}
10039
/* CNIC registration hook.  Allocates the kernel work queue, initializes
 * the CNIC bookkeeping, and publishes @ops.  Returns 0 on success,
 * -EINVAL for a NULL ops, -EBUSY while interrupts are still disabled,
 * or -ENOMEM if the KWQ cannot be allocated.
 */
static int bnx2x_register_cnic(struct net_device *dev, struct cnic_ops *ops,
			       void *data)
{
	struct bnx2x *bp = netdev_priv(dev);
	struct cnic_eth_dev *cp = &bp->cnic_eth_dev;

	if (ops == NULL)
		return -EINVAL;

	/* Don't register while the device interrupts are disabled */
	if (atomic_read(&bp->intr_sem) != 0)
		return -EBUSY;

	bp->cnic_kwq = kzalloc(PAGE_SIZE, GFP_KERNEL);
	if (!bp->cnic_kwq)
		return -ENOMEM;

	/* Empty ring: producer == consumer */
	bp->cnic_kwq_cons = bp->cnic_kwq;
	bp->cnic_kwq_prod = bp->cnic_kwq;
	bp->cnic_kwq_last = bp->cnic_kwq + MAX_SP_DESC_CNT;

	bp->cnic_spq_pending = 0;
	bp->cnic_kwq_pending = 0;

	bp->cnic_data = data;

	cp->num_irq = 0;
	cp->drv_state = CNIC_DRV_STATE_REGD;
	cp->iro_arr = bp->iro_arr;

	bnx2x_setup_cnic_irq_info(bp);

	/* Publish ops last: rcu_assign_pointer orders the initialization
	 * above before RCU readers can observe a non-NULL cnic_ops.
	 */
	rcu_assign_pointer(bp->cnic_ops, ops);

	return 0;
}
10075
/* CNIC unregistration hook: retract the ops pointer, wait for in-flight
 * RCU readers, then free the kernel work queue.
 */
static int bnx2x_unregister_cnic(struct net_device *dev)
{
	struct bnx2x *bp = netdev_priv(dev);
	struct cnic_eth_dev *cp = &bp->cnic_eth_dev;

	mutex_lock(&bp->cnic_mutex);
	cp->drv_state = 0;
	rcu_assign_pointer(bp->cnic_ops, NULL);
	mutex_unlock(&bp->cnic_mutex);
	/* Wait until every rcu_dereference(cnic_ops) reader has finished
	 * before tearing down the resources they might still be using.
	 */
	synchronize_rcu();
	kfree(bp->cnic_kwq);
	bp->cnic_kwq = NULL;

	return 0;
}
10091
/* Exported probe hook called by the CNIC module: fill in and return the
 * per-device cnic_eth_dev descriptor (chip identity, BAR mappings,
 * context-table geometry and the driver callbacks above).
 */
struct cnic_eth_dev *bnx2x_cnic_probe(struct net_device *dev)
{
	struct bnx2x *bp = netdev_priv(dev);
	struct cnic_eth_dev *cp = &bp->cnic_eth_dev;

	cp->drv_owner = THIS_MODULE;
	cp->chip_id = CHIP_ID(bp);
	cp->pdev = bp->pdev;
	cp->io_base = bp->regview;
	cp->io_base2 = bp->doorbells;
	cp->max_kwqe_pending = 8;
	cp->ctx_blk_size = CDU_ILT_PAGE_SZ;
	/* CNIC context lines sit right after the L2 CID ILT lines */
	cp->ctx_tbl_offset = FUNC_ILT_BASE(BP_FUNC(bp)) +
			     bnx2x_cid_ilt_lines(bp);
	cp->ctx_tbl_len = CNIC_ILT_LINES;
	cp->starting_cid = bnx2x_cid_ilt_lines(bp) * ILT_PAGE_CIDS;
	cp->drv_submit_kwqes_16 = bnx2x_cnic_sp_queue;
	cp->drv_ctl = bnx2x_drv_ctl;
	cp->drv_register_cnic = bnx2x_register_cnic;
	cp->drv_unregister_cnic = bnx2x_unregister_cnic;
	cp->fcoe_init_cid = BNX2X_FCOE_ETH_CID;
	cp->iscsi_l2_client_id = BNX2X_ISCSI_ETH_CL_ID +
		BP_E1HVN(bp) * NONE_ETH_CONTEXT_USE;
	cp->iscsi_l2_cid = BNX2X_ISCSI_ETH_CID;

	DP(BNX2X_MSG_SP, "page_size %d, tbl_offset %d, tbl_lines %d, "
			 "starting cid %d\n",
	   cp->ctx_blk_size,
	   cp->ctx_tbl_offset,
	   cp->ctx_tbl_len,
	   cp->starting_cid);
	return cp;
}
EXPORT_SYMBOL(bnx2x_cnic_probe);
10126
10127#endif /* BCM_CNIC */
Vladislav Zolotarov94a78b72009-04-27 03:27:43 -070010128