/* bnx2x_main.c: Broadcom Everest network driver.
 *
 * Copyright (c) 2007-2010 Broadcom Corporation
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation.
 *
 * Maintained by: Eilon Greenstein <eilong@broadcom.com>
 * Written by: Eliezer Tamir
 * Based on code from Michael Chan's bnx2 driver
 * UDP CSUM errata workaround by Arik Gendelman
 * Slowpath and fastpath rework by Vladislav Zolotarov
 * Statistics and Link management by Yitchak Gertner
 *
 */

#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/kernel.h>
#include <linux/device.h>	/* for dev_info() */
#include <linux/timer.h>
#include <linux/errno.h>
#include <linux/ioport.h>
#include <linux/slab.h>
#include <linux/interrupt.h>
#include <linux/pci.h>
#include <linux/init.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/skbuff.h>
#include <linux/dma-mapping.h>
#include <linux/bitops.h>
#include <linux/irq.h>
#include <linux/delay.h>
#include <asm/byteorder.h>
#include <linux/time.h>
#include <linux/ethtool.h>
#include <linux/mii.h>
#include <linux/if_vlan.h>
#include <net/ip.h>
#include <net/tcp.h>
#include <net/checksum.h>
#include <net/ip6_checksum.h>
#include <linux/workqueue.h>
#include <linux/crc32.h>
#include <linux/crc32c.h>
#include <linux/prefetch.h>
#include <linux/zlib.h>
#include <linux/io.h>
#include <linux/stringify.h>

#define BNX2X_MAIN
#include "bnx2x.h"
#include "bnx2x_init.h"
#include "bnx2x_init_ops.h"
#include "bnx2x_cmn.h"
#include "bnx2x_dcb.h"

#include <linux/firmware.h>
#include "bnx2x_fw_file_hdr.h"
/* FW files */
#define FW_FILE_VERSION					\
	__stringify(BCM_5710_FW_MAJOR_VERSION) "."	\
	__stringify(BCM_5710_FW_MINOR_VERSION) "."	\
	__stringify(BCM_5710_FW_REVISION_VERSION) "."	\
	__stringify(BCM_5710_FW_ENGINEERING_VERSION)
#define FW_FILE_NAME_E1		"bnx2x/bnx2x-e1-" FW_FILE_VERSION ".fw"
#define FW_FILE_NAME_E1H	"bnx2x/bnx2x-e1h-" FW_FILE_VERSION ".fw"
#define FW_FILE_NAME_E2		"bnx2x/bnx2x-e2-" FW_FILE_VERSION ".fw"

/* Time in jiffies before concluding the transmitter is hung */
#define TX_TIMEOUT		(5*HZ)

static char version[] __devinitdata =
	"Broadcom NetXtreme II 5771x 10Gigabit Ethernet Driver "
	DRV_MODULE_NAME " " DRV_MODULE_VERSION " (" DRV_MODULE_RELDATE ")\n";

MODULE_AUTHOR("Eliezer Tamir");
MODULE_DESCRIPTION("Broadcom NetXtreme II "
		   "BCM57710/57711/57711E/57712/57712E Driver");
MODULE_LICENSE("GPL");
MODULE_VERSION(DRV_MODULE_VERSION);
MODULE_FIRMWARE(FW_FILE_NAME_E1);
MODULE_FIRMWARE(FW_FILE_NAME_E1H);
MODULE_FIRMWARE(FW_FILE_NAME_E2);

static int multi_mode = 1;
module_param(multi_mode, int, 0);
MODULE_PARM_DESC(multi_mode, " Multi queue mode "
			     "(0 Disable; 1 Enable (default))");

int num_queues;
module_param(num_queues, int, 0);
MODULE_PARM_DESC(num_queues, " Number of queues for multi_mode=1"
				" (default is the number of CPUs)");

static int disable_tpa;
module_param(disable_tpa, int, 0);
MODULE_PARM_DESC(disable_tpa, " Disable the TPA (LRO) feature");

static int int_mode;
module_param(int_mode, int, 0);
MODULE_PARM_DESC(int_mode, " Force interrupt mode other than MSI-X "
				"(1 INT#x; 2 MSI)");

static int dropless_fc;
module_param(dropless_fc, int, 0);
MODULE_PARM_DESC(dropless_fc, " Pause on exhausted host ring");

static int poll;
module_param(poll, int, 0);
MODULE_PARM_DESC(poll, " Use polling (for debug)");

static int mrrs = -1;
module_param(mrrs, int, 0);
MODULE_PARM_DESC(mrrs, " Force Max Read Req Size (0..3) (for debug)");

static int debug;
module_param(debug, int, 0);
MODULE_PARM_DESC(debug, " Default debug msglevel");

static struct workqueue_struct *bnx2x_wq;

#ifdef BCM_CNIC
static u8 ALL_ENODE_MACS[] = {0x01, 0x10, 0x18, 0x01, 0x00, 0x01};
#endif

enum bnx2x_board_type {
	BCM57710 = 0,
	BCM57711 = 1,
	BCM57711E = 2,
	BCM57712 = 3,
	BCM57712E = 4
};

/* indexed by board_type, above */
static struct {
	char *name;
} board_info[] __devinitdata = {
	{ "Broadcom NetXtreme II BCM57710 XGb" },
	{ "Broadcom NetXtreme II BCM57711 XGb" },
	{ "Broadcom NetXtreme II BCM57711E XGb" },
	{ "Broadcom NetXtreme II BCM57712 XGb" },
	{ "Broadcom NetXtreme II BCM57712E XGb" }
};

#ifndef PCI_DEVICE_ID_NX2_57712
#define PCI_DEVICE_ID_NX2_57712		0x1662
#endif
#ifndef PCI_DEVICE_ID_NX2_57712E
#define PCI_DEVICE_ID_NX2_57712E	0x1663
#endif

static DEFINE_PCI_DEVICE_TABLE(bnx2x_pci_tbl) = {
	{ PCI_VDEVICE(BROADCOM, PCI_DEVICE_ID_NX2_57710), BCM57710 },
	{ PCI_VDEVICE(BROADCOM, PCI_DEVICE_ID_NX2_57711), BCM57711 },
	{ PCI_VDEVICE(BROADCOM, PCI_DEVICE_ID_NX2_57711E), BCM57711E },
	{ PCI_VDEVICE(BROADCOM, PCI_DEVICE_ID_NX2_57712), BCM57712 },
	{ PCI_VDEVICE(BROADCOM, PCI_DEVICE_ID_NX2_57712E), BCM57712E },
	{ 0 }
};

MODULE_DEVICE_TABLE(pci, bnx2x_pci_tbl);

/****************************************************************************
* General service functions
****************************************************************************/

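/*
 * The storm_memset_*() helpers below write driver-owned values and
 * structures into the STORM processors' internal memories, one 32-bit
 * GRC register write at a time.
 */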
static inline void __storm_memset_dma_mapping(struct bnx2x *bp,
					      u32 addr, dma_addr_t mapping)
{
	REG_WR(bp, addr, U64_LO(mapping));
	REG_WR(bp, addr + 4, U64_HI(mapping));
}

static inline void __storm_memset_fill(struct bnx2x *bp,
				       u32 addr, size_t size, u32 val)
{
	int i;
	for (i = 0; i < size/4; i++)
		REG_WR(bp, addr + (i * 4), val);
}

static inline void storm_memset_ustats_zero(struct bnx2x *bp,
					    u8 port, u16 stat_id)
{
	size_t size = sizeof(struct ustorm_per_client_stats);

	u32 addr = BAR_USTRORM_INTMEM +
			USTORM_PER_COUNTER_ID_STATS_OFFSET(port, stat_id);

	__storm_memset_fill(bp, addr, size, 0);
}

static inline void storm_memset_tstats_zero(struct bnx2x *bp,
					    u8 port, u16 stat_id)
{
	size_t size = sizeof(struct tstorm_per_client_stats);

	u32 addr = BAR_TSTRORM_INTMEM +
			TSTORM_PER_COUNTER_ID_STATS_OFFSET(port, stat_id);

	__storm_memset_fill(bp, addr, size, 0);
}

static inline void storm_memset_xstats_zero(struct bnx2x *bp,
					    u8 port, u16 stat_id)
{
	size_t size = sizeof(struct xstorm_per_client_stats);

	u32 addr = BAR_XSTRORM_INTMEM +
			XSTORM_PER_COUNTER_ID_STATS_OFFSET(port, stat_id);

	__storm_memset_fill(bp, addr, size, 0);
}

static inline void storm_memset_spq_addr(struct bnx2x *bp,
					 dma_addr_t mapping, u16 abs_fid)
{
	u32 addr = XSEM_REG_FAST_MEMORY +
			XSTORM_SPQ_PAGE_BASE_OFFSET(abs_fid);

	__storm_memset_dma_mapping(bp, addr, mapping);
}

static inline void storm_memset_ov(struct bnx2x *bp, u16 ov, u16 abs_fid)
{
	REG_WR16(bp, BAR_XSTRORM_INTMEM + XSTORM_E1HOV_OFFSET(abs_fid), ov);
}

static inline void storm_memset_func_cfg(struct bnx2x *bp,
				struct tstorm_eth_function_common_config *tcfg,
				u16 abs_fid)
{
	size_t size = sizeof(struct tstorm_eth_function_common_config);

	u32 addr = BAR_TSTRORM_INTMEM +
			TSTORM_FUNCTION_COMMON_CONFIG_OFFSET(abs_fid);

	__storm_memset_struct(bp, addr, size, (u32 *)tcfg);
}

static inline void storm_memset_xstats_flags(struct bnx2x *bp,
				struct stats_indication_flags *flags,
				u16 abs_fid)
{
	size_t size = sizeof(struct stats_indication_flags);

	u32 addr = BAR_XSTRORM_INTMEM + XSTORM_STATS_FLAGS_OFFSET(abs_fid);

	__storm_memset_struct(bp, addr, size, (u32 *)flags);
}

static inline void storm_memset_tstats_flags(struct bnx2x *bp,
				struct stats_indication_flags *flags,
				u16 abs_fid)
{
	size_t size = sizeof(struct stats_indication_flags);

	u32 addr = BAR_TSTRORM_INTMEM + TSTORM_STATS_FLAGS_OFFSET(abs_fid);

	__storm_memset_struct(bp, addr, size, (u32 *)flags);
}

static inline void storm_memset_ustats_flags(struct bnx2x *bp,
				struct stats_indication_flags *flags,
				u16 abs_fid)
{
	size_t size = sizeof(struct stats_indication_flags);

	u32 addr = BAR_USTRORM_INTMEM + USTORM_STATS_FLAGS_OFFSET(abs_fid);

	__storm_memset_struct(bp, addr, size, (u32 *)flags);
}

static inline void storm_memset_cstats_flags(struct bnx2x *bp,
				struct stats_indication_flags *flags,
				u16 abs_fid)
{
	size_t size = sizeof(struct stats_indication_flags);

	u32 addr = BAR_CSTRORM_INTMEM + CSTORM_STATS_FLAGS_OFFSET(abs_fid);

	__storm_memset_struct(bp, addr, size, (u32 *)flags);
}

static inline void storm_memset_xstats_addr(struct bnx2x *bp,
					    dma_addr_t mapping, u16 abs_fid)
{
	u32 addr = BAR_XSTRORM_INTMEM +
			XSTORM_ETH_STATS_QUERY_ADDR_OFFSET(abs_fid);

	__storm_memset_dma_mapping(bp, addr, mapping);
}

static inline void storm_memset_tstats_addr(struct bnx2x *bp,
					    dma_addr_t mapping, u16 abs_fid)
{
	u32 addr = BAR_TSTRORM_INTMEM +
			TSTORM_ETH_STATS_QUERY_ADDR_OFFSET(abs_fid);

	__storm_memset_dma_mapping(bp, addr, mapping);
}

static inline void storm_memset_ustats_addr(struct bnx2x *bp,
					    dma_addr_t mapping, u16 abs_fid)
{
	u32 addr = BAR_USTRORM_INTMEM +
			USTORM_ETH_STATS_QUERY_ADDR_OFFSET(abs_fid);

	__storm_memset_dma_mapping(bp, addr, mapping);
}

static inline void storm_memset_cstats_addr(struct bnx2x *bp,
					    dma_addr_t mapping, u16 abs_fid)
{
	u32 addr = BAR_CSTRORM_INTMEM +
			CSTORM_ETH_STATS_QUERY_ADDR_OFFSET(abs_fid);

	__storm_memset_dma_mapping(bp, addr, mapping);
}

static inline void storm_memset_vf_to_pf(struct bnx2x *bp, u16 abs_fid,
					 u16 pf_id)
{
	REG_WR8(bp, BAR_XSTRORM_INTMEM + XSTORM_VF_TO_PF_OFFSET(abs_fid),
		pf_id);
	REG_WR8(bp, BAR_CSTRORM_INTMEM + CSTORM_VF_TO_PF_OFFSET(abs_fid),
		pf_id);
	REG_WR8(bp, BAR_TSTRORM_INTMEM + TSTORM_VF_TO_PF_OFFSET(abs_fid),
		pf_id);
	REG_WR8(bp, BAR_USTRORM_INTMEM + USTORM_VF_TO_PF_OFFSET(abs_fid),
		pf_id);
}

static inline void storm_memset_func_en(struct bnx2x *bp, u16 abs_fid,
					u8 enable)
{
	REG_WR8(bp, BAR_XSTRORM_INTMEM + XSTORM_FUNC_EN_OFFSET(abs_fid),
		enable);
	REG_WR8(bp, BAR_CSTRORM_INTMEM + CSTORM_FUNC_EN_OFFSET(abs_fid),
		enable);
	REG_WR8(bp, BAR_TSTRORM_INTMEM + TSTORM_FUNC_EN_OFFSET(abs_fid),
		enable);
	REG_WR8(bp, BAR_USTRORM_INTMEM + USTORM_FUNC_EN_OFFSET(abs_fid),
		enable);
}

static inline void storm_memset_eq_data(struct bnx2x *bp,
				struct event_ring_data *eq_data,
				u16 pfid)
{
	size_t size = sizeof(struct event_ring_data);

	u32 addr = BAR_CSTRORM_INTMEM + CSTORM_EVENT_RING_DATA_OFFSET(pfid);

	__storm_memset_struct(bp, addr, size, (u32 *)eq_data);
}

static inline void storm_memset_eq_prod(struct bnx2x *bp, u16 eq_prod,
					u16 pfid)
{
	u32 addr = BAR_CSTRORM_INTMEM + CSTORM_EVENT_RING_PROD_OFFSET(pfid);
	REG_WR16(bp, addr, eq_prod);
}

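/* set the timeout value of a single status-block index in CSTORM */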
static inline void storm_memset_hc_timeout(struct bnx2x *bp, u8 port,
					   u16 fw_sb_id, u8 sb_index,
					   u8 ticks)
{
	int index_offset = CHIP_IS_E2(bp) ?
		offsetof(struct hc_status_block_data_e2, index_data) :
		offsetof(struct hc_status_block_data_e1x, index_data);
	u32 addr = BAR_CSTRORM_INTMEM +
			CSTORM_STATUS_BLOCK_DATA_OFFSET(fw_sb_id) +
			index_offset +
			sizeof(struct hc_index_data)*sb_index +
			offsetof(struct hc_index_data, timeout);
	REG_WR8(bp, addr, ticks);
	DP(NETIF_MSG_HW, "port %x fw_sb_id %d sb_index %d ticks %d\n",
	   port, fw_sb_id, sb_index, ticks);
}

static inline void storm_memset_hc_disable(struct bnx2x *bp, u8 port,
					   u16 fw_sb_id, u8 sb_index,
					   u8 disable)
{
	u32 enable_flag = disable ? 0 : (1 << HC_INDEX_DATA_HC_ENABLED_SHIFT);
	int index_offset = CHIP_IS_E2(bp) ?
		offsetof(struct hc_status_block_data_e2, index_data) :
		offsetof(struct hc_status_block_data_e1x, index_data);
	u32 addr = BAR_CSTRORM_INTMEM +
			CSTORM_STATUS_BLOCK_DATA_OFFSET(fw_sb_id) +
			index_offset +
			sizeof(struct hc_index_data)*sb_index +
			offsetof(struct hc_index_data, flags);
	u16 flags = REG_RD16(bp, addr);
	/* clear and set */
	flags &= ~HC_INDEX_DATA_HC_ENABLED;
	flags |= enable_flag;
	REG_WR16(bp, addr, flags);
	DP(NETIF_MSG_HW, "port %x fw_sb_id %d sb_index %d disable %d\n",
	   port, fw_sb_id, sb_index, disable);
}

/* used only at init
 * locking is done by mcp
 */
static void bnx2x_reg_wr_ind(struct bnx2x *bp, u32 addr, u32 val)
{
	pci_write_config_dword(bp->pdev, PCICFG_GRC_ADDRESS, addr);
	pci_write_config_dword(bp->pdev, PCICFG_GRC_DATA, val);
	pci_write_config_dword(bp->pdev, PCICFG_GRC_ADDRESS,
			       PCICFG_VENDOR_ID_OFFSET);
}

static u32 bnx2x_reg_rd_ind(struct bnx2x *bp, u32 addr)
{
	u32 val;

	pci_write_config_dword(bp->pdev, PCICFG_GRC_ADDRESS, addr);
	pci_read_config_dword(bp->pdev, PCICFG_GRC_DATA, &val);
	pci_write_config_dword(bp->pdev, PCICFG_GRC_ADDRESS,
			       PCICFG_VENDOR_ID_OFFSET);

	return val;
}

#define DMAE_DP_SRC_GRC		"grc src_addr [%08x]"
#define DMAE_DP_SRC_PCI		"pci src_addr [%x:%08x]"
#define DMAE_DP_DST_GRC		"grc dst_addr [%08x]"
#define DMAE_DP_DST_PCI		"pci dst_addr [%x:%08x]"
#define DMAE_DP_DST_NONE	"dst_addr [none]"

static void bnx2x_dp_dmae(struct bnx2x *bp, struct dmae_command *dmae,
			  int msglvl)
{
	u32 src_type = dmae->opcode & DMAE_COMMAND_SRC;

	switch (dmae->opcode & DMAE_COMMAND_DST) {
	case DMAE_CMD_DST_PCI:
		if (src_type == DMAE_CMD_SRC_PCI)
			DP(msglvl, "DMAE: opcode 0x%08x\n"
			   "src [%x:%08x], len [%d*4], dst [%x:%08x]\n"
			   "comp_addr [%x:%08x], comp_val 0x%08x\n",
			   dmae->opcode, dmae->src_addr_hi, dmae->src_addr_lo,
			   dmae->len, dmae->dst_addr_hi, dmae->dst_addr_lo,
			   dmae->comp_addr_hi, dmae->comp_addr_lo,
			   dmae->comp_val);
		else
			DP(msglvl, "DMAE: opcode 0x%08x\n"
			   "src [%08x], len [%d*4], dst [%x:%08x]\n"
			   "comp_addr [%x:%08x], comp_val 0x%08x\n",
			   dmae->opcode, dmae->src_addr_lo >> 2,
			   dmae->len, dmae->dst_addr_hi, dmae->dst_addr_lo,
			   dmae->comp_addr_hi, dmae->comp_addr_lo,
			   dmae->comp_val);
		break;
	case DMAE_CMD_DST_GRC:
		if (src_type == DMAE_CMD_SRC_PCI)
			DP(msglvl, "DMAE: opcode 0x%08x\n"
			   "src [%x:%08x], len [%d*4], dst_addr [%08x]\n"
			   "comp_addr [%x:%08x], comp_val 0x%08x\n",
			   dmae->opcode, dmae->src_addr_hi, dmae->src_addr_lo,
			   dmae->len, dmae->dst_addr_lo >> 2,
			   dmae->comp_addr_hi, dmae->comp_addr_lo,
			   dmae->comp_val);
		else
			DP(msglvl, "DMAE: opcode 0x%08x\n"
			   "src [%08x], len [%d*4], dst [%08x]\n"
			   "comp_addr [%x:%08x], comp_val 0x%08x\n",
			   dmae->opcode, dmae->src_addr_lo >> 2,
			   dmae->len, dmae->dst_addr_lo >> 2,
			   dmae->comp_addr_hi, dmae->comp_addr_lo,
			   dmae->comp_val);
		break;
	default:
		if (src_type == DMAE_CMD_SRC_PCI)
			DP(msglvl, "DMAE: opcode 0x%08x\n"
			   DP_LEVEL "src_addr [%x:%08x] len [%d * 4] "
				    "dst_addr [none]\n"
			   DP_LEVEL "comp_addr [%x:%08x] comp_val 0x%08x\n",
			   dmae->opcode, dmae->src_addr_hi, dmae->src_addr_lo,
			   dmae->len, dmae->comp_addr_hi, dmae->comp_addr_lo,
			   dmae->comp_val);
		else
			DP(msglvl, "DMAE: opcode 0x%08x\n"
			   DP_LEVEL "src_addr [%08x] len [%d * 4] "
				    "dst_addr [none]\n"
			   DP_LEVEL "comp_addr [%x:%08x] comp_val 0x%08x\n",
			   dmae->opcode, dmae->src_addr_lo >> 2,
			   dmae->len, dmae->comp_addr_hi, dmae->comp_addr_lo,
			   dmae->comp_val);
		break;
	}
}

const u32 dmae_reg_go_c[] = {
	DMAE_REG_GO_C0, DMAE_REG_GO_C1, DMAE_REG_GO_C2, DMAE_REG_GO_C3,
	DMAE_REG_GO_C4, DMAE_REG_GO_C5, DMAE_REG_GO_C6, DMAE_REG_GO_C7,
	DMAE_REG_GO_C8, DMAE_REG_GO_C9, DMAE_REG_GO_C10, DMAE_REG_GO_C11,
	DMAE_REG_GO_C12, DMAE_REG_GO_C13, DMAE_REG_GO_C14, DMAE_REG_GO_C15
};

/* copy command into DMAE command memory and set DMAE command go */
void bnx2x_post_dmae(struct bnx2x *bp, struct dmae_command *dmae, int idx)
{
	u32 cmd_offset;
	int i;

	cmd_offset = (DMAE_REG_CMD_MEM + sizeof(struct dmae_command) * idx);
	for (i = 0; i < (sizeof(struct dmae_command)/4); i++) {
		REG_WR(bp, cmd_offset + i*4, *(((u32 *)dmae) + i));

		DP(BNX2X_MSG_OFF, "DMAE cmd[%d].%d (0x%08x) : 0x%08x\n",
		   idx, i, cmd_offset + i*4, *(((u32 *)dmae) + i));
	}
	REG_WR(bp, dmae_reg_go_c[idx], 1);
}

u32 bnx2x_dmae_opcode_add_comp(u32 opcode, u8 comp_type)
{
	return opcode | ((comp_type << DMAE_COMMAND_C_DST_SHIFT) |
			   DMAE_CMD_C_ENABLE);
}

u32 bnx2x_dmae_opcode_clr_src_reset(u32 opcode)
{
	return opcode & ~DMAE_CMD_SRC_RESET;
}

u32 bnx2x_dmae_opcode(struct bnx2x *bp, u8 src_type, u8 dst_type,
		      bool with_comp, u8 comp_type)
{
	u32 opcode = 0;

	opcode |= ((src_type << DMAE_COMMAND_SRC_SHIFT) |
		   (dst_type << DMAE_COMMAND_DST_SHIFT));

	opcode |= (DMAE_CMD_SRC_RESET | DMAE_CMD_DST_RESET);

	opcode |= (BP_PORT(bp) ? DMAE_CMD_PORT_1 : DMAE_CMD_PORT_0);
	opcode |= ((BP_E1HVN(bp) << DMAE_CMD_E1HVN_SHIFT) |
		   (BP_E1HVN(bp) << DMAE_COMMAND_DST_VN_SHIFT));
	opcode |= (DMAE_COM_SET_ERR << DMAE_COMMAND_ERR_POLICY_SHIFT);

#ifdef __BIG_ENDIAN
	opcode |= DMAE_CMD_ENDIANITY_B_DW_SWAP;
#else
	opcode |= DMAE_CMD_ENDIANITY_DW_SWAP;
#endif
	if (with_comp)
		opcode = bnx2x_dmae_opcode_add_comp(opcode, comp_type);
	return opcode;
}

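/*
 * Illustrative use of the opcode builder: a PCI -> GRC copy whose
 * completion is written back to PCI memory is set up with
 *	bnx2x_dmae_opcode(bp, DMAE_SRC_PCI, DMAE_DST_GRC,
 *			  true, DMAE_COMP_PCI);
 * which is exactly what bnx2x_prep_dmae_with_comp() below does.
 */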
static void bnx2x_prep_dmae_with_comp(struct bnx2x *bp,
				      struct dmae_command *dmae,
				      u8 src_type, u8 dst_type)
{
	memset(dmae, 0, sizeof(struct dmae_command));

	/* set the opcode */
	dmae->opcode = bnx2x_dmae_opcode(bp, src_type, dst_type,
					 true, DMAE_COMP_PCI);

	/* fill in the completion parameters */
	dmae->comp_addr_lo = U64_LO(bnx2x_sp_mapping(bp, wb_comp));
	dmae->comp_addr_hi = U64_HI(bnx2x_sp_mapping(bp, wb_comp));
	dmae->comp_val = DMAE_COMP_VAL;
}

/* issue a dmae command over the init-channel and wait for completion */
static int bnx2x_issue_dmae_with_comp(struct bnx2x *bp,
				      struct dmae_command *dmae)
{
	u32 *wb_comp = bnx2x_sp(bp, wb_comp);
	int cnt = CHIP_REV_IS_SLOW(bp) ? (400000) : 40;
	int rc = 0;

	DP(BNX2X_MSG_OFF, "data before [0x%08x 0x%08x 0x%08x 0x%08x]\n",
	   bp->slowpath->wb_data[0], bp->slowpath->wb_data[1],
	   bp->slowpath->wb_data[2], bp->slowpath->wb_data[3]);

	/* lock the dmae channel */
	mutex_lock(&bp->dmae_mutex);

	/* reset completion */
	*wb_comp = 0;

	/* post the command on the channel used for initializations */
	bnx2x_post_dmae(bp, dmae, INIT_DMAE_C(bp));

	/* wait for completion */
	udelay(5);
	while ((*wb_comp & ~DMAE_PCI_ERR_FLAG) != DMAE_COMP_VAL) {
		DP(BNX2X_MSG_OFF, "wb_comp 0x%08x\n", *wb_comp);

		if (!cnt) {
			BNX2X_ERR("DMAE timeout!\n");
			rc = DMAE_TIMEOUT;
			goto unlock;
		}
		cnt--;
		udelay(50);
	}
	if (*wb_comp & DMAE_PCI_ERR_FLAG) {
		BNX2X_ERR("DMAE PCI error!\n");
		rc = DMAE_PCI_ERROR;
	}

	DP(BNX2X_MSG_OFF, "data after [0x%08x 0x%08x 0x%08x 0x%08x]\n",
	   bp->slowpath->wb_data[0], bp->slowpath->wb_data[1],
	   bp->slowpath->wb_data[2], bp->slowpath->wb_data[3]);

unlock:
	mutex_unlock(&bp->dmae_mutex);
	return rc;
}

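/* write len32 32-bit words from PCI memory (dma_addr) into GRC space,
 * falling back to indirect register writes while DMAE is not ready
 */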
void bnx2x_write_dmae(struct bnx2x *bp, dma_addr_t dma_addr, u32 dst_addr,
		      u32 len32)
{
	struct dmae_command dmae;

	if (!bp->dmae_ready) {
		u32 *data = bnx2x_sp(bp, wb_data[0]);

		DP(BNX2X_MSG_OFF, "DMAE is not ready (dst_addr %08x len32 %d)"
		   " using indirect\n", dst_addr, len32);
		bnx2x_init_ind_wr(bp, dst_addr, data, len32);
		return;
	}

	/* set opcode and fixed command fields */
	bnx2x_prep_dmae_with_comp(bp, &dmae, DMAE_SRC_PCI, DMAE_DST_GRC);

	/* fill in addresses and len */
	dmae.src_addr_lo = U64_LO(dma_addr);
	dmae.src_addr_hi = U64_HI(dma_addr);
	dmae.dst_addr_lo = dst_addr >> 2;
	dmae.dst_addr_hi = 0;
	dmae.len = len32;

	bnx2x_dp_dmae(bp, &dmae, BNX2X_MSG_OFF);

	/* issue the command and wait for completion */
	bnx2x_issue_dmae_with_comp(bp, &dmae);
}

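/* read len32 32-bit words from GRC space into the slowpath wb_data
 * buffer, falling back to indirect register reads while DMAE is not
 * ready
 */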
void bnx2x_read_dmae(struct bnx2x *bp, u32 src_addr, u32 len32)
{
	struct dmae_command dmae;

	if (!bp->dmae_ready) {
		u32 *data = bnx2x_sp(bp, wb_data[0]);
		int i;

		DP(BNX2X_MSG_OFF, "DMAE is not ready (src_addr %08x len32 %d)"
		   " using indirect\n", src_addr, len32);
		for (i = 0; i < len32; i++)
			data[i] = bnx2x_reg_rd_ind(bp, src_addr + i*4);
		return;
	}

	/* set opcode and fixed command fields */
	bnx2x_prep_dmae_with_comp(bp, &dmae, DMAE_SRC_GRC, DMAE_DST_PCI);

	/* fill in addresses and len */
	dmae.src_addr_lo = src_addr >> 2;
	dmae.src_addr_hi = 0;
	dmae.dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, wb_data));
	dmae.dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, wb_data));
	dmae.len = len32;

	bnx2x_dp_dmae(bp, &dmae, BNX2X_MSG_OFF);

	/* issue the command and wait for completion */
	bnx2x_issue_dmae_with_comp(bp, &dmae);
}

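/* write a long buffer in DMAE_LEN32_WR_MAX-sized chunks */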
static void bnx2x_write_dmae_phys_len(struct bnx2x *bp, dma_addr_t phys_addr,
				      u32 addr, u32 len)
{
	int dmae_wr_max = DMAE_LEN32_WR_MAX(bp);
	int offset = 0;

	while (len > dmae_wr_max) {
		bnx2x_write_dmae(bp, phys_addr + offset,
				 addr + offset, dmae_wr_max);
		offset += dmae_wr_max * 4;
		len -= dmae_wr_max;
	}

	bnx2x_write_dmae(bp, phys_addr + offset, addr + offset, len);
}

/* used only for slowpath so not inlined */
static void bnx2x_wb_wr(struct bnx2x *bp, int reg, u32 val_hi, u32 val_lo)
{
	u32 wb_write[2];

	wb_write[0] = val_hi;
	wb_write[1] = val_lo;
	REG_WR_DMAE(bp, reg, wb_write, 2);
}

#ifdef USE_WB_RD
static u64 bnx2x_wb_rd(struct bnx2x *bp, int reg)
{
	u32 wb_data[2];

	REG_RD_DMAE(bp, reg, wb_data, 2);

	return HILO_U64(wb_data[0], wb_data[1]);
}
#endif

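/* scan the X/T/C/USTORM assert lists and print any firmware asserts;
 * returns the number of asserts found
 */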
static int bnx2x_mc_assert(struct bnx2x *bp)
{
	char last_idx;
	int i, rc = 0;
	u32 row0, row1, row2, row3;

	/* XSTORM */
	last_idx = REG_RD8(bp, BAR_XSTRORM_INTMEM +
			   XSTORM_ASSERT_LIST_INDEX_OFFSET);
	if (last_idx)
		BNX2X_ERR("XSTORM_ASSERT_LIST_INDEX 0x%x\n", last_idx);

	/* print the asserts */
	for (i = 0; i < STROM_ASSERT_ARRAY_SIZE; i++) {

		row0 = REG_RD(bp, BAR_XSTRORM_INTMEM +
			      XSTORM_ASSERT_LIST_OFFSET(i));
		row1 = REG_RD(bp, BAR_XSTRORM_INTMEM +
			      XSTORM_ASSERT_LIST_OFFSET(i) + 4);
		row2 = REG_RD(bp, BAR_XSTRORM_INTMEM +
			      XSTORM_ASSERT_LIST_OFFSET(i) + 8);
		row3 = REG_RD(bp, BAR_XSTRORM_INTMEM +
			      XSTORM_ASSERT_LIST_OFFSET(i) + 12);

		if (row0 != COMMON_ASM_INVALID_ASSERT_OPCODE) {
			BNX2X_ERR("XSTORM_ASSERT_INDEX 0x%x = 0x%08x"
				  " 0x%08x 0x%08x 0x%08x\n",
				  i, row3, row2, row1, row0);
			rc++;
		} else {
			break;
		}
	}

	/* TSTORM */
	last_idx = REG_RD8(bp, BAR_TSTRORM_INTMEM +
			   TSTORM_ASSERT_LIST_INDEX_OFFSET);
	if (last_idx)
		BNX2X_ERR("TSTORM_ASSERT_LIST_INDEX 0x%x\n", last_idx);

	/* print the asserts */
	for (i = 0; i < STROM_ASSERT_ARRAY_SIZE; i++) {

		row0 = REG_RD(bp, BAR_TSTRORM_INTMEM +
			      TSTORM_ASSERT_LIST_OFFSET(i));
		row1 = REG_RD(bp, BAR_TSTRORM_INTMEM +
			      TSTORM_ASSERT_LIST_OFFSET(i) + 4);
		row2 = REG_RD(bp, BAR_TSTRORM_INTMEM +
			      TSTORM_ASSERT_LIST_OFFSET(i) + 8);
		row3 = REG_RD(bp, BAR_TSTRORM_INTMEM +
			      TSTORM_ASSERT_LIST_OFFSET(i) + 12);

		if (row0 != COMMON_ASM_INVALID_ASSERT_OPCODE) {
			BNX2X_ERR("TSTORM_ASSERT_INDEX 0x%x = 0x%08x"
				  " 0x%08x 0x%08x 0x%08x\n",
				  i, row3, row2, row1, row0);
			rc++;
		} else {
			break;
		}
	}

	/* CSTORM */
	last_idx = REG_RD8(bp, BAR_CSTRORM_INTMEM +
			   CSTORM_ASSERT_LIST_INDEX_OFFSET);
	if (last_idx)
		BNX2X_ERR("CSTORM_ASSERT_LIST_INDEX 0x%x\n", last_idx);

	/* print the asserts */
	for (i = 0; i < STROM_ASSERT_ARRAY_SIZE; i++) {

		row0 = REG_RD(bp, BAR_CSTRORM_INTMEM +
			      CSTORM_ASSERT_LIST_OFFSET(i));
		row1 = REG_RD(bp, BAR_CSTRORM_INTMEM +
			      CSTORM_ASSERT_LIST_OFFSET(i) + 4);
		row2 = REG_RD(bp, BAR_CSTRORM_INTMEM +
			      CSTORM_ASSERT_LIST_OFFSET(i) + 8);
		row3 = REG_RD(bp, BAR_CSTRORM_INTMEM +
			      CSTORM_ASSERT_LIST_OFFSET(i) + 12);

		if (row0 != COMMON_ASM_INVALID_ASSERT_OPCODE) {
			BNX2X_ERR("CSTORM_ASSERT_INDEX 0x%x = 0x%08x"
				  " 0x%08x 0x%08x 0x%08x\n",
				  i, row3, row2, row1, row0);
			rc++;
		} else {
			break;
		}
	}

	/* USTORM */
	last_idx = REG_RD8(bp, BAR_USTRORM_INTMEM +
			   USTORM_ASSERT_LIST_INDEX_OFFSET);
	if (last_idx)
		BNX2X_ERR("USTORM_ASSERT_LIST_INDEX 0x%x\n", last_idx);

	/* print the asserts */
	for (i = 0; i < STROM_ASSERT_ARRAY_SIZE; i++) {

		row0 = REG_RD(bp, BAR_USTRORM_INTMEM +
			      USTORM_ASSERT_LIST_OFFSET(i));
		row1 = REG_RD(bp, BAR_USTRORM_INTMEM +
			      USTORM_ASSERT_LIST_OFFSET(i) + 4);
		row2 = REG_RD(bp, BAR_USTRORM_INTMEM +
			      USTORM_ASSERT_LIST_OFFSET(i) + 8);
		row3 = REG_RD(bp, BAR_USTRORM_INTMEM +
			      USTORM_ASSERT_LIST_OFFSET(i) + 12);

		if (row0 != COMMON_ASM_INVALID_ASSERT_OPCODE) {
			BNX2X_ERR("USTORM_ASSERT_INDEX 0x%x = 0x%08x"
				  " 0x%08x 0x%08x 0x%08x\n",
				  i, row3, row2, row1, row0);
			rc++;
		} else {
			break;
		}
	}

	return rc;
}

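/* dump the MCP trace buffer (located via shmem) to the kernel log */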
static void bnx2x_fw_dump(struct bnx2x *bp)
{
	u32 addr;
	u32 mark, offset;
	__be32 data[9];
	int word;
	u32 trace_shmem_base;
	if (BP_NOMCP(bp)) {
		BNX2X_ERR("NO MCP - can not dump\n");
		return;
	}

	if (BP_PATH(bp) == 0)
		trace_shmem_base = bp->common.shmem_base;
	else
		trace_shmem_base = SHMEM2_RD(bp, other_shmem_base_addr);
	addr = trace_shmem_base - 0x0800 + 4;
	mark = REG_RD(bp, addr);
	mark = (CHIP_IS_E1x(bp) ? MCP_REG_MCPR_SCRATCH : MCP_A_REG_MCPR_SCRATCH)
			+ ((mark + 0x3) & ~0x3) - 0x08000000;
	pr_err("begin fw dump (mark 0x%x)\n", mark);

	pr_err("");
	for (offset = mark; offset <= trace_shmem_base; offset += 0x8*4) {
		for (word = 0; word < 8; word++)
			data[word] = htonl(REG_RD(bp, offset + 4*word));
		data[8] = 0x0;
		pr_cont("%s", (char *)data);
	}
	for (offset = addr + 4; offset <= mark; offset += 0x8*4) {
		for (word = 0; word < 8; word++)
			data[word] = htonl(REG_RD(bp, offset + 4*word));
		data[8] = 0x0;
		pr_cont("%s", (char *)data);
	}
	pr_err("end of fw dump\n");
}

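/* dump driver state (indices, status block data and, under
 * BNX2X_STOP_ON_ERROR, the Rx/Tx rings) to the kernel log
 */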
void bnx2x_panic_dump(struct bnx2x *bp)
{
	int i;
	u16 j;
	struct hc_sp_status_block_data sp_sb_data;
	int func = BP_FUNC(bp);
#ifdef BNX2X_STOP_ON_ERROR
	u16 start = 0, end = 0;
#endif

	bp->stats_state = STATS_STATE_DISABLED;
	DP(BNX2X_MSG_STATS, "stats_state - DISABLED\n");

	BNX2X_ERR("begin crash dump -----------------\n");

	/* Indices */
	/* Common */
	BNX2X_ERR("def_idx(0x%x) def_att_idx(0x%x) attn_state(0x%x)"
		  " spq_prod_idx(0x%x)\n",
		  bp->def_idx, bp->def_att_idx,
		  bp->attn_state, bp->spq_prod_idx);
	BNX2X_ERR("DSB: attn bits(0x%x) ack(0x%x) id(0x%x) idx(0x%x)\n",
		  bp->def_status_blk->atten_status_block.attn_bits,
		  bp->def_status_blk->atten_status_block.attn_bits_ack,
		  bp->def_status_blk->atten_status_block.status_block_id,
		  bp->def_status_blk->atten_status_block.attn_bits_index);
	BNX2X_ERR(" def (");
	for (i = 0; i < HC_SP_SB_MAX_INDICES; i++)
		pr_cont("0x%x%s",
			bp->def_status_blk->sp_sb.index_values[i],
			(i == HC_SP_SB_MAX_INDICES - 1) ? ") " : " ");

	for (i = 0; i < sizeof(struct hc_sp_status_block_data)/sizeof(u32); i++)
		*((u32 *)&sp_sb_data + i) = REG_RD(bp, BAR_CSTRORM_INTMEM +
			CSTORM_SP_STATUS_BLOCK_DATA_OFFSET(func) +
			i*sizeof(u32));

	pr_cont("igu_sb_id(0x%x) igu_seg_id (0x%x) "
		"pf_id(0x%x) vnic_id(0x%x) "
		"vf_id(0x%x) vf_valid (0x%x)\n",
		sp_sb_data.igu_sb_id,
		sp_sb_data.igu_seg_id,
		sp_sb_data.p_func.pf_id,
		sp_sb_data.p_func.vnic_id,
		sp_sb_data.p_func.vf_id,
		sp_sb_data.p_func.vf_valid);

	for_each_eth_queue(bp, i) {
		struct bnx2x_fastpath *fp = &bp->fp[i];
		int loop;
		struct hc_status_block_data_e2 sb_data_e2;
		struct hc_status_block_data_e1x sb_data_e1x;
		struct hc_status_block_sm *hc_sm_p =
			CHIP_IS_E2(bp) ?
			sb_data_e2.common.state_machine :
			sb_data_e1x.common.state_machine;
		struct hc_index_data *hc_index_p =
			CHIP_IS_E2(bp) ?
			sb_data_e2.index_data :
			sb_data_e1x.index_data;
		int data_size;
		u32 *sb_data_p;

		/* Rx */
		BNX2X_ERR("fp%d: rx_bd_prod(0x%x) rx_bd_cons(0x%x)"
			  " rx_comp_prod(0x%x)"
			  " rx_comp_cons(0x%x) *rx_cons_sb(0x%x)\n",
			  i, fp->rx_bd_prod, fp->rx_bd_cons,
			  fp->rx_comp_prod,
			  fp->rx_comp_cons, le16_to_cpu(*fp->rx_cons_sb));
		BNX2X_ERR(" rx_sge_prod(0x%x) last_max_sge(0x%x)"
			  " fp_hc_idx(0x%x)\n",
			  fp->rx_sge_prod, fp->last_max_sge,
			  le16_to_cpu(fp->fp_hc_idx));

		/* Tx */
		BNX2X_ERR("fp%d: tx_pkt_prod(0x%x) tx_pkt_cons(0x%x)"
			  " tx_bd_prod(0x%x) tx_bd_cons(0x%x)"
			  " *tx_cons_sb(0x%x)\n",
			  i, fp->tx_pkt_prod, fp->tx_pkt_cons, fp->tx_bd_prod,
			  fp->tx_bd_cons, le16_to_cpu(*fp->tx_cons_sb));

		loop = CHIP_IS_E2(bp) ?
			HC_SB_MAX_INDICES_E2 : HC_SB_MAX_INDICES_E1X;

		/* host sb data */

#ifdef BCM_CNIC
		if (IS_FCOE_FP(fp))
			continue;
#endif
		BNX2X_ERR(" run indexes (");
		for (j = 0; j < HC_SB_MAX_SM; j++)
			pr_cont("0x%x%s",
				fp->sb_running_index[j],
				(j == HC_SB_MAX_SM - 1) ? ")" : " ");

		BNX2X_ERR(" indexes (");
		for (j = 0; j < loop; j++)
			pr_cont("0x%x%s",
				fp->sb_index_values[j],
				(j == loop - 1) ? ")" : " ");
		/* fw sb data */
		data_size = CHIP_IS_E2(bp) ?
			sizeof(struct hc_status_block_data_e2) :
			sizeof(struct hc_status_block_data_e1x);
		data_size /= sizeof(u32);
		sb_data_p = CHIP_IS_E2(bp) ?
			(u32 *)&sb_data_e2 :
			(u32 *)&sb_data_e1x;
		/* copy sb data in here */
		for (j = 0; j < data_size; j++)
			*(sb_data_p + j) = REG_RD(bp, BAR_CSTRORM_INTMEM +
				CSTORM_STATUS_BLOCK_DATA_OFFSET(fp->fw_sb_id) +
				j * sizeof(u32));

		if (CHIP_IS_E2(bp)) {
			pr_cont("pf_id(0x%x) vf_id (0x%x) vf_valid(0x%x) "
				"vnic_id(0x%x) same_igu_sb_1b(0x%x)\n",
				sb_data_e2.common.p_func.pf_id,
				sb_data_e2.common.p_func.vf_id,
				sb_data_e2.common.p_func.vf_valid,
				sb_data_e2.common.p_func.vnic_id,
				sb_data_e2.common.same_igu_sb_1b);
		} else {
			pr_cont("pf_id(0x%x) vf_id (0x%x) vf_valid(0x%x) "
				"vnic_id(0x%x) same_igu_sb_1b(0x%x)\n",
				sb_data_e1x.common.p_func.pf_id,
				sb_data_e1x.common.p_func.vf_id,
				sb_data_e1x.common.p_func.vf_valid,
				sb_data_e1x.common.p_func.vnic_id,
				sb_data_e1x.common.same_igu_sb_1b);
		}

		/* SB_SMs data */
		for (j = 0; j < HC_SB_MAX_SM; j++) {
			pr_cont("SM[%d] __flags (0x%x) "
				"igu_sb_id (0x%x) igu_seg_id(0x%x) "
				"time_to_expire (0x%x) "
				"timer_value(0x%x)\n", j,
				hc_sm_p[j].__flags,
				hc_sm_p[j].igu_sb_id,
				hc_sm_p[j].igu_seg_id,
				hc_sm_p[j].time_to_expire,
				hc_sm_p[j].timer_value);
		}

		/* Indices data */
		for (j = 0; j < loop; j++) {
			pr_cont("INDEX[%d] flags (0x%x) "
				"timeout (0x%x)\n", j,
				hc_index_p[j].flags,
				hc_index_p[j].timeout);
		}
	}

#ifdef BNX2X_STOP_ON_ERROR
	/* Rings */
	/* Rx */
	for_each_rx_queue(bp, i) {
		struct bnx2x_fastpath *fp = &bp->fp[i];

		start = RX_BD(le16_to_cpu(*fp->rx_cons_sb) - 10);
		end = RX_BD(le16_to_cpu(*fp->rx_cons_sb) + 503);
		for (j = start; j != end; j = RX_BD(j + 1)) {
			u32 *rx_bd = (u32 *)&fp->rx_desc_ring[j];
			struct sw_rx_bd *sw_bd = &fp->rx_buf_ring[j];

			BNX2X_ERR("fp%d: rx_bd[%x]=[%x:%x] sw_bd=[%p]\n",
				  i, j, rx_bd[1], rx_bd[0], sw_bd->skb);
		}

		start = RX_SGE(fp->rx_sge_prod);
		end = RX_SGE(fp->last_max_sge);
		for (j = start; j != end; j = RX_SGE(j + 1)) {
			u32 *rx_sge = (u32 *)&fp->rx_sge_ring[j];
			struct sw_rx_page *sw_page = &fp->rx_page_ring[j];

			BNX2X_ERR("fp%d: rx_sge[%x]=[%x:%x] sw_page=[%p]\n",
				  i, j, rx_sge[1], rx_sge[0], sw_page->page);
		}

		start = RCQ_BD(fp->rx_comp_cons - 10);
		end = RCQ_BD(fp->rx_comp_cons + 503);
		for (j = start; j != end; j = RCQ_BD(j + 1)) {
			u32 *cqe = (u32 *)&fp->rx_comp_ring[j];

			BNX2X_ERR("fp%d: cqe[%x]=[%x:%x:%x:%x]\n",
				  i, j, cqe[0], cqe[1], cqe[2], cqe[3]);
		}
	}

	/* Tx */
	for_each_tx_queue(bp, i) {
		struct bnx2x_fastpath *fp = &bp->fp[i];

		start = TX_BD(le16_to_cpu(*fp->tx_cons_sb) - 10);
		end = TX_BD(le16_to_cpu(*fp->tx_cons_sb) + 245);
		for (j = start; j != end; j = TX_BD(j + 1)) {
			struct sw_tx_bd *sw_bd = &fp->tx_buf_ring[j];

			BNX2X_ERR("fp%d: packet[%x]=[%p,%x]\n",
				  i, j, sw_bd->skb, sw_bd->first_bd);
		}

		start = TX_BD(fp->tx_bd_cons - 10);
		end = TX_BD(fp->tx_bd_cons + 254);
		for (j = start; j != end; j = TX_BD(j + 1)) {
			u32 *tx_bd = (u32 *)&fp->tx_desc_ring[j];

			BNX2X_ERR("fp%d: tx_bd[%x]=[%x:%x:%x:%x]\n",
				  i, j, tx_bd[0], tx_bd[1], tx_bd[2], tx_bd[3]);
		}
	}
#endif
	bnx2x_fw_dump(bp);
	bnx2x_mc_assert(bp);
	BNX2X_ERR("end crash dump -----------------\n");
}

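/* enable host coalescing (HC) block interrupts according to the
 * current mode: MSI-X, MSI or INTx
 */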
static void bnx2x_hc_int_enable(struct bnx2x *bp)
{
	int port = BP_PORT(bp);
	u32 addr = port ? HC_REG_CONFIG_1 : HC_REG_CONFIG_0;
	u32 val = REG_RD(bp, addr);
	int msix = (bp->flags & USING_MSIX_FLAG) ? 1 : 0;
	int msi = (bp->flags & USING_MSI_FLAG) ? 1 : 0;

	if (msix) {
		val &= ~(HC_CONFIG_0_REG_SINGLE_ISR_EN_0 |
			 HC_CONFIG_0_REG_INT_LINE_EN_0);
		val |= (HC_CONFIG_0_REG_MSI_MSIX_INT_EN_0 |
			HC_CONFIG_0_REG_ATTN_BIT_EN_0);
	} else if (msi) {
		val &= ~HC_CONFIG_0_REG_INT_LINE_EN_0;
		val |= (HC_CONFIG_0_REG_SINGLE_ISR_EN_0 |
			HC_CONFIG_0_REG_MSI_MSIX_INT_EN_0 |
			HC_CONFIG_0_REG_ATTN_BIT_EN_0);
	} else {
		val |= (HC_CONFIG_0_REG_SINGLE_ISR_EN_0 |
			HC_CONFIG_0_REG_MSI_MSIX_INT_EN_0 |
			HC_CONFIG_0_REG_INT_LINE_EN_0 |
			HC_CONFIG_0_REG_ATTN_BIT_EN_0);

		if (!CHIP_IS_E1(bp)) {
			DP(NETIF_MSG_INTR, "write %x to HC %d (addr 0x%x)\n",
			   val, port, addr);

			REG_WR(bp, addr, val);

			val &= ~HC_CONFIG_0_REG_MSI_MSIX_INT_EN_0;
		}
	}

	if (CHIP_IS_E1(bp))
		REG_WR(bp, HC_REG_INT_MASK + port*4, 0x1FFFF);

	DP(NETIF_MSG_INTR, "write %x to HC %d (addr 0x%x) mode %s\n",
	   val, port, addr, (msix ? "MSI-X" : (msi ? "MSI" : "INTx")));

	REG_WR(bp, addr, val);
	/*
	 * Ensure that HC_CONFIG is written before leading/trailing edge config
	 */
	mmiowb();
	barrier();

	if (!CHIP_IS_E1(bp)) {
		/* init leading/trailing edge */
		if (IS_MF(bp)) {
			val = (0xee0f | (1 << (BP_E1HVN(bp) + 4)));
			if (bp->port.pmf)
				/* enable nig and gpio3 attention */
				val |= 0x1100;
		} else
			val = 0xffff;

		REG_WR(bp, HC_REG_TRAILING_EDGE_0 + port*8, val);
		REG_WR(bp, HC_REG_LEADING_EDGE_0 + port*8, val);
	}

	/* Make sure that interrupts are indeed enabled from here on */
	mmiowb();
}

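/* same as above, for chips whose interrupts are handled by the IGU block */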
static void bnx2x_igu_int_enable(struct bnx2x *bp)
{
	u32 val;
	int msix = (bp->flags & USING_MSIX_FLAG) ? 1 : 0;
	int msi = (bp->flags & USING_MSI_FLAG) ? 1 : 0;

	val = REG_RD(bp, IGU_REG_PF_CONFIGURATION);

	if (msix) {
		val &= ~(IGU_PF_CONF_INT_LINE_EN |
			 IGU_PF_CONF_SINGLE_ISR_EN);
		val |= (IGU_PF_CONF_FUNC_EN |
			IGU_PF_CONF_MSI_MSIX_EN |
			IGU_PF_CONF_ATTN_BIT_EN);
	} else if (msi) {
		val &= ~IGU_PF_CONF_INT_LINE_EN;
		val |= (IGU_PF_CONF_FUNC_EN |
			IGU_PF_CONF_MSI_MSIX_EN |
			IGU_PF_CONF_ATTN_BIT_EN |
			IGU_PF_CONF_SINGLE_ISR_EN);
	} else {
		val &= ~IGU_PF_CONF_MSI_MSIX_EN;
		val |= (IGU_PF_CONF_FUNC_EN |
			IGU_PF_CONF_INT_LINE_EN |
			IGU_PF_CONF_ATTN_BIT_EN |
			IGU_PF_CONF_SINGLE_ISR_EN);
	}

	DP(NETIF_MSG_INTR, "write 0x%x to IGU mode %s\n",
	   val, (msix ? "MSI-X" : (msi ? "MSI" : "INTx")));

	REG_WR(bp, IGU_REG_PF_CONFIGURATION, val);

	barrier();

	/* init leading/trailing edge */
	if (IS_MF(bp)) {
		val = (0xee0f | (1 << (BP_E1HVN(bp) + 4)));
		if (bp->port.pmf)
			/* enable nig and gpio3 attention */
			val |= 0x1100;
	} else
		val = 0xffff;

	REG_WR(bp, IGU_REG_TRAILING_EDGE_LATCH, val);
	REG_WR(bp, IGU_REG_LEADING_EDGE_LATCH, val);

	/* Make sure that interrupts are indeed enabled from here on */
	mmiowb();
}

void bnx2x_int_enable(struct bnx2x *bp)
{
	if (bp->common.int_block == INT_BLOCK_HC)
		bnx2x_hc_int_enable(bp);
	else
		bnx2x_igu_int_enable(bp);
}

static void bnx2x_hc_int_disable(struct bnx2x *bp)
{
	int port = BP_PORT(bp);
	u32 addr = port ? HC_REG_CONFIG_1 : HC_REG_CONFIG_0;
	u32 val = REG_RD(bp, addr);

	/*
	 * In E1 we must use only PCI configuration space to disable the
	 * MSI/MSI-X capability; it is forbidden to clear
	 * IGU_PF_CONF_MSI_MSIX_EN in the HC block.
	 */
	if (CHIP_IS_E1(bp)) {
		/* Since IGU_PF_CONF_MSI_MSIX_EN is still always on, use
		 * the mask register to prevent the HC from sending
		 * interrupts after we exit the function
		 */
		REG_WR(bp, HC_REG_INT_MASK + port*4, 0);

		val &= ~(HC_CONFIG_0_REG_SINGLE_ISR_EN_0 |
			 HC_CONFIG_0_REG_INT_LINE_EN_0 |
			 HC_CONFIG_0_REG_ATTN_BIT_EN_0);
	} else
		val &= ~(HC_CONFIG_0_REG_SINGLE_ISR_EN_0 |
			 HC_CONFIG_0_REG_MSI_MSIX_INT_EN_0 |
			 HC_CONFIG_0_REG_INT_LINE_EN_0 |
			 HC_CONFIG_0_REG_ATTN_BIT_EN_0);

	DP(NETIF_MSG_INTR, "write %x to HC %d (addr 0x%x)\n",
	   val, port, addr);

	/* flush all outstanding writes */
	mmiowb();

	REG_WR(bp, addr, val);
	if (REG_RD(bp, addr) != val)
		BNX2X_ERR("BUG! proper val not read from IGU!\n");
}

static void bnx2x_igu_int_disable(struct bnx2x *bp)
{
	u32 val = REG_RD(bp, IGU_REG_PF_CONFIGURATION);

	val &= ~(IGU_PF_CONF_MSI_MSIX_EN |
		 IGU_PF_CONF_INT_LINE_EN |
		 IGU_PF_CONF_ATTN_BIT_EN);

	DP(NETIF_MSG_INTR, "write %x to IGU\n", val);

	/* flush all outstanding writes */
	mmiowb();

	REG_WR(bp, IGU_REG_PF_CONFIGURATION, val);
	if (REG_RD(bp, IGU_REG_PF_CONFIGURATION) != val)
		BNX2X_ERR("BUG! proper val not read from IGU!\n");
}

static void bnx2x_int_disable(struct bnx2x *bp)
{
	if (bp->common.int_block == INT_BLOCK_HC)
		bnx2x_hc_int_disable(bp);
	else
		bnx2x_igu_int_disable(bp);
}

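/* disable interrupts and wait for all ISRs and the slow-path task to finish */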
void bnx2x_int_disable_sync(struct bnx2x *bp, int disable_hw)
{
	int msix = (bp->flags & USING_MSIX_FLAG) ? 1 : 0;
	int i, offset;

	/* disable interrupt handling */
	atomic_inc(&bp->intr_sem);
	smp_wmb(); /* Ensure that bp->intr_sem update is SMP-safe */

	if (disable_hw)
		/* prevent the HW from sending interrupts */
		bnx2x_int_disable(bp);

	/* make sure all ISRs are done */
	if (msix) {
		synchronize_irq(bp->msix_table[0].vector);
		offset = 1;
#ifdef BCM_CNIC
		offset++;
#endif
		for_each_eth_queue(bp, i)
			synchronize_irq(bp->msix_table[i + offset].vector);
	} else
		synchronize_irq(bp->pdev->irq);

	/* make sure sp_task is not running */
	cancel_delayed_work(&bp->sp_task);
	flush_workqueue(bnx2x_wq);
}

/* fast path */

/*
 * General service functions
 */

/* Return true if the lock was acquired successfully */
static bool bnx2x_trylock_hw_lock(struct bnx2x *bp, u32 resource)
{
	u32 lock_status;
	u32 resource_bit = (1 << resource);
	int func = BP_FUNC(bp);
	u32 hw_lock_control_reg;

	DP(NETIF_MSG_HW, "Trying to take a lock on resource %d\n", resource);

	/* Validating that the resource is within range */
	if (resource > HW_LOCK_MAX_RESOURCE_VALUE) {
		DP(NETIF_MSG_HW,
		   "resource(0x%x) > HW_LOCK_MAX_RESOURCE_VALUE(0x%x)\n",
		   resource, HW_LOCK_MAX_RESOURCE_VALUE);
		return false;
	}

	if (func <= 5)
		hw_lock_control_reg = (MISC_REG_DRIVER_CONTROL_1 + func*8);
	else
		hw_lock_control_reg =
				(MISC_REG_DRIVER_CONTROL_7 + (func - 6)*8);

	/* Try to acquire the lock */
	REG_WR(bp, hw_lock_control_reg + 4, resource_bit);
	lock_status = REG_RD(bp, hw_lock_control_reg);
	if (lock_status & resource_bit)
		return true;

	DP(NETIF_MSG_HW, "Failed to get a lock on resource %d\n", resource);
	return false;
}

#ifdef BCM_CNIC
static void bnx2x_cnic_cfc_comp(struct bnx2x *bp, int cid);
#endif

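/* handle a slow-path (ramrod) completion CQE for the given fastpath */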
void bnx2x_sp_event(struct bnx2x_fastpath *fp,
		    union eth_rx_cqe *rr_cqe)
{
	struct bnx2x *bp = fp->bp;
	int cid = SW_CID(rr_cqe->ramrod_cqe.conn_and_cmd_data);
	int command = CQE_CMD(rr_cqe->ramrod_cqe.conn_and_cmd_data);

	DP(BNX2X_MSG_SP,
	   "fp %d cid %d got ramrod #%d state is %x type is %d\n",
	   fp->index, cid, command, bp->state,
	   rr_cqe->ramrod_cqe.ramrod_type);

	switch (command | fp->state) {
	case (RAMROD_CMD_ID_ETH_CLIENT_SETUP | BNX2X_FP_STATE_OPENING):
		DP(NETIF_MSG_IFUP, "got MULTI[%d] setup ramrod\n", cid);
		fp->state = BNX2X_FP_STATE_OPEN;
		break;

	case (RAMROD_CMD_ID_ETH_HALT | BNX2X_FP_STATE_HALTING):
		DP(NETIF_MSG_IFDOWN, "got MULTI[%d] halt ramrod\n", cid);
		fp->state = BNX2X_FP_STATE_HALTED;
		break;

	case (RAMROD_CMD_ID_ETH_TERMINATE | BNX2X_FP_STATE_TERMINATING):
 1388 DP(NETIF_MSG_IFDOWN, "got MULTI[%d] terminate ramrod\n", cid);
1389 fp->state = BNX2X_FP_STATE_TERMINATED;
Eliezer Tamir49d66772008-02-28 11:53:13 -08001390 break;
1391
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02001392 default:
Dmitry Kravkov523224a2010-10-06 03:23:26 +00001393 BNX2X_ERR("unexpected MC reply (%d) "
1394 "fp[%d] state is %x\n",
1395 command, fp->index, fp->state);
Eilon Greenstein34f80b02008-06-23 20:33:01 -07001396 break;
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02001397 }
Dmitry Kravkov523224a2010-10-06 03:23:26 +00001398
Dmitry Kravkov8fe23fb2010-10-06 03:27:41 +00001399 smp_mb__before_atomic_inc();
1400 atomic_inc(&bp->spq_left);
Dmitry Kravkov523224a2010-10-06 03:23:26 +00001401 /* push the change in fp->state and towards the memory */
1402 smp_wmb();
1403
1404 return;
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02001405}
1406
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00001407irqreturn_t bnx2x_interrupt(int irq, void *dev_instance)
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02001408{
Eilon Greenstein555f6c72009-02-12 08:36:11 +00001409 struct bnx2x *bp = netdev_priv(dev_instance);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02001410 u16 status = bnx2x_ack_int(bp);
Eilon Greenstein34f80b02008-06-23 20:33:01 -07001411 u16 mask;
Eilon Greensteinca003922009-08-12 22:53:28 -07001412 int i;
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02001413
Eilon Greenstein34f80b02008-06-23 20:33:01 -07001414 /* Return here if interrupt is shared and it's not for us */
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02001415 if (unlikely(status == 0)) {
1416 DP(NETIF_MSG_INTR, "not our interrupt!\n");
1417 return IRQ_NONE;
1418 }
Eilon Greensteinf5372252009-02-12 08:38:30 +00001419 DP(NETIF_MSG_INTR, "got an interrupt status 0x%x\n", status);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02001420
Eilon Greenstein34f80b02008-06-23 20:33:01 -07001421 /* Return here if interrupt is disabled */
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02001422 if (unlikely(atomic_read(&bp->intr_sem) != 0)) {
1423 DP(NETIF_MSG_INTR, "called but intr_sem not 0, returning\n");
1424 return IRQ_HANDLED;
1425 }
1426
Eilon Greenstein3196a882008-08-13 15:58:49 -07001427#ifdef BNX2X_STOP_ON_ERROR
1428 if (unlikely(bp->panic))
1429 return IRQ_HANDLED;
1430#endif
1431
Vladislav Zolotarovec6ba942010-12-13 05:44:01 +00001432 for_each_eth_queue(bp, i) {
Eilon Greensteinca003922009-08-12 22:53:28 -07001433 struct bnx2x_fastpath *fp = &bp->fp[i];
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02001434
Dmitry Kravkov523224a2010-10-06 03:23:26 +00001435 mask = 0x2 << (fp->index + CNIC_CONTEXT_USE);
Eilon Greensteinca003922009-08-12 22:53:28 -07001436 if (status & mask) {
Vladislav Zolotarov54b9dda2009-11-16 06:05:58 +00001437 /* Handle Rx and Tx according to SB id */
1438 prefetch(fp->rx_cons_sb);
Vladislav Zolotarov54b9dda2009-11-16 06:05:58 +00001439 prefetch(fp->tx_cons_sb);
Dmitry Kravkov523224a2010-10-06 03:23:26 +00001440 prefetch(&fp->sb_running_index[SM_RX_ID]);
Vladislav Zolotarov54b9dda2009-11-16 06:05:58 +00001441 napi_schedule(&bnx2x_fp(bp, fp->index, napi));
Eilon Greensteinca003922009-08-12 22:53:28 -07001442 status &= ~mask;
1443 }
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02001444 }
1445
Michael Chan993ac7b2009-10-10 13:46:56 +00001446#ifdef BCM_CNIC
Dmitry Kravkov523224a2010-10-06 03:23:26 +00001447 mask = 0x2;
Michael Chan993ac7b2009-10-10 13:46:56 +00001448 if (status & (mask | 0x1)) {
1449 struct cnic_ops *c_ops = NULL;
1450
1451 rcu_read_lock();
1452 c_ops = rcu_dereference(bp->cnic_ops);
1453 if (c_ops)
1454 c_ops->cnic_handler(bp->cnic_data, NULL);
1455 rcu_read_unlock();
1456
1457 status &= ~mask;
1458 }
1459#endif
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02001460
Eilon Greenstein34f80b02008-06-23 20:33:01 -07001461 if (unlikely(status & 0x1)) {
Eilon Greenstein1cf167f2009-01-14 21:22:18 -08001462 queue_delayed_work(bnx2x_wq, &bp->sp_task, 0);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02001463
1464 status &= ~0x1;
1465 if (!status)
1466 return IRQ_HANDLED;
1467 }
1468
Vladislav Zolotarovcdaa7cb2010-04-19 01:13:57 +00001469 if (unlikely(status))
1470 DP(NETIF_MSG_INTR, "got an unknown interrupt! (status 0x%x)\n",
Eilon Greenstein34f80b02008-06-23 20:33:01 -07001471 status);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02001472
1473 return IRQ_HANDLED;
1474}
1475
1476/* end of fast path */
1477
Yaniv Rosnerc18487e2008-06-23 20:27:52 -07001478
1479/* Link */
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02001480
1481/*
1482 * General service functions
1483 */
1484
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00001485int bnx2x_acquire_hw_lock(struct bnx2x *bp, u32 resource)
Eliezer Tamirf1410642008-02-28 11:51:50 -08001486{
Eliezer Tamirf1410642008-02-28 11:51:50 -08001487 u32 lock_status;
1488 u32 resource_bit = (1 << resource);
Yitchak Gertner4a37fb62008-08-13 15:50:23 -07001489 int func = BP_FUNC(bp);
1490 u32 hw_lock_control_reg;
Yaniv Rosnerc18487e2008-06-23 20:27:52 -07001491 int cnt;
Eliezer Tamirf1410642008-02-28 11:51:50 -08001492
1493 /* Validating that the resource is within range */
1494 if (resource > HW_LOCK_MAX_RESOURCE_VALUE) {
1495 DP(NETIF_MSG_HW,
1496 "resource(0x%x) > HW_LOCK_MAX_RESOURCE_VALUE(0x%x)\n",
1497 resource, HW_LOCK_MAX_RESOURCE_VALUE);
1498 return -EINVAL;
1499 }
1500
Yitchak Gertner4a37fb62008-08-13 15:50:23 -07001501 if (func <= 5) {
1502 hw_lock_control_reg = (MISC_REG_DRIVER_CONTROL_1 + func*8);
1503 } else {
1504 hw_lock_control_reg =
1505 (MISC_REG_DRIVER_CONTROL_7 + (func - 6)*8);
1506 }
1507
Eliezer Tamirf1410642008-02-28 11:51:50 -08001508 /* Validating that the resource is not already taken */
Yitchak Gertner4a37fb62008-08-13 15:50:23 -07001509 lock_status = REG_RD(bp, hw_lock_control_reg);
Eliezer Tamirf1410642008-02-28 11:51:50 -08001510 if (lock_status & resource_bit) {
1511 DP(NETIF_MSG_HW, "lock_status 0x%x resource_bit 0x%x\n",
1512 lock_status, resource_bit);
1513 return -EEXIST;
1514 }
1515
Eilon Greenstein46230476b2008-08-25 15:23:30 -07001516 /* Try for 5 seconds, polling every 5ms */
1517 for (cnt = 0; cnt < 1000; cnt++) {
Eliezer Tamirf1410642008-02-28 11:51:50 -08001518 /* Try to acquire the lock */
Yitchak Gertner4a37fb62008-08-13 15:50:23 -07001519 REG_WR(bp, hw_lock_control_reg + 4, resource_bit);
1520 lock_status = REG_RD(bp, hw_lock_control_reg);
Eliezer Tamirf1410642008-02-28 11:51:50 -08001521 if (lock_status & resource_bit)
1522 return 0;
1523
1524 msleep(5);
1525 }
1526 DP(NETIF_MSG_HW, "Timeout\n");
1527 return -EAGAIN;
1528}
1529
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00001530int bnx2x_release_hw_lock(struct bnx2x *bp, u32 resource)
Eliezer Tamirf1410642008-02-28 11:51:50 -08001531{
1532 u32 lock_status;
1533 u32 resource_bit = (1 << resource);
Yitchak Gertner4a37fb62008-08-13 15:50:23 -07001534 int func = BP_FUNC(bp);
1535 u32 hw_lock_control_reg;
Eliezer Tamirf1410642008-02-28 11:51:50 -08001536
Vladislav Zolotarov72fd0712010-04-19 01:13:12 +00001537 DP(NETIF_MSG_HW, "Releasing a lock on resource %d\n", resource);
1538
Eliezer Tamirf1410642008-02-28 11:51:50 -08001539 /* Validating that the resource is within range */
1540 if (resource > HW_LOCK_MAX_RESOURCE_VALUE) {
1541 DP(NETIF_MSG_HW,
1542 "resource(0x%x) > HW_LOCK_MAX_RESOURCE_VALUE(0x%x)\n",
1543 resource, HW_LOCK_MAX_RESOURCE_VALUE);
1544 return -EINVAL;
1545 }
1546
Yitchak Gertner4a37fb62008-08-13 15:50:23 -07001547 if (func <= 5) {
1548 hw_lock_control_reg = (MISC_REG_DRIVER_CONTROL_1 + func*8);
1549 } else {
1550 hw_lock_control_reg =
1551 (MISC_REG_DRIVER_CONTROL_7 + (func - 6)*8);
1552 }
1553
Eliezer Tamirf1410642008-02-28 11:51:50 -08001554 /* Validating that the resource is currently taken */
Yitchak Gertner4a37fb62008-08-13 15:50:23 -07001555 lock_status = REG_RD(bp, hw_lock_control_reg);
Eliezer Tamirf1410642008-02-28 11:51:50 -08001556 if (!(lock_status & resource_bit)) {
1557 DP(NETIF_MSG_HW, "lock_status 0x%x resource_bit 0x%x\n",
1558 lock_status, resource_bit);
1559 return -EFAULT;
1560 }
1561
Yitchak Gertner4a37fb62008-08-13 15:50:23 -07001562 REG_WR(bp, hw_lock_control_reg, resource_bit);
Eliezer Tamirf1410642008-02-28 11:51:50 -08001563 return 0;
1564}
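/*
 * A minimal usage sketch (hypothetical helper): the blocking acquire/
 * release pair brackets a critical section; HW_LOCK_RESOURCE_SPIO is
 * one of the real resource ids used elsewhere in this file.
 */
static int bnx2x_example_locked_access(struct bnx2x *bp)
{
	int rc = bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_SPIO);
	if (rc)
		return rc;	/* -EINVAL, -EEXIST or -EAGAIN on timeout */

	/* ... critical section ... */

	return bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_SPIO);
}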
1565
Yaniv Rosnerc18487e2008-06-23 20:27:52 -07001566
Eilon Greenstein4acac6a2009-02-12 08:36:52 +00001567int bnx2x_get_gpio(struct bnx2x *bp, int gpio_num, u8 port)
1568{
1569 /* The GPIO should be swapped if swap register is set and active */
1570 int gpio_port = (REG_RD(bp, NIG_REG_PORT_SWAP) &&
1571 REG_RD(bp, NIG_REG_STRAP_OVERRIDE)) ^ port;
1572 int gpio_shift = gpio_num +
1573 (gpio_port ? MISC_REGISTERS_GPIO_PORT_SHIFT : 0);
1574 u32 gpio_mask = (1 << gpio_shift);
1575 u32 gpio_reg;
1576 int value;
1577
1578 if (gpio_num > MISC_REGISTERS_GPIO_3) {
1579 BNX2X_ERR("Invalid GPIO %d\n", gpio_num);
1580 return -EINVAL;
1581 }
1582
1583 /* read GPIO value */
1584 gpio_reg = REG_RD(bp, MISC_REG_GPIO);
1585
1586 /* get the requested pin value */
1587 if ((gpio_reg & gpio_mask) == gpio_mask)
1588 value = 1;
1589 else
1590 value = 0;
1591
1592 DP(NETIF_MSG_LINK, "pin %d value 0x%x\n", gpio_num, value);
1593
1594 return value;
1595}
1596
Eilon Greenstein17de50b2008-08-13 15:56:59 -07001597int bnx2x_set_gpio(struct bnx2x *bp, int gpio_num, u32 mode, u8 port)
Eliezer Tamirf1410642008-02-28 11:51:50 -08001598{
1599 /* The GPIO should be swapped if swap register is set and active */
1600 int gpio_port = (REG_RD(bp, NIG_REG_PORT_SWAP) &&
Eilon Greenstein17de50b2008-08-13 15:56:59 -07001601 REG_RD(bp, NIG_REG_STRAP_OVERRIDE)) ^ port;
Eliezer Tamirf1410642008-02-28 11:51:50 -08001602 int gpio_shift = gpio_num +
1603 (gpio_port ? MISC_REGISTERS_GPIO_PORT_SHIFT : 0);
1604 u32 gpio_mask = (1 << gpio_shift);
1605 u32 gpio_reg;
1606
1607 if (gpio_num > MISC_REGISTERS_GPIO_3) {
1608 BNX2X_ERR("Invalid GPIO %d\n", gpio_num);
1609 return -EINVAL;
1610 }
1611
Yitchak Gertner4a37fb62008-08-13 15:50:23 -07001612 bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_GPIO);
Eliezer Tamirf1410642008-02-28 11:51:50 -08001613 /* read GPIO and mask except the float bits */
1614 gpio_reg = (REG_RD(bp, MISC_REG_GPIO) & MISC_REGISTERS_GPIO_FLOAT);
1615
1616 switch (mode) {
1617 case MISC_REGISTERS_GPIO_OUTPUT_LOW:
1618 DP(NETIF_MSG_LINK, "Set GPIO %d (shift %d) -> output low\n",
1619 gpio_num, gpio_shift);
1620 /* clear FLOAT and set CLR */
1621 gpio_reg &= ~(gpio_mask << MISC_REGISTERS_GPIO_FLOAT_POS);
1622 gpio_reg |= (gpio_mask << MISC_REGISTERS_GPIO_CLR_POS);
1623 break;
1624
1625 case MISC_REGISTERS_GPIO_OUTPUT_HIGH:
1626 DP(NETIF_MSG_LINK, "Set GPIO %d (shift %d) -> output high\n",
1627 gpio_num, gpio_shift);
1628 /* clear FLOAT and set SET */
1629 gpio_reg &= ~(gpio_mask << MISC_REGISTERS_GPIO_FLOAT_POS);
1630 gpio_reg |= (gpio_mask << MISC_REGISTERS_GPIO_SET_POS);
1631 break;
1632
Eilon Greenstein17de50b2008-08-13 15:56:59 -07001633 case MISC_REGISTERS_GPIO_INPUT_HI_Z:
Eliezer Tamirf1410642008-02-28 11:51:50 -08001634 DP(NETIF_MSG_LINK, "Set GPIO %d (shift %d) -> input\n",
1635 gpio_num, gpio_shift);
1636 /* set FLOAT */
1637 gpio_reg |= (gpio_mask << MISC_REGISTERS_GPIO_FLOAT_POS);
1638 break;
1639
1640 default:
1641 break;
1642 }
1643
1644 REG_WR(bp, MISC_REG_GPIO, gpio_reg);
Yitchak Gertner4a37fb62008-08-13 15:50:23 -07001645 bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_GPIO);
Eliezer Tamirf1410642008-02-28 11:51:50 -08001646
1647 return 0;
1648}
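/*
 * Illustrative use (hypothetical pin and timing): drive a pin low for
 * a moment and then float it again; the MISC_REGISTERS_GPIO_* mode ids
 * are the real ones handled by the switch above.
 */
static void bnx2x_example_pulse_gpio(struct bnx2x *bp, u8 port)
{
	bnx2x_set_gpio(bp, MISC_REGISTERS_GPIO_1,
		       MISC_REGISTERS_GPIO_OUTPUT_LOW, port);
	msleep(1);		/* hypothetical pulse width */
	bnx2x_set_gpio(bp, MISC_REGISTERS_GPIO_1,
		       MISC_REGISTERS_GPIO_INPUT_HI_Z, port);
}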
1649
Eilon Greenstein4acac6a2009-02-12 08:36:52 +00001650int bnx2x_set_gpio_int(struct bnx2x *bp, int gpio_num, u32 mode, u8 port)
1651{
1652 /* The GPIO should be swapped if swap register is set and active */
1653 int gpio_port = (REG_RD(bp, NIG_REG_PORT_SWAP) &&
1654 REG_RD(bp, NIG_REG_STRAP_OVERRIDE)) ^ port;
1655 int gpio_shift = gpio_num +
1656 (gpio_port ? MISC_REGISTERS_GPIO_PORT_SHIFT : 0);
1657 u32 gpio_mask = (1 << gpio_shift);
1658 u32 gpio_reg;
1659
1660 if (gpio_num > MISC_REGISTERS_GPIO_3) {
1661 BNX2X_ERR("Invalid GPIO %d\n", gpio_num);
1662 return -EINVAL;
1663 }
1664
1665 bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_GPIO);
1666 /* read GPIO int */
1667 gpio_reg = REG_RD(bp, MISC_REG_GPIO_INT);
1668
1669 switch (mode) {
1670 case MISC_REGISTERS_GPIO_INT_OUTPUT_CLR:
1671 DP(NETIF_MSG_LINK, "Clear GPIO INT %d (shift %d) -> "
1672 "output low\n", gpio_num, gpio_shift);
1673 /* clear SET and set CLR */
1674 gpio_reg &= ~(gpio_mask << MISC_REGISTERS_GPIO_INT_SET_POS);
1675 gpio_reg |= (gpio_mask << MISC_REGISTERS_GPIO_INT_CLR_POS);
1676 break;
1677
1678 case MISC_REGISTERS_GPIO_INT_OUTPUT_SET:
1679 DP(NETIF_MSG_LINK, "Set GPIO INT %d (shift %d) -> "
1680 "output high\n", gpio_num, gpio_shift);
1681 /* clear CLR and set SET */
1682 gpio_reg &= ~(gpio_mask << MISC_REGISTERS_GPIO_INT_CLR_POS);
1683 gpio_reg |= (gpio_mask << MISC_REGISTERS_GPIO_INT_SET_POS);
1684 break;
1685
1686 default:
1687 break;
1688 }
1689
1690 REG_WR(bp, MISC_REG_GPIO_INT, gpio_reg);
1691 bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_GPIO);
1692
1693 return 0;
1694}
1695
Eliezer Tamirf1410642008-02-28 11:51:50 -08001696static int bnx2x_set_spio(struct bnx2x *bp, int spio_num, u32 mode)
1697{
1698 u32 spio_mask = (1 << spio_num);
1699 u32 spio_reg;
1700
1701 if ((spio_num < MISC_REGISTERS_SPIO_4) ||
1702 (spio_num > MISC_REGISTERS_SPIO_7)) {
1703 BNX2X_ERR("Invalid SPIO %d\n", spio_num);
1704 return -EINVAL;
1705 }
1706
Yitchak Gertner4a37fb62008-08-13 15:50:23 -07001707 bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_SPIO);
Eliezer Tamirf1410642008-02-28 11:51:50 -08001708 /* read SPIO and mask except the float bits */
1709 spio_reg = (REG_RD(bp, MISC_REG_SPIO) & MISC_REGISTERS_SPIO_FLOAT);
1710
1711 switch (mode) {
Eilon Greenstein6378c022008-08-13 15:59:25 -07001712 case MISC_REGISTERS_SPIO_OUTPUT_LOW:
Eliezer Tamirf1410642008-02-28 11:51:50 -08001713 DP(NETIF_MSG_LINK, "Set SPIO %d -> output low\n", spio_num);
1714 /* clear FLOAT and set CLR */
1715 spio_reg &= ~(spio_mask << MISC_REGISTERS_SPIO_FLOAT_POS);
1716 spio_reg |= (spio_mask << MISC_REGISTERS_SPIO_CLR_POS);
1717 break;
1718
Eilon Greenstein6378c022008-08-13 15:59:25 -07001719 case MISC_REGISTERS_SPIO_OUTPUT_HIGH:
Eliezer Tamirf1410642008-02-28 11:51:50 -08001720 DP(NETIF_MSG_LINK, "Set SPIO %d -> output high\n", spio_num);
1721 /* clear FLOAT and set SET */
1722 spio_reg &= ~(spio_mask << MISC_REGISTERS_SPIO_FLOAT_POS);
1723 spio_reg |= (spio_mask << MISC_REGISTERS_SPIO_SET_POS);
1724 break;
1725
1726 case MISC_REGISTERS_SPIO_INPUT_HI_Z:
1727 DP(NETIF_MSG_LINK, "Set SPIO %d -> input\n", spio_num);
1728 /* set FLOAT */
1729 spio_reg |= (spio_mask << MISC_REGISTERS_SPIO_FLOAT_POS);
1730 break;
1731
1732 default:
1733 break;
1734 }
1735
1736 REG_WR(bp, MISC_REG_SPIO, spio_reg);
Yitchak Gertner4a37fb62008-08-13 15:50:23 -07001737 bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_SPIO);
Eliezer Tamirf1410642008-02-28 11:51:50 -08001738
1739 return 0;
1740}
1741
Yaniv Rosnera22f0782010-09-07 11:41:20 +00001742int bnx2x_get_link_cfg_idx(struct bnx2x *bp)
1743{
1744 u32 sel_phy_idx = 0;
1745 if (bp->link_vars.link_up) {
1746 sel_phy_idx = EXT_PHY1;
1747 /* In case link is SERDES, check if the EXT_PHY2 is the one */
1748 if ((bp->link_vars.link_status & LINK_STATUS_SERDES_LINK) &&
1749 (bp->link_params.phy[EXT_PHY2].supported & SUPPORTED_FIBRE))
1750 sel_phy_idx = EXT_PHY2;
1751 } else {
1752
1753 switch (bnx2x_phy_selection(&bp->link_params)) {
1754 case PORT_HW_CFG_PHY_SELECTION_HARDWARE_DEFAULT:
1755 case PORT_HW_CFG_PHY_SELECTION_FIRST_PHY:
1756 case PORT_HW_CFG_PHY_SELECTION_FIRST_PHY_PRIORITY:
1757 sel_phy_idx = EXT_PHY1;
1758 break;
1759 case PORT_HW_CFG_PHY_SELECTION_SECOND_PHY:
1760 case PORT_HW_CFG_PHY_SELECTION_SECOND_PHY_PRIORITY:
1761 sel_phy_idx = EXT_PHY2;
1762 break;
1763 }
1764 }
1765 /*
 1766 * The selected active PHY is always the one after swapping (in case PHY
 1767 * swapping is enabled), so when swapping is enabled we need to reverse
 1768 * the configuration.
1769 */
1770
1771 if (bp->link_params.multi_phy_config &
1772 PORT_HW_CFG_PHY_SWAPPED_ENABLED) {
1773 if (sel_phy_idx == EXT_PHY1)
1774 sel_phy_idx = EXT_PHY2;
1775 else if (sel_phy_idx == EXT_PHY2)
1776 sel_phy_idx = EXT_PHY1;
1777 }
1778 return LINK_CONFIG_IDX(sel_phy_idx);
1779}
1780
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00001781void bnx2x_calc_fc_adv(struct bnx2x *bp)
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02001782{
Yaniv Rosnera22f0782010-09-07 11:41:20 +00001783 u8 cfg_idx = bnx2x_get_link_cfg_idx(bp);
Eilon Greensteinad33ea32009-01-14 21:24:57 -08001784 switch (bp->link_vars.ieee_fc &
1785 MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_MASK) {
Yaniv Rosnerc18487e2008-06-23 20:27:52 -07001786 case MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_NONE:
Yaniv Rosnera22f0782010-09-07 11:41:20 +00001787 bp->port.advertising[cfg_idx] &= ~(ADVERTISED_Asym_Pause |
Dmitry Kravkovf85582f2010-10-06 03:34:21 +00001788 ADVERTISED_Pause);
Eliezer Tamirf1410642008-02-28 11:51:50 -08001789 break;
Eilon Greenstein356e2382009-02-12 08:38:32 +00001790
Yaniv Rosnerc18487e2008-06-23 20:27:52 -07001791 case MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_BOTH:
Yaniv Rosnera22f0782010-09-07 11:41:20 +00001792 bp->port.advertising[cfg_idx] |= (ADVERTISED_Asym_Pause |
Dmitry Kravkovf85582f2010-10-06 03:34:21 +00001793 ADVERTISED_Pause);
Eliezer Tamirf1410642008-02-28 11:51:50 -08001794 break;
Eilon Greenstein356e2382009-02-12 08:38:32 +00001795
Yaniv Rosnerc18487e2008-06-23 20:27:52 -07001796 case MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_ASYMMETRIC:
Yaniv Rosnera22f0782010-09-07 11:41:20 +00001797 bp->port.advertising[cfg_idx] |= ADVERTISED_Asym_Pause;
Eliezer Tamirf1410642008-02-28 11:51:50 -08001798 break;
Eilon Greenstein356e2382009-02-12 08:38:32 +00001799
Eliezer Tamirf1410642008-02-28 11:51:50 -08001800 default:
Yaniv Rosnera22f0782010-09-07 11:41:20 +00001801 bp->port.advertising[cfg_idx] &= ~(ADVERTISED_Asym_Pause |
Dmitry Kravkovf85582f2010-10-06 03:34:21 +00001802 ADVERTISED_Pause);
Eliezer Tamirf1410642008-02-28 11:51:50 -08001803 break;
1804 }
1805}
1806
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00001807u8 bnx2x_initial_phy_init(struct bnx2x *bp, int load_mode)
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02001808{
Eilon Greenstein19680c42008-08-13 15:47:33 -07001809 if (!BP_NOMCP(bp)) {
1810 u8 rc;
Yaniv Rosnera22f0782010-09-07 11:41:20 +00001811 int cfx_idx = bnx2x_get_link_cfg_idx(bp);
1812 u16 req_line_speed = bp->link_params.req_line_speed[cfx_idx];
Eilon Greenstein19680c42008-08-13 15:47:33 -07001813 /* Initialize link parameters structure variables */
Yaniv Rosner8c99e7b2008-08-13 15:56:17 -07001814 /* It is recommended to turn off RX FC for jumbo frames
1815 for better performance */
Dmitry Kravkovf2e08992010-10-06 03:28:26 +00001816 if ((CHIP_IS_E1x(bp)) && (bp->dev->mtu > 5000))
David S. Millerc0700f92008-12-16 23:53:20 -08001817 bp->link_params.req_fc_auto_adv = BNX2X_FLOW_CTRL_TX;
Yaniv Rosner8c99e7b2008-08-13 15:56:17 -07001818 else
David S. Millerc0700f92008-12-16 23:53:20 -08001819 bp->link_params.req_fc_auto_adv = BNX2X_FLOW_CTRL_BOTH;
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02001820
Yitchak Gertner4a37fb62008-08-13 15:50:23 -07001821 bnx2x_acquire_phy_lock(bp);
Eilon Greensteinb5bf9062009-02-12 08:38:08 +00001822
Yaniv Rosnera22f0782010-09-07 11:41:20 +00001823 if (load_mode == LOAD_DIAG) {
Yaniv Rosnerde6eae12010-09-07 11:41:13 +00001824 bp->link_params.loopback_mode = LOOPBACK_XGXS;
Yaniv Rosnera22f0782010-09-07 11:41:20 +00001825 bp->link_params.req_line_speed[cfx_idx] = SPEED_10000;
1826 }
Eilon Greensteinb5bf9062009-02-12 08:38:08 +00001827
Eilon Greenstein19680c42008-08-13 15:47:33 -07001828 rc = bnx2x_phy_init(&bp->link_params, &bp->link_vars);
Eilon Greensteinb5bf9062009-02-12 08:38:08 +00001829
Yitchak Gertner4a37fb62008-08-13 15:50:23 -07001830 bnx2x_release_phy_lock(bp);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02001831
Eilon Greenstein3c96c682009-01-14 21:25:31 -08001832 bnx2x_calc_fc_adv(bp);
1833
Eilon Greensteinb5bf9062009-02-12 08:38:08 +00001834 if (CHIP_REV_IS_SLOW(bp) && bp->link_vars.link_up) {
1835 bnx2x_stats_handle(bp, STATS_EVENT_LINK_UP);
Eilon Greenstein19680c42008-08-13 15:47:33 -07001836 bnx2x_link_report(bp);
Eilon Greensteinb5bf9062009-02-12 08:38:08 +00001837 }
Yaniv Rosnera22f0782010-09-07 11:41:20 +00001838 bp->link_params.req_line_speed[cfx_idx] = req_line_speed;
Eilon Greenstein19680c42008-08-13 15:47:33 -07001839 return rc;
1840 }
Eilon Greensteinf5372252009-02-12 08:38:30 +00001841 BNX2X_ERR("Bootcode is missing - can not initialize link\n");
Eilon Greenstein19680c42008-08-13 15:47:33 -07001842 return -EINVAL;
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02001843}
1844
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00001845void bnx2x_link_set(struct bnx2x *bp)
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02001846{
Eilon Greenstein19680c42008-08-13 15:47:33 -07001847 if (!BP_NOMCP(bp)) {
Yitchak Gertner4a37fb62008-08-13 15:50:23 -07001848 bnx2x_acquire_phy_lock(bp);
Yaniv Rosner54c2fb72010-09-01 09:51:23 +00001849 bnx2x_link_reset(&bp->link_params, &bp->link_vars, 1);
Eilon Greenstein19680c42008-08-13 15:47:33 -07001850 bnx2x_phy_init(&bp->link_params, &bp->link_vars);
Yitchak Gertner4a37fb62008-08-13 15:50:23 -07001851 bnx2x_release_phy_lock(bp);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02001852
Eilon Greenstein19680c42008-08-13 15:47:33 -07001853 bnx2x_calc_fc_adv(bp);
1854 } else
Eilon Greensteinf5372252009-02-12 08:38:30 +00001855 BNX2X_ERR("Bootcode is missing - can not set link\n");
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02001856}
1857
Yaniv Rosnerc18487e2008-06-23 20:27:52 -07001858static void bnx2x__link_reset(struct bnx2x *bp)
1859{
Eilon Greenstein19680c42008-08-13 15:47:33 -07001860 if (!BP_NOMCP(bp)) {
Yitchak Gertner4a37fb62008-08-13 15:50:23 -07001861 bnx2x_acquire_phy_lock(bp);
Eilon Greenstein589abe32009-02-12 08:36:55 +00001862 bnx2x_link_reset(&bp->link_params, &bp->link_vars, 1);
Yitchak Gertner4a37fb62008-08-13 15:50:23 -07001863 bnx2x_release_phy_lock(bp);
Eilon Greenstein19680c42008-08-13 15:47:33 -07001864 } else
Eilon Greensteinf5372252009-02-12 08:38:30 +00001865 BNX2X_ERR("Bootcode is missing - can not reset link\n");
Yaniv Rosnerc18487e2008-06-23 20:27:52 -07001866}
1867
Yaniv Rosnera22f0782010-09-07 11:41:20 +00001868u8 bnx2x_link_test(struct bnx2x *bp, u8 is_serdes)
Yaniv Rosnerc18487e2008-06-23 20:27:52 -07001869{
Vladislav Zolotarov2145a922010-04-19 01:13:49 +00001870 u8 rc = 0;
Yaniv Rosnerc18487e2008-06-23 20:27:52 -07001871
Vladislav Zolotarov2145a922010-04-19 01:13:49 +00001872 if (!BP_NOMCP(bp)) {
1873 bnx2x_acquire_phy_lock(bp);
Yaniv Rosnera22f0782010-09-07 11:41:20 +00001874 rc = bnx2x_test_link(&bp->link_params, &bp->link_vars,
1875 is_serdes);
Vladislav Zolotarov2145a922010-04-19 01:13:49 +00001876 bnx2x_release_phy_lock(bp);
1877 } else
1878 BNX2X_ERR("Bootcode is missing - can not test link\n");
Yaniv Rosnerc18487e2008-06-23 20:27:52 -07001879
1880 return rc;
1881}
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02001882
Eilon Greenstein8a1c38d2009-02-12 08:36:40 +00001883static void bnx2x_init_port_minmax(struct bnx2x *bp)
Eilon Greenstein34f80b02008-06-23 20:33:01 -07001884{
Eilon Greenstein8a1c38d2009-02-12 08:36:40 +00001885 u32 r_param = bp->link_vars.line_speed / 8;
1886 u32 fair_periodic_timeout_usec;
1887 u32 t_fair;
Eilon Greenstein34f80b02008-06-23 20:33:01 -07001888
Eilon Greenstein8a1c38d2009-02-12 08:36:40 +00001889 memset(&(bp->cmng.rs_vars), 0,
1890 sizeof(struct rate_shaping_vars_per_port));
1891 memset(&(bp->cmng.fair_vars), 0, sizeof(struct fairness_vars_per_port));
Eilon Greenstein34f80b02008-06-23 20:33:01 -07001892
Eilon Greenstein8a1c38d2009-02-12 08:36:40 +00001893 /* 100 usec in SDM ticks = 25 since each tick is 4 usec */
1894 bp->cmng.rs_vars.rs_periodic_timeout = RS_PERIODIC_TIMEOUT_USEC / 4;
Eilon Greenstein34f80b02008-06-23 20:33:01 -07001895
Eilon Greenstein8a1c38d2009-02-12 08:36:40 +00001896 /* this is the threshold below which no timer arming will occur;
 1897 the 1.25 coefficient makes the threshold a little bigger
 1898 than the real time, to compensate for timer inaccuracy */
1899 bp->cmng.rs_vars.rs_threshold =
Eilon Greenstein34f80b02008-06-23 20:33:01 -07001900 (RS_PERIODIC_TIMEOUT_USEC * r_param * 5) / 4;
1901
Eilon Greenstein8a1c38d2009-02-12 08:36:40 +00001902 /* resolution of fairness timer */
1903 fair_periodic_timeout_usec = QM_ARB_BYTES / r_param;
1904 /* for 10G it is 1000usec. for 1G it is 10000usec. */
1905 t_fair = T_FAIR_COEF / bp->link_vars.line_speed;
Eilon Greenstein34f80b02008-06-23 20:33:01 -07001906
Eilon Greenstein8a1c38d2009-02-12 08:36:40 +00001907 /* this is the threshold below which we won't arm the timer anymore */
1908 bp->cmng.fair_vars.fair_threshold = QM_ARB_BYTES;
Eilon Greenstein34f80b02008-06-23 20:33:01 -07001909
Eilon Greenstein8a1c38d2009-02-12 08:36:40 +00001910 /* we multiply by 1e3/8 to get bytes/msec.
 1911 We don't want the credits to exceed
 1912 t_fair*FAIR_MEM (the algorithm resolution) */
1913 bp->cmng.fair_vars.upper_bound = r_param * t_fair * FAIR_MEM;
1914 /* since each tick is 4 usec */
1915 bp->cmng.fair_vars.fairness_timeout = fair_periodic_timeout_usec / 4;
Eilon Greenstein34f80b02008-06-23 20:33:01 -07001916}
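/*
 * Worked example (illustrative numbers, derived only from the comments
 * above): at 10 Gbps, r_param = 10000 / 8 = 1250 bytes/usec, so
 * rs_threshold = (100 * 1250 * 5) / 4 = 156250 bytes, i.e. the 100 usec
 * budget scaled by the 1.25 safety coefficient; t_fair comes out at
 * 1000 usec for 10G and 10000 usec for 1G, as noted above.
 */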
1917
Eilon Greenstein2691d512009-08-12 08:22:08 +00001918/* Calculates the sum of vn_min_rates.
1919 It's needed for further normalizing of the min_rates.
1920 Returns:
1921 sum of vn_min_rates.
1922 or
1923 0 - if all the min_rates are 0.
 1924 In the latter case the fairness algorithm should be deactivated.
1925 If not all min_rates are zero then those that are zeroes will be set to 1.
1926 */
1927static void bnx2x_calc_vn_weight_sum(struct bnx2x *bp)
1928{
1929 int all_zero = 1;
Eilon Greenstein2691d512009-08-12 08:22:08 +00001930 int vn;
1931
1932 bp->vn_weight_sum = 0;
1933 for (vn = VN_0; vn < E1HVN_MAX; vn++) {
Dmitry Kravkovf2e08992010-10-06 03:28:26 +00001934 u32 vn_cfg = bp->mf_config[vn];
Eilon Greenstein2691d512009-08-12 08:22:08 +00001935 u32 vn_min_rate = ((vn_cfg & FUNC_MF_CFG_MIN_BW_MASK) >>
1936 FUNC_MF_CFG_MIN_BW_SHIFT) * 100;
1937
1938 /* Skip hidden vns */
1939 if (vn_cfg & FUNC_MF_CFG_FUNC_HIDE)
1940 continue;
1941
1942 /* If min rate is zero - set it to 1 */
1943 if (!vn_min_rate)
1944 vn_min_rate = DEF_MIN_RATE;
1945 else
1946 all_zero = 0;
1947
1948 bp->vn_weight_sum += vn_min_rate;
1949 }
1950
1951 /* ... only if all min rates are zeros - disable fairness */
Eilon Greensteinb015e3d2009-10-15 00:17:20 -07001952 if (all_zero) {
1953 bp->cmng.flags.cmng_enables &=
1954 ~CMNG_FLAGS_PER_PORT_FAIRNESS_VN;
 1955 DP(NETIF_MSG_IFUP, "All MIN values are zeroes,"
1956 " fairness will be disabled\n");
1957 } else
1958 bp->cmng.flags.cmng_enables |=
1959 CMNG_FLAGS_PER_PORT_FAIRNESS_VN;
Eilon Greenstein2691d512009-08-12 08:22:08 +00001960}
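/*
 * Worked example (illustrative): with four vns configured at min
 * bandwidths 0/20/30/50, the zero entry is bumped to DEF_MIN_RATE
 * before summing, so fairness stays enabled; only when every
 * non-hidden vn has a zero min rate is fairness switched off.
 */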
1961
Dmitry Kravkovf2e08992010-10-06 03:28:26 +00001962static void bnx2x_init_vn_minmax(struct bnx2x *bp, int vn)
Eilon Greenstein34f80b02008-06-23 20:33:01 -07001963{
1964 struct rate_shaping_vars_per_vn m_rs_vn;
1965 struct fairness_vars_per_vn m_fair_vn;
Dmitry Kravkovf2e08992010-10-06 03:28:26 +00001966 u32 vn_cfg = bp->mf_config[vn];
1967 int func = 2*vn + BP_PORT(bp);
Eilon Greenstein34f80b02008-06-23 20:33:01 -07001968 u16 vn_min_rate, vn_max_rate;
1969 int i;
1970
1971 /* If function is hidden - set min and max to zeroes */
1972 if (vn_cfg & FUNC_MF_CFG_FUNC_HIDE) {
1973 vn_min_rate = 0;
1974 vn_max_rate = 0;
1975
1976 } else {
Dmitry Kravkovfaa6fcb2011-02-28 03:37:20 +00001977 u32 maxCfg = bnx2x_extract_max_cfg(bp, vn_cfg);
1978
Eilon Greenstein34f80b02008-06-23 20:33:01 -07001979 vn_min_rate = ((vn_cfg & FUNC_MF_CFG_MIN_BW_MASK) >>
1980 FUNC_MF_CFG_MIN_BW_SHIFT) * 100;
Dmitry Kravkovfaa6fcb2011-02-28 03:37:20 +00001981 /* If fairness is enabled (not all min rates are zeroes) and
1982 if current min rate is zero - set it to 1.
1983 This is a requirement of the algorithm. */
Dmitry Kravkovf2e08992010-10-06 03:28:26 +00001984 if (bp->vn_weight_sum && (vn_min_rate == 0))
Eilon Greenstein34f80b02008-06-23 20:33:01 -07001985 vn_min_rate = DEF_MIN_RATE;
Dmitry Kravkovfaa6fcb2011-02-28 03:37:20 +00001986
1987 if (IS_MF_SI(bp))
1988 /* maxCfg in percents of linkspeed */
1989 vn_max_rate = (bp->link_vars.line_speed * maxCfg) / 100;
1990 else
1991 /* maxCfg is absolute in 100Mb units */
1992 vn_max_rate = maxCfg * 100;
Eilon Greenstein34f80b02008-06-23 20:33:01 -07001993 }
Dmitry Kravkovf85582f2010-10-06 03:34:21 +00001994
Eilon Greenstein8a1c38d2009-02-12 08:36:40 +00001995 DP(NETIF_MSG_IFUP,
Eilon Greensteinb015e3d2009-10-15 00:17:20 -07001996 "func %d: vn_min_rate %d vn_max_rate %d vn_weight_sum %d\n",
Eilon Greenstein8a1c38d2009-02-12 08:36:40 +00001997 func, vn_min_rate, vn_max_rate, bp->vn_weight_sum);
Eilon Greenstein34f80b02008-06-23 20:33:01 -07001998
1999 memset(&m_rs_vn, 0, sizeof(struct rate_shaping_vars_per_vn));
2000 memset(&m_fair_vn, 0, sizeof(struct fairness_vars_per_vn));
2001
2002 /* global vn counter - maximal Mbps for this vn */
2003 m_rs_vn.vn_counter.rate = vn_max_rate;
2004
2005 /* quota - number of bytes transmitted in this period */
2006 m_rs_vn.vn_counter.quota =
2007 (vn_max_rate * RS_PERIODIC_TIMEOUT_USEC) / 8;
2008
Eilon Greenstein8a1c38d2009-02-12 08:36:40 +00002009 if (bp->vn_weight_sum) {
Eilon Greenstein34f80b02008-06-23 20:33:01 -07002010 /* credit for each period of the fairness algorithm:
2011 number of bytes in T_FAIR (the vn share the port rate).
Eilon Greenstein8a1c38d2009-02-12 08:36:40 +00002012 vn_weight_sum should not be larger than 10000, thus
2013 T_FAIR_COEF / (8 * vn_weight_sum) will always be greater
2014 than zero */
Eilon Greenstein34f80b02008-06-23 20:33:01 -07002015 m_fair_vn.vn_credit_delta =
Vladislav Zolotarovcdaa7cb2010-04-19 01:13:57 +00002016 max_t(u32, (vn_min_rate * (T_FAIR_COEF /
2017 (8 * bp->vn_weight_sum))),
Dmitry Kravkovff80ee02011-02-28 03:37:11 +00002018 (bp->cmng.fair_vars.fair_threshold +
2019 MIN_ABOVE_THRESH));
Vladislav Zolotarovcdaa7cb2010-04-19 01:13:57 +00002020 DP(NETIF_MSG_IFUP, "m_fair_vn.vn_credit_delta %d\n",
Eilon Greenstein34f80b02008-06-23 20:33:01 -07002021 m_fair_vn.vn_credit_delta);
2022 }
2023
Eilon Greenstein34f80b02008-06-23 20:33:01 -07002024 /* Store it to internal memory */
2025 for (i = 0; i < sizeof(struct rate_shaping_vars_per_vn)/4; i++)
2026 REG_WR(bp, BAR_XSTRORM_INTMEM +
2027 XSTORM_RATE_SHAPING_PER_VN_VARS_OFFSET(func) + i * 4,
2028 ((u32 *)(&m_rs_vn))[i]);
2029
2030 for (i = 0; i < sizeof(struct fairness_vars_per_vn)/4; i++)
2031 REG_WR(bp, BAR_XSTRORM_INTMEM +
2032 XSTORM_FAIRNESS_PER_VN_VARS_OFFSET(func) + i * 4,
2033 ((u32 *)(&m_fair_vn))[i]);
2034}
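/*
 * Worked example (illustrative): a vn capped at 5 Gbps has
 * vn_counter.rate = 5000, so with the 100 usec rate-shaping period the
 * quota is (5000 * 100) / 8 = 62500 bytes per period - exactly 5 Gbps
 * expressed in bytes.
 */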
Dmitry Kravkovf85582f2010-10-06 03:34:21 +00002035
Dmitry Kravkov523224a2010-10-06 03:23:26 +00002036static int bnx2x_get_cmng_fns_mode(struct bnx2x *bp)
2037{
2038 if (CHIP_REV_IS_SLOW(bp))
2039 return CMNG_FNS_NONE;
Dmitry Kravkovfb3bff12010-10-06 03:26:40 +00002040 if (IS_MF(bp))
Dmitry Kravkov523224a2010-10-06 03:23:26 +00002041 return CMNG_FNS_MINMAX;
Eilon Greenstein34f80b02008-06-23 20:33:01 -07002042
Dmitry Kravkov523224a2010-10-06 03:23:26 +00002043 return CMNG_FNS_NONE;
2044}
2045
2046static void bnx2x_read_mf_cfg(struct bnx2x *bp)
2047{
Dmitry Kravkov0793f83f2010-12-01 12:39:28 -08002048 int vn, n = (CHIP_MODE_IS_4_PORT(bp) ? 2 : 1);
Dmitry Kravkov523224a2010-10-06 03:23:26 +00002049
2050 if (BP_NOMCP(bp))
 2051 return; /* what should be the default value in this case? */
2052
Dmitry Kravkov0793f83f2010-12-01 12:39:28 -08002053 /* For 2 port configuration the absolute function number formula
2054 * is:
2055 * abs_func = 2 * vn + BP_PORT + BP_PATH
2056 *
2057 * and there are 4 functions per port
2058 *
2059 * For 4 port configuration it is
2060 * abs_func = 4 * vn + 2 * BP_PORT + BP_PATH
2061 *
2062 * and there are 2 functions per port
2063 */
Dmitry Kravkov523224a2010-10-06 03:23:26 +00002064 for (vn = VN_0; vn < E1HVN_MAX; vn++) {
Dmitry Kravkov0793f83f2010-12-01 12:39:28 -08002065 int /*abs*/func = n * (2 * vn + BP_PORT(bp)) + BP_PATH(bp);
2066
2067 if (func >= E1H_FUNC_MAX)
2068 break;
2069
Dmitry Kravkovf2e08992010-10-06 03:28:26 +00002070 bp->mf_config[vn] =
Dmitry Kravkov523224a2010-10-06 03:23:26 +00002071 MF_CFG_RD(bp, func_mf_config[func].config);
2072 }
2073}
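/*
 * Worked example (illustrative) of the formula in the comment above:
 * in 2-port mode (n = 1), vn = 1 on port 1 of path 0 gives
 * abs_func = 1 * (2 * 1 + 1) + 0 = 3; in 4-port mode (n = 2) the same
 * vn/port pair gives 2 * (2 * 1 + 1) + 0 = 6.
 */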
2074
2075static void bnx2x_cmng_fns_init(struct bnx2x *bp, u8 read_cfg, u8 cmng_type)
2076{
2077
2078 if (cmng_type == CMNG_FNS_MINMAX) {
2079 int vn;
2080
2081 /* clear cmng_enables */
2082 bp->cmng.flags.cmng_enables = 0;
2083
2084 /* read mf conf from shmem */
2085 if (read_cfg)
2086 bnx2x_read_mf_cfg(bp);
2087
2088 /* Init rate shaping and fairness contexts */
2089 bnx2x_init_port_minmax(bp);
2090
2091 /* vn_weight_sum and enable fairness if not 0 */
2092 bnx2x_calc_vn_weight_sum(bp);
2093
2094 /* calculate and set min-max rate for each vn */
2095 for (vn = VN_0; vn < E1HVN_MAX; vn++)
2096 bnx2x_init_vn_minmax(bp, vn);
2097
2098 /* always enable rate shaping and fairness */
2099 bp->cmng.flags.cmng_enables |=
2100 CMNG_FLAGS_PER_PORT_RATE_SHAPING_VN;
2101 if (!bp->vn_weight_sum)
 2102 DP(NETIF_MSG_IFUP, "All MIN values are zeroes,"
2103 " fairness will be disabled\n");
2104 return;
2105 }
2106
2107 /* rate shaping and fairness are disabled */
2108 DP(NETIF_MSG_IFUP,
2109 "rate shaping and fairness are disabled\n");
2110}
2111
2112static inline void bnx2x_link_sync_notify(struct bnx2x *bp)
2113{
2114 int port = BP_PORT(bp);
2115 int func;
2116 int vn;
2117
2118 /* Set the attention towards other drivers on the same port */
2119 for (vn = VN_0; vn < E1HVN_MAX; vn++) {
2120 if (vn == BP_E1HVN(bp))
2121 continue;
2122
2123 func = ((vn << 1) | port);
2124 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_0 +
2125 (LINK_SYNC_ATTENTION_BIT_FUNC_0 + func)*4, 1);
2126 }
2127}
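/*
 * Illustrative: for vn = 2 on port 1 this raises general attention bit
 * LINK_SYNC_ATTENTION_BIT_FUNC_0 + ((2 << 1) | 1) = FUNC_0 + 5, i.e.
 * the per-function link-sync attention of function 5.
 */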
Eilon Greenstein8a1c38d2009-02-12 08:36:40 +00002128
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02002129/* This function is called upon link interrupt */
Yaniv Rosnerc18487e2008-06-23 20:27:52 -07002130static void bnx2x_link_attn(struct bnx2x *bp)
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02002131{
Vladislav Zolotarovd9e8b182010-04-19 01:15:08 +00002132 u32 prev_link_status = bp->link_vars.link_status;
Yitchak Gertnerbb2a0f72008-06-23 20:33:36 -07002133 /* Make sure that we are synced with the current statistics */
2134 bnx2x_stats_handle(bp, STATS_EVENT_STOP);
2135
Yaniv Rosnerc18487e2008-06-23 20:27:52 -07002136 bnx2x_link_update(&bp->link_params, &bp->link_vars);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02002137
Yitchak Gertnerbb2a0f72008-06-23 20:33:36 -07002138 if (bp->link_vars.link_up) {
2139
Eilon Greenstein1c063282009-02-12 08:36:43 +00002140 /* dropless flow control */
Dmitry Kravkovf2e08992010-10-06 03:28:26 +00002141 if (!CHIP_IS_E1(bp) && bp->dropless_fc) {
Eilon Greenstein1c063282009-02-12 08:36:43 +00002142 int port = BP_PORT(bp);
2143 u32 pause_enabled = 0;
2144
2145 if (bp->link_vars.flow_ctrl & BNX2X_FLOW_CTRL_TX)
2146 pause_enabled = 1;
2147
2148 REG_WR(bp, BAR_USTRORM_INTMEM +
Eilon Greensteinca003922009-08-12 22:53:28 -07002149 USTORM_ETH_PAUSE_ENABLED_OFFSET(port),
Eilon Greenstein1c063282009-02-12 08:36:43 +00002150 pause_enabled);
2151 }
2152
Yitchak Gertnerbb2a0f72008-06-23 20:33:36 -07002153 if (bp->link_vars.mac_type == MAC_TYPE_BMAC) {
2154 struct host_port_stats *pstats;
2155
2156 pstats = bnx2x_sp(bp, port_stats);
2157 /* reset old bmac stats */
2158 memset(&(pstats->mac_stx[0]), 0,
2159 sizeof(struct mac_stx));
2160 }
Eilon Greensteinf34d28e2009-10-15 00:18:08 -07002161 if (bp->state == BNX2X_STATE_OPEN)
Yitchak Gertnerbb2a0f72008-06-23 20:33:36 -07002162 bnx2x_stats_handle(bp, STATS_EVENT_LINK_UP);
2163 }
2164
Vladislav Zolotarovd9e8b182010-04-19 01:15:08 +00002165 /* indicate link status only if link status actually changed */
2166 if (prev_link_status != bp->link_vars.link_status)
2167 bnx2x_link_report(bp);
Eilon Greenstein34f80b02008-06-23 20:33:01 -07002168
Dmitry Kravkovf2e08992010-10-06 03:28:26 +00002169 if (IS_MF(bp))
2170 bnx2x_link_sync_notify(bp);
Eilon Greenstein34f80b02008-06-23 20:33:01 -07002171
Dmitry Kravkovf2e08992010-10-06 03:28:26 +00002172 if (bp->link_vars.link_up && bp->link_vars.line_speed) {
2173 int cmng_fns = bnx2x_get_cmng_fns_mode(bp);
Eilon Greenstein34f80b02008-06-23 20:33:01 -07002174
Dmitry Kravkovf2e08992010-10-06 03:28:26 +00002175 if (cmng_fns != CMNG_FNS_NONE) {
2176 bnx2x_cmng_fns_init(bp, false, cmng_fns);
2177 storm_memset_cmng(bp, &bp->cmng, BP_PORT(bp));
2178 } else
2179 /* rate shaping and fairness are disabled */
2180 DP(NETIF_MSG_IFUP,
2181 "single function mode without fairness\n");
Eilon Greenstein34f80b02008-06-23 20:33:01 -07002182 }
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02002183}
2184
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00002185void bnx2x__link_status_update(struct bnx2x *bp)
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02002186{
Eilon Greensteinf34d28e2009-10-15 00:18:08 -07002187 if ((bp->state != BNX2X_STATE_OPEN) || (bp->flags & MF_FUNC_DIS))
Yaniv Rosnerc18487e2008-06-23 20:27:52 -07002188 return;
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02002189
Yaniv Rosnerc18487e2008-06-23 20:27:52 -07002190 bnx2x_link_status_update(&bp->link_params, &bp->link_vars);
2191
Yitchak Gertnerbb2a0f72008-06-23 20:33:36 -07002192 if (bp->link_vars.link_up)
2193 bnx2x_stats_handle(bp, STATS_EVENT_LINK_UP);
2194 else
2195 bnx2x_stats_handle(bp, STATS_EVENT_STOP);
2196
Dmitry Kravkovf2e08992010-10-06 03:28:26 +00002197 /* the link status update could be the result of a DCC event
2198 hence re-read the shmem mf configuration */
2199 bnx2x_read_mf_cfg(bp);
Eilon Greenstein2691d512009-08-12 08:22:08 +00002200
Yaniv Rosnerc18487e2008-06-23 20:27:52 -07002201 /* indicate link status */
2202 bnx2x_link_report(bp);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02002203}
2204
Eilon Greenstein34f80b02008-06-23 20:33:01 -07002205static void bnx2x_pmf_update(struct bnx2x *bp)
2206{
2207 int port = BP_PORT(bp);
2208 u32 val;
2209
2210 bp->port.pmf = 1;
2211 DP(NETIF_MSG_LINK, "pmf %d\n", bp->port.pmf);
2212
2213 /* enable nig attention */
2214 val = (0xff0f | (1 << (BP_E1HVN(bp) + 4)));
Dmitry Kravkovf2e08992010-10-06 03:28:26 +00002215 if (bp->common.int_block == INT_BLOCK_HC) {
2216 REG_WR(bp, HC_REG_TRAILING_EDGE_0 + port*8, val);
2217 REG_WR(bp, HC_REG_LEADING_EDGE_0 + port*8, val);
2218 } else if (CHIP_IS_E2(bp)) {
2219 REG_WR(bp, IGU_REG_TRAILING_EDGE_LATCH, val);
2220 REG_WR(bp, IGU_REG_LEADING_EDGE_LATCH, val);
2221 }
Yitchak Gertnerbb2a0f72008-06-23 20:33:36 -07002222
2223 bnx2x_stats_handle(bp, STATS_EVENT_PMF);
Eilon Greenstein34f80b02008-06-23 20:33:01 -07002224}
2225
Yaniv Rosnerc18487e2008-06-23 20:27:52 -07002226/* end of Link */
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02002227
2228/* slow path */
2229
2230/*
2231 * General service functions
2232 */
2233
Eilon Greenstein2691d512009-08-12 08:22:08 +00002234/* send the MCP a request, block until there is a reply */
Yaniv Rosnera22f0782010-09-07 11:41:20 +00002235u32 bnx2x_fw_command(struct bnx2x *bp, u32 command, u32 param)
Eilon Greenstein2691d512009-08-12 08:22:08 +00002236{
Dmitry Kravkovf2e08992010-10-06 03:28:26 +00002237 int mb_idx = BP_FW_MB_IDX(bp);
Eilon Greenstein2691d512009-08-12 08:22:08 +00002238 u32 seq = ++bp->fw_seq;
2239 u32 rc = 0;
2240 u32 cnt = 1;
2241 u8 delay = CHIP_REV_IS_SLOW(bp) ? 100 : 10;
2242
Eilon Greensteinc4ff7cb2009-10-15 00:18:27 -07002243 mutex_lock(&bp->fw_mb_mutex);
Dmitry Kravkovf2e08992010-10-06 03:28:26 +00002244 SHMEM_WR(bp, func_mb[mb_idx].drv_mb_param, param);
2245 SHMEM_WR(bp, func_mb[mb_idx].drv_mb_header, (command | seq));
2246
Eilon Greenstein2691d512009-08-12 08:22:08 +00002247 DP(BNX2X_MSG_MCP, "wrote command (%x) to FW MB\n", (command | seq));
2248
2249 do {
 2250 /* let the FW do its magic ... */
2251 msleep(delay);
2252
Dmitry Kravkovf2e08992010-10-06 03:28:26 +00002253 rc = SHMEM_RD(bp, func_mb[mb_idx].fw_mb_header);
Eilon Greenstein2691d512009-08-12 08:22:08 +00002254
Eilon Greensteinc4ff7cb2009-10-15 00:18:27 -07002255 /* Give the FW up to 5 seconds (500*10ms) */
2256 } while ((seq != (rc & FW_MSG_SEQ_NUMBER_MASK)) && (cnt++ < 500));
Eilon Greenstein2691d512009-08-12 08:22:08 +00002257
2258 DP(BNX2X_MSG_MCP, "[after %d ms] read (%x) seq is (%x) from FW MB\n",
2259 cnt*delay, rc, seq);
2260
2261 /* is this a reply to our command? */
2262 if (seq == (rc & FW_MSG_SEQ_NUMBER_MASK))
2263 rc &= FW_MSG_CODE_MASK;
2264 else {
2265 /* FW BUG! */
2266 BNX2X_ERR("FW failed to respond!\n");
2267 bnx2x_fw_dump(bp);
2268 rc = 0;
2269 }
Eilon Greensteinc4ff7cb2009-10-15 00:18:27 -07002270 mutex_unlock(&bp->fw_mb_mutex);
Eilon Greenstein2691d512009-08-12 08:22:08 +00002271
2272 return rc;
2273}
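/*
 * Illustrative call (hypothetical handling; DRV_MSG_CODE_DCC_OK is a
 * real command used later in this file): a zero return means the MCP
 * never answered, otherwise only the FW_MSG_CODE part of the reply is
 * returned.
 */
static void bnx2x_example_fw_ping(struct bnx2x *bp)
{
	u32 rc = bnx2x_fw_command(bp, DRV_MSG_CODE_DCC_OK, 0);

	if (rc == 0)
		BNX2X_ERR("MCP did not respond\n");
}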
2274
Vladislav Zolotarovec6ba942010-12-13 05:44:01 +00002275static u8 stat_counter_valid(struct bnx2x *bp, struct bnx2x_fastpath *fp)
2276{
2277#ifdef BCM_CNIC
2278 if (IS_FCOE_FP(fp) && IS_MF(bp))
2279 return false;
2280#endif
2281 return true;
2282}
2283
Dmitry Kravkov523224a2010-10-06 03:23:26 +00002284/* must be called under rtnl_lock */
stephen hemminger8d962862010-10-21 07:50:56 +00002285static void bnx2x_rxq_set_mac_filters(struct bnx2x *bp, u16 cl_id, u32 filters)
Dmitry Kravkov523224a2010-10-06 03:23:26 +00002286{
2287 u32 mask = (1 << cl_id);
2288
 2289 /* initial setting is BNX2X_ACCEPT_NONE */
2290 u8 drop_all_ucast = 1, drop_all_bcast = 1, drop_all_mcast = 1;
2291 u8 accp_all_ucast = 0, accp_all_bcast = 0, accp_all_mcast = 0;
2292 u8 unmatched_unicast = 0;
2293
Dmitry Kravkov0793f83f2010-12-01 12:39:28 -08002294 if (filters & BNX2X_ACCEPT_UNMATCHED_UCAST)
2295 unmatched_unicast = 1;
2296
Dmitry Kravkov523224a2010-10-06 03:23:26 +00002297 if (filters & BNX2X_PROMISCUOUS_MODE) {
 2298 /* promiscuous - accept all, drop none */
2299 drop_all_ucast = drop_all_bcast = drop_all_mcast = 0;
2300 accp_all_ucast = accp_all_bcast = accp_all_mcast = 1;
Dmitry Kravkov0793f83f2010-12-01 12:39:28 -08002301 if (IS_MF_SI(bp)) {
2302 /*
 2303 * In SI mode, promiscuous mode accepts
 2304 * only unmatched packets
2305 */
2306 unmatched_unicast = 1;
2307 accp_all_ucast = 0;
2308 }
Dmitry Kravkov523224a2010-10-06 03:23:26 +00002309 }
2310 if (filters & BNX2X_ACCEPT_UNICAST) {
2311 /* accept matched ucast */
2312 drop_all_ucast = 0;
2313 }
Vladislav Zolotarovd9c8f492011-02-01 14:05:30 -08002314 if (filters & BNX2X_ACCEPT_MULTICAST)
Dmitry Kravkov523224a2010-10-06 03:23:26 +00002315 /* accept matched mcast */
2316 drop_all_mcast = 0;
Vladislav Zolotarovd9c8f492011-02-01 14:05:30 -08002317
Dmitry Kravkov523224a2010-10-06 03:23:26 +00002318 if (filters & BNX2X_ACCEPT_ALL_UNICAST) {
 2319 /* accept all ucast */
2320 drop_all_ucast = 0;
2321 accp_all_ucast = 1;
2322 }
2323 if (filters & BNX2X_ACCEPT_ALL_MULTICAST) {
2324 /* accept all mcast */
2325 drop_all_mcast = 0;
2326 accp_all_mcast = 1;
2327 }
2328 if (filters & BNX2X_ACCEPT_BROADCAST) {
2329 /* accept (all) bcast */
2330 drop_all_bcast = 0;
2331 accp_all_bcast = 1;
2332 }
2333
2334 bp->mac_filters.ucast_drop_all = drop_all_ucast ?
2335 bp->mac_filters.ucast_drop_all | mask :
2336 bp->mac_filters.ucast_drop_all & ~mask;
2337
2338 bp->mac_filters.mcast_drop_all = drop_all_mcast ?
2339 bp->mac_filters.mcast_drop_all | mask :
2340 bp->mac_filters.mcast_drop_all & ~mask;
2341
2342 bp->mac_filters.bcast_drop_all = drop_all_bcast ?
2343 bp->mac_filters.bcast_drop_all | mask :
2344 bp->mac_filters.bcast_drop_all & ~mask;
2345
2346 bp->mac_filters.ucast_accept_all = accp_all_ucast ?
2347 bp->mac_filters.ucast_accept_all | mask :
2348 bp->mac_filters.ucast_accept_all & ~mask;
2349
2350 bp->mac_filters.mcast_accept_all = accp_all_mcast ?
2351 bp->mac_filters.mcast_accept_all | mask :
2352 bp->mac_filters.mcast_accept_all & ~mask;
2353
2354 bp->mac_filters.bcast_accept_all = accp_all_bcast ?
2355 bp->mac_filters.bcast_accept_all | mask :
2356 bp->mac_filters.bcast_accept_all & ~mask;
2357
2358 bp->mac_filters.unmatched_unicast = unmatched_unicast ?
2359 bp->mac_filters.unmatched_unicast | mask :
2360 bp->mac_filters.unmatched_unicast & ~mask;
2361}
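/*
 * Illustrative composition (the BNX2X_ACCEPT_* names are the real flag
 * bits decoded above): a normal L2 client would typically be programmed
 * to accept matched unicast, matched multicast and broadcast frames.
 */
static void bnx2x_example_rx_filters(struct bnx2x *bp, u16 cl_id)
{
	bnx2x_rxq_set_mac_filters(bp, cl_id,
				  BNX2X_ACCEPT_UNICAST |
				  BNX2X_ACCEPT_MULTICAST |
				  BNX2X_ACCEPT_BROADCAST);
}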
2362
stephen hemminger8d962862010-10-21 07:50:56 +00002363static void bnx2x_func_init(struct bnx2x *bp, struct bnx2x_func_init_params *p)
Dmitry Kravkov523224a2010-10-06 03:23:26 +00002364{
Dmitry Kravkov030f3352010-10-17 23:08:53 +00002365 struct tstorm_eth_function_common_config tcfg = {0};
2366 u16 rss_flgs;
Dmitry Kravkov523224a2010-10-06 03:23:26 +00002367
Dmitry Kravkov030f3352010-10-17 23:08:53 +00002368 /* tpa */
2369 if (p->func_flgs & FUNC_FLG_TPA)
2370 tcfg.config_flags |=
2371 TSTORM_ETH_FUNCTION_COMMON_CONFIG_ENABLE_TPA;
Dmitry Kravkov523224a2010-10-06 03:23:26 +00002372
Dmitry Kravkov030f3352010-10-17 23:08:53 +00002373 /* set rss flags */
2374 rss_flgs = (p->rss->mode <<
2375 TSTORM_ETH_FUNCTION_COMMON_CONFIG_RSS_MODE_SHIFT);
Dmitry Kravkov523224a2010-10-06 03:23:26 +00002376
Dmitry Kravkov030f3352010-10-17 23:08:53 +00002377 if (p->rss->cap & RSS_IPV4_CAP)
2378 rss_flgs |= RSS_IPV4_CAP_MASK;
2379 if (p->rss->cap & RSS_IPV4_TCP_CAP)
2380 rss_flgs |= RSS_IPV4_TCP_CAP_MASK;
2381 if (p->rss->cap & RSS_IPV6_CAP)
2382 rss_flgs |= RSS_IPV6_CAP_MASK;
2383 if (p->rss->cap & RSS_IPV6_TCP_CAP)
2384 rss_flgs |= RSS_IPV6_TCP_CAP_MASK;
Dmitry Kravkov523224a2010-10-06 03:23:26 +00002385
Dmitry Kravkov030f3352010-10-17 23:08:53 +00002386 tcfg.config_flags |= rss_flgs;
2387 tcfg.rss_result_mask = p->rss->result_mask;
Dmitry Kravkov523224a2010-10-06 03:23:26 +00002388
Dmitry Kravkov030f3352010-10-17 23:08:53 +00002389 storm_memset_func_cfg(bp, &tcfg, p->func_id);
Dmitry Kravkov523224a2010-10-06 03:23:26 +00002390
2391 /* Enable the function in the FW */
2392 storm_memset_vf_to_pf(bp, p->func_id, p->pf_id);
2393 storm_memset_func_en(bp, p->func_id, 1);
2394
2395 /* statistics */
2396 if (p->func_flgs & FUNC_FLG_STATS) {
2397 struct stats_indication_flags stats_flags = {0};
2398 stats_flags.collect_eth = 1;
2399
2400 storm_memset_xstats_flags(bp, &stats_flags, p->func_id);
2401 storm_memset_xstats_addr(bp, p->fw_stat_map, p->func_id);
2402
2403 storm_memset_tstats_flags(bp, &stats_flags, p->func_id);
2404 storm_memset_tstats_addr(bp, p->fw_stat_map, p->func_id);
2405
2406 storm_memset_ustats_flags(bp, &stats_flags, p->func_id);
2407 storm_memset_ustats_addr(bp, p->fw_stat_map, p->func_id);
2408
2409 storm_memset_cstats_flags(bp, &stats_flags, p->func_id);
2410 storm_memset_cstats_addr(bp, p->fw_stat_map, p->func_id);
2411 }
2412
2413 /* spq */
2414 if (p->func_flgs & FUNC_FLG_SPQ) {
2415 storm_memset_spq_addr(bp, p->spq_map, p->func_id);
2416 REG_WR(bp, XSEM_REG_FAST_MEMORY +
2417 XSTORM_SPQ_PROD_OFFSET(p->func_id), p->spq_prod);
2418 }
2419}
2420
2421static inline u16 bnx2x_get_cl_flags(struct bnx2x *bp,
2422 struct bnx2x_fastpath *fp)
2423{
2424 u16 flags = 0;
2425
2426 /* calculate queue flags */
2427 flags |= QUEUE_FLG_CACHE_ALIGN;
2428 flags |= QUEUE_FLG_HC;
Dmitry Kravkov0793f83f2010-12-01 12:39:28 -08002429 flags |= IS_MF_SD(bp) ? QUEUE_FLG_OV : 0;
Dmitry Kravkov523224a2010-10-06 03:23:26 +00002430
Dmitry Kravkov523224a2010-10-06 03:23:26 +00002431 flags |= QUEUE_FLG_VLAN;
2432 DP(NETIF_MSG_IFUP, "vlan removal enabled\n");
Dmitry Kravkov523224a2010-10-06 03:23:26 +00002433
2434 if (!fp->disable_tpa)
2435 flags |= QUEUE_FLG_TPA;
2436
Vladislav Zolotarovec6ba942010-12-13 05:44:01 +00002437 flags = stat_counter_valid(bp, fp) ?
2438 (flags | QUEUE_FLG_STATS) : (flags & ~QUEUE_FLG_STATS);
Dmitry Kravkov523224a2010-10-06 03:23:26 +00002439
2440 return flags;
2441}
2442
2443static void bnx2x_pf_rx_cl_prep(struct bnx2x *bp,
2444 struct bnx2x_fastpath *fp, struct rxq_pause_params *pause,
2445 struct bnx2x_rxq_init_params *rxq_init)
2446{
2447 u16 max_sge = 0;
2448 u16 sge_sz = 0;
2449 u16 tpa_agg_size = 0;
2450
2451 /* calculate queue flags */
2452 u16 flags = bnx2x_get_cl_flags(bp, fp);
2453
2454 if (!fp->disable_tpa) {
2455 pause->sge_th_hi = 250;
2456 pause->sge_th_lo = 150;
2457 tpa_agg_size = min_t(u32,
2458 (min_t(u32, 8, MAX_SKB_FRAGS) *
2459 SGE_PAGE_SIZE * PAGES_PER_SGE), 0xffff);
2460 max_sge = SGE_PAGE_ALIGN(bp->dev->mtu) >>
2461 SGE_PAGE_SHIFT;
2462 max_sge = ((max_sge + PAGES_PER_SGE - 1) &
2463 (~(PAGES_PER_SGE-1))) >> PAGES_PER_SGE_SHIFT;
2464 sge_sz = (u16)min_t(u32, SGE_PAGE_SIZE * PAGES_PER_SGE,
2465 0xffff);
2466 }
2467
2468 /* pause - not for e1 */
2469 if (!CHIP_IS_E1(bp)) {
2470 pause->bd_th_hi = 350;
2471 pause->bd_th_lo = 250;
2472 pause->rcq_th_hi = 350;
2473 pause->rcq_th_lo = 250;
2474 pause->sge_th_hi = 0;
2475 pause->sge_th_lo = 0;
2476 pause->pri_map = 1;
2477 }
2478
2479 /* rxq setup */
2480 rxq_init->flags = flags;
2481 rxq_init->cxt = &bp->context.vcxt[fp->cid].eth;
2482 rxq_init->dscr_map = fp->rx_desc_mapping;
2483 rxq_init->sge_map = fp->rx_sge_mapping;
2484 rxq_init->rcq_map = fp->rx_comp_mapping;
2485 rxq_init->rcq_np_map = fp->rx_comp_mapping + BCM_PAGE_SIZE;
2486 rxq_init->mtu = bp->dev->mtu;
2487 rxq_init->buf_sz = bp->rx_buf_size;
2488 rxq_init->cl_qzone_id = fp->cl_qzone_id;
2489 rxq_init->cl_id = fp->cl_id;
2490 rxq_init->spcl_id = fp->cl_id;
2491 rxq_init->stat_id = fp->cl_id;
2492 rxq_init->tpa_agg_sz = tpa_agg_size;
2493 rxq_init->sge_buf_sz = sge_sz;
2494 rxq_init->max_sges_pkt = max_sge;
2495 rxq_init->cache_line_log = BNX2X_RX_ALIGN_SHIFT;
2496 rxq_init->fw_sb_id = fp->fw_sb_id;
2497
Vladislav Zolotarovec6ba942010-12-13 05:44:01 +00002498 if (IS_FCOE_FP(fp))
2499 rxq_init->sb_cq_index = HC_SP_INDEX_ETH_FCOE_RX_CQ_CONS;
2500 else
2501 rxq_init->sb_cq_index = U_SB_ETH_RX_CQ_INDEX;
Dmitry Kravkov523224a2010-10-06 03:23:26 +00002502
2503 rxq_init->cid = HW_CID(bp, fp->cid);
2504
2505 rxq_init->hc_rate = bp->rx_ticks ? (1000000 / bp->rx_ticks) : 0;
2506}
2507
2508static void bnx2x_pf_tx_cl_prep(struct bnx2x *bp,
2509 struct bnx2x_fastpath *fp, struct bnx2x_txq_init_params *txq_init)
2510{
2511 u16 flags = bnx2x_get_cl_flags(bp, fp);
2512
2513 txq_init->flags = flags;
2514 txq_init->cxt = &bp->context.vcxt[fp->cid].eth;
2515 txq_init->dscr_map = fp->tx_desc_mapping;
2516 txq_init->stat_id = fp->cl_id;
2517 txq_init->cid = HW_CID(bp, fp->cid);
2518 txq_init->sb_cq_index = C_SB_ETH_TX_CQ_INDEX;
2519 txq_init->traffic_type = LLFC_TRAFFIC_TYPE_NW;
2520 txq_init->fw_sb_id = fp->fw_sb_id;
Vladislav Zolotarovec6ba942010-12-13 05:44:01 +00002521
2522 if (IS_FCOE_FP(fp)) {
2523 txq_init->sb_cq_index = HC_SP_INDEX_ETH_FCOE_TX_CQ_CONS;
2524 txq_init->traffic_type = LLFC_TRAFFIC_TYPE_FCOE;
2525 }
2526
Dmitry Kravkov523224a2010-10-06 03:23:26 +00002527 txq_init->hc_rate = bp->tx_ticks ? (1000000 / bp->tx_ticks) : 0;
2528}
2529
stephen hemminger8d962862010-10-21 07:50:56 +00002530static void bnx2x_pf_init(struct bnx2x *bp)
Dmitry Kravkov523224a2010-10-06 03:23:26 +00002531{
2532 struct bnx2x_func_init_params func_init = {0};
2533 struct bnx2x_rss_params rss = {0};
2534 struct event_ring_data eq_data = { {0} };
2535 u16 flags;
2536
2537 /* pf specific setups */
2538 if (!CHIP_IS_E1(bp))
Dmitry Kravkovfb3bff12010-10-06 03:26:40 +00002539 storm_memset_ov(bp, bp->mf_ov, BP_FUNC(bp));
Dmitry Kravkov523224a2010-10-06 03:23:26 +00002540
Dmitry Kravkovf2e08992010-10-06 03:28:26 +00002541 if (CHIP_IS_E2(bp)) {
2542 /* reset IGU PF statistics: MSIX + ATTN */
2543 /* PF */
2544 REG_WR(bp, IGU_REG_STATISTIC_NUM_MESSAGE_SENT +
2545 BNX2X_IGU_STAS_MSG_VF_CNT*4 +
2546 (CHIP_MODE_IS_4_PORT(bp) ?
2547 BP_FUNC(bp) : BP_VN(bp))*4, 0);
2548 /* ATTN */
2549 REG_WR(bp, IGU_REG_STATISTIC_NUM_MESSAGE_SENT +
2550 BNX2X_IGU_STAS_MSG_VF_CNT*4 +
2551 BNX2X_IGU_STAS_MSG_PF_CNT*4 +
2552 (CHIP_MODE_IS_4_PORT(bp) ?
2553 BP_FUNC(bp) : BP_VN(bp))*4, 0);
2554 }
2555
Dmitry Kravkov523224a2010-10-06 03:23:26 +00002556 /* function setup flags */
2557 flags = (FUNC_FLG_STATS | FUNC_FLG_LEADING | FUNC_FLG_SPQ);
2558
Dmitry Kravkovf2e08992010-10-06 03:28:26 +00002559 if (CHIP_IS_E1x(bp))
2560 flags |= (bp->flags & TPA_ENABLE_FLAG) ? FUNC_FLG_TPA : 0;
2561 else
2562 flags |= FUNC_FLG_TPA;
Dmitry Kravkov523224a2010-10-06 03:23:26 +00002563
Dmitry Kravkov030f3352010-10-17 23:08:53 +00002564 /* function setup */
2565
Dmitry Kravkov523224a2010-10-06 03:23:26 +00002566 /**
 2567 * Although RSS is meaningless when there is a single HW queue, we
2568 * still need it enabled in order to have HW Rx hash generated.
Dmitry Kravkov523224a2010-10-06 03:23:26 +00002569 */
Dmitry Kravkov030f3352010-10-17 23:08:53 +00002570 rss.cap = (RSS_IPV4_CAP | RSS_IPV4_TCP_CAP |
2571 RSS_IPV6_CAP | RSS_IPV6_TCP_CAP);
2572 rss.mode = bp->multi_mode;
2573 rss.result_mask = MULTI_MASK;
2574 func_init.rss = &rss;
Dmitry Kravkov523224a2010-10-06 03:23:26 +00002575
2576 func_init.func_flgs = flags;
2577 func_init.pf_id = BP_FUNC(bp);
2578 func_init.func_id = BP_FUNC(bp);
2579 func_init.fw_stat_map = bnx2x_sp_mapping(bp, fw_stats);
2580 func_init.spq_map = bp->spq_mapping;
2581 func_init.spq_prod = bp->spq_prod_idx;
2582
2583 bnx2x_func_init(bp, &func_init);
2584
2585 memset(&(bp->cmng), 0, sizeof(struct cmng_struct_per_port));
2586
2587 /*
2588 Congestion management values depend on the link rate
2589 There is no active link so initial link rate is set to 10 Gbps.
2590 When the link comes up The congestion management values are
2591 re-calculated according to the actual link rate.
2592 */
2593 bp->link_vars.line_speed = SPEED_10000;
2594 bnx2x_cmng_fns_init(bp, true, bnx2x_get_cmng_fns_mode(bp));
2595
2596 /* Only the PMF sets the HW */
2597 if (bp->port.pmf)
2598 storm_memset_cmng(bp, &bp->cmng, BP_PORT(bp));
2599
2600 /* no rx until link is up */
2601 bp->rx_mode = BNX2X_RX_MODE_NONE;
2602 bnx2x_set_storm_rx_mode(bp);
2603
2604 /* init Event Queue */
2605 eq_data.base_addr.hi = U64_HI(bp->eq_mapping);
2606 eq_data.base_addr.lo = U64_LO(bp->eq_mapping);
2607 eq_data.producer = bp->eq_prod;
2608 eq_data.index_id = HC_SP_INDEX_EQ_CONS;
2609 eq_data.sb_id = DEF_SB_ID;
2610 storm_memset_eq_data(bp, &eq_data, BP_FUNC(bp));
2611}
2612
2613
Eilon Greenstein2691d512009-08-12 08:22:08 +00002614static void bnx2x_e1h_disable(struct bnx2x *bp)
2615{
2616 int port = BP_PORT(bp);
Eilon Greenstein2691d512009-08-12 08:22:08 +00002617
2618 netif_tx_disable(bp->dev);
Eilon Greenstein2691d512009-08-12 08:22:08 +00002619
2620 REG_WR(bp, NIG_REG_LLH0_FUNC_EN + port*8, 0);
2621
Eilon Greenstein2691d512009-08-12 08:22:08 +00002622 netif_carrier_off(bp->dev);
2623}
2624
2625static void bnx2x_e1h_enable(struct bnx2x *bp)
2626{
2627 int port = BP_PORT(bp);
2628
2629 REG_WR(bp, NIG_REG_LLH0_FUNC_EN + port*8, 1);
2630
Eilon Greenstein2691d512009-08-12 08:22:08 +00002631 /* Tx queues should only be re-enabled */
2632 netif_tx_wake_all_queues(bp->dev);
2633
Eilon Greenstein061bc702009-10-15 00:18:47 -07002634 /*
2635 * Should not call netif_carrier_on since it will be called if the link
2636 * is up when checking for link state
2637 */
Eilon Greenstein2691d512009-08-12 08:22:08 +00002638}
2639
Dmitry Kravkov0793f83f2010-12-01 12:39:28 -08002640/* called due to MCP event (on pmf):
2641 * reread new bandwidth configuration
2642 * configure FW
2643 * notify others function about the change
2644 */
2645static inline void bnx2x_config_mf_bw(struct bnx2x *bp)
2646{
2647 if (bp->link_vars.link_up) {
2648 bnx2x_cmng_fns_init(bp, true, CMNG_FNS_MINMAX);
2649 bnx2x_link_sync_notify(bp);
2650 }
2651 storm_memset_cmng(bp, &bp->cmng, BP_PORT(bp));
2652}
2653
2654static inline void bnx2x_set_mf_bw(struct bnx2x *bp)
2655{
2656 bnx2x_config_mf_bw(bp);
2657 bnx2x_fw_command(bp, DRV_MSG_CODE_SET_MF_BW_ACK, 0);
2658}
2659
Eilon Greenstein2691d512009-08-12 08:22:08 +00002660static void bnx2x_dcc_event(struct bnx2x *bp, u32 dcc_event)
2661{
Eilon Greenstein2691d512009-08-12 08:22:08 +00002662 DP(BNX2X_MSG_MCP, "dcc_event 0x%x\n", dcc_event);
Eilon Greenstein2691d512009-08-12 08:22:08 +00002663
2664 if (dcc_event & DRV_STATUS_DCC_DISABLE_ENABLE_PF) {
2665
Eilon Greensteinf34d28e2009-10-15 00:18:08 -07002666 /*
2667 * This is the only place besides the function initialization
2668 * where the bp->flags can change so it is done without any
2669 * locks
2670 */
Dmitry Kravkovf2e08992010-10-06 03:28:26 +00002671 if (bp->mf_config[BP_VN(bp)] & FUNC_MF_CFG_FUNC_DISABLED) {
Eilon Greenstein2691d512009-08-12 08:22:08 +00002672 DP(NETIF_MSG_IFDOWN, "mf_cfg function disabled\n");
Eilon Greensteinf34d28e2009-10-15 00:18:08 -07002673 bp->flags |= MF_FUNC_DIS;
Eilon Greenstein2691d512009-08-12 08:22:08 +00002674
2675 bnx2x_e1h_disable(bp);
2676 } else {
2677 DP(NETIF_MSG_IFUP, "mf_cfg function enabled\n");
Eilon Greensteinf34d28e2009-10-15 00:18:08 -07002678 bp->flags &= ~MF_FUNC_DIS;
Eilon Greenstein2691d512009-08-12 08:22:08 +00002679
2680 bnx2x_e1h_enable(bp);
2681 }
2682 dcc_event &= ~DRV_STATUS_DCC_DISABLE_ENABLE_PF;
2683 }
2684 if (dcc_event & DRV_STATUS_DCC_BANDWIDTH_ALLOCATION) {
Dmitry Kravkov0793f83f2010-12-01 12:39:28 -08002685 bnx2x_config_mf_bw(bp);
Eilon Greenstein2691d512009-08-12 08:22:08 +00002686 dcc_event &= ~DRV_STATUS_DCC_BANDWIDTH_ALLOCATION;
2687 }
2688
2689 /* Report results to MCP */
2690 if (dcc_event)
Yaniv Rosnera22f0782010-09-07 11:41:20 +00002691 bnx2x_fw_command(bp, DRV_MSG_CODE_DCC_FAILURE, 0);
Eilon Greenstein2691d512009-08-12 08:22:08 +00002692 else
Yaniv Rosnera22f0782010-09-07 11:41:20 +00002693 bnx2x_fw_command(bp, DRV_MSG_CODE_DCC_OK, 0);
Eilon Greenstein2691d512009-08-12 08:22:08 +00002694}
2695
Michael Chan289129022009-10-10 13:46:53 +00002696/* must be called under the spq lock */
2697static inline struct eth_spe *bnx2x_sp_get_next(struct bnx2x *bp)
2698{
2699 struct eth_spe *next_spe = bp->spq_prod_bd;
2700
2701 if (bp->spq_prod_bd == bp->spq_last_bd) {
2702 bp->spq_prod_bd = bp->spq;
2703 bp->spq_prod_idx = 0;
2704 DP(NETIF_MSG_TIMER, "end of spq\n");
2705 } else {
2706 bp->spq_prod_bd++;
2707 bp->spq_prod_idx++;
2708 }
2709 return next_spe;
2710}
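
/* Illustrative sketch (an assumption, not part of the original driver):
 * the producer advance above is a classic bounded-ring walk - hand out
 * the current slot, then either wrap the producer back to the ring base
 * or step it forward. The hypothetical helper below shows the same
 * pattern on a generic descriptor ring.
 */
struct example_spe_ring {
	struct eth_spe *base;		/* first BD in the ring */
	struct eth_spe *last;		/* last usable BD */
	struct eth_spe *prod;		/* next BD to hand out */
	u16 prod_idx;
};

static inline struct eth_spe *example_ring_get_next(struct example_spe_ring *r)
{
	struct eth_spe *next = r->prod;

	if (r->prod == r->last) {
		r->prod = r->base;	/* wrap around to the start */
		r->prod_idx = 0;
	} else {
		r->prod++;
		r->prod_idx++;
	}
	return next;
}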
2711
2712/* must be called under the spq lock */
2713static inline void bnx2x_sp_prod_update(struct bnx2x *bp)
2714{
2715 int func = BP_FUNC(bp);
2716
2717 /* Make sure that BD data is updated before writing the producer */
2718 wmb();
2719
Dmitry Kravkov523224a2010-10-06 03:23:26 +00002720 REG_WR16(bp, BAR_XSTRORM_INTMEM + XSTORM_SPQ_PROD_OFFSET(func),
Dmitry Kravkovf85582f2010-10-06 03:34:21 +00002721 bp->spq_prod_idx);
Michael Chan289129022009-10-10 13:46:53 +00002722 mmiowb();
2723}
2724
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02002725/* the slow path queue is odd since completions arrive on the fastpath ring */
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00002726int bnx2x_sp_post(struct bnx2x *bp, int command, int cid,
Dmitry Kravkovf85582f2010-10-06 03:34:21 +00002727 u32 data_hi, u32 data_lo, int common)
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02002728{
Michael Chan289129022009-10-10 13:46:53 +00002729 struct eth_spe *spe;
Dmitry Kravkov523224a2010-10-06 03:23:26 +00002730 u16 type;
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02002731
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02002732#ifdef BNX2X_STOP_ON_ERROR
2733 if (unlikely(bp->panic))
2734 return -EIO;
2735#endif
2736
Eilon Greenstein34f80b02008-06-23 20:33:01 -07002737 spin_lock_bh(&bp->spq_lock);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02002738
Dmitry Kravkov8fe23fb2010-10-06 03:27:41 +00002739 if (!atomic_read(&bp->spq_left)) {
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02002740 BNX2X_ERR("BUG! SPQ ring full!\n");
Eilon Greenstein34f80b02008-06-23 20:33:01 -07002741 spin_unlock_bh(&bp->spq_lock);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02002742 bnx2x_panic();
2743 return -EBUSY;
2744 }
Eliezer Tamirf1410642008-02-28 11:51:50 -08002745
Michael Chan289129022009-10-10 13:46:53 +00002746 spe = bnx2x_sp_get_next(bp);
2747
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02002748	/* CID needs the port number to be encoded in it */
Michael Chan289129022009-10-10 13:46:53 +00002749 spe->hdr.conn_and_cmd_data =
Vladislav Zolotarovcdaa7cb2010-04-19 01:13:57 +00002750 cpu_to_le32((command << SPE_HDR_CMD_ID_SHIFT) |
2751 HW_CID(bp, cid));
Dmitry Kravkov523224a2010-10-06 03:23:26 +00002752
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02002753 if (common)
Dmitry Kravkov523224a2010-10-06 03:23:26 +00002754 /* Common ramrods:
2755 * FUNC_START, FUNC_STOP, CFC_DEL, STATS, SET_MAC
2756 * TRAFFIC_STOP, TRAFFIC_START
2757 */
2758 type = (NONE_CONNECTION_TYPE << SPE_HDR_CONN_TYPE_SHIFT)
2759 & SPE_HDR_CONN_TYPE;
2760 else
2761 /* ETH ramrods: SETUP, HALT */
2762 type = (ETH_CONNECTION_TYPE << SPE_HDR_CONN_TYPE_SHIFT)
2763 & SPE_HDR_CONN_TYPE;
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02002764
Dmitry Kravkov523224a2010-10-06 03:23:26 +00002765 type |= ((BP_FUNC(bp) << SPE_HDR_FUNCTION_ID_SHIFT) &
2766 SPE_HDR_FUNCTION_ID);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02002767
Dmitry Kravkov523224a2010-10-06 03:23:26 +00002768 spe->hdr.type = cpu_to_le16(type);
2769
2770 spe->data.update_data_addr.hi = cpu_to_le32(data_hi);
2771 spe->data.update_data_addr.lo = cpu_to_le32(data_lo);
2772
2773	/* the stats ramrod has its own slot on the spq */
2774	if (command != RAMROD_CMD_ID_COMMON_STAT_QUERY)
2775		/* It's ok if the actual decrement is issued towards the memory
2776		 * somewhere between the spin_lock and spin_unlock. Thus no
2777		 * more explicit memory barrier is needed.
2778		 */
Dmitry Kravkov8fe23fb2010-10-06 03:27:41 +00002779 atomic_dec(&bp->spq_left);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02002780
Vladislav Zolotarovcdaa7cb2010-04-19 01:13:57 +00002781 DP(BNX2X_MSG_SP/*NETIF_MSG_TIMER*/,
Dmitry Kravkov523224a2010-10-06 03:23:26 +00002782 "SPQE[%x] (%x:%x) command %d hw_cid %x data (%x:%x) "
2783 "type(0x%x) left %x\n",
Vladislav Zolotarovcdaa7cb2010-04-19 01:13:57 +00002784 bp->spq_prod_idx, (u32)U64_HI(bp->spq_mapping),
2785 (u32)(U64_LO(bp->spq_mapping) +
2786 (void *)bp->spq_prod_bd - (void *)bp->spq), command,
Dmitry Kravkov8fe23fb2010-10-06 03:27:41 +00002787 HW_CID(bp, cid), data_hi, data_lo, type, atomic_read(&bp->spq_left));
Vladislav Zolotarovcdaa7cb2010-04-19 01:13:57 +00002788
Michael Chan289129022009-10-10 13:46:53 +00002789 bnx2x_sp_prod_update(bp);
Eilon Greenstein34f80b02008-06-23 20:33:01 -07002790 spin_unlock_bh(&bp->spq_lock);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02002791 return 0;
2792}
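
/* Illustrative sketch (an assumption, not part of the original driver):
 * the spe->hdr.type word built above packs the connection type and the
 * function id into one 16-bit field via the SPE_HDR_* shift/mask pairs.
 * A hypothetical helper that makes the packing explicit:
 */
static inline u16 example_spe_hdr_type(u8 conn_type, u8 func_id)
{
	u16 type = (conn_type << SPE_HDR_CONN_TYPE_SHIFT) &
		   SPE_HDR_CONN_TYPE;

	type |= (func_id << SPE_HDR_FUNCTION_ID_SHIFT) &
		SPE_HDR_FUNCTION_ID;
	return type;
}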
2793
2794/* acquire split MCP access lock register */
Yitchak Gertner4a37fb62008-08-13 15:50:23 -07002795static int bnx2x_acquire_alr(struct bnx2x *bp)
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02002796{
Vladislav Zolotarov72fd0712010-04-19 01:13:12 +00002797 u32 j, val;
Eilon Greenstein34f80b02008-06-23 20:33:01 -07002798 int rc = 0;
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02002799
2800 might_sleep();
Vladislav Zolotarov72fd0712010-04-19 01:13:12 +00002801 for (j = 0; j < 1000; j++) {
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02002802 val = (1UL << 31);
2803 REG_WR(bp, GRCBASE_MCP + 0x9c, val);
2804 val = REG_RD(bp, GRCBASE_MCP + 0x9c);
2805 if (val & (1L << 31))
2806 break;
2807
2808 msleep(5);
2809 }
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02002810 if (!(val & (1L << 31))) {
Eilon Greenstein19680c42008-08-13 15:47:33 -07002811 BNX2X_ERR("Cannot acquire MCP access lock register\n");
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02002812 rc = -EBUSY;
2813 }
2814
2815 return rc;
2816}
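
/* Illustrative note (an assumption, not part of the original driver):
 * the loop above re-arms the request bit and polls for the grant up to
 * 1000 times with a 5 ms sleep between tries, i.e. roughly a 5 second
 * worst case before bnx2x_acquire_alr() gives up with -EBUSY.
 */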
2817
Yitchak Gertner4a37fb62008-08-13 15:50:23 -07002818/* release split MCP access lock register */
2819static void bnx2x_release_alr(struct bnx2x *bp)
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02002820{
Vladislav Zolotarov72fd0712010-04-19 01:13:12 +00002821 REG_WR(bp, GRCBASE_MCP + 0x9c, 0);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02002822}
2823
Dmitry Kravkov523224a2010-10-06 03:23:26 +00002824#define BNX2X_DEF_SB_ATT_IDX 0x0001
2825#define BNX2X_DEF_SB_IDX 0x0002
2826
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02002827static inline u16 bnx2x_update_dsb_idx(struct bnx2x *bp)
2828{
Dmitry Kravkov523224a2010-10-06 03:23:26 +00002829 struct host_sp_status_block *def_sb = bp->def_status_blk;
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02002830 u16 rc = 0;
2831
2832 barrier(); /* status block is written to by the chip */
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02002833 if (bp->def_att_idx != def_sb->atten_status_block.attn_bits_index) {
2834 bp->def_att_idx = def_sb->atten_status_block.attn_bits_index;
Dmitry Kravkov523224a2010-10-06 03:23:26 +00002835 rc |= BNX2X_DEF_SB_ATT_IDX;
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02002836 }
Dmitry Kravkov523224a2010-10-06 03:23:26 +00002837
2838 if (bp->def_idx != def_sb->sp_sb.running_index) {
2839 bp->def_idx = def_sb->sp_sb.running_index;
2840 rc |= BNX2X_DEF_SB_IDX;
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02002841 }
Dmitry Kravkov523224a2010-10-06 03:23:26 +00002842
2843	/* Do not reorder: reading the indices must complete before handling */
2844 barrier();
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02002845 return rc;
2846}
2847
2848/*
2849 * slow path service functions
2850 */
2851
2852static void bnx2x_attn_int_asserted(struct bnx2x *bp, u32 asserted)
2853{
Eilon Greenstein34f80b02008-06-23 20:33:01 -07002854 int port = BP_PORT(bp);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02002855 u32 aeu_addr = port ? MISC_REG_AEU_MASK_ATTN_FUNC_1 :
2856 MISC_REG_AEU_MASK_ATTN_FUNC_0;
Eliezer Tamir877e9aa2008-02-28 11:55:53 -08002857 u32 nig_int_mask_addr = port ? NIG_REG_MASK_INTERRUPT_PORT1 :
2858 NIG_REG_MASK_INTERRUPT_PORT0;
Eilon Greenstein3fcaf2e2008-08-13 15:50:45 -07002859 u32 aeu_mask;
Eilon Greenstein87942b42009-02-12 08:36:49 +00002860 u32 nig_mask = 0;
Dmitry Kravkovf2e08992010-10-06 03:28:26 +00002861 u32 reg_addr;
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02002862
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02002863 if (bp->attn_state & asserted)
2864 BNX2X_ERR("IGU ERROR\n");
2865
Eilon Greenstein3fcaf2e2008-08-13 15:50:45 -07002866 bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_PORT0_ATT_MASK + port);
2867 aeu_mask = REG_RD(bp, aeu_addr);
2868
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02002869 DP(NETIF_MSG_HW, "aeu_mask %x newly asserted %x\n",
Eilon Greenstein3fcaf2e2008-08-13 15:50:45 -07002870 aeu_mask, asserted);
Vladislav Zolotarov72fd0712010-04-19 01:13:12 +00002871 aeu_mask &= ~(asserted & 0x3ff);
Eilon Greenstein3fcaf2e2008-08-13 15:50:45 -07002872 DP(NETIF_MSG_HW, "new mask %x\n", aeu_mask);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02002873
Eilon Greenstein3fcaf2e2008-08-13 15:50:45 -07002874 REG_WR(bp, aeu_addr, aeu_mask);
2875 bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_PORT0_ATT_MASK + port);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02002876
Eilon Greenstein3fcaf2e2008-08-13 15:50:45 -07002877 DP(NETIF_MSG_HW, "attn_state %x\n", bp->attn_state);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02002878 bp->attn_state |= asserted;
Eilon Greenstein3fcaf2e2008-08-13 15:50:45 -07002879 DP(NETIF_MSG_HW, "new state %x\n", bp->attn_state);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02002880
2881 if (asserted & ATTN_HARD_WIRED_MASK) {
2882 if (asserted & ATTN_NIG_FOR_FUNC) {
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02002883
Eilon Greensteina5e9a7c2009-01-14 21:26:01 -08002884 bnx2x_acquire_phy_lock(bp);
2885
Eliezer Tamir877e9aa2008-02-28 11:55:53 -08002886 /* save nig interrupt mask */
Eilon Greenstein87942b42009-02-12 08:36:49 +00002887 nig_mask = REG_RD(bp, nig_int_mask_addr);
Eliezer Tamir877e9aa2008-02-28 11:55:53 -08002888 REG_WR(bp, nig_int_mask_addr, 0);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02002889
Yaniv Rosnerc18487e2008-06-23 20:27:52 -07002890 bnx2x_link_attn(bp);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02002891
2892 /* handle unicore attn? */
2893 }
2894 if (asserted & ATTN_SW_TIMER_4_FUNC)
2895 DP(NETIF_MSG_HW, "ATTN_SW_TIMER_4_FUNC!\n");
2896
2897 if (asserted & GPIO_2_FUNC)
2898 DP(NETIF_MSG_HW, "GPIO_2_FUNC!\n");
2899
2900 if (asserted & GPIO_3_FUNC)
2901 DP(NETIF_MSG_HW, "GPIO_3_FUNC!\n");
2902
2903 if (asserted & GPIO_4_FUNC)
2904 DP(NETIF_MSG_HW, "GPIO_4_FUNC!\n");
2905
2906 if (port == 0) {
2907 if (asserted & ATTN_GENERAL_ATTN_1) {
2908 DP(NETIF_MSG_HW, "ATTN_GENERAL_ATTN_1!\n");
2909 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_1, 0x0);
2910 }
2911 if (asserted & ATTN_GENERAL_ATTN_2) {
2912 DP(NETIF_MSG_HW, "ATTN_GENERAL_ATTN_2!\n");
2913 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_2, 0x0);
2914 }
2915 if (asserted & ATTN_GENERAL_ATTN_3) {
2916 DP(NETIF_MSG_HW, "ATTN_GENERAL_ATTN_3!\n");
2917 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_3, 0x0);
2918 }
2919 } else {
2920 if (asserted & ATTN_GENERAL_ATTN_4) {
2921 DP(NETIF_MSG_HW, "ATTN_GENERAL_ATTN_4!\n");
2922 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_4, 0x0);
2923 }
2924 if (asserted & ATTN_GENERAL_ATTN_5) {
2925 DP(NETIF_MSG_HW, "ATTN_GENERAL_ATTN_5!\n");
2926 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_5, 0x0);
2927 }
2928 if (asserted & ATTN_GENERAL_ATTN_6) {
2929 DP(NETIF_MSG_HW, "ATTN_GENERAL_ATTN_6!\n");
2930 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_6, 0x0);
2931 }
2932 }
2933
2934 } /* if hardwired */
2935
Dmitry Kravkovf2e08992010-10-06 03:28:26 +00002936 if (bp->common.int_block == INT_BLOCK_HC)
2937 reg_addr = (HC_REG_COMMAND_REG + port*32 +
2938 COMMAND_REG_ATTN_BITS_SET);
2939 else
2940 reg_addr = (BAR_IGU_INTMEM + IGU_CMD_ATTN_BIT_SET_UPPER*8);
2941
2942 DP(NETIF_MSG_HW, "about to mask 0x%08x at %s addr 0x%x\n", asserted,
2943 (bp->common.int_block == INT_BLOCK_HC) ? "HC" : "IGU", reg_addr);
2944 REG_WR(bp, reg_addr, asserted);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02002945
2946 /* now set back the mask */
Eilon Greensteina5e9a7c2009-01-14 21:26:01 -08002947 if (asserted & ATTN_NIG_FOR_FUNC) {
Eilon Greenstein87942b42009-02-12 08:36:49 +00002948 REG_WR(bp, nig_int_mask_addr, nig_mask);
Eilon Greensteina5e9a7c2009-01-14 21:26:01 -08002949 bnx2x_release_phy_lock(bp);
2950 }
Eliezer Tamir877e9aa2008-02-28 11:55:53 -08002951}
2952
Eilon Greensteinfd4ef402009-07-21 05:47:27 +00002953static inline void bnx2x_fan_failure(struct bnx2x *bp)
2954{
2955 int port = BP_PORT(bp);
Yaniv Rosnerb7737c92010-09-07 11:40:54 +00002956 u32 ext_phy_config;
Eilon Greensteinfd4ef402009-07-21 05:47:27 +00002957 /* mark the failure */
Yaniv Rosnerb7737c92010-09-07 11:40:54 +00002958 ext_phy_config =
2959 SHMEM_RD(bp,
2960 dev_info.port_hw_config[port].external_phy_config);
2961
2962 ext_phy_config &= ~PORT_HW_CFG_XGXS_EXT_PHY_TYPE_MASK;
2963 ext_phy_config |= PORT_HW_CFG_XGXS_EXT_PHY_TYPE_FAILURE;
Eilon Greensteinfd4ef402009-07-21 05:47:27 +00002964 SHMEM_WR(bp, dev_info.port_hw_config[port].external_phy_config,
Yaniv Rosnerb7737c92010-09-07 11:40:54 +00002965 ext_phy_config);
Eilon Greensteinfd4ef402009-07-21 05:47:27 +00002966
2967 /* log the failure */
Vladislav Zolotarovcdaa7cb2010-04-19 01:13:57 +00002968 netdev_err(bp->dev, "Fan Failure on Network Controller has caused"
2969 " the driver to shutdown the card to prevent permanent"
2970 " damage. Please contact OEM Support for assistance\n");
Eilon Greensteinfd4ef402009-07-21 05:47:27 +00002971}
Eilon Greensteinab6ad5a2009-08-12 08:24:29 +00002972
Eliezer Tamir877e9aa2008-02-28 11:55:53 -08002973static inline void bnx2x_attn_int_deasserted0(struct bnx2x *bp, u32 attn)
2974{
Eilon Greenstein34f80b02008-06-23 20:33:01 -07002975 int port = BP_PORT(bp);
Eliezer Tamir877e9aa2008-02-28 11:55:53 -08002976 int reg_offset;
Yaniv Rosnerd90d96b2010-09-07 11:41:04 +00002977 u32 val;
Eliezer Tamir877e9aa2008-02-28 11:55:53 -08002978
Eilon Greenstein34f80b02008-06-23 20:33:01 -07002979 reg_offset = (port ? MISC_REG_AEU_ENABLE1_FUNC_1_OUT_0 :
2980 MISC_REG_AEU_ENABLE1_FUNC_0_OUT_0);
Eliezer Tamir877e9aa2008-02-28 11:55:53 -08002981
Eilon Greenstein34f80b02008-06-23 20:33:01 -07002982 if (attn & AEU_INPUTS_ATTN_BITS_SPIO5) {
Eliezer Tamir877e9aa2008-02-28 11:55:53 -08002983
2984 val = REG_RD(bp, reg_offset);
2985 val &= ~AEU_INPUTS_ATTN_BITS_SPIO5;
2986 REG_WR(bp, reg_offset, val);
2987
2988 BNX2X_ERR("SPIO5 hw attention\n");
2989
Eilon Greensteinfd4ef402009-07-21 05:47:27 +00002990 /* Fan failure attention */
Yaniv Rosnerd90d96b2010-09-07 11:41:04 +00002991 bnx2x_hw_reset_phy(&bp->link_params);
Eilon Greensteinfd4ef402009-07-21 05:47:27 +00002992 bnx2x_fan_failure(bp);
Eliezer Tamir877e9aa2008-02-28 11:55:53 -08002993 }
Eilon Greenstein34f80b02008-06-23 20:33:01 -07002994
Eilon Greenstein589abe32009-02-12 08:36:55 +00002995 if (attn & (AEU_INPUTS_ATTN_BITS_GPIO3_FUNCTION_0 |
2996 AEU_INPUTS_ATTN_BITS_GPIO3_FUNCTION_1)) {
2997 bnx2x_acquire_phy_lock(bp);
2998 bnx2x_handle_module_detect_int(&bp->link_params);
2999 bnx2x_release_phy_lock(bp);
3000 }
3001
Eilon Greenstein34f80b02008-06-23 20:33:01 -07003002 if (attn & HW_INTERRUT_ASSERT_SET_0) {
3003
3004 val = REG_RD(bp, reg_offset);
3005 val &= ~(attn & HW_INTERRUT_ASSERT_SET_0);
3006 REG_WR(bp, reg_offset, val);
3007
3008 BNX2X_ERR("FATAL HW block attention set0 0x%x\n",
Eilon Greenstein0fc5d002009-08-12 08:24:05 +00003009 (u32)(attn & HW_INTERRUT_ASSERT_SET_0));
Eilon Greenstein34f80b02008-06-23 20:33:01 -07003010 bnx2x_panic();
3011 }
Eliezer Tamir877e9aa2008-02-28 11:55:53 -08003012}
3013
3014static inline void bnx2x_attn_int_deasserted1(struct bnx2x *bp, u32 attn)
3015{
3016 u32 val;
3017
Eilon Greenstein0626b892009-02-12 08:38:14 +00003018 if (attn & AEU_INPUTS_ATTN_BITS_DOORBELLQ_HW_INTERRUPT) {
Eliezer Tamir877e9aa2008-02-28 11:55:53 -08003019
3020 val = REG_RD(bp, DORQ_REG_DORQ_INT_STS_CLR);
3021 BNX2X_ERR("DB hw attention 0x%x\n", val);
3022 /* DORQ discard attention */
3023 if (val & 0x2)
3024 BNX2X_ERR("FATAL error from DORQ\n");
3025 }
Eilon Greenstein34f80b02008-06-23 20:33:01 -07003026
3027 if (attn & HW_INTERRUT_ASSERT_SET_1) {
3028
3029 int port = BP_PORT(bp);
3030 int reg_offset;
3031
3032 reg_offset = (port ? MISC_REG_AEU_ENABLE1_FUNC_1_OUT_1 :
3033 MISC_REG_AEU_ENABLE1_FUNC_0_OUT_1);
3034
3035 val = REG_RD(bp, reg_offset);
3036 val &= ~(attn & HW_INTERRUT_ASSERT_SET_1);
3037 REG_WR(bp, reg_offset, val);
3038
3039 BNX2X_ERR("FATAL HW block attention set1 0x%x\n",
Eilon Greenstein0fc5d002009-08-12 08:24:05 +00003040 (u32)(attn & HW_INTERRUT_ASSERT_SET_1));
Eilon Greenstein34f80b02008-06-23 20:33:01 -07003041 bnx2x_panic();
3042 }
Eliezer Tamir877e9aa2008-02-28 11:55:53 -08003043}
3044
3045static inline void bnx2x_attn_int_deasserted2(struct bnx2x *bp, u32 attn)
3046{
3047 u32 val;
3048
3049 if (attn & AEU_INPUTS_ATTN_BITS_CFC_HW_INTERRUPT) {
3050
3051 val = REG_RD(bp, CFC_REG_CFC_INT_STS_CLR);
3052 BNX2X_ERR("CFC hw attention 0x%x\n", val);
3053 /* CFC error attention */
3054 if (val & 0x2)
3055 BNX2X_ERR("FATAL error from CFC\n");
3056 }
3057
3058 if (attn & AEU_INPUTS_ATTN_BITS_PXP_HW_INTERRUPT) {
3059
3060 val = REG_RD(bp, PXP_REG_PXP_INT_STS_CLR_0);
3061 BNX2X_ERR("PXP hw attention 0x%x\n", val);
3062 /* RQ_USDMDP_FIFO_OVERFLOW */
3063 if (val & 0x18000)
3064 BNX2X_ERR("FATAL error from PXP\n");
Dmitry Kravkovf2e08992010-10-06 03:28:26 +00003065 if (CHIP_IS_E2(bp)) {
3066 val = REG_RD(bp, PXP_REG_PXP_INT_STS_CLR_1);
3067 BNX2X_ERR("PXP hw attention-1 0x%x\n", val);
3068 }
Eliezer Tamir877e9aa2008-02-28 11:55:53 -08003069 }
Eilon Greenstein34f80b02008-06-23 20:33:01 -07003070
3071 if (attn & HW_INTERRUT_ASSERT_SET_2) {
3072
3073 int port = BP_PORT(bp);
3074 int reg_offset;
3075
3076 reg_offset = (port ? MISC_REG_AEU_ENABLE1_FUNC_1_OUT_2 :
3077 MISC_REG_AEU_ENABLE1_FUNC_0_OUT_2);
3078
3079 val = REG_RD(bp, reg_offset);
3080 val &= ~(attn & HW_INTERRUT_ASSERT_SET_2);
3081 REG_WR(bp, reg_offset, val);
3082
3083 BNX2X_ERR("FATAL HW block attention set2 0x%x\n",
Eilon Greenstein0fc5d002009-08-12 08:24:05 +00003084 (u32)(attn & HW_INTERRUT_ASSERT_SET_2));
Eilon Greenstein34f80b02008-06-23 20:33:01 -07003085 bnx2x_panic();
3086 }
Eliezer Tamir877e9aa2008-02-28 11:55:53 -08003087}
3088
3089static inline void bnx2x_attn_int_deasserted3(struct bnx2x *bp, u32 attn)
3090{
Eilon Greenstein34f80b02008-06-23 20:33:01 -07003091 u32 val;
3092
Eliezer Tamir877e9aa2008-02-28 11:55:53 -08003093 if (attn & EVEREST_GEN_ATTN_IN_USE_MASK) {
3094
Eilon Greenstein34f80b02008-06-23 20:33:01 -07003095 if (attn & BNX2X_PMF_LINK_ASSERT) {
3096 int func = BP_FUNC(bp);
3097
3098 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_12 + func*4, 0);
Dmitry Kravkovf2e08992010-10-06 03:28:26 +00003099 bp->mf_config[BP_VN(bp)] = MF_CFG_RD(bp,
3100 func_mf_config[BP_ABS_FUNC(bp)].config);
3101 val = SHMEM_RD(bp,
3102 func_mb[BP_FW_MB_IDX(bp)].drv_status);
Eilon Greenstein2691d512009-08-12 08:22:08 +00003103 if (val & DRV_STATUS_DCC_EVENT_MASK)
3104 bnx2x_dcc_event(bp,
3105 (val & DRV_STATUS_DCC_EVENT_MASK));
Dmitry Kravkov0793f83f2010-12-01 12:39:28 -08003106
3107 if (val & DRV_STATUS_SET_MF_BW)
3108 bnx2x_set_mf_bw(bp);
3109
Eilon Greenstein34f80b02008-06-23 20:33:01 -07003110 bnx2x__link_status_update(bp);
Eilon Greenstein2691d512009-08-12 08:22:08 +00003111 if ((bp->port.pmf == 0) && (val & DRV_STATUS_PMF))
Eilon Greenstein34f80b02008-06-23 20:33:01 -07003112 bnx2x_pmf_update(bp);
3113
Vladislav Zolotarove4901dd2010-12-13 05:44:18 +00003114 if (bp->port.pmf &&
Shmulik Ravid785b9b12010-12-30 06:27:03 +00003115 (val & DRV_STATUS_DCBX_NEGOTIATION_RESULTS) &&
3116 bp->dcbx_enabled > 0)
Vladislav Zolotarove4901dd2010-12-13 05:44:18 +00003117 /* start dcbx state machine */
3118 bnx2x_dcbx_set_params(bp,
3119 BNX2X_DCBX_STATE_NEG_RECEIVED);
Eilon Greenstein34f80b02008-06-23 20:33:01 -07003120 } else if (attn & BNX2X_MC_ASSERT_BITS) {
Eliezer Tamir877e9aa2008-02-28 11:55:53 -08003121
3122 BNX2X_ERR("MC assert!\n");
3123 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_10, 0);
3124 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_9, 0);
3125 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_8, 0);
3126 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_7, 0);
3127 bnx2x_panic();
3128
3129 } else if (attn & BNX2X_MCP_ASSERT) {
3130
3131 BNX2X_ERR("MCP assert!\n");
3132 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_11, 0);
Eilon Greenstein34f80b02008-06-23 20:33:01 -07003133 bnx2x_fw_dump(bp);
Eliezer Tamir877e9aa2008-02-28 11:55:53 -08003134
3135 } else
3136 BNX2X_ERR("Unknown HW assert! (attn 0x%x)\n", attn);
3137 }
3138
3139 if (attn & EVEREST_LATCHED_ATTN_IN_USE_MASK) {
Eilon Greenstein34f80b02008-06-23 20:33:01 -07003140 BNX2X_ERR("LATCHED attention 0x%08x (masked)\n", attn);
3141 if (attn & BNX2X_GRC_TIMEOUT) {
Dmitry Kravkovf2e08992010-10-06 03:28:26 +00003142 val = CHIP_IS_E1(bp) ? 0 :
3143 REG_RD(bp, MISC_REG_GRC_TIMEOUT_ATTN);
Eilon Greenstein34f80b02008-06-23 20:33:01 -07003144 BNX2X_ERR("GRC time-out 0x%08x\n", val);
3145 }
3146 if (attn & BNX2X_GRC_RSV) {
Dmitry Kravkovf2e08992010-10-06 03:28:26 +00003147 val = CHIP_IS_E1(bp) ? 0 :
3148 REG_RD(bp, MISC_REG_GRC_RSV_ATTN);
Eilon Greenstein34f80b02008-06-23 20:33:01 -07003149 BNX2X_ERR("GRC reserved 0x%08x\n", val);
3150 }
Eliezer Tamir877e9aa2008-02-28 11:55:53 -08003151 REG_WR(bp, MISC_REG_AEU_CLR_LATCH_SIGNAL, 0x7ff);
Eliezer Tamir877e9aa2008-02-28 11:55:53 -08003152 }
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02003153}
3154
Vladislav Zolotarov72fd0712010-04-19 01:13:12 +00003155#define BNX2X_MISC_GEN_REG MISC_REG_GENERIC_POR_1
3156#define LOAD_COUNTER_BITS 16 /* Number of bits for load counter */
3157#define LOAD_COUNTER_MASK (((u32)0x1 << LOAD_COUNTER_BITS) - 1)
3158#define RESET_DONE_FLAG_MASK (~LOAD_COUNTER_MASK)
3159#define RESET_DONE_FLAG_SHIFT LOAD_COUNTER_BITS
Dmitry Kravkovf85582f2010-10-06 03:34:21 +00003160
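/* Illustrative sketch (an assumption, not part of the original driver):
 * with the masks above, one 32-bit generic register carries two fields -
 * a 16-bit load counter in bits 0..15 and reset-state bits in 16..31
 * (all upper bits clear means "reset done"). Hypothetical decoders:
 */
static inline u16 example_load_cnt(u32 reg_val)
{
	return reg_val & LOAD_COUNTER_MASK;		/* bits 0..15 */
}

static inline bool example_reset_done(u32 reg_val)
{
	return (reg_val & RESET_DONE_FLAG_MASK) == 0;	/* bits 16..31 */
}
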
Vladislav Zolotarov72fd0712010-04-19 01:13:12 +00003161/*
3162 * should be run under rtnl lock
3163 */
3164static inline void bnx2x_set_reset_done(struct bnx2x *bp)
3165{
3166 u32 val = REG_RD(bp, BNX2X_MISC_GEN_REG);
3167 val &= ~(1 << RESET_DONE_FLAG_SHIFT);
3168 REG_WR(bp, BNX2X_MISC_GEN_REG, val);
3169 barrier();
3170 mmiowb();
3171}
3172
3173/*
3174 * should be run under rtnl lock
3175 */
3176static inline void bnx2x_set_reset_in_progress(struct bnx2x *bp)
3177{
3178 u32 val = REG_RD(bp, BNX2X_MISC_GEN_REG);
3179 val |= (1 << 16);
3180 REG_WR(bp, BNX2X_MISC_GEN_REG, val);
3181 barrier();
3182 mmiowb();
3183}
3184
3185/*
3186 * should be run under rtnl lock
3187 */
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00003188bool bnx2x_reset_is_done(struct bnx2x *bp)
Vladislav Zolotarov72fd0712010-04-19 01:13:12 +00003189{
3190 u32 val = REG_RD(bp, BNX2X_MISC_GEN_REG);
3191 DP(NETIF_MSG_HW, "GEN_REG_VAL=0x%08x\n", val);
3192 return (val & RESET_DONE_FLAG_MASK) ? false : true;
3193}
3194
3195/*
3196 * should be run under rtnl lock
3197 */
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00003198inline void bnx2x_inc_load_cnt(struct bnx2x *bp)
Vladislav Zolotarov72fd0712010-04-19 01:13:12 +00003199{
3200 u32 val1, val = REG_RD(bp, BNX2X_MISC_GEN_REG);
3201
3202 DP(NETIF_MSG_HW, "Old GEN_REG_VAL=0x%08x\n", val);
3203
3204 val1 = ((val & LOAD_COUNTER_MASK) + 1) & LOAD_COUNTER_MASK;
3205 REG_WR(bp, BNX2X_MISC_GEN_REG, (val & RESET_DONE_FLAG_MASK) | val1);
3206 barrier();
3207 mmiowb();
3208}
3209
3210/*
3211 * should be run under rtnl lock
3212 */
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00003213u32 bnx2x_dec_load_cnt(struct bnx2x *bp)
Vladislav Zolotarov72fd0712010-04-19 01:13:12 +00003214{
3215 u32 val1, val = REG_RD(bp, BNX2X_MISC_GEN_REG);
3216
3217 DP(NETIF_MSG_HW, "Old GEN_REG_VAL=0x%08x\n", val);
3218
3219 val1 = ((val & LOAD_COUNTER_MASK) - 1) & LOAD_COUNTER_MASK;
3220 REG_WR(bp, BNX2X_MISC_GEN_REG, (val & RESET_DONE_FLAG_MASK) | val1);
3221 barrier();
3222 mmiowb();
3223
3224 return val1;
3225}
3226
3227/*
3228 * should be run under rtnl lock
3229 */
3230static inline u32 bnx2x_get_load_cnt(struct bnx2x *bp)
3231{
3232 return REG_RD(bp, BNX2X_MISC_GEN_REG) & LOAD_COUNTER_MASK;
3233}
3234
3235static inline void bnx2x_clear_load_cnt(struct bnx2x *bp)
3236{
3237 u32 val = REG_RD(bp, BNX2X_MISC_GEN_REG);
3238 REG_WR(bp, BNX2X_MISC_GEN_REG, val & (~LOAD_COUNTER_MASK));
3239}
3240
3241static inline void _print_next_block(int idx, const char *blk)
3242{
3243 if (idx)
3244 pr_cont(", ");
3245 pr_cont("%s", blk);
3246}
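
/* Illustrative usage (an assumption, not part of the original driver):
 * the running par_num decides whether a ", " separator is printed, so a
 * sequence of calls emits a comma-separated list on one line:
 */
static inline void example_print_parity_list(void)
{
	_print_next_block(0, "BRB");	/* prints "BRB" */
	_print_next_block(1, "QM");	/* prints ", QM" */
	_print_next_block(2, "CFC");	/* prints ", CFC" */
}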
3247
3248static inline int bnx2x_print_blocks_with_parity0(u32 sig, int par_num)
3249{
3250 int i = 0;
3251 u32 cur_bit = 0;
3252 for (i = 0; sig; i++) {
3253 cur_bit = ((u32)0x1 << i);
3254 if (sig & cur_bit) {
3255 switch (cur_bit) {
3256 case AEU_INPUTS_ATTN_BITS_BRB_PARITY_ERROR:
3257 _print_next_block(par_num++, "BRB");
3258 break;
3259 case AEU_INPUTS_ATTN_BITS_PARSER_PARITY_ERROR:
3260 _print_next_block(par_num++, "PARSER");
3261 break;
3262 case AEU_INPUTS_ATTN_BITS_TSDM_PARITY_ERROR:
3263 _print_next_block(par_num++, "TSDM");
3264 break;
3265 case AEU_INPUTS_ATTN_BITS_SEARCHER_PARITY_ERROR:
3266 _print_next_block(par_num++, "SEARCHER");
3267 break;
3268 case AEU_INPUTS_ATTN_BITS_TSEMI_PARITY_ERROR:
3269 _print_next_block(par_num++, "TSEMI");
3270 break;
3271 }
3272
3273 /* Clear the bit */
3274 sig &= ~cur_bit;
3275 }
3276 }
3277
3278 return par_num;
3279}
3280
3281static inline int bnx2x_print_blocks_with_parity1(u32 sig, int par_num)
3282{
3283 int i = 0;
3284 u32 cur_bit = 0;
3285 for (i = 0; sig; i++) {
3286 cur_bit = ((u32)0x1 << i);
3287 if (sig & cur_bit) {
3288 switch (cur_bit) {
3289 case AEU_INPUTS_ATTN_BITS_PBCLIENT_PARITY_ERROR:
3290 _print_next_block(par_num++, "PBCLIENT");
3291 break;
3292 case AEU_INPUTS_ATTN_BITS_QM_PARITY_ERROR:
3293 _print_next_block(par_num++, "QM");
3294 break;
3295 case AEU_INPUTS_ATTN_BITS_XSDM_PARITY_ERROR:
3296 _print_next_block(par_num++, "XSDM");
3297 break;
3298 case AEU_INPUTS_ATTN_BITS_XSEMI_PARITY_ERROR:
3299 _print_next_block(par_num++, "XSEMI");
3300 break;
3301 case AEU_INPUTS_ATTN_BITS_DOORBELLQ_PARITY_ERROR:
3302 _print_next_block(par_num++, "DOORBELLQ");
3303 break;
3304 case AEU_INPUTS_ATTN_BITS_VAUX_PCI_CORE_PARITY_ERROR:
3305 _print_next_block(par_num++, "VAUX PCI CORE");
3306 break;
3307 case AEU_INPUTS_ATTN_BITS_DEBUG_PARITY_ERROR:
3308 _print_next_block(par_num++, "DEBUG");
3309 break;
3310 case AEU_INPUTS_ATTN_BITS_USDM_PARITY_ERROR:
3311 _print_next_block(par_num++, "USDM");
3312 break;
3313 case AEU_INPUTS_ATTN_BITS_USEMI_PARITY_ERROR:
3314 _print_next_block(par_num++, "USEMI");
3315 break;
3316 case AEU_INPUTS_ATTN_BITS_UPB_PARITY_ERROR:
3317 _print_next_block(par_num++, "UPB");
3318 break;
3319 case AEU_INPUTS_ATTN_BITS_CSDM_PARITY_ERROR:
3320 _print_next_block(par_num++, "CSDM");
3321 break;
3322 }
3323
3324 /* Clear the bit */
3325 sig &= ~cur_bit;
3326 }
3327 }
3328
3329 return par_num;
3330}
3331
3332static inline int bnx2x_print_blocks_with_parity2(u32 sig, int par_num)
3333{
3334 int i = 0;
3335 u32 cur_bit = 0;
3336 for (i = 0; sig; i++) {
3337 cur_bit = ((u32)0x1 << i);
3338 if (sig & cur_bit) {
3339 switch (cur_bit) {
3340 case AEU_INPUTS_ATTN_BITS_CSEMI_PARITY_ERROR:
3341 _print_next_block(par_num++, "CSEMI");
3342 break;
3343 case AEU_INPUTS_ATTN_BITS_PXP_PARITY_ERROR:
3344 _print_next_block(par_num++, "PXP");
3345 break;
3346 case AEU_IN_ATTN_BITS_PXPPCICLOCKCLIENT_PARITY_ERROR:
3347 _print_next_block(par_num++,
3348 "PXPPCICLOCKCLIENT");
3349 break;
3350 case AEU_INPUTS_ATTN_BITS_CFC_PARITY_ERROR:
3351 _print_next_block(par_num++, "CFC");
3352 break;
3353 case AEU_INPUTS_ATTN_BITS_CDU_PARITY_ERROR:
3354 _print_next_block(par_num++, "CDU");
3355 break;
3356 case AEU_INPUTS_ATTN_BITS_IGU_PARITY_ERROR:
3357 _print_next_block(par_num++, "IGU");
3358 break;
3359 case AEU_INPUTS_ATTN_BITS_MISC_PARITY_ERROR:
3360 _print_next_block(par_num++, "MISC");
3361 break;
3362 }
3363
3364 /* Clear the bit */
3365 sig &= ~cur_bit;
3366 }
3367 }
3368
3369 return par_num;
3370}
3371
3372static inline int bnx2x_print_blocks_with_parity3(u32 sig, int par_num)
3373{
3374 int i = 0;
3375 u32 cur_bit = 0;
3376 for (i = 0; sig; i++) {
3377 cur_bit = ((u32)0x1 << i);
3378 if (sig & cur_bit) {
3379 switch (cur_bit) {
3380 case AEU_INPUTS_ATTN_BITS_MCP_LATCHED_ROM_PARITY:
3381 _print_next_block(par_num++, "MCP ROM");
3382 break;
3383 case AEU_INPUTS_ATTN_BITS_MCP_LATCHED_UMP_RX_PARITY:
3384 _print_next_block(par_num++, "MCP UMP RX");
3385 break;
3386 case AEU_INPUTS_ATTN_BITS_MCP_LATCHED_UMP_TX_PARITY:
3387 _print_next_block(par_num++, "MCP UMP TX");
3388 break;
3389 case AEU_INPUTS_ATTN_BITS_MCP_LATCHED_SCPAD_PARITY:
3390 _print_next_block(par_num++, "MCP SCPAD");
3391 break;
3392 }
3393
3394 /* Clear the bit */
3395 sig &= ~cur_bit;
3396 }
3397 }
3398
3399 return par_num;
3400}
3401
3402static inline bool bnx2x_parity_attn(struct bnx2x *bp, u32 sig0, u32 sig1,
3403 u32 sig2, u32 sig3)
3404{
3405 if ((sig0 & HW_PRTY_ASSERT_SET_0) || (sig1 & HW_PRTY_ASSERT_SET_1) ||
3406 (sig2 & HW_PRTY_ASSERT_SET_2) || (sig3 & HW_PRTY_ASSERT_SET_3)) {
3407 int par_num = 0;
3408 DP(NETIF_MSG_HW, "Was parity error: HW block parity attention: "
3409 "[0]:0x%08x [1]:0x%08x "
3410 "[2]:0x%08x [3]:0x%08x\n",
3411 sig0 & HW_PRTY_ASSERT_SET_0,
3412 sig1 & HW_PRTY_ASSERT_SET_1,
3413 sig2 & HW_PRTY_ASSERT_SET_2,
3414 sig3 & HW_PRTY_ASSERT_SET_3);
3415 printk(KERN_ERR"%s: Parity errors detected in blocks: ",
3416 bp->dev->name);
3417 par_num = bnx2x_print_blocks_with_parity0(
3418 sig0 & HW_PRTY_ASSERT_SET_0, par_num);
3419 par_num = bnx2x_print_blocks_with_parity1(
3420 sig1 & HW_PRTY_ASSERT_SET_1, par_num);
3421 par_num = bnx2x_print_blocks_with_parity2(
3422 sig2 & HW_PRTY_ASSERT_SET_2, par_num);
3423 par_num = bnx2x_print_blocks_with_parity3(
3424 sig3 & HW_PRTY_ASSERT_SET_3, par_num);
3425 printk("\n");
3426 return true;
3427 } else
3428 return false;
3429}
3430
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00003431bool bnx2x_chk_parity_attn(struct bnx2x *bp)
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02003432{
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02003433 struct attn_route attn;
Vladislav Zolotarov72fd0712010-04-19 01:13:12 +00003434 int port = BP_PORT(bp);
3435
3436 attn.sig[0] = REG_RD(bp,
3437 MISC_REG_AEU_AFTER_INVERT_1_FUNC_0 +
3438 port*4);
3439 attn.sig[1] = REG_RD(bp,
3440 MISC_REG_AEU_AFTER_INVERT_2_FUNC_0 +
3441 port*4);
3442 attn.sig[2] = REG_RD(bp,
3443 MISC_REG_AEU_AFTER_INVERT_3_FUNC_0 +
3444 port*4);
3445 attn.sig[3] = REG_RD(bp,
3446 MISC_REG_AEU_AFTER_INVERT_4_FUNC_0 +
3447 port*4);
3448
3449 return bnx2x_parity_attn(bp, attn.sig[0], attn.sig[1], attn.sig[2],
3450 attn.sig[3]);
3451}
3452
Dmitry Kravkovf2e08992010-10-06 03:28:26 +00003453
3454static inline void bnx2x_attn_int_deasserted4(struct bnx2x *bp, u32 attn)
3455{
3456 u32 val;
3457 if (attn & AEU_INPUTS_ATTN_BITS_PGLUE_HW_INTERRUPT) {
3458
3459 val = REG_RD(bp, PGLUE_B_REG_PGLUE_B_INT_STS_CLR);
3460 BNX2X_ERR("PGLUE hw attention 0x%x\n", val);
3461 if (val & PGLUE_B_PGLUE_B_INT_STS_REG_ADDRESS_ERROR)
3462 BNX2X_ERR("PGLUE_B_PGLUE_B_INT_STS_REG_"
3463 "ADDRESS_ERROR\n");
3464 if (val & PGLUE_B_PGLUE_B_INT_STS_REG_INCORRECT_RCV_BEHAVIOR)
3465 BNX2X_ERR("PGLUE_B_PGLUE_B_INT_STS_REG_"
3466 "INCORRECT_RCV_BEHAVIOR\n");
3467 if (val & PGLUE_B_PGLUE_B_INT_STS_REG_WAS_ERROR_ATTN)
3468 BNX2X_ERR("PGLUE_B_PGLUE_B_INT_STS_REG_"
3469 "WAS_ERROR_ATTN\n");
3470 if (val & PGLUE_B_PGLUE_B_INT_STS_REG_VF_LENGTH_VIOLATION_ATTN)
3471 BNX2X_ERR("PGLUE_B_PGLUE_B_INT_STS_REG_"
3472 "VF_LENGTH_VIOLATION_ATTN\n");
3473 if (val &
3474 PGLUE_B_PGLUE_B_INT_STS_REG_VF_GRC_SPACE_VIOLATION_ATTN)
3475 BNX2X_ERR("PGLUE_B_PGLUE_B_INT_STS_REG_"
3476 "VF_GRC_SPACE_VIOLATION_ATTN\n");
3477 if (val &
3478 PGLUE_B_PGLUE_B_INT_STS_REG_VF_MSIX_BAR_VIOLATION_ATTN)
3479 BNX2X_ERR("PGLUE_B_PGLUE_B_INT_STS_REG_"
3480 "VF_MSIX_BAR_VIOLATION_ATTN\n");
3481 if (val & PGLUE_B_PGLUE_B_INT_STS_REG_TCPL_ERROR_ATTN)
3482 BNX2X_ERR("PGLUE_B_PGLUE_B_INT_STS_REG_"
3483 "TCPL_ERROR_ATTN\n");
3484 if (val & PGLUE_B_PGLUE_B_INT_STS_REG_TCPL_IN_TWO_RCBS_ATTN)
3485 BNX2X_ERR("PGLUE_B_PGLUE_B_INT_STS_REG_"
3486 "TCPL_IN_TWO_RCBS_ATTN\n");
3487 if (val & PGLUE_B_PGLUE_B_INT_STS_REG_CSSNOOP_FIFO_OVERFLOW)
3488 BNX2X_ERR("PGLUE_B_PGLUE_B_INT_STS_REG_"
3489 "CSSNOOP_FIFO_OVERFLOW\n");
3490 }
3491 if (attn & AEU_INPUTS_ATTN_BITS_ATC_HW_INTERRUPT) {
3492 val = REG_RD(bp, ATC_REG_ATC_INT_STS_CLR);
3493 BNX2X_ERR("ATC hw attention 0x%x\n", val);
3494 if (val & ATC_ATC_INT_STS_REG_ADDRESS_ERROR)
3495 BNX2X_ERR("ATC_ATC_INT_STS_REG_ADDRESS_ERROR\n");
3496 if (val & ATC_ATC_INT_STS_REG_ATC_TCPL_TO_NOT_PEND)
3497 BNX2X_ERR("ATC_ATC_INT_STS_REG"
3498 "_ATC_TCPL_TO_NOT_PEND\n");
3499 if (val & ATC_ATC_INT_STS_REG_ATC_GPA_MULTIPLE_HITS)
3500 BNX2X_ERR("ATC_ATC_INT_STS_REG_"
3501 "ATC_GPA_MULTIPLE_HITS\n");
3502 if (val & ATC_ATC_INT_STS_REG_ATC_RCPL_TO_EMPTY_CNT)
3503 BNX2X_ERR("ATC_ATC_INT_STS_REG_"
3504 "ATC_RCPL_TO_EMPTY_CNT\n");
3505 if (val & ATC_ATC_INT_STS_REG_ATC_TCPL_ERROR)
3506 BNX2X_ERR("ATC_ATC_INT_STS_REG_ATC_TCPL_ERROR\n");
3507 if (val & ATC_ATC_INT_STS_REG_ATC_IREQ_LESS_THAN_STU)
3508 BNX2X_ERR("ATC_ATC_INT_STS_REG_"
3509 "ATC_IREQ_LESS_THAN_STU\n");
3510 }
3511
3512 if (attn & (AEU_INPUTS_ATTN_BITS_PGLUE_PARITY_ERROR |
3513 AEU_INPUTS_ATTN_BITS_ATC_PARITY_ERROR)) {
3514 BNX2X_ERR("FATAL parity attention set4 0x%x\n",
3515 (u32)(attn & (AEU_INPUTS_ATTN_BITS_PGLUE_PARITY_ERROR |
3516 AEU_INPUTS_ATTN_BITS_ATC_PARITY_ERROR)));
3517 }
3518
3519}
3520
Vladislav Zolotarov72fd0712010-04-19 01:13:12 +00003521static void bnx2x_attn_int_deasserted(struct bnx2x *bp, u32 deasserted)
3522{
3523 struct attn_route attn, *group_mask;
Eilon Greenstein34f80b02008-06-23 20:33:01 -07003524 int port = BP_PORT(bp);
Eliezer Tamir877e9aa2008-02-28 11:55:53 -08003525 int index;
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02003526 u32 reg_addr;
3527 u32 val;
Eilon Greenstein3fcaf2e2008-08-13 15:50:45 -07003528 u32 aeu_mask;
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02003529
3530	/* need to take the HW lock because the MCP or the other port might
3531	   also try to handle this event */
Yitchak Gertner4a37fb62008-08-13 15:50:23 -07003532 bnx2x_acquire_alr(bp);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02003533
Vladislav Zolotarov4a33bc02011-01-09 02:20:04 +00003534 if (CHIP_PARITY_ENABLED(bp) && bnx2x_chk_parity_attn(bp)) {
Vladislav Zolotarov72fd0712010-04-19 01:13:12 +00003535 bp->recovery_state = BNX2X_RECOVERY_INIT;
3536 bnx2x_set_reset_in_progress(bp);
3537 schedule_delayed_work(&bp->reset_task, 0);
3538 /* Disable HW interrupts */
3539 bnx2x_int_disable(bp);
3540 bnx2x_release_alr(bp);
3541		/* In case of parity errors don't handle attentions so that
3542		 * the other function can also "see" the parity errors.
3543		 */
3544 return;
3545 }
3546
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02003547 attn.sig[0] = REG_RD(bp, MISC_REG_AEU_AFTER_INVERT_1_FUNC_0 + port*4);
3548 attn.sig[1] = REG_RD(bp, MISC_REG_AEU_AFTER_INVERT_2_FUNC_0 + port*4);
3549 attn.sig[2] = REG_RD(bp, MISC_REG_AEU_AFTER_INVERT_3_FUNC_0 + port*4);
3550 attn.sig[3] = REG_RD(bp, MISC_REG_AEU_AFTER_INVERT_4_FUNC_0 + port*4);
Dmitry Kravkovf2e08992010-10-06 03:28:26 +00003551 if (CHIP_IS_E2(bp))
3552 attn.sig[4] =
3553 REG_RD(bp, MISC_REG_AEU_AFTER_INVERT_5_FUNC_0 + port*4);
3554 else
3555 attn.sig[4] = 0;
3556
3557 DP(NETIF_MSG_HW, "attn: %08x %08x %08x %08x %08x\n",
3558 attn.sig[0], attn.sig[1], attn.sig[2], attn.sig[3], attn.sig[4]);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02003559
3560 for (index = 0; index < MAX_DYNAMIC_ATTN_GRPS; index++) {
3561 if (deasserted & (1 << index)) {
Vladislav Zolotarov72fd0712010-04-19 01:13:12 +00003562 group_mask = &bp->attn_group[index];
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02003563
Dmitry Kravkovf2e08992010-10-06 03:28:26 +00003564 DP(NETIF_MSG_HW, "group[%d]: %08x %08x "
3565 "%08x %08x %08x\n",
3566 index,
3567 group_mask->sig[0], group_mask->sig[1],
3568 group_mask->sig[2], group_mask->sig[3],
3569 group_mask->sig[4]);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02003570
Dmitry Kravkovf2e08992010-10-06 03:28:26 +00003571 bnx2x_attn_int_deasserted4(bp,
3572 attn.sig[4] & group_mask->sig[4]);
Eliezer Tamir877e9aa2008-02-28 11:55:53 -08003573 bnx2x_attn_int_deasserted3(bp,
Vladislav Zolotarov72fd0712010-04-19 01:13:12 +00003574 attn.sig[3] & group_mask->sig[3]);
Eliezer Tamir877e9aa2008-02-28 11:55:53 -08003575 bnx2x_attn_int_deasserted1(bp,
Vladislav Zolotarov72fd0712010-04-19 01:13:12 +00003576 attn.sig[1] & group_mask->sig[1]);
Eliezer Tamir877e9aa2008-02-28 11:55:53 -08003577 bnx2x_attn_int_deasserted2(bp,
Vladislav Zolotarov72fd0712010-04-19 01:13:12 +00003578 attn.sig[2] & group_mask->sig[2]);
Eliezer Tamir877e9aa2008-02-28 11:55:53 -08003579 bnx2x_attn_int_deasserted0(bp,
Vladislav Zolotarov72fd0712010-04-19 01:13:12 +00003580 attn.sig[0] & group_mask->sig[0]);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02003581 }
3582 }
3583
Yitchak Gertner4a37fb62008-08-13 15:50:23 -07003584 bnx2x_release_alr(bp);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02003585
Dmitry Kravkovf2e08992010-10-06 03:28:26 +00003586 if (bp->common.int_block == INT_BLOCK_HC)
3587 reg_addr = (HC_REG_COMMAND_REG + port*32 +
3588 COMMAND_REG_ATTN_BITS_CLR);
3589 else
3590 reg_addr = (BAR_IGU_INTMEM + IGU_CMD_ATTN_BIT_CLR_UPPER*8);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02003591
3592 val = ~deasserted;
Dmitry Kravkovf2e08992010-10-06 03:28:26 +00003593 DP(NETIF_MSG_HW, "about to mask 0x%08x at %s addr 0x%x\n", val,
3594 (bp->common.int_block == INT_BLOCK_HC) ? "HC" : "IGU", reg_addr);
Eilon Greenstein5c862842008-08-13 15:51:48 -07003595 REG_WR(bp, reg_addr, val);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02003596
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02003597 if (~bp->attn_state & deasserted)
Eilon Greenstein3fcaf2e2008-08-13 15:50:45 -07003598 BNX2X_ERR("IGU ERROR\n");
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02003599
3600 reg_addr = port ? MISC_REG_AEU_MASK_ATTN_FUNC_1 :
3601 MISC_REG_AEU_MASK_ATTN_FUNC_0;
3602
Eilon Greenstein3fcaf2e2008-08-13 15:50:45 -07003603 bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_PORT0_ATT_MASK + port);
3604 aeu_mask = REG_RD(bp, reg_addr);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02003605
Eilon Greenstein3fcaf2e2008-08-13 15:50:45 -07003606 DP(NETIF_MSG_HW, "aeu_mask %x newly deasserted %x\n",
3607 aeu_mask, deasserted);
Vladislav Zolotarov72fd0712010-04-19 01:13:12 +00003608 aeu_mask |= (deasserted & 0x3ff);
Eilon Greenstein3fcaf2e2008-08-13 15:50:45 -07003609 DP(NETIF_MSG_HW, "new mask %x\n", aeu_mask);
3610
3611 REG_WR(bp, reg_addr, aeu_mask);
3612 bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_PORT0_ATT_MASK + port);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02003613
3614 DP(NETIF_MSG_HW, "attn_state %x\n", bp->attn_state);
3615 bp->attn_state &= ~deasserted;
3616 DP(NETIF_MSG_HW, "new state %x\n", bp->attn_state);
3617}
3618
3619static void bnx2x_attn_int(struct bnx2x *bp)
3620{
3621 /* read local copy of bits */
Eilon Greenstein68d59482009-01-14 21:27:36 -08003622 u32 attn_bits = le32_to_cpu(bp->def_status_blk->atten_status_block.
3623 attn_bits);
3624 u32 attn_ack = le32_to_cpu(bp->def_status_blk->atten_status_block.
3625 attn_bits_ack);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02003626 u32 attn_state = bp->attn_state;
3627
3628 /* look for changed bits */
3629 u32 asserted = attn_bits & ~attn_ack & ~attn_state;
3630 u32 deasserted = ~attn_bits & attn_ack & attn_state;
3631
3632 DP(NETIF_MSG_HW,
3633 "attn_bits %x attn_ack %x asserted %x deasserted %x\n",
3634 attn_bits, attn_ack, asserted, deasserted);
3635
3636 if (~(attn_bits ^ attn_ack) & (attn_bits ^ attn_state))
Eilon Greenstein34f80b02008-06-23 20:33:01 -07003637 BNX2X_ERR("BAD attention state\n");
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02003638
3639 /* handle bits that were raised */
3640 if (asserted)
3641 bnx2x_attn_int_asserted(bp, asserted);
3642
3643 if (deasserted)
3644 bnx2x_attn_int_deasserted(bp, deasserted);
3645}
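
/* Illustrative worked example (an assumption, not part of the original
 * driver): with attn_bits = 0b0101, attn_ack = 0b0011 and
 * attn_state = 0b0011:
 *   asserted   = 0b0101 & ~0b0011 & ~0b0011 = 0b0100  (newly raised)
 *   deasserted = ~0b0101 & 0b0011 & 0b0011  = 0b0010  (newly cleared)
 * Bit 0 (raised, acked and already in the state) and bit 3 (never
 * raised) trigger no handling.
 */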
3646
Dmitry Kravkov523224a2010-10-06 03:23:26 +00003647static inline void bnx2x_update_eq_prod(struct bnx2x *bp, u16 prod)
3648{
3649 /* No memory barriers */
3650 storm_memset_eq_prod(bp, prod, BP_FUNC(bp));
3651 mmiowb(); /* keep prod updates ordered */
3652}
3653
3654#ifdef BCM_CNIC
3655static int bnx2x_cnic_handle_cfc_del(struct bnx2x *bp, u32 cid,
3656 union event_ring_elem *elem)
3657{
3658 if (!bp->cnic_eth_dev.starting_cid ||
3659 cid < bp->cnic_eth_dev.starting_cid)
3660 return 1;
3661
3662 DP(BNX2X_MSG_SP, "got delete ramrod for CNIC CID %d\n", cid);
3663
3664 if (unlikely(elem->message.data.cfc_del_event.error)) {
3665 BNX2X_ERR("got delete ramrod for CNIC CID %d with error!\n",
3666 cid);
3667 bnx2x_panic_dump(bp);
3668 }
3669 bnx2x_cnic_cfc_comp(bp, cid);
3670 return 0;
3671}
3672#endif
3673
3674static void bnx2x_eq_int(struct bnx2x *bp)
3675{
3676 u16 hw_cons, sw_cons, sw_prod;
3677 union event_ring_elem *elem;
3678 u32 cid;
3679 u8 opcode;
3680 int spqe_cnt = 0;
3681
3682 hw_cons = le16_to_cpu(*bp->eq_cons_sb);
3683
3684	/* The hw_cons range is 1-255, 257 - the sw_cons range is 0-254, 256.
3685	 * When we get the next-page we need to adjust so the loop
3686	 * condition below will be met. The next-page element is the size
3687	 * of a regular element, hence we increment by 1.
3688	 */
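	/* Illustrative worked example (an assumption, not from the original
	 * comment): with a 256-entry page, EQ_DESC_MAX_PAGE would be 255; a
	 * FW consumer of 255 at the page boundary is bumped to 256 below,
	 * matching a sw_cons advanced with NEXT_EQ_IDX(), which skips the
	 * next-page element.
	 */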
3689 if ((hw_cons & EQ_DESC_MAX_PAGE) == EQ_DESC_MAX_PAGE)
3690 hw_cons++;
3691
3692	/* This function may never run in parallel with itself for a
3693	 * specific bp, thus there is no need for a "paired" read memory
3694	 * barrier here.
3695	 */
3696 sw_cons = bp->eq_cons;
3697 sw_prod = bp->eq_prod;
3698
3699 DP(BNX2X_MSG_SP, "EQ: hw_cons %u sw_cons %u bp->spq_left %u\n",
Dmitry Kravkov8fe23fb2010-10-06 03:27:41 +00003700 hw_cons, sw_cons, atomic_read(&bp->spq_left));
Dmitry Kravkov523224a2010-10-06 03:23:26 +00003701
3702 for (; sw_cons != hw_cons;
3703 sw_prod = NEXT_EQ_IDX(sw_prod), sw_cons = NEXT_EQ_IDX(sw_cons)) {
3704
3705
3706 elem = &bp->eq_ring[EQ_DESC(sw_cons)];
3707
3708 cid = SW_CID(elem->message.data.cfc_del_event.cid);
3709 opcode = elem->message.opcode;
3710
3711
3712 /* handle eq element */
3713 switch (opcode) {
3714 case EVENT_RING_OPCODE_STAT_QUERY:
3715 DP(NETIF_MSG_TIMER, "got statistics comp event\n");
3716 /* nothing to do with stats comp */
3717 continue;
3718
3719 case EVENT_RING_OPCODE_CFC_DEL:
3720 /* handle according to cid range */
3721 /*
3722 * we may want to verify here that the bp state is
3723 * HALTING
3724 */
3725 DP(NETIF_MSG_IFDOWN,
3726 "got delete ramrod for MULTI[%d]\n", cid);
3727#ifdef BCM_CNIC
3728 if (!bnx2x_cnic_handle_cfc_del(bp, cid, elem))
3729 goto next_spqe;
Vladislav Zolotarovec6ba942010-12-13 05:44:01 +00003730 if (cid == BNX2X_FCOE_ETH_CID)
3731 bnx2x_fcoe(bp, state) = BNX2X_FP_STATE_CLOSED;
3732 else
Dmitry Kravkov523224a2010-10-06 03:23:26 +00003733#endif
Vladislav Zolotarovec6ba942010-12-13 05:44:01 +00003734 bnx2x_fp(bp, cid, state) =
Dmitry Kravkov523224a2010-10-06 03:23:26 +00003735 BNX2X_FP_STATE_CLOSED;
3736
3737 goto next_spqe;
Vladislav Zolotarove4901dd2010-12-13 05:44:18 +00003738
3739 case EVENT_RING_OPCODE_STOP_TRAFFIC:
3740 DP(NETIF_MSG_IFUP, "got STOP TRAFFIC\n");
3741 bnx2x_dcbx_set_params(bp, BNX2X_DCBX_STATE_TX_PAUSED);
3742 goto next_spqe;
3743 case EVENT_RING_OPCODE_START_TRAFFIC:
3744 DP(NETIF_MSG_IFUP, "got START TRAFFIC\n");
3745 bnx2x_dcbx_set_params(bp, BNX2X_DCBX_STATE_TX_RELEASED);
3746 goto next_spqe;
Dmitry Kravkov523224a2010-10-06 03:23:26 +00003747 }
3748
3749 switch (opcode | bp->state) {
3750 case (EVENT_RING_OPCODE_FUNCTION_START |
3751 BNX2X_STATE_OPENING_WAIT4_PORT):
3752 DP(NETIF_MSG_IFUP, "got setup ramrod\n");
3753 bp->state = BNX2X_STATE_FUNC_STARTED;
3754 break;
3755
3756 case (EVENT_RING_OPCODE_FUNCTION_STOP |
3757 BNX2X_STATE_CLOSING_WAIT4_HALT):
3758 DP(NETIF_MSG_IFDOWN, "got halt ramrod\n");
3759 bp->state = BNX2X_STATE_CLOSING_WAIT4_UNLOAD;
3760 break;
3761
3762 case (EVENT_RING_OPCODE_SET_MAC | BNX2X_STATE_OPEN):
3763 case (EVENT_RING_OPCODE_SET_MAC | BNX2X_STATE_DIAG):
3764 DP(NETIF_MSG_IFUP, "got set mac ramrod\n");
3765 bp->set_mac_pending = 0;
3766 break;
3767
3768 case (EVENT_RING_OPCODE_SET_MAC |
3769 BNX2X_STATE_CLOSING_WAIT4_HALT):
3770 DP(NETIF_MSG_IFDOWN, "got (un)set mac ramrod\n");
3771 bp->set_mac_pending = 0;
3772 break;
3773 default:
3774			/* unknown event: log the error and continue */
3775 BNX2X_ERR("Unknown EQ event %d\n",
3776 elem->message.opcode);
3777 }
3778next_spqe:
3779 spqe_cnt++;
3780 } /* for */
3781
Dmitry Kravkov8fe23fb2010-10-06 03:27:41 +00003782 smp_mb__before_atomic_inc();
3783 atomic_add(spqe_cnt, &bp->spq_left);
Dmitry Kravkov523224a2010-10-06 03:23:26 +00003784
3785 bp->eq_cons = sw_cons;
3786 bp->eq_prod = sw_prod;
3787	/* Make sure that the above memory writes have been issued */
3788 smp_wmb();
3789
3790 /* update producer */
3791 bnx2x_update_eq_prod(bp, bp->eq_prod);
3792}
3793
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02003794static void bnx2x_sp_task(struct work_struct *work)
3795{
Eilon Greenstein1cf167f2009-01-14 21:22:18 -08003796 struct bnx2x *bp = container_of(work, struct bnx2x, sp_task.work);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02003797 u16 status;
3798
3799 /* Return here if interrupt is disabled */
3800 if (unlikely(atomic_read(&bp->intr_sem) != 0)) {
Eilon Greenstein3196a882008-08-13 15:58:49 -07003801 DP(NETIF_MSG_INTR, "called but intr_sem not 0, returning\n");
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02003802 return;
3803 }
3804
3805 status = bnx2x_update_dsb_idx(bp);
Eilon Greenstein34f80b02008-06-23 20:33:01 -07003806/* if (status == 0) */
3807/* BNX2X_ERR("spurious slowpath interrupt!\n"); */
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02003808
Vladislav Zolotarovcdaa7cb2010-04-19 01:13:57 +00003809 DP(NETIF_MSG_INTR, "got a slowpath interrupt (status 0x%x)\n", status);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02003810
Eliezer Tamir877e9aa2008-02-28 11:55:53 -08003811 /* HW attentions */
Dmitry Kravkov523224a2010-10-06 03:23:26 +00003812 if (status & BNX2X_DEF_SB_ATT_IDX) {
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02003813 bnx2x_attn_int(bp);
Dmitry Kravkov523224a2010-10-06 03:23:26 +00003814 status &= ~BNX2X_DEF_SB_ATT_IDX;
Vladislav Zolotarovcdaa7cb2010-04-19 01:13:57 +00003815 }
3816
Dmitry Kravkov523224a2010-10-06 03:23:26 +00003817 /* SP events: STAT_QUERY and others */
3818 if (status & BNX2X_DEF_SB_IDX) {
Vladislav Zolotarovec6ba942010-12-13 05:44:01 +00003819#ifdef BCM_CNIC
3820 struct bnx2x_fastpath *fp = bnx2x_fcoe_fp(bp);
Dmitry Kravkov523224a2010-10-06 03:23:26 +00003821
Vladislav Zolotarovec6ba942010-12-13 05:44:01 +00003822 if ((!NO_FCOE(bp)) &&
3823 (bnx2x_has_rx_work(fp) || bnx2x_has_tx_work(fp)))
3824 napi_schedule(&bnx2x_fcoe(bp, napi));
3825#endif
Dmitry Kravkov523224a2010-10-06 03:23:26 +00003826 /* Handle EQ completions */
3827 bnx2x_eq_int(bp);
3828
3829 bnx2x_ack_sb(bp, bp->igu_dsb_id, USTORM_ID,
3830 le16_to_cpu(bp->def_idx), IGU_INT_NOP, 1);
3831
3832 status &= ~BNX2X_DEF_SB_IDX;
Vladislav Zolotarovcdaa7cb2010-04-19 01:13:57 +00003833 }
3834
3835 if (unlikely(status))
3836 DP(NETIF_MSG_INTR, "got an unknown interrupt! (status 0x%x)\n",
3837 status);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02003838
Dmitry Kravkov523224a2010-10-06 03:23:26 +00003839 bnx2x_ack_sb(bp, bp->igu_dsb_id, ATTENTION_ID,
3840 le16_to_cpu(bp->def_att_idx), IGU_INT_ENABLE, 1);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02003841}
3842
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00003843irqreturn_t bnx2x_msix_sp_int(int irq, void *dev_instance)
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02003844{
3845 struct net_device *dev = dev_instance;
3846 struct bnx2x *bp = netdev_priv(dev);
3847
3848 /* Return here if interrupt is disabled */
3849 if (unlikely(atomic_read(&bp->intr_sem) != 0)) {
Eilon Greenstein3196a882008-08-13 15:58:49 -07003850 DP(NETIF_MSG_INTR, "called but intr_sem not 0, returning\n");
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02003851 return IRQ_HANDLED;
3852 }
3853
Dmitry Kravkov523224a2010-10-06 03:23:26 +00003854 bnx2x_ack_sb(bp, bp->igu_dsb_id, USTORM_ID, 0,
3855 IGU_INT_DISABLE, 0);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02003856
3857#ifdef BNX2X_STOP_ON_ERROR
3858 if (unlikely(bp->panic))
3859 return IRQ_HANDLED;
3860#endif
3861
Michael Chan993ac7b2009-10-10 13:46:56 +00003862#ifdef BCM_CNIC
3863 {
3864 struct cnic_ops *c_ops;
3865
3866 rcu_read_lock();
3867 c_ops = rcu_dereference(bp->cnic_ops);
3868 if (c_ops)
3869 c_ops->cnic_handler(bp->cnic_data, NULL);
3870 rcu_read_unlock();
3871 }
3872#endif
Eilon Greenstein1cf167f2009-01-14 21:22:18 -08003873 queue_delayed_work(bnx2x_wq, &bp->sp_task, 0);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02003874
3875 return IRQ_HANDLED;
3876}
3877
3878/* end of slow path */
3879
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02003880static void bnx2x_timer(unsigned long data)
3881{
3882 struct bnx2x *bp = (struct bnx2x *) data;
3883
3884 if (!netif_running(bp->dev))
3885 return;
3886
3887 if (atomic_read(&bp->intr_sem) != 0)
Eliezer Tamirf1410642008-02-28 11:51:50 -08003888 goto timer_restart;
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02003889
3890 if (poll) {
3891 struct bnx2x_fastpath *fp = &bp->fp[0];
3892 int rc;
3893
Eilon Greenstein7961f792009-03-02 07:59:31 +00003894 bnx2x_tx_int(fp);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02003895 rc = bnx2x_rx_int(fp, 1000);
3896 }
3897
Eilon Greenstein34f80b02008-06-23 20:33:01 -07003898 if (!BP_NOMCP(bp)) {
Dmitry Kravkovf2e08992010-10-06 03:28:26 +00003899 int mb_idx = BP_FW_MB_IDX(bp);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02003900 u32 drv_pulse;
3901 u32 mcp_pulse;
3902
3903 ++bp->fw_drv_pulse_wr_seq;
3904 bp->fw_drv_pulse_wr_seq &= DRV_PULSE_SEQ_MASK;
3905 /* TBD - add SYSTEM_TIME */
3906 drv_pulse = bp->fw_drv_pulse_wr_seq;
Dmitry Kravkovf2e08992010-10-06 03:28:26 +00003907 SHMEM_WR(bp, func_mb[mb_idx].drv_pulse_mb, drv_pulse);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02003908
Dmitry Kravkovf2e08992010-10-06 03:28:26 +00003909 mcp_pulse = (SHMEM_RD(bp, func_mb[mb_idx].mcp_pulse_mb) &
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02003910 MCP_PULSE_SEQ_MASK);
3911 /* The delta between driver pulse and mcp response
3912 * should be 1 (before mcp response) or 0 (after mcp response)
3913 */
3914 if ((drv_pulse != mcp_pulse) &&
3915 (drv_pulse != ((mcp_pulse + 1) & MCP_PULSE_SEQ_MASK))) {
3916 /* someone lost a heartbeat... */
3917 BNX2X_ERR("drv_pulse (0x%x) != mcp_pulse (0x%x)\n",
3918 drv_pulse, mcp_pulse);
3919 }
3920 }
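
/* Illustrative worked example (an assumption, not part of the original
 * driver): if drv_pulse is 0x0010 and mcp_pulse is 0x000f, the driver
 * is one step ahead - expected before the MCP responds; equal values
 * are expected after it responds. Any other delta is treated as a lost
 * heartbeat and logged above.
 */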
3921
Eilon Greensteinf34d28e2009-10-15 00:18:08 -07003922 if (bp->state == BNX2X_STATE_OPEN)
Yitchak Gertnerbb2a0f72008-06-23 20:33:36 -07003923 bnx2x_stats_handle(bp, STATS_EVENT_UPDATE);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02003924
Eliezer Tamirf1410642008-02-28 11:51:50 -08003925timer_restart:
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02003926 mod_timer(&bp->timer, jiffies + bp->current_interval);
3927}
3928
3929/* end of Statistics */
3930
3931/* nic init */
3932
3933/*
3934 * nic init service functions
3935 */
3936
Dmitry Kravkov523224a2010-10-06 03:23:26 +00003937static inline void bnx2x_fill(struct bnx2x *bp, u32 addr, int fill, u32 len)
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02003938{
Dmitry Kravkov523224a2010-10-06 03:23:26 +00003939 u32 i;
3940 if (!(len%4) && !(addr%4))
3941 for (i = 0; i < len; i += 4)
3942 REG_WR(bp, addr + i, fill);
3943 else
3944 for (i = 0; i < len; i++)
3945 REG_WR8(bp, addr + i, fill);
Eilon Greenstein34f80b02008-06-23 20:33:01 -07003946
Eilon Greenstein34f80b02008-06-23 20:33:01 -07003947}
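
/* Illustrative note (an assumption, not part of the original driver):
 * bnx2x_fill() picks the access width from alignment - e.g. addr 0x1000
 * with len 16 takes the dword path (four 4-byte REG_WR writes), while
 * len 18 falls back to 18 single-byte REG_WR8 writes.
 */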
3948
Dmitry Kravkov523224a2010-10-06 03:23:26 +00003949/* helper: writes FP SP data to FW - data_size in dwords */
3950static inline void bnx2x_wr_fp_sb_data(struct bnx2x *bp,
3951 int fw_sb_id,
3952 u32 *sb_data_p,
3953 u32 data_size)
Eilon Greenstein34f80b02008-06-23 20:33:01 -07003954{
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02003955 int index;
Dmitry Kravkov523224a2010-10-06 03:23:26 +00003956 for (index = 0; index < data_size; index++)
3957 REG_WR(bp, BAR_CSTRORM_INTMEM +
3958 CSTORM_STATUS_BLOCK_DATA_OFFSET(fw_sb_id) +
3959 sizeof(u32)*index,
3960 *(sb_data_p + index));
Eilon Greenstein34f80b02008-06-23 20:33:01 -07003961}
3962
Dmitry Kravkov523224a2010-10-06 03:23:26 +00003963static inline void bnx2x_zero_fp_sb(struct bnx2x *bp, int fw_sb_id)
3964{
3965 u32 *sb_data_p;
3966 u32 data_size = 0;
Dmitry Kravkovf2e08992010-10-06 03:28:26 +00003967 struct hc_status_block_data_e2 sb_data_e2;
Dmitry Kravkov523224a2010-10-06 03:23:26 +00003968 struct hc_status_block_data_e1x sb_data_e1x;
3969
3970 /* disable the function first */
Dmitry Kravkovf2e08992010-10-06 03:28:26 +00003971 if (CHIP_IS_E2(bp)) {
3972 memset(&sb_data_e2, 0, sizeof(struct hc_status_block_data_e2));
3973 sb_data_e2.common.p_func.pf_id = HC_FUNCTION_DISABLED;
3974 sb_data_e2.common.p_func.vf_id = HC_FUNCTION_DISABLED;
3975 sb_data_e2.common.p_func.vf_valid = false;
3976 sb_data_p = (u32 *)&sb_data_e2;
3977 data_size = sizeof(struct hc_status_block_data_e2)/sizeof(u32);
3978 } else {
3979 memset(&sb_data_e1x, 0,
3980 sizeof(struct hc_status_block_data_e1x));
3981 sb_data_e1x.common.p_func.pf_id = HC_FUNCTION_DISABLED;
3982 sb_data_e1x.common.p_func.vf_id = HC_FUNCTION_DISABLED;
3983 sb_data_e1x.common.p_func.vf_valid = false;
3984 sb_data_p = (u32 *)&sb_data_e1x;
3985 data_size = sizeof(struct hc_status_block_data_e1x)/sizeof(u32);
3986 }
Dmitry Kravkov523224a2010-10-06 03:23:26 +00003987 bnx2x_wr_fp_sb_data(bp, fw_sb_id, sb_data_p, data_size);
3988
3989 bnx2x_fill(bp, BAR_CSTRORM_INTMEM +
3990 CSTORM_STATUS_BLOCK_OFFSET(fw_sb_id), 0,
3991 CSTORM_STATUS_BLOCK_SIZE);
3992 bnx2x_fill(bp, BAR_CSTRORM_INTMEM +
3993 CSTORM_SYNC_BLOCK_OFFSET(fw_sb_id), 0,
3994 CSTORM_SYNC_BLOCK_SIZE);
3995}
3996
3997/* helper: writes SP SB data to FW */
3998static inline void bnx2x_wr_sp_sb_data(struct bnx2x *bp,
3999 struct hc_sp_status_block_data *sp_sb_data)
Eilon Greenstein34f80b02008-06-23 20:33:01 -07004000{
4001 int func = BP_FUNC(bp);
Dmitry Kravkov523224a2010-10-06 03:23:26 +00004002 int i;
4003 for (i = 0; i < sizeof(struct hc_sp_status_block_data)/sizeof(u32); i++)
4004 REG_WR(bp, BAR_CSTRORM_INTMEM +
4005 CSTORM_SP_STATUS_BLOCK_DATA_OFFSET(func) +
4006 i*sizeof(u32),
4007 *((u32 *)sp_sb_data + i));
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02004008}
4009
Dmitry Kravkov523224a2010-10-06 03:23:26 +00004010static inline void bnx2x_zero_sp_sb(struct bnx2x *bp)
4011{
4012 int func = BP_FUNC(bp);
4013 struct hc_sp_status_block_data sp_sb_data;
4014 memset(&sp_sb_data, 0, sizeof(struct hc_sp_status_block_data));
4015
4016 sp_sb_data.p_func.pf_id = HC_FUNCTION_DISABLED;
4017 sp_sb_data.p_func.vf_id = HC_FUNCTION_DISABLED;
4018 sp_sb_data.p_func.vf_valid = false;
4019
4020 bnx2x_wr_sp_sb_data(bp, &sp_sb_data);
4021
4022 bnx2x_fill(bp, BAR_CSTRORM_INTMEM +
4023 CSTORM_SP_STATUS_BLOCK_OFFSET(func), 0,
4024 CSTORM_SP_STATUS_BLOCK_SIZE);
4025 bnx2x_fill(bp, BAR_CSTRORM_INTMEM +
4026 CSTORM_SP_SYNC_BLOCK_OFFSET(func), 0,
4027 CSTORM_SP_SYNC_BLOCK_SIZE);
4028
4029}
4030
4031
4032static inline
4033void bnx2x_setup_ndsb_state_machine(struct hc_status_block_sm *hc_sm,
4034 int igu_sb_id, int igu_seg_id)
4035{
4036 hc_sm->igu_sb_id = igu_sb_id;
4037 hc_sm->igu_seg_id = igu_seg_id;
4038 hc_sm->timer_value = 0xFF;
4039 hc_sm->time_to_expire = 0xFFFFFFFF;
4040}
4041
static void bnx2x_init_sb(struct bnx2x *bp, dma_addr_t mapping, int vfid,
			  u8 vf_valid, int fw_sb_id, int igu_sb_id)
{
	int igu_seg_id;

	struct hc_status_block_data_e2 sb_data_e2;
	struct hc_status_block_data_e1x sb_data_e1x;
	struct hc_status_block_sm *hc_sm_p;
	struct hc_index_data *hc_index_p;
	int data_size;
	u32 *sb_data_p;

	if (CHIP_INT_MODE_IS_BC(bp))
		igu_seg_id = HC_SEG_ACCESS_NORM;
	else
		igu_seg_id = IGU_SEG_ACCESS_NORM;

	bnx2x_zero_fp_sb(bp, fw_sb_id);

	if (CHIP_IS_E2(bp)) {
		memset(&sb_data_e2, 0, sizeof(struct hc_status_block_data_e2));
		sb_data_e2.common.p_func.pf_id = BP_FUNC(bp);
		sb_data_e2.common.p_func.vf_id = vfid;
		sb_data_e2.common.p_func.vf_valid = vf_valid;
		sb_data_e2.common.p_func.vnic_id = BP_VN(bp);
		sb_data_e2.common.same_igu_sb_1b = true;
		sb_data_e2.common.host_sb_addr.hi = U64_HI(mapping);
		sb_data_e2.common.host_sb_addr.lo = U64_LO(mapping);
		hc_sm_p = sb_data_e2.common.state_machine;
		hc_index_p = sb_data_e2.index_data;
		sb_data_p = (u32 *)&sb_data_e2;
		data_size = sizeof(struct hc_status_block_data_e2)/sizeof(u32);
	} else {
		memset(&sb_data_e1x, 0,
		       sizeof(struct hc_status_block_data_e1x));
		sb_data_e1x.common.p_func.pf_id = BP_FUNC(bp);
		sb_data_e1x.common.p_func.vf_id = 0xff;
		sb_data_e1x.common.p_func.vf_valid = false;
		sb_data_e1x.common.p_func.vnic_id = BP_VN(bp);
		sb_data_e1x.common.same_igu_sb_1b = true;
		sb_data_e1x.common.host_sb_addr.hi = U64_HI(mapping);
		sb_data_e1x.common.host_sb_addr.lo = U64_LO(mapping);
		hc_sm_p = sb_data_e1x.common.state_machine;
		hc_index_p = sb_data_e1x.index_data;
		sb_data_p = (u32 *)&sb_data_e1x;
		data_size = sizeof(struct hc_status_block_data_e1x)/sizeof(u32);
	}

	bnx2x_setup_ndsb_state_machine(&hc_sm_p[SM_RX_ID],
				       igu_sb_id, igu_seg_id);
	bnx2x_setup_ndsb_state_machine(&hc_sm_p[SM_TX_ID],
				       igu_sb_id, igu_seg_id);

	DP(NETIF_MSG_HW, "Init FW SB %d\n", fw_sb_id);

	/* write indices to HW */
	bnx2x_wr_fp_sb_data(bp, fw_sb_id, sb_data_p, data_size);
}

static void bnx2x_update_coalesce_sb_index(struct bnx2x *bp, u16 fw_sb_id,
					   u8 sb_index, u8 disable, u16 usec)
{
	int port = BP_PORT(bp);
	u8 ticks = usec / BNX2X_BTR;

	storm_memset_hc_timeout(bp, port, fw_sb_id, sb_index, ticks);

	disable = disable ? 1 : (usec ? 0 : 1);
	storm_memset_hc_disable(bp, port, fw_sb_id, sb_index, disable);
}

static void bnx2x_update_coalesce_sb(struct bnx2x *bp, u16 fw_sb_id,
				     u16 tx_usec, u16 rx_usec)
{
	bnx2x_update_coalesce_sb_index(bp, fw_sb_id, U_SB_ETH_RX_CQ_INDEX,
				       false, rx_usec);
	bnx2x_update_coalesce_sb_index(bp, fw_sb_id, C_SB_ETH_TX_CQ_INDEX,
				       false, tx_usec);
}

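/* Initializes the default (slow-path) SB: the attention section with
 * its per-group AEU signal masks, the HC/IGU attention message address
 * and the SP SB data itself.
 */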
static void bnx2x_init_def_sb(struct bnx2x *bp)
{
	struct host_sp_status_block *def_sb = bp->def_status_blk;
	dma_addr_t mapping = bp->def_status_blk_mapping;
	int igu_sp_sb_index;
	int igu_seg_id;
	int port = BP_PORT(bp);
	int func = BP_FUNC(bp);
	int reg_offset;
	u64 section;
	int index;
	struct hc_sp_status_block_data sp_sb_data;
	memset(&sp_sb_data, 0, sizeof(struct hc_sp_status_block_data));

	if (CHIP_INT_MODE_IS_BC(bp)) {
		igu_sp_sb_index = DEF_SB_IGU_ID;
		igu_seg_id = HC_SEG_ACCESS_DEF;
	} else {
		igu_sp_sb_index = bp->igu_dsb_id;
		igu_seg_id = IGU_SEG_ACCESS_DEF;
	}

	/* ATTN */
	section = ((u64)mapping) + offsetof(struct host_sp_status_block,
					    atten_status_block);
	def_sb->atten_status_block.status_block_id = igu_sp_sb_index;

	bp->attn_state = 0;

	reg_offset = (port ? MISC_REG_AEU_ENABLE1_FUNC_1_OUT_0 :
			     MISC_REG_AEU_ENABLE1_FUNC_0_OUT_0);
	for (index = 0; index < MAX_DYNAMIC_ATTN_GRPS; index++) {
		int sindex;
		/* take care of sig[0]..sig[4] */
		for (sindex = 0; sindex < 4; sindex++)
			bp->attn_group[index].sig[sindex] =
			   REG_RD(bp, reg_offset + sindex*0x4 + 0x10*index);

		if (CHIP_IS_E2(bp))
			/*
			 * enable5 is separate from the rest of the registers,
			 * and therefore the address skip is 4
			 * and not 16 between the different groups
			 */
			bp->attn_group[index].sig[4] = REG_RD(bp,
					reg_offset + 0x10 + 0x4*index);
		else
			bp->attn_group[index].sig[4] = 0;
	}

	if (bp->common.int_block == INT_BLOCK_HC) {
		reg_offset = (port ? HC_REG_ATTN_MSG1_ADDR_L :
				     HC_REG_ATTN_MSG0_ADDR_L);

		REG_WR(bp, reg_offset, U64_LO(section));
		REG_WR(bp, reg_offset + 4, U64_HI(section));
	} else if (CHIP_IS_E2(bp)) {
		REG_WR(bp, IGU_REG_ATTN_MSG_ADDR_L, U64_LO(section));
		REG_WR(bp, IGU_REG_ATTN_MSG_ADDR_H, U64_HI(section));
	}

	section = ((u64)mapping) + offsetof(struct host_sp_status_block,
					    sp_sb);

	bnx2x_zero_sp_sb(bp);

	sp_sb_data.host_sb_addr.lo = U64_LO(section);
	sp_sb_data.host_sb_addr.hi = U64_HI(section);
	sp_sb_data.igu_sb_id = igu_sp_sb_index;
	sp_sb_data.igu_seg_id = igu_seg_id;
	sp_sb_data.p_func.pf_id = func;
	sp_sb_data.p_func.vnic_id = BP_VN(bp);
	sp_sb_data.p_func.vf_id = 0xff;

	bnx2x_wr_sp_sb_data(bp, &sp_sb_data);

	bp->stats_pending = 0;
	bp->set_mac_pending = 0;

	bnx2x_ack_sb(bp, bp->igu_dsb_id, USTORM_ID, 0, IGU_INT_ENABLE, 0);
}

void bnx2x_update_coalesce(struct bnx2x *bp)
{
	int i;

	for_each_eth_queue(bp, i)
		bnx2x_update_coalesce_sb(bp, bp->fp[i].fw_sb_id,
					 bp->rx_ticks, bp->tx_ticks);
}

static void bnx2x_init_sp_ring(struct bnx2x *bp)
{
	spin_lock_init(&bp->spq_lock);
	atomic_set(&bp->spq_left, MAX_SPQ_PENDING);

	bp->spq_prod_idx = 0;
	bp->dsb_sp_prod = BNX2X_SP_DSB_INDEX;
	bp->spq_prod_bd = bp->spq;
	bp->spq_last_bd = bp->spq_prod_bd + MAX_SP_DESC_CNT;
}

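/* helper: chains the EQ pages into a ring - each page's last
 * descriptor is a next-page pointer, and the last page wraps back to
 * the first, closing the ring.
 */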
static void bnx2x_init_eq_ring(struct bnx2x *bp)
{
	int i;
	for (i = 1; i <= NUM_EQ_PAGES; i++) {
		union event_ring_elem *elem =
			&bp->eq_ring[EQ_DESC_CNT_PAGE * i - 1];

		elem->next_page.addr.hi =
			cpu_to_le32(U64_HI(bp->eq_mapping +
				   BCM_PAGE_SIZE * (i % NUM_EQ_PAGES)));
		elem->next_page.addr.lo =
			cpu_to_le32(U64_LO(bp->eq_mapping +
				   BCM_PAGE_SIZE*(i % NUM_EQ_PAGES)));
	}
	bp->eq_cons = 0;
	bp->eq_prod = NUM_EQ_DESC;
	bp->eq_cons_sb = BNX2X_EQ_INDEX;
}

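/* helper: fills the TSTORM RSS indirection table with client ids,
 * spread round-robin over the ethernet queues (CNIC/FCoE contexts are
 * excluded via NONE_ETH_CONTEXT_USE).
 */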
static void bnx2x_init_ind_table(struct bnx2x *bp)
{
	int func = BP_FUNC(bp);
	int i;

	if (bp->multi_mode == ETH_RSS_MODE_DISABLED)
		return;

	DP(NETIF_MSG_IFUP,
	   "Initializing indirection table  multi_mode %d\n", bp->multi_mode);
	for (i = 0; i < TSTORM_INDIRECTION_TABLE_SIZE; i++)
		REG_WR8(bp, BAR_TSTRORM_INTMEM +
			TSTORM_INDIRECTION_TABLE_OFFSET(func) + i,
			bp->fp->cl_id + (i % (bp->num_queues -
				NONE_ETH_CONTEXT_USE)));
}

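/* Translates bp->rx_mode into accept/drop filters for the default L2
 * client (and a stricter filter set for the FCoE client, to avoid
 * duplicated frames), then programs the NIG LLH mask and the storm
 * MAC filters.
 */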
void bnx2x_set_storm_rx_mode(struct bnx2x *bp)
{
	int mode = bp->rx_mode;
	int port = BP_PORT(bp);
	u16 cl_id;
	u32 def_q_filters = 0;

	/* All but management unicast packets should pass to the host as well */
	u32 llh_mask =
		NIG_LLH0_BRB1_DRV_MASK_REG_LLH0_BRB1_DRV_MASK_BRCST |
		NIG_LLH0_BRB1_DRV_MASK_REG_LLH0_BRB1_DRV_MASK_MLCST |
		NIG_LLH0_BRB1_DRV_MASK_REG_LLH0_BRB1_DRV_MASK_VLAN |
		NIG_LLH0_BRB1_DRV_MASK_REG_LLH0_BRB1_DRV_MASK_NO_VLAN;

	switch (mode) {
	case BNX2X_RX_MODE_NONE: /* no Rx */
		def_q_filters = BNX2X_ACCEPT_NONE;
#ifdef BCM_CNIC
		if (!NO_FCOE(bp)) {
			cl_id = bnx2x_fcoe(bp, cl_id);
			bnx2x_rxq_set_mac_filters(bp, cl_id, BNX2X_ACCEPT_NONE);
		}
#endif
		break;

	case BNX2X_RX_MODE_NORMAL:
		def_q_filters |= BNX2X_ACCEPT_UNICAST | BNX2X_ACCEPT_BROADCAST |
				BNX2X_ACCEPT_MULTICAST;
#ifdef BCM_CNIC
		if (!NO_FCOE(bp)) {
			cl_id = bnx2x_fcoe(bp, cl_id);
			bnx2x_rxq_set_mac_filters(bp, cl_id,
						  BNX2X_ACCEPT_UNICAST |
						  BNX2X_ACCEPT_MULTICAST);
		}
#endif
		break;

	case BNX2X_RX_MODE_ALLMULTI:
		def_q_filters |= BNX2X_ACCEPT_UNICAST | BNX2X_ACCEPT_BROADCAST |
				BNX2X_ACCEPT_ALL_MULTICAST;
#ifdef BCM_CNIC
		/*
		 * Prevent duplication of multicast packets by configuring the
		 * FCoE L2 Client to receive only matched unicast frames.
		 */
		if (!NO_FCOE(bp)) {
			cl_id = bnx2x_fcoe(bp, cl_id);
			bnx2x_rxq_set_mac_filters(bp, cl_id,
						  BNX2X_ACCEPT_UNICAST);
		}
#endif
		break;

	case BNX2X_RX_MODE_PROMISC:
		def_q_filters |= BNX2X_PROMISCUOUS_MODE;
#ifdef BCM_CNIC
		/*
		 * Prevent packet duplication by configuring DROP_ALL for the
		 * FCoE L2 Client.
		 */
		if (!NO_FCOE(bp)) {
			cl_id = bnx2x_fcoe(bp, cl_id);
			bnx2x_rxq_set_mac_filters(bp, cl_id, BNX2X_ACCEPT_NONE);
		}
#endif
		/* pass management unicast packets as well */
		llh_mask |= NIG_LLH0_BRB1_DRV_MASK_REG_LLH0_BRB1_DRV_MASK_UNCST;
		break;

	default:
		BNX2X_ERR("BAD rx mode (%d)\n", mode);
		break;
	}

	cl_id = BP_L_ID(bp);
	bnx2x_rxq_set_mac_filters(bp, cl_id, def_q_filters);

	REG_WR(bp,
	       (port ? NIG_REG_LLH1_BRB1_DRV_MASK :
		       NIG_REG_LLH0_BRB1_DRV_MASK), llh_mask);

	DP(NETIF_MSG_IFUP, "rx mode %d\n"
		"drop_ucast 0x%x\ndrop_mcast 0x%x\ndrop_bcast 0x%x\n"
		"accp_ucast 0x%x\naccp_mcast 0x%x\naccp_bcast 0x%x\n"
		"unmatched_ucast 0x%x\n", mode,
		bp->mac_filters.ucast_drop_all,
		bp->mac_filters.mcast_drop_all,
		bp->mac_filters.bcast_drop_all,
		bp->mac_filters.ucast_accept_all,
		bp->mac_filters.mcast_accept_all,
		bp->mac_filters.bcast_accept_all,
		bp->mac_filters.unmatched_unicast
	);

	storm_memset_mac_filters(bp, &bp->mac_filters, BP_FUNC(bp));
}

static void bnx2x_init_internal_common(struct bnx2x *bp)
{
	int i;

	if (!CHIP_IS_E1(bp)) {

		/* xstorm needs to know whether to add ovlan to packets or not;
		 * in switch-independent mode we write 0 here... */
		REG_WR8(bp, BAR_XSTRORM_INTMEM + XSTORM_FUNCTION_MODE_OFFSET,
			bp->mf_mode);
		REG_WR8(bp, BAR_TSTRORM_INTMEM + TSTORM_FUNCTION_MODE_OFFSET,
			bp->mf_mode);
		REG_WR8(bp, BAR_CSTRORM_INTMEM + CSTORM_FUNCTION_MODE_OFFSET,
			bp->mf_mode);
		REG_WR8(bp, BAR_USTRORM_INTMEM + USTORM_FUNCTION_MODE_OFFSET,
			bp->mf_mode);
	}

	if (IS_MF_SI(bp))
		/*
		 * In switch-independent mode, the TSTORM needs to accept
		 * packets that failed classification, since approximate match
		 * mac addresses aren't written to the NIG LLH
		 */
		REG_WR8(bp, BAR_TSTRORM_INTMEM +
			    TSTORM_ACCEPT_CLASSIFY_FAILED_OFFSET, 2);

	/* Zero this manually as its initialization is
	   currently missing in the initTool */
	for (i = 0; i < (USTORM_AGG_DATA_SIZE >> 2); i++)
		REG_WR(bp, BAR_USTRORM_INTMEM +
		       USTORM_AGG_DATA_OFFSET + i * 4, 0);
	if (CHIP_IS_E2(bp)) {
		REG_WR8(bp, BAR_CSTRORM_INTMEM + CSTORM_IGU_MODE_OFFSET,
			CHIP_INT_MODE_IS_BC(bp) ?
			HC_IGU_BC_MODE : HC_IGU_NBC_MODE);
	}
}

static void bnx2x_init_internal_port(struct bnx2x *bp)
{
	/* port */
	bnx2x_dcb_init_intmem_pfc(bp);
}

static void bnx2x_init_internal(struct bnx2x *bp, u32 load_code)
{
	switch (load_code) {
	case FW_MSG_CODE_DRV_LOAD_COMMON:
	case FW_MSG_CODE_DRV_LOAD_COMMON_CHIP:
		bnx2x_init_internal_common(bp);
		/* no break */

	case FW_MSG_CODE_DRV_LOAD_PORT:
		bnx2x_init_internal_port(bp);
		/* no break */

	case FW_MSG_CODE_DRV_LOAD_FUNCTION:
		/* internal memory per function is
		   initialized inside bnx2x_pf_init */
		break;

	default:
		BNX2X_ERR("Unknown load_code (0x%x) from MCP\n", load_code);
		break;
	}
}

static void bnx2x_init_fp_sb(struct bnx2x *bp, int fp_idx)
{
	struct bnx2x_fastpath *fp = &bp->fp[fp_idx];

	fp->state = BNX2X_FP_STATE_CLOSED;

	fp->index = fp->cid = fp_idx;
	fp->cl_id = BP_L_ID(bp) + fp_idx;
	fp->fw_sb_id = bp->base_fw_ndsb + fp->cl_id + CNIC_CONTEXT_USE;
	fp->igu_sb_id = bp->igu_base_sb + fp_idx + CNIC_CONTEXT_USE;
	/* qZone id equals the FW (per path) client id */
	fp->cl_qzone_id = fp->cl_id +
			   BP_PORT(bp)*(CHIP_IS_E2(bp) ? ETH_MAX_RX_CLIENTS_E2 :
				ETH_MAX_RX_CLIENTS_E1H);
	/* init shortcut */
	fp->ustorm_rx_prods_offset = CHIP_IS_E2(bp) ?
			    USTORM_RX_PRODS_E2_OFFSET(fp->cl_qzone_id) :
			    USTORM_RX_PRODS_E1X_OFFSET(BP_PORT(bp), fp->cl_id);
	/* Set up SB indices */
	fp->rx_cons_sb = BNX2X_RX_SB_INDEX;
	fp->tx_cons_sb = BNX2X_TX_SB_INDEX;

	DP(NETIF_MSG_IFUP, "queue[%d]:  bnx2x_init_sb(%p,%p)  "
			   "cl_id %d  fw_sb %d  igu_sb %d\n",
	   fp_idx, bp, fp->status_blk.e1x_sb, fp->cl_id, fp->fw_sb_id,
	   fp->igu_sb_id);
	bnx2x_init_sb(bp, fp->status_blk_mapping, BNX2X_VF_ID_INVALID, false,
		      fp->fw_sb_id, fp->igu_sb_id);

	bnx2x_update_fpsb_idx(fp);
}

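/* Brings up all driver-side rings and status blocks: every fastpath
 * SB (plus the CNIC/FCoE ones when compiled in), the default SB, the
 * rx/tx/SP/event rings and the storm internal memories - and only
 * then enables interrupts.
 */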
void bnx2x_nic_init(struct bnx2x *bp, u32 load_code)
{
	int i;

	for_each_eth_queue(bp, i)
		bnx2x_init_fp_sb(bp, i);
#ifdef BCM_CNIC
	if (!NO_FCOE(bp))
		bnx2x_init_fcoe_fp(bp);

	bnx2x_init_sb(bp, bp->cnic_sb_mapping,
		      BNX2X_VF_ID_INVALID, false,
		      CNIC_SB_ID(bp), CNIC_IGU_SB_ID(bp));

#endif

	/* ensure status block indices were read */
	rmb();

	bnx2x_init_def_sb(bp);
	bnx2x_update_dsb_idx(bp);
	bnx2x_init_rx_rings(bp);
	bnx2x_init_tx_rings(bp);
	bnx2x_init_sp_ring(bp);
	bnx2x_init_eq_ring(bp);
	bnx2x_init_internal(bp, load_code);
	bnx2x_pf_init(bp);
	bnx2x_init_ind_table(bp);
	bnx2x_stats_init(bp);

	/* At this point, we are ready for interrupts */
	atomic_set(&bp->intr_sem, 0);

	/* flush all before enabling interrupts */
	mb();
	mmiowb();

	bnx2x_int_enable(bp);

	/* Check for SPIO5 */
	bnx2x_attn_int_deasserted0(bp,
		REG_RD(bp, MISC_REG_AEU_AFTER_INVERT_1_FUNC_0 + BP_PORT(bp)*4) &
				   AEU_INPUTS_ATTN_BITS_SPIO5);
}

/* end of nic init */

/*
 * gzip service functions
 */

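/* helper: allocates the firmware decompression resources - a
 * DMA-coherent output buffer of FW_BUF_SIZE bytes plus a zlib stream
 * and workspace; on failure everything already allocated is freed
 * before returning -ENOMEM.
 */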
static int bnx2x_gunzip_init(struct bnx2x *bp)
{
	bp->gunzip_buf = dma_alloc_coherent(&bp->pdev->dev, FW_BUF_SIZE,
					    &bp->gunzip_mapping, GFP_KERNEL);
	if (bp->gunzip_buf == NULL)
		goto gunzip_nomem1;

	bp->strm = kmalloc(sizeof(*bp->strm), GFP_KERNEL);
	if (bp->strm == NULL)
		goto gunzip_nomem2;

	bp->strm->workspace = kmalloc(zlib_inflate_workspacesize(),
				      GFP_KERNEL);
	if (bp->strm->workspace == NULL)
		goto gunzip_nomem3;

	return 0;

gunzip_nomem3:
	kfree(bp->strm);
	bp->strm = NULL;

gunzip_nomem2:
	dma_free_coherent(&bp->pdev->dev, FW_BUF_SIZE, bp->gunzip_buf,
			  bp->gunzip_mapping);
	bp->gunzip_buf = NULL;

gunzip_nomem1:
	netdev_err(bp->dev, "Cannot allocate firmware buffer for"
		   " decompression\n");
	return -ENOMEM;
}

static void bnx2x_gunzip_end(struct bnx2x *bp)
{
	kfree(bp->strm->workspace);
	kfree(bp->strm);
	bp->strm = NULL;

	if (bp->gunzip_buf) {
		dma_free_coherent(&bp->pdev->dev, FW_BUF_SIZE, bp->gunzip_buf,
				  bp->gunzip_mapping);
		bp->gunzip_buf = NULL;
	}
}

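/* helper: inflates a gzip-wrapped firmware blob into gunzip_buf -
 * validates the gzip magic (0x1f 0x8b, deflate method), skips the
 * optional file-name field when the FNAME flag is set and runs a raw
 * zlib inflate (-MAX_WBITS) on the remaining payload.
 */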
static int bnx2x_gunzip(struct bnx2x *bp, const u8 *zbuf, int len)
{
	int n, rc;

	/* check gzip header */
	if ((zbuf[0] != 0x1f) || (zbuf[1] != 0x8b) || (zbuf[2] != Z_DEFLATED)) {
		BNX2X_ERR("Bad gzip header\n");
		return -EINVAL;
	}

	n = 10;

#define FNAME				0x8

	if (zbuf[3] & FNAME)
		while ((zbuf[n++] != 0) && (n < len));

	bp->strm->next_in = (typeof(bp->strm->next_in))zbuf + n;
	bp->strm->avail_in = len - n;
	bp->strm->next_out = bp->gunzip_buf;
	bp->strm->avail_out = FW_BUF_SIZE;

	rc = zlib_inflateInit2(bp->strm, -MAX_WBITS);
	if (rc != Z_OK)
		return rc;

	rc = zlib_inflate(bp->strm, Z_FINISH);
	if ((rc != Z_OK) && (rc != Z_STREAM_END))
		netdev_err(bp->dev, "Firmware decompression error: %s\n",
			   bp->strm->msg);

	bp->gunzip_outlen = (FW_BUF_SIZE - bp->strm->avail_out);
	if (bp->gunzip_outlen & 0x3)
		netdev_err(bp->dev, "Firmware decompression error:"
				    " gunzip_outlen (%d) not aligned\n",
			   bp->gunzip_outlen);
	bp->gunzip_outlen >>= 2;

	zlib_inflateEnd(bp->strm);

	if (rc == Z_STREAM_END)
		return 0;

	return rc;
}

/* nic load/unload */

/*
 * General service functions
 */

/* send a NIG loopback debug packet */
static void bnx2x_lb_pckt(struct bnx2x *bp)
{
	u32 wb_write[3];

	/* Ethernet source and destination addresses */
	wb_write[0] = 0x55555555;
	wb_write[1] = 0x55555555;
	wb_write[2] = 0x20;		/* SOP */
	REG_WR_DMAE(bp, NIG_REG_DEBUG_PACKET_LB, wb_write, 3);

	/* NON-IP protocol */
	wb_write[0] = 0x09000000;
	wb_write[1] = 0x55555555;
	wb_write[2] = 0x10;		/* EOP, eop_bvalid = 0 */
	REG_WR_DMAE(bp, NIG_REG_DEBUG_PACKET_LB, wb_write, 3);
}

/* Some of the internal memories are not directly readable from the
 * driver; to test them we send debug packets.
 */
static int bnx2x_int_mem_test(struct bnx2x *bp)
{
	int factor;
	int count, i;
	u32 val = 0;

	if (CHIP_REV_IS_FPGA(bp))
		factor = 120;
	else if (CHIP_REV_IS_EMUL(bp))
		factor = 200;
	else
		factor = 1;

	/* Disable inputs of parser neighbor blocks */
	REG_WR(bp, TSDM_REG_ENABLE_IN1, 0x0);
	REG_WR(bp, TCM_REG_PRS_IFEN, 0x0);
	REG_WR(bp, CFC_REG_DEBUG0, 0x1);
	REG_WR(bp, NIG_REG_PRS_REQ_IN_EN, 0x0);

	/* Write 0 to parser credits for CFC search request */
	REG_WR(bp, PRS_REG_CFC_SEARCH_INITIAL_CREDIT, 0x0);

	/* send Ethernet packet */
	bnx2x_lb_pckt(bp);

	/* TODO: do we need to reset the NIG statistics? */
	/* Wait until NIG register shows 1 packet of size 0x10 */
	count = 1000 * factor;
	while (count) {

		bnx2x_read_dmae(bp, NIG_REG_STAT2_BRB_OCTET, 2);
		val = *bnx2x_sp(bp, wb_data[0]);
		if (val == 0x10)
			break;

		msleep(10);
		count--;
	}
	if (val != 0x10) {
		BNX2X_ERR("NIG timeout  val = 0x%x\n", val);
		return -1;
	}

	/* Wait until PRS register shows 1 packet */
	count = 1000 * factor;
	while (count) {
		val = REG_RD(bp, PRS_REG_NUM_OF_PACKETS);
		if (val == 1)
			break;

		msleep(10);
		count--;
	}
	if (val != 0x1) {
		BNX2X_ERR("PRS timeout  val = 0x%x\n", val);
		return -2;
	}

	/* Reset and init BRB, PRS */
	REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_CLEAR, 0x03);
	msleep(50);
	REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_SET, 0x03);
	msleep(50);
	bnx2x_init_block(bp, BRB1_BLOCK, COMMON_STAGE);
	bnx2x_init_block(bp, PRS_BLOCK, COMMON_STAGE);

	DP(NETIF_MSG_HW, "part2\n");

	/* Disable inputs of parser neighbor blocks */
	REG_WR(bp, TSDM_REG_ENABLE_IN1, 0x0);
	REG_WR(bp, TCM_REG_PRS_IFEN, 0x0);
	REG_WR(bp, CFC_REG_DEBUG0, 0x1);
	REG_WR(bp, NIG_REG_PRS_REQ_IN_EN, 0x0);

	/* Write 0 to parser credits for CFC search request */
	REG_WR(bp, PRS_REG_CFC_SEARCH_INITIAL_CREDIT, 0x0);

	/* send 10 Ethernet packets */
	for (i = 0; i < 10; i++)
		bnx2x_lb_pckt(bp);

	/* Wait until NIG register shows 10 + 1
	   packets of size 11*0x10 = 0xb0 */
	count = 1000 * factor;
	while (count) {

		bnx2x_read_dmae(bp, NIG_REG_STAT2_BRB_OCTET, 2);
		val = *bnx2x_sp(bp, wb_data[0]);
		if (val == 0xb0)
			break;

		msleep(10);
		count--;
	}
	if (val != 0xb0) {
		BNX2X_ERR("NIG timeout  val = 0x%x\n", val);
		return -3;
	}

	/* Wait until PRS register shows 2 packets */
	val = REG_RD(bp, PRS_REG_NUM_OF_PACKETS);
	if (val != 2)
		BNX2X_ERR("PRS timeout  val = 0x%x\n", val);

	/* Write 1 to parser credits for CFC search request */
	REG_WR(bp, PRS_REG_CFC_SEARCH_INITIAL_CREDIT, 0x1);

	/* Wait until PRS register shows 3 packets */
	msleep(10 * factor);
	/* Wait until NIG register shows 1 packet of size 0x10 */
	val = REG_RD(bp, PRS_REG_NUM_OF_PACKETS);
	if (val != 3)
		BNX2X_ERR("PRS timeout  val = 0x%x\n", val);

	/* clear NIG EOP FIFO */
	for (i = 0; i < 11; i++)
		REG_RD(bp, NIG_REG_INGRESS_EOP_LB_FIFO);
	val = REG_RD(bp, NIG_REG_INGRESS_EOP_LB_EMPTY);
	if (val != 1) {
		BNX2X_ERR("clear of NIG failed\n");
		return -4;
	}

	/* Reset and init BRB, PRS, NIG */
	REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_CLEAR, 0x03);
	msleep(50);
	REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_SET, 0x03);
	msleep(50);
	bnx2x_init_block(bp, BRB1_BLOCK, COMMON_STAGE);
	bnx2x_init_block(bp, PRS_BLOCK, COMMON_STAGE);
#ifndef BCM_CNIC
	/* set NIC mode */
	REG_WR(bp, PRS_REG_NIC_MODE, 1);
#endif

	/* Enable inputs of parser neighbor blocks */
	REG_WR(bp, TSDM_REG_ENABLE_IN1, 0x7fffffff);
	REG_WR(bp, TCM_REG_PRS_IFEN, 0x1);
	REG_WR(bp, CFC_REG_DEBUG0, 0x0);
	REG_WR(bp, NIG_REG_PRS_REQ_IN_EN, 0x1);

	DP(NETIF_MSG_HW, "done\n");

	return 0; /* OK */
}

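/* Unmasks the per-block attention interrupts; most blocks are fully
 * unmasked (mask 0), while BRB and PBF keep specific bits masked
 * since those conditions are legal or handled elsewhere.
 */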
static void bnx2x_enable_blocks_attention(struct bnx2x *bp)
{
	REG_WR(bp, PXP_REG_PXP_INT_MASK_0, 0);
	if (CHIP_IS_E2(bp))
		REG_WR(bp, PXP_REG_PXP_INT_MASK_1, 0x40);
	else
		REG_WR(bp, PXP_REG_PXP_INT_MASK_1, 0);
	REG_WR(bp, DORQ_REG_DORQ_INT_MASK, 0);
	REG_WR(bp, CFC_REG_CFC_INT_MASK, 0);
	/*
	 * mask read length error interrupts in brb for parser
	 * (parsing unit and 'checksum and crc' unit)
	 * these errors are legal (PU reads fixed length and CAC can cause
	 * read length error on truncated packets)
	 */
	REG_WR(bp, BRB1_REG_BRB1_INT_MASK, 0xFC00);
	REG_WR(bp, QM_REG_QM_INT_MASK, 0);
	REG_WR(bp, TM_REG_TM_INT_MASK, 0);
	REG_WR(bp, XSDM_REG_XSDM_INT_MASK_0, 0);
	REG_WR(bp, XSDM_REG_XSDM_INT_MASK_1, 0);
	REG_WR(bp, XCM_REG_XCM_INT_MASK, 0);
/*	REG_WR(bp, XSEM_REG_XSEM_INT_MASK_0, 0); */
/*	REG_WR(bp, XSEM_REG_XSEM_INT_MASK_1, 0); */
	REG_WR(bp, USDM_REG_USDM_INT_MASK_0, 0);
	REG_WR(bp, USDM_REG_USDM_INT_MASK_1, 0);
	REG_WR(bp, UCM_REG_UCM_INT_MASK, 0);
/*	REG_WR(bp, USEM_REG_USEM_INT_MASK_0, 0); */
/*	REG_WR(bp, USEM_REG_USEM_INT_MASK_1, 0); */
	REG_WR(bp, GRCBASE_UPB + PB_REG_PB_INT_MASK, 0);
	REG_WR(bp, CSDM_REG_CSDM_INT_MASK_0, 0);
	REG_WR(bp, CSDM_REG_CSDM_INT_MASK_1, 0);
	REG_WR(bp, CCM_REG_CCM_INT_MASK, 0);
/*	REG_WR(bp, CSEM_REG_CSEM_INT_MASK_0, 0); */
/*	REG_WR(bp, CSEM_REG_CSEM_INT_MASK_1, 0); */

	if (CHIP_REV_IS_FPGA(bp))
		REG_WR(bp, PXP2_REG_PXP2_INT_MASK_0, 0x580000);
	else if (CHIP_IS_E2(bp))
		REG_WR(bp, PXP2_REG_PXP2_INT_MASK_0,
			   (PXP2_PXP2_INT_MASK_0_REG_PGL_CPL_OF
				| PXP2_PXP2_INT_MASK_0_REG_PGL_CPL_AFT
				| PXP2_PXP2_INT_MASK_0_REG_PGL_PCIE_ATTN
				| PXP2_PXP2_INT_MASK_0_REG_PGL_READ_BLOCKED
				| PXP2_PXP2_INT_MASK_0_REG_PGL_WRITE_BLOCKED));
	else
		REG_WR(bp, PXP2_REG_PXP2_INT_MASK_0, 0x480000);
	REG_WR(bp, TSDM_REG_TSDM_INT_MASK_0, 0);
	REG_WR(bp, TSDM_REG_TSDM_INT_MASK_1, 0);
	REG_WR(bp, TCM_REG_TCM_INT_MASK, 0);
/*	REG_WR(bp, TSEM_REG_TSEM_INT_MASK_0, 0); */
/*	REG_WR(bp, TSEM_REG_TSEM_INT_MASK_1, 0); */
	REG_WR(bp, CDU_REG_CDU_INT_MASK, 0);
	REG_WR(bp, DMAE_REG_DMAE_INT_MASK, 0);
/*	REG_WR(bp, MISC_REG_MISC_INT_MASK, 0); */
	REG_WR(bp, PBF_REG_PBF_INT_MASK, 0x18);		/* bit 3,4 masked */
}

static void bnx2x_reset_common(struct bnx2x *bp)
{
	/* reset_common */
	REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_CLEAR,
	       0xd3ffff7f);
	REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_2_CLEAR, 0x1403);
}

static void bnx2x_init_pxp(struct bnx2x *bp)
{
	u16 devctl;
	int r_order, w_order;

	pci_read_config_word(bp->pdev,
			     bp->pcie_cap + PCI_EXP_DEVCTL, &devctl);
	DP(NETIF_MSG_HW, "read 0x%x from devctl\n", devctl);
	w_order = ((devctl & PCI_EXP_DEVCTL_PAYLOAD) >> 5);
	if (bp->mrrs == -1)
		r_order = ((devctl & PCI_EXP_DEVCTL_READRQ) >> 12);
	else {
		DP(NETIF_MSG_HW, "force read order to %d\n", bp->mrrs);
		r_order = bp->mrrs;
	}

	bnx2x_init_pxp_arb(bp, r_order, w_order);
}

static void bnx2x_setup_fan_failure_detection(struct bnx2x *bp)
{
	int is_required;
	u32 val;
	int port;

	if (BP_NOMCP(bp))
		return;

	is_required = 0;
	val = SHMEM_RD(bp, dev_info.shared_hw_config.config2) &
	      SHARED_HW_CFG_FAN_FAILURE_MASK;

	if (val == SHARED_HW_CFG_FAN_FAILURE_ENABLED)
		is_required = 1;

	/*
	 * The fan failure mechanism is usually related to the PHY type since
	 * the power consumption of the board is affected by the PHY. Currently,
	 * a fan is required for most designs with SFX7101, BCM8727 and BCM8481.
	 */
	else if (val == SHARED_HW_CFG_FAN_FAILURE_PHY_TYPE)
		for (port = PORT_0; port < PORT_MAX; port++) {
			is_required |=
				bnx2x_fan_failure_det_req(
					bp,
					bp->common.shmem_base,
					bp->common.shmem2_base,
					port);
		}

	DP(NETIF_MSG_HW, "fan detection setting: %d\n", is_required);

	if (is_required == 0)
		return;

	/* Fan failure is indicated by SPIO 5 */
	bnx2x_set_spio(bp, MISC_REGISTERS_SPIO_5,
		       MISC_REGISTERS_SPIO_INPUT_HI_Z);

	/* set to active low mode */
	val = REG_RD(bp, MISC_REG_SPIO_INT);
	val |= ((1 << MISC_REGISTERS_SPIO_5) <<
					MISC_REGISTERS_SPIO_INT_OLD_SET_POS);
	REG_WR(bp, MISC_REG_SPIO_INT, val);

	/* enable interrupt to signal the IGU */
	val = REG_RD(bp, MISC_REG_SPIO_EVENT_EN);
	val |= (1 << MISC_REGISTERS_SPIO_5);
	REG_WR(bp, MISC_REG_SPIO_EVENT_EN, val);
}

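/* helper: makes subsequent GRC accesses pretend to come from another
 * function - writes the absolute function number to this PF's PGL
 * pretend register and reads it back so the new value is guaranteed
 * to take effect before the "pretended" accesses are issued.
 */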
static void bnx2x_pretend_func(struct bnx2x *bp, u8 pretend_func_num)
{
	u32 offset = 0;

	if (CHIP_IS_E1(bp))
		return;
	if (CHIP_IS_E1H(bp) && (pretend_func_num >= E1H_FUNC_MAX))
		return;

	switch (BP_ABS_FUNC(bp)) {
	case 0:
		offset = PXP2_REG_PGL_PRETEND_FUNC_F0;
		break;
	case 1:
		offset = PXP2_REG_PGL_PRETEND_FUNC_F1;
		break;
	case 2:
		offset = PXP2_REG_PGL_PRETEND_FUNC_F2;
		break;
	case 3:
		offset = PXP2_REG_PGL_PRETEND_FUNC_F3;
		break;
	case 4:
		offset = PXP2_REG_PGL_PRETEND_FUNC_F4;
		break;
	case 5:
		offset = PXP2_REG_PGL_PRETEND_FUNC_F5;
		break;
	case 6:
		offset = PXP2_REG_PGL_PRETEND_FUNC_F6;
		break;
	case 7:
		offset = PXP2_REG_PGL_PRETEND_FUNC_F7;
		break;
	default:
		return;
	}

	REG_WR(bp, offset, pretend_func_num);
	REG_RD(bp, offset);
	DP(NETIF_MSG_HW, "Pretending to func %d\n", pretend_func_num);
}

static void bnx2x_pf_disable(struct bnx2x *bp)
{
	u32 val = REG_RD(bp, IGU_REG_PF_CONFIGURATION);
	val &= ~IGU_PF_CONF_FUNC_EN;

	REG_WR(bp, IGU_REG_PF_CONFIGURATION, val);
	REG_WR(bp, PGLUE_B_REG_INTERNAL_PFID_ENABLE_MASTER, 0);
	REG_WR(bp, CFC_REG_WEAK_ENABLE_PF, 0);
}

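/* One-time chip-wide HW init, performed by the first PF loaded on a
 * path: resets the common blocks, initializes PXP/ILT/QM/parser etc.,
 * runs the E1 internal memory self test on first power-up and finally
 * unmasks block attentions and initializes the external PHY.
 */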
Dmitry Kravkov523224a2010-10-06 03:23:26 +00004964static int bnx2x_init_hw_common(struct bnx2x *bp, u32 load_code)
Eilon Greenstein34f80b02008-06-23 20:33:01 -07004965{
4966 u32 val, i;
4967
Dmitry Kravkovf2e08992010-10-06 03:28:26 +00004968 DP(BNX2X_MSG_MCP, "starting common init func %d\n", BP_ABS_FUNC(bp));
Eilon Greenstein34f80b02008-06-23 20:33:01 -07004969
Eilon Greenstein81f75bb2009-01-22 03:37:31 +00004970 bnx2x_reset_common(bp);
Eilon Greenstein34f80b02008-06-23 20:33:01 -07004971 REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_SET, 0xffffffff);
4972 REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_2_SET, 0xfffc);
4973
Vladislav Zolotarov94a78b72009-04-27 03:27:43 -07004974 bnx2x_init_block(bp, MISC_BLOCK, COMMON_STAGE);
Dmitry Kravkovf2e08992010-10-06 03:28:26 +00004975 if (!CHIP_IS_E1(bp))
Dmitry Kravkovfb3bff12010-10-06 03:26:40 +00004976 REG_WR(bp, MISC_REG_E1HMF_MODE, IS_MF(bp));
Eilon Greenstein34f80b02008-06-23 20:33:01 -07004977
Dmitry Kravkovf2e08992010-10-06 03:28:26 +00004978 if (CHIP_IS_E2(bp)) {
4979 u8 fid;
4980
4981 /**
4982 * 4-port mode or 2-port mode we need to turn of master-enable
4983 * for everyone, after that, turn it back on for self.
4984 * so, we disregard multi-function or not, and always disable
4985 * for all functions on the given path, this means 0,2,4,6 for
4986 * path 0 and 1,3,5,7 for path 1
4987 */
4988 for (fid = BP_PATH(bp); fid < E2_FUNC_MAX*2; fid += 2) {
4989 if (fid == BP_ABS_FUNC(bp)) {
4990 REG_WR(bp,
4991 PGLUE_B_REG_INTERNAL_PFID_ENABLE_MASTER,
4992 1);
4993 continue;
4994 }
4995
4996 bnx2x_pretend_func(bp, fid);
4997 /* clear pf enable */
4998 bnx2x_pf_disable(bp);
4999 bnx2x_pretend_func(bp, BP_ABS_FUNC(bp));
5000 }
5001 }
Eilon Greenstein34f80b02008-06-23 20:33:01 -07005002
Vladislav Zolotarov94a78b72009-04-27 03:27:43 -07005003 bnx2x_init_block(bp, PXP_BLOCK, COMMON_STAGE);
Eilon Greenstein34f80b02008-06-23 20:33:01 -07005004 if (CHIP_IS_E1(bp)) {
5005 /* enable HW interrupt from PXP on USDM overflow
5006 bit 16 on INT_MASK_0 */
5007 REG_WR(bp, PXP_REG_PXP_INT_MASK_0, 0);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02005008 }
5009
Vladislav Zolotarov94a78b72009-04-27 03:27:43 -07005010 bnx2x_init_block(bp, PXP2_BLOCK, COMMON_STAGE);
Eilon Greenstein34f80b02008-06-23 20:33:01 -07005011 bnx2x_init_pxp(bp);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02005012
5013#ifdef __BIG_ENDIAN
Eilon Greenstein34f80b02008-06-23 20:33:01 -07005014 REG_WR(bp, PXP2_REG_RQ_QM_ENDIAN_M, 1);
5015 REG_WR(bp, PXP2_REG_RQ_TM_ENDIAN_M, 1);
5016 REG_WR(bp, PXP2_REG_RQ_SRC_ENDIAN_M, 1);
5017 REG_WR(bp, PXP2_REG_RQ_CDU_ENDIAN_M, 1);
5018 REG_WR(bp, PXP2_REG_RQ_DBG_ENDIAN_M, 1);
Eilon Greenstein8badd272009-02-12 08:36:15 +00005019 /* make sure this value is 0 */
5020 REG_WR(bp, PXP2_REG_RQ_HC_ENDIAN_M, 0);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02005021
Eilon Greenstein34f80b02008-06-23 20:33:01 -07005022/* REG_WR(bp, PXP2_REG_RD_PBF_SWAP_MODE, 1); */
5023 REG_WR(bp, PXP2_REG_RD_QM_SWAP_MODE, 1);
5024 REG_WR(bp, PXP2_REG_RD_TM_SWAP_MODE, 1);
5025 REG_WR(bp, PXP2_REG_RD_SRC_SWAP_MODE, 1);
5026 REG_WR(bp, PXP2_REG_RD_CDURD_SWAP_MODE, 1);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02005027#endif
5028
Dmitry Kravkov523224a2010-10-06 03:23:26 +00005029 bnx2x_ilt_init_page_size(bp, INITOP_SET);
5030
Eilon Greenstein34f80b02008-06-23 20:33:01 -07005031 if (CHIP_REV_IS_FPGA(bp) && CHIP_IS_E1H(bp))
5032 REG_WR(bp, PXP2_REG_PGL_TAGS_LIMIT, 0x1);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02005033
Eilon Greenstein34f80b02008-06-23 20:33:01 -07005034 /* let the HW do it's magic ... */
5035 msleep(100);
5036 /* finish PXP init */
5037 val = REG_RD(bp, PXP2_REG_RQ_CFG_DONE);
5038 if (val != 1) {
5039 BNX2X_ERR("PXP2 CFG failed\n");
5040 return -EBUSY;
5041 }
5042 val = REG_RD(bp, PXP2_REG_RD_INIT_DONE);
5043 if (val != 1) {
5044 BNX2X_ERR("PXP2 RD_INIT failed\n");
5045 return -EBUSY;
5046 }
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02005047
Dmitry Kravkovf2e08992010-10-06 03:28:26 +00005048 /* Timers bug workaround E2 only. We need to set the entire ILT to
5049 * have entries with value "0" and valid bit on.
5050 * This needs to be done by the first PF that is loaded in a path
5051 * (i.e. common phase)
5052 */
5053 if (CHIP_IS_E2(bp)) {
5054 struct ilt_client_info ilt_cli;
5055 struct bnx2x_ilt ilt;
5056 memset(&ilt_cli, 0, sizeof(struct ilt_client_info));
5057 memset(&ilt, 0, sizeof(struct bnx2x_ilt));
5058
Uwe Kleine-Königb5950762010-11-01 15:38:34 -04005059 /* initialize dummy TM client */
Dmitry Kravkovf2e08992010-10-06 03:28:26 +00005060 ilt_cli.start = 0;
5061 ilt_cli.end = ILT_NUM_PAGE_ENTRIES - 1;
5062 ilt_cli.client_num = ILT_CLIENT_TM;
5063
5064 /* Step 1: set zeroes to all ilt page entries with valid bit on
5065 * Step 2: set the timers first/last ilt entry to point
5066 * to the entire range to prevent ILT range error for 3rd/4th
5067 * vnic (this code assumes existance of the vnic)
5068 *
5069 * both steps performed by call to bnx2x_ilt_client_init_op()
5070 * with dummy TM client
5071 *
5072 * we must use pretend since PXP2_REG_RQ_##blk##_FIRST_ILT
5073 * and his brother are split registers
5074 */
5075 bnx2x_pretend_func(bp, (BP_PATH(bp) + 6));
5076 bnx2x_ilt_client_init_op_ilt(bp, &ilt, &ilt_cli, INITOP_CLEAR);
5077 bnx2x_pretend_func(bp, BP_ABS_FUNC(bp));
5078
5079 REG_WR(bp, PXP2_REG_RQ_DRAM_ALIGN, BNX2X_PXP_DRAM_ALIGN);
5080 REG_WR(bp, PXP2_REG_RQ_DRAM_ALIGN_RD, BNX2X_PXP_DRAM_ALIGN);
5081 REG_WR(bp, PXP2_REG_RQ_DRAM_ALIGN_SEL, 1);
5082 }
5083
5084
Eilon Greenstein34f80b02008-06-23 20:33:01 -07005085 REG_WR(bp, PXP2_REG_RQ_DISABLE_INPUTS, 0);
5086 REG_WR(bp, PXP2_REG_RD_DISABLE_INPUTS, 0);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02005087
Dmitry Kravkovf2e08992010-10-06 03:28:26 +00005088 if (CHIP_IS_E2(bp)) {
5089 int factor = CHIP_REV_IS_EMUL(bp) ? 1000 :
5090 (CHIP_REV_IS_FPGA(bp) ? 400 : 0);
5091 bnx2x_init_block(bp, PGLUE_B_BLOCK, COMMON_STAGE);
5092
5093 bnx2x_init_block(bp, ATC_BLOCK, COMMON_STAGE);
5094
5095 /* let the HW do it's magic ... */
5096 do {
5097 msleep(200);
5098 val = REG_RD(bp, ATC_REG_ATC_INIT_DONE);
5099 } while (factor-- && (val != 1));
5100
5101 if (val != 1) {
5102 BNX2X_ERR("ATC_INIT failed\n");
5103 return -EBUSY;
5104 }
5105 }
5106
Vladislav Zolotarov94a78b72009-04-27 03:27:43 -07005107 bnx2x_init_block(bp, DMAE_BLOCK, COMMON_STAGE);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02005108
Eilon Greenstein34f80b02008-06-23 20:33:01 -07005109 /* clean the DMAE memory */
5110 bp->dmae_ready = 1;
5111 bnx2x_init_fill(bp, TSEM_REG_PRAM, 0, 8);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02005112
Vladislav Zolotarov94a78b72009-04-27 03:27:43 -07005113 bnx2x_init_block(bp, TCM_BLOCK, COMMON_STAGE);
5114 bnx2x_init_block(bp, UCM_BLOCK, COMMON_STAGE);
5115 bnx2x_init_block(bp, CCM_BLOCK, COMMON_STAGE);
5116 bnx2x_init_block(bp, XCM_BLOCK, COMMON_STAGE);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02005117
Eilon Greenstein34f80b02008-06-23 20:33:01 -07005118 bnx2x_read_dmae(bp, XSEM_REG_PASSIVE_BUFFER, 3);
5119 bnx2x_read_dmae(bp, CSEM_REG_PASSIVE_BUFFER, 3);
5120 bnx2x_read_dmae(bp, TSEM_REG_PASSIVE_BUFFER, 3);
5121 bnx2x_read_dmae(bp, USEM_REG_PASSIVE_BUFFER, 3);
5122
Vladislav Zolotarov94a78b72009-04-27 03:27:43 -07005123 bnx2x_init_block(bp, QM_BLOCK, COMMON_STAGE);
Michael Chan37b091b2009-10-10 13:46:55 +00005124
Dmitry Kravkovf2e08992010-10-06 03:28:26 +00005125 if (CHIP_MODE_IS_4_PORT(bp))
5126 bnx2x_init_block(bp, QM_4PORT_BLOCK, COMMON_STAGE);
Dmitry Kravkovf85582f2010-10-06 03:34:21 +00005127
Dmitry Kravkov523224a2010-10-06 03:23:26 +00005128 /* QM queues pointers table */
5129 bnx2x_qm_init_ptr_table(bp, bp->qm_cid_count, INITOP_SET);
Michael Chan37b091b2009-10-10 13:46:55 +00005130
Eilon Greenstein34f80b02008-06-23 20:33:01 -07005131 /* soft reset pulse */
5132 REG_WR(bp, QM_REG_SOFT_RESET, 1);
5133 REG_WR(bp, QM_REG_SOFT_RESET, 0);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02005134
Michael Chan37b091b2009-10-10 13:46:55 +00005135#ifdef BCM_CNIC
Vladislav Zolotarov94a78b72009-04-27 03:27:43 -07005136 bnx2x_init_block(bp, TIMERS_BLOCK, COMMON_STAGE);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02005137#endif
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02005138
Vladislav Zolotarov94a78b72009-04-27 03:27:43 -07005139 bnx2x_init_block(bp, DQ_BLOCK, COMMON_STAGE);
Dmitry Kravkov523224a2010-10-06 03:23:26 +00005140 REG_WR(bp, DORQ_REG_DPM_CID_OFST, BNX2X_DB_SHIFT);
5141
Eilon Greenstein34f80b02008-06-23 20:33:01 -07005142 if (!CHIP_REV_IS_SLOW(bp)) {
5143 /* enable hw interrupt from doorbell Q */
5144 REG_WR(bp, DORQ_REG_DORQ_INT_MASK, 0);
5145 }
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02005146
Vladislav Zolotarov94a78b72009-04-27 03:27:43 -07005147 bnx2x_init_block(bp, BRB1_BLOCK, COMMON_STAGE);
Dmitry Kravkovf2e08992010-10-06 03:28:26 +00005148 if (CHIP_MODE_IS_4_PORT(bp)) {
5149 REG_WR(bp, BRB1_REG_FULL_LB_XOFF_THRESHOLD, 248);
5150 REG_WR(bp, BRB1_REG_FULL_LB_XON_THRESHOLD, 328);
5151 }
5152
Vladislav Zolotarov94a78b72009-04-27 03:27:43 -07005153 bnx2x_init_block(bp, PRS_BLOCK, COMMON_STAGE);
Eilon Greenstein26c8fa42009-01-14 21:29:55 -08005154 REG_WR(bp, PRS_REG_A_PRSU_20, 0xf);
Michael Chan37b091b2009-10-10 13:46:55 +00005155#ifndef BCM_CNIC
Eilon Greenstein3196a882008-08-13 15:58:49 -07005156 /* set NIC mode */
5157 REG_WR(bp, PRS_REG_NIC_MODE, 1);
Michael Chan37b091b2009-10-10 13:46:55 +00005158#endif
Dmitry Kravkovf2e08992010-10-06 03:28:26 +00005159 if (!CHIP_IS_E1(bp))
Dmitry Kravkov0793f83f2010-12-01 12:39:28 -08005160 REG_WR(bp, PRS_REG_E1HOV_MODE, IS_MF_SD(bp));
Dmitry Kravkovf85582f2010-10-06 03:34:21 +00005161
Dmitry Kravkovf2e08992010-10-06 03:28:26 +00005162 if (CHIP_IS_E2(bp)) {
5163 /* Bit-map indicating which L2 hdrs may appear after the
5164 basic Ethernet header */
Dmitry Kravkov0793f83f2010-12-01 12:39:28 -08005165 int has_ovlan = IS_MF_SD(bp);
Dmitry Kravkovf2e08992010-10-06 03:28:26 +00005166 REG_WR(bp, PRS_REG_HDRS_AFTER_BASIC, (has_ovlan ? 7 : 6));
5167 REG_WR(bp, PRS_REG_MUST_HAVE_HDRS, (has_ovlan ? 1 : 0));
5168 }
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02005169
Vladislav Zolotarov94a78b72009-04-27 03:27:43 -07005170 bnx2x_init_block(bp, TSDM_BLOCK, COMMON_STAGE);
5171 bnx2x_init_block(bp, CSDM_BLOCK, COMMON_STAGE);
5172 bnx2x_init_block(bp, USDM_BLOCK, COMMON_STAGE);
5173 bnx2x_init_block(bp, XSDM_BLOCK, COMMON_STAGE);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02005174
Eilon Greensteinca003922009-08-12 22:53:28 -07005175 bnx2x_init_fill(bp, TSEM_REG_FAST_MEMORY, 0, STORM_INTMEM_SIZE(bp));
5176 bnx2x_init_fill(bp, USEM_REG_FAST_MEMORY, 0, STORM_INTMEM_SIZE(bp));
5177 bnx2x_init_fill(bp, CSEM_REG_FAST_MEMORY, 0, STORM_INTMEM_SIZE(bp));
5178 bnx2x_init_fill(bp, XSEM_REG_FAST_MEMORY, 0, STORM_INTMEM_SIZE(bp));
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02005179
Vladislav Zolotarov94a78b72009-04-27 03:27:43 -07005180 bnx2x_init_block(bp, TSEM_BLOCK, COMMON_STAGE);
5181 bnx2x_init_block(bp, USEM_BLOCK, COMMON_STAGE);
5182 bnx2x_init_block(bp, CSEM_BLOCK, COMMON_STAGE);
5183 bnx2x_init_block(bp, XSEM_BLOCK, COMMON_STAGE);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02005184
Dmitry Kravkovf2e08992010-10-06 03:28:26 +00005185 if (CHIP_MODE_IS_4_PORT(bp))
5186 bnx2x_init_block(bp, XSEM_4PORT_BLOCK, COMMON_STAGE);
5187
Eilon Greenstein34f80b02008-06-23 20:33:01 -07005188 /* sync semi rtc */
5189 REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_CLEAR,
5190 0x80000000);
5191 REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_SET,
5192 0x80000000);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02005193
Vladislav Zolotarov94a78b72009-04-27 03:27:43 -07005194 bnx2x_init_block(bp, UPB_BLOCK, COMMON_STAGE);
5195 bnx2x_init_block(bp, XPB_BLOCK, COMMON_STAGE);
5196 bnx2x_init_block(bp, PBF_BLOCK, COMMON_STAGE);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02005197
Dmitry Kravkovf2e08992010-10-06 03:28:26 +00005198 if (CHIP_IS_E2(bp)) {
Dmitry Kravkov0793f83f2010-12-01 12:39:28 -08005199 int has_ovlan = IS_MF_SD(bp);
Dmitry Kravkovf2e08992010-10-06 03:28:26 +00005200 REG_WR(bp, PBF_REG_HDRS_AFTER_BASIC, (has_ovlan ? 7 : 6));
5201 REG_WR(bp, PBF_REG_MUST_HAVE_HDRS, (has_ovlan ? 1 : 0));
5202 }
5203
Eilon Greenstein34f80b02008-06-23 20:33:01 -07005204 REG_WR(bp, SRC_REG_SOFT_RST, 1);
Tom Herbertc68ed252010-04-23 00:10:52 -07005205 for (i = SRC_REG_KEYRSS0_0; i <= SRC_REG_KEYRSS1_9; i += 4)
5206 REG_WR(bp, i, random32());
Dmitry Kravkovf85582f2010-10-06 03:34:21 +00005207
Vladislav Zolotarov94a78b72009-04-27 03:27:43 -07005208 bnx2x_init_block(bp, SRCH_BLOCK, COMMON_STAGE);
Michael Chan37b091b2009-10-10 13:46:55 +00005209#ifdef BCM_CNIC
5210 REG_WR(bp, SRC_REG_KEYSEARCH_0, 0x63285672);
5211 REG_WR(bp, SRC_REG_KEYSEARCH_1, 0x24b8f2cc);
5212 REG_WR(bp, SRC_REG_KEYSEARCH_2, 0x223aef9b);
5213 REG_WR(bp, SRC_REG_KEYSEARCH_3, 0x26001e3a);
5214 REG_WR(bp, SRC_REG_KEYSEARCH_4, 0x7ae91116);
5215 REG_WR(bp, SRC_REG_KEYSEARCH_5, 0x5ce5230b);
5216 REG_WR(bp, SRC_REG_KEYSEARCH_6, 0x298d8adf);
5217 REG_WR(bp, SRC_REG_KEYSEARCH_7, 0x6eb0ff09);
5218 REG_WR(bp, SRC_REG_KEYSEARCH_8, 0x1830f82f);
5219 REG_WR(bp, SRC_REG_KEYSEARCH_9, 0x01e46be7);
5220#endif
Eilon Greenstein34f80b02008-06-23 20:33:01 -07005221 REG_WR(bp, SRC_REG_SOFT_RST, 0);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02005222
Eilon Greenstein34f80b02008-06-23 20:33:01 -07005223 if (sizeof(union cdu_context) != 1024)
5224 /* we currently assume that a context is 1024 bytes */
Vladislav Zolotarovcdaa7cb2010-04-19 01:13:57 +00005225 dev_alert(&bp->pdev->dev, "please adjust the size "
5226 "of cdu_context(%ld)\n",
Joe Perches7995c642010-02-17 15:01:52 +00005227 (long)sizeof(union cdu_context));
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02005228
Vladislav Zolotarov94a78b72009-04-27 03:27:43 -07005229 bnx2x_init_block(bp, CDU_BLOCK, COMMON_STAGE);
Eilon Greenstein34f80b02008-06-23 20:33:01 -07005230 val = (4 << 24) + (0 << 12) + 1024;
5231 REG_WR(bp, CDU_REG_CDU_GLOBAL_PARAMS, val);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02005232
Vladislav Zolotarov94a78b72009-04-27 03:27:43 -07005233 bnx2x_init_block(bp, CFC_BLOCK, COMMON_STAGE);
Eilon Greenstein34f80b02008-06-23 20:33:01 -07005234 REG_WR(bp, CFC_REG_INIT_REG, 0x7FF);
Eilon Greenstein8d9c5f32009-02-15 23:24:08 -08005235 /* enable context validation interrupt from CFC */
5236 REG_WR(bp, CFC_REG_CFC_INT_MASK, 0);
5237
5238 /* set the thresholds to prevent CFC/CDU race */
5239 REG_WR(bp, CFC_REG_DEBUG0, 0x20020000);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02005240
Vladislav Zolotarov94a78b72009-04-27 03:27:43 -07005241 bnx2x_init_block(bp, HC_BLOCK, COMMON_STAGE);
Dmitry Kravkovf2e08992010-10-06 03:28:26 +00005242
5243 if (CHIP_IS_E2(bp) && BP_NOMCP(bp))
5244 REG_WR(bp, IGU_REG_RESET_MEMORIES, 0x36);
5245
5246 bnx2x_init_block(bp, IGU_BLOCK, COMMON_STAGE);
Vladislav Zolotarov94a78b72009-04-27 03:27:43 -07005247 bnx2x_init_block(bp, MISC_AEU_BLOCK, COMMON_STAGE);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02005248
Vladislav Zolotarov94a78b72009-04-27 03:27:43 -07005249 bnx2x_init_block(bp, PXPCS_BLOCK, COMMON_STAGE);
Eilon Greenstein34f80b02008-06-23 20:33:01 -07005250 /* Reset PCIE errors for debug */
5251 REG_WR(bp, 0x2814, 0xffffffff);
5252 REG_WR(bp, 0x3820, 0xffffffff);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02005253
Dmitry Kravkovf2e08992010-10-06 03:28:26 +00005254 if (CHIP_IS_E2(bp)) {
5255 REG_WR(bp, PCICFG_OFFSET + PXPCS_TL_CONTROL_5,
5256 (PXPCS_TL_CONTROL_5_ERR_UNSPPORT1 |
5257 PXPCS_TL_CONTROL_5_ERR_UNSPPORT));
5258 REG_WR(bp, PCICFG_OFFSET + PXPCS_TL_FUNC345_STAT,
5259 (PXPCS_TL_FUNC345_STAT_ERR_UNSPPORT4 |
5260 PXPCS_TL_FUNC345_STAT_ERR_UNSPPORT3 |
5261 PXPCS_TL_FUNC345_STAT_ERR_UNSPPORT2));
5262 REG_WR(bp, PCICFG_OFFSET + PXPCS_TL_FUNC678_STAT,
5263 (PXPCS_TL_FUNC678_STAT_ERR_UNSPPORT7 |
5264 PXPCS_TL_FUNC678_STAT_ERR_UNSPPORT6 |
5265 PXPCS_TL_FUNC678_STAT_ERR_UNSPPORT5));
5266 }
5267
Vladislav Zolotarov94a78b72009-04-27 03:27:43 -07005268 bnx2x_init_block(bp, EMAC0_BLOCK, COMMON_STAGE);
Vladislav Zolotarov94a78b72009-04-27 03:27:43 -07005269 bnx2x_init_block(bp, EMAC1_BLOCK, COMMON_STAGE);
Vladislav Zolotarov94a78b72009-04-27 03:27:43 -07005270 bnx2x_init_block(bp, DBU_BLOCK, COMMON_STAGE);
Vladislav Zolotarov94a78b72009-04-27 03:27:43 -07005271 bnx2x_init_block(bp, DBG_BLOCK, COMMON_STAGE);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02005272
Vladislav Zolotarov94a78b72009-04-27 03:27:43 -07005273 bnx2x_init_block(bp, NIG_BLOCK, COMMON_STAGE);
Dmitry Kravkovf2e08992010-10-06 03:28:26 +00005274 if (!CHIP_IS_E1(bp)) {
Dmitry Kravkovfb3bff12010-10-06 03:26:40 +00005275 REG_WR(bp, NIG_REG_LLH_MF_MODE, IS_MF(bp));
Dmitry Kravkov0793f83f2010-12-01 12:39:28 -08005276 REG_WR(bp, NIG_REG_LLH_E1HOV_MODE, IS_MF_SD(bp));
Eilon Greenstein34f80b02008-06-23 20:33:01 -07005277 }
Dmitry Kravkovf2e08992010-10-06 03:28:26 +00005278 if (CHIP_IS_E2(bp)) {
5279 /* Bit-map indicating which L2 hdrs may appear after the
5280 basic Ethernet header */
Dmitry Kravkov0793f83f2010-12-01 12:39:28 -08005281 REG_WR(bp, NIG_REG_P0_HDRS_AFTER_BASIC, (IS_MF_SD(bp) ? 7 : 6));
Dmitry Kravkovf2e08992010-10-06 03:28:26 +00005282 }
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02005283
Eilon Greenstein34f80b02008-06-23 20:33:01 -07005284 if (CHIP_REV_IS_SLOW(bp))
5285 msleep(200);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02005286
Eilon Greenstein34f80b02008-06-23 20:33:01 -07005287 /* finish CFC init */
5288 val = reg_poll(bp, CFC_REG_LL_INIT_DONE, 1, 100, 10);
5289 if (val != 1) {
5290 BNX2X_ERR("CFC LL_INIT failed\n");
5291 return -EBUSY;
5292 }
5293 val = reg_poll(bp, CFC_REG_AC_INIT_DONE, 1, 100, 10);
5294 if (val != 1) {
5295 BNX2X_ERR("CFC AC_INIT failed\n");
5296 return -EBUSY;
5297 }
5298 val = reg_poll(bp, CFC_REG_CAM_INIT_DONE, 1, 100, 10);
5299 if (val != 1) {
5300 BNX2X_ERR("CFC CAM_INIT failed\n");
5301 return -EBUSY;
5302 }
5303 REG_WR(bp, CFC_REG_DEBUG0, 0);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02005304
Dmitry Kravkovf2e08992010-10-06 03:28:26 +00005305 if (CHIP_IS_E1(bp)) {
5306 /* read NIG statistic
5307 to see if this is the first bring-up since power-up */
5308 bnx2x_read_dmae(bp, NIG_REG_STAT2_BRB_OCTET, 2);
5309 val = *bnx2x_sp(bp, wb_data[0]);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02005310
Dmitry Kravkovf2e08992010-10-06 03:28:26 +00005311 /* do internal memory self test */
5312 if ((val == 0) && bnx2x_int_mem_test(bp)) {
5313 BNX2X_ERR("internal mem self test failed\n");
5314 return -EBUSY;
5315 }
Eilon Greenstein34f80b02008-06-23 20:33:01 -07005316 }
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02005317
Eilon Greensteinfd4ef402009-07-21 05:47:27 +00005318 bnx2x_setup_fan_failure_detection(bp);
5319
Eilon Greenstein34f80b02008-06-23 20:33:01 -07005320 /* clear PXP2 attentions */
5321 REG_RD(bp, PXP2_REG_PXP2_INT_STS_CLR_0);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02005322
Vladislav Zolotarov4a33bc02011-01-09 02:20:04 +00005323 bnx2x_enable_blocks_attention(bp);
5324 if (CHIP_PARITY_ENABLED(bp))
5325 bnx2x_enable_blocks_parity(bp);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02005326
Yaniv Rosner6bbca912008-08-13 15:57:28 -07005327 if (!BP_NOMCP(bp)) {
Dmitry Kravkovf2e08992010-10-06 03:28:26 +00005328 /* In E2 2-PORT mode, same ext phy is used for the two paths */
5329 if ((load_code == FW_MSG_CODE_DRV_LOAD_COMMON_CHIP) ||
5330 CHIP_IS_E1x(bp)) {
5331 u32 shmem_base[2], shmem2_base[2];
5332 shmem_base[0] = bp->common.shmem_base;
5333 shmem2_base[0] = bp->common.shmem2_base;
5334 if (CHIP_IS_E2(bp)) {
5335 shmem_base[1] =
5336 SHMEM2_RD(bp, other_shmem_base_addr);
5337 shmem2_base[1] =
5338 SHMEM2_RD(bp, other_shmem2_base_addr);
5339 }
5340 bnx2x_acquire_phy_lock(bp);
5341 bnx2x_common_init_phy(bp, shmem_base, shmem2_base,
5342 bp->common.chip_id);
5343 bnx2x_release_phy_lock(bp);
5344 }
Yaniv Rosner6bbca912008-08-13 15:57:28 -07005345 } else
5346 BNX2X_ERR("Bootcode is missing - cannot initialize link\n");
5347
Eilon Greenstein34f80b02008-06-23 20:33:01 -07005348 return 0;
5349}
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02005350
Dmitry Kravkov523224a2010-10-06 03:23:26 +00005351static int bnx2x_init_hw_port(struct bnx2x *bp)
Eilon Greenstein34f80b02008-06-23 20:33:01 -07005352{
5353 int port = BP_PORT(bp);
Vladislav Zolotarov94a78b72009-04-27 03:27:43 -07005354 int init_stage = port ? PORT1_STAGE : PORT0_STAGE;
Eilon Greenstein1c063282009-02-12 08:36:43 +00005355 u32 low, high;
Eilon Greenstein34f80b02008-06-23 20:33:01 -07005356 u32 val;
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02005357
Vladislav Zolotarovcdaa7cb2010-04-19 01:13:57 +00005358 DP(BNX2X_MSG_MCP, "starting port init port %d\n", port);
Eilon Greenstein34f80b02008-06-23 20:33:01 -07005359
5360 REG_WR(bp, NIG_REG_MASK_INTERRUPT_PORT0 + port*4, 0);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02005361
Vladislav Zolotarov94a78b72009-04-27 03:27:43 -07005362 bnx2x_init_block(bp, PXP_BLOCK, init_stage);
Vladislav Zolotarov94a78b72009-04-27 03:27:43 -07005363 bnx2x_init_block(bp, PXP2_BLOCK, init_stage);
Eilon Greensteinca003922009-08-12 22:53:28 -07005364
Dmitry Kravkovf2e08992010-10-06 03:28:26 +00005365 /* Timers bug workaround: the pf_master bit in pglue is disabled in
5366 * the common phase, so we need to enable it here before any DMAE
5367 * access is attempted. Therefore the enable-master was added manually
5368 * to the port phase (it also happens in the function phase)
5369 */
5370 if (CHIP_IS_E2(bp))
5371 REG_WR(bp, PGLUE_B_REG_INTERNAL_PFID_ENABLE_MASTER, 1);
5372
Eilon Greensteinca003922009-08-12 22:53:28 -07005373 bnx2x_init_block(bp, TCM_BLOCK, init_stage);
5374 bnx2x_init_block(bp, UCM_BLOCK, init_stage);
5375 bnx2x_init_block(bp, CCM_BLOCK, init_stage);
Vladislav Zolotarov94a78b72009-04-27 03:27:43 -07005376 bnx2x_init_block(bp, XCM_BLOCK, init_stage);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02005377
Dmitry Kravkov523224a2010-10-06 03:23:26 +00005378 /* QM cid (connection) count */
5379 bnx2x_qm_init_cid_count(bp, bp->qm_cid_count, INITOP_SET);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02005380
Dmitry Kravkov523224a2010-10-06 03:23:26 +00005381#ifdef BCM_CNIC
Vladislav Zolotarov94a78b72009-04-27 03:27:43 -07005382 bnx2x_init_block(bp, TIMERS_BLOCK, init_stage);
Michael Chan37b091b2009-10-10 13:46:55 +00005383 REG_WR(bp, TM_REG_LIN0_SCAN_TIME + port*4, 20);
5384 REG_WR(bp, TM_REG_LIN0_MAX_ACTIVE_CID + port*4, 31);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02005385#endif
Vladislav Zolotarovcdaa7cb2010-04-19 01:13:57 +00005386
Vladislav Zolotarov94a78b72009-04-27 03:27:43 -07005387 bnx2x_init_block(bp, DQ_BLOCK, init_stage);
Eilon Greenstein1c063282009-02-12 08:36:43 +00005388
Dmitry Kravkovf2e08992010-10-06 03:28:26 +00005389 if (CHIP_MODE_IS_4_PORT(bp))
5390 bnx2x_init_block(bp, QM_4PORT_BLOCK, init_stage);
Eilon Greenstein1c063282009-02-12 08:36:43 +00005391
Dmitry Kravkovf2e08992010-10-06 03:28:26 +00005392 if (CHIP_IS_E1(bp) || CHIP_IS_E1H(bp)) {
5393 bnx2x_init_block(bp, BRB1_BLOCK, init_stage);
5394 if (CHIP_REV_IS_SLOW(bp) && CHIP_IS_E1(bp)) {
5395 /* no pause for emulation and FPGA */
5396 low = 0;
5397 high = 513;
5398 } else {
5399 if (IS_MF(bp))
5400 low = ((bp->flags & ONE_PORT_FLAG) ? 160 : 246);
5401 else if (bp->dev->mtu > 4096) {
5402 if (bp->flags & ONE_PORT_FLAG)
5403 low = 160;
5404 else {
5405 val = bp->dev->mtu;
5406 /* (24*1024 + val*4)/256 */
5407 low = 96 + (val/64) +
5408 ((val % 64) ? 1 : 0);
5409 }
5410 } else
5411 low = ((bp->flags & ONE_PORT_FLAG) ? 80 : 160);
5412 high = low + 56; /* 14*1024/256 */
5413 }
5414 REG_WR(bp, BRB1_REG_PAUSE_LOW_THRESHOLD_0 + port*4, low);
5415 REG_WR(bp, BRB1_REG_PAUSE_HIGH_THRESHOLD_0 + port*4, high);
5416 }
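/* Worked example (illustrative, not part of the original sources): on a
 * two-port device in SF mode with MTU 9000, low = 96 + 9000/64 + 1 = 237
 * and high = 237 + 56 = 293, i.e. roughly (24k + 4*MTU)/256 plus 14k/256,
 * both in 256-byte BRB block units.
 */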
5417
5418 if (CHIP_MODE_IS_4_PORT(bp)) {
5419 REG_WR(bp, BRB1_REG_PAUSE_0_XOFF_THRESHOLD_0 + port*8, 248);
5420 REG_WR(bp, BRB1_REG_PAUSE_0_XON_THRESHOLD_0 + port*8, 328);
5421 REG_WR(bp, (BP_PORT(bp) ? BRB1_REG_MAC_GUARANTIED_1 :
5422 BRB1_REG_MAC_GUARANTIED_0), 40);
5423 }
Eilon Greenstein1c063282009-02-12 08:36:43 +00005424
Vladislav Zolotarov94a78b72009-04-27 03:27:43 -07005425 bnx2x_init_block(bp, PRS_BLOCK, init_stage);
Eilon Greensteinca003922009-08-12 22:53:28 -07005426
Vladislav Zolotarov94a78b72009-04-27 03:27:43 -07005427 bnx2x_init_block(bp, TSDM_BLOCK, init_stage);
Vladislav Zolotarov94a78b72009-04-27 03:27:43 -07005428 bnx2x_init_block(bp, CSDM_BLOCK, init_stage);
Vladislav Zolotarov94a78b72009-04-27 03:27:43 -07005429 bnx2x_init_block(bp, USDM_BLOCK, init_stage);
Vladislav Zolotarov94a78b72009-04-27 03:27:43 -07005430 bnx2x_init_block(bp, XSDM_BLOCK, init_stage);
Eilon Greenstein356e2382009-02-12 08:38:32 +00005431
Vladislav Zolotarov94a78b72009-04-27 03:27:43 -07005432 bnx2x_init_block(bp, TSEM_BLOCK, init_stage);
5433 bnx2x_init_block(bp, USEM_BLOCK, init_stage);
5434 bnx2x_init_block(bp, CSEM_BLOCK, init_stage);
5435 bnx2x_init_block(bp, XSEM_BLOCK, init_stage);
Dmitry Kravkovf2e08992010-10-06 03:28:26 +00005436 if (CHIP_MODE_IS_4_PORT(bp))
5437 bnx2x_init_block(bp, XSEM_4PORT_BLOCK, init_stage);
Eilon Greenstein356e2382009-02-12 08:38:32 +00005438
Vladislav Zolotarov94a78b72009-04-27 03:27:43 -07005439 bnx2x_init_block(bp, UPB_BLOCK, init_stage);
Vladislav Zolotarov94a78b72009-04-27 03:27:43 -07005440 bnx2x_init_block(bp, XPB_BLOCK, init_stage);
Eilon Greenstein34f80b02008-06-23 20:33:01 -07005441
Vladislav Zolotarov94a78b72009-04-27 03:27:43 -07005442 bnx2x_init_block(bp, PBF_BLOCK, init_stage);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02005443
Dmitry Kravkovf2e08992010-10-06 03:28:26 +00005444 if (!CHIP_IS_E2(bp)) {
5445 /* configure PBF to work without PAUSE mtu 9000 */
5446 REG_WR(bp, PBF_REG_P0_PAUSE_ENABLE + port*4, 0);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02005447
Dmitry Kravkovf2e08992010-10-06 03:28:26 +00005448 /* update threshold */
5449 REG_WR(bp, PBF_REG_P0_ARB_THRSH + port*4, (9040/16));
5450 /* update init credit */
5451 REG_WR(bp, PBF_REG_P0_INIT_CRD + port*4, (9040/16) + 553 - 22);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02005452
Dmitry Kravkovf2e08992010-10-06 03:28:26 +00005453 /* probe changes */
5454 REG_WR(bp, PBF_REG_INIT_P0 + port*4, 1);
5455 udelay(50);
5456 REG_WR(bp, PBF_REG_INIT_P0 + port*4, 0);
5457 }
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02005458
Michael Chan37b091b2009-10-10 13:46:55 +00005459#ifdef BCM_CNIC
5460 bnx2x_init_block(bp, SRCH_BLOCK, init_stage);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02005461#endif
Vladislav Zolotarov94a78b72009-04-27 03:27:43 -07005462 bnx2x_init_block(bp, CDU_BLOCK, init_stage);
Vladislav Zolotarov94a78b72009-04-27 03:27:43 -07005463 bnx2x_init_block(bp, CFC_BLOCK, init_stage);
Eilon Greenstein34f80b02008-06-23 20:33:01 -07005464
5465 if (CHIP_IS_E1(bp)) {
5466 REG_WR(bp, HC_REG_LEADING_EDGE_0 + port*8, 0);
5467 REG_WR(bp, HC_REG_TRAILING_EDGE_0 + port*8, 0);
5468 }
Vladislav Zolotarov94a78b72009-04-27 03:27:43 -07005469 bnx2x_init_block(bp, HC_BLOCK, init_stage);
Eilon Greenstein34f80b02008-06-23 20:33:01 -07005470
Dmitry Kravkovf2e08992010-10-06 03:28:26 +00005471 bnx2x_init_block(bp, IGU_BLOCK, init_stage);
5472
Vladislav Zolotarov94a78b72009-04-27 03:27:43 -07005473 bnx2x_init_block(bp, MISC_AEU_BLOCK, init_stage);
Eilon Greenstein34f80b02008-06-23 20:33:01 -07005474 /* init aeu_mask_attn_func_0/1:
5475 * - SF mode: bits 3-7 are masked. only bits 0-2 are in use
5476 * - MF mode: bit 3 is masked. bits 0-2 are in use as in SF
5477 * bits 4-7 are used for "per vn group attention" */
Vladislav Zolotarove4901dd2010-12-13 05:44:18 +00005478 val = IS_MF(bp) ? 0xF7 : 0x7;
5479 /* Enable DCBX attention for all but E1 */
5480 val |= CHIP_IS_E1(bp) ? 0 : 0x10;
5481 REG_WR(bp, MISC_REG_AEU_MASK_ATTN_FUNC_0 + port*4, val);
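/* The resulting mask, worked out: SF on E1 is 0x7, SF on E1H/E2 is 0x17,
 * and MF is 0xF7 on all chips (bit 4 is already set there, so ORing in
 * the DCBX bit changes nothing).
 */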
Eilon Greenstein34f80b02008-06-23 20:33:01 -07005482
Vladislav Zolotarov94a78b72009-04-27 03:27:43 -07005483 bnx2x_init_block(bp, PXPCS_BLOCK, init_stage);
Vladislav Zolotarov94a78b72009-04-27 03:27:43 -07005484 bnx2x_init_block(bp, EMAC0_BLOCK, init_stage);
Vladislav Zolotarov94a78b72009-04-27 03:27:43 -07005485 bnx2x_init_block(bp, EMAC1_BLOCK, init_stage);
Vladislav Zolotarov94a78b72009-04-27 03:27:43 -07005486 bnx2x_init_block(bp, DBU_BLOCK, init_stage);
Vladislav Zolotarov94a78b72009-04-27 03:27:43 -07005487 bnx2x_init_block(bp, DBG_BLOCK, init_stage);
Eilon Greenstein356e2382009-02-12 08:38:32 +00005488
Vladislav Zolotarov94a78b72009-04-27 03:27:43 -07005489 bnx2x_init_block(bp, NIG_BLOCK, init_stage);
Eilon Greenstein34f80b02008-06-23 20:33:01 -07005490
5491 REG_WR(bp, NIG_REG_XGXS_SERDES0_MODE_SEL + port*4, 1);
5492
Dmitry Kravkovf2e08992010-10-06 03:28:26 +00005493 if (!CHIP_IS_E1(bp)) {
Dmitry Kravkovfb3bff12010-10-06 03:26:40 +00005494 /* 0x2 disable mf_ov, 0x1 enable */
Eilon Greenstein34f80b02008-06-23 20:33:01 -07005495 REG_WR(bp, NIG_REG_LLH0_BRB1_DRV_MASK_MF + port*4,
Dmitry Kravkov0793f83f2010-12-01 12:39:28 -08005496 (IS_MF_SD(bp) ? 0x1 : 0x2));
Eilon Greenstein34f80b02008-06-23 20:33:01 -07005497
Dmitry Kravkovf2e08992010-10-06 03:28:26 +00005498 if (CHIP_IS_E2(bp)) {
5499 val = 0;
5500 switch (bp->mf_mode) {
5501 case MULTI_FUNCTION_SD:
5502 val = 1;
5503 break;
5504 case MULTI_FUNCTION_SI:
5505 val = 2;
5506 break;
5507 }
5508
5509 REG_WR(bp, (BP_PORT(bp) ? NIG_REG_LLH1_CLS_TYPE :
5510 NIG_REG_LLH0_CLS_TYPE), val);
5511 }
Eilon Greenstein1c063282009-02-12 08:36:43 +00005512 {
5513 REG_WR(bp, NIG_REG_LLFC_ENABLE_0 + port*4, 0);
5514 REG_WR(bp, NIG_REG_LLFC_OUT_EN_0 + port*4, 0);
5515 REG_WR(bp, NIG_REG_PAUSE_ENABLE_0 + port*4, 1);
5516 }
Eilon Greenstein34f80b02008-06-23 20:33:01 -07005517 }
5518
Vladislav Zolotarov94a78b72009-04-27 03:27:43 -07005519 bnx2x_init_block(bp, MCP_BLOCK, init_stage);
Vladislav Zolotarov94a78b72009-04-27 03:27:43 -07005520 bnx2x_init_block(bp, DMAE_BLOCK, init_stage);
Yaniv Rosnerd90d96b2010-09-07 11:41:04 +00005521 if (bnx2x_fan_failure_det_req(bp, bp->common.shmem_base,
Yaniv Rosnera22f0782010-09-07 11:41:20 +00005522 bp->common.shmem2_base, port)) {
Eilon Greenstein4d295db2009-07-21 05:47:47 +00005523 u32 reg_addr = (port ? MISC_REG_AEU_ENABLE1_FUNC_1_OUT_0 :
5524 MISC_REG_AEU_ENABLE1_FUNC_0_OUT_0);
5525 val = REG_RD(bp, reg_addr);
Eliezer Tamirf1410642008-02-28 11:51:50 -08005526 val |= AEU_INPUTS_ATTN_BITS_SPIO5;
Eilon Greenstein4d295db2009-07-21 05:47:47 +00005527 REG_WR(bp, reg_addr, val);
Eliezer Tamirf1410642008-02-28 11:51:50 -08005528 }
Yaniv Rosnerc18487e2008-06-23 20:27:52 -07005529 bnx2x__link_reset(bp);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02005530
Eilon Greenstein34f80b02008-06-23 20:33:01 -07005531 return 0;
5532}
5533
Eilon Greenstein34f80b02008-06-23 20:33:01 -07005534static void bnx2x_ilt_wr(struct bnx2x *bp, u32 index, dma_addr_t addr)
5535{
5536 int reg;
5537
Dmitry Kravkovf2e08992010-10-06 03:28:26 +00005538 if (CHIP_IS_E1(bp))
Eilon Greenstein34f80b02008-06-23 20:33:01 -07005539 reg = PXP2_REG_RQ_ONCHIP_AT + index*8;
Dmitry Kravkovf2e08992010-10-06 03:28:26 +00005540 else
5541 reg = PXP2_REG_RQ_ONCHIP_AT_B0 + index*8;
Eilon Greenstein34f80b02008-06-23 20:33:01 -07005542
5543 bnx2x_wb_wr(bp, reg, ONCHIP_ADDR1(addr), ONCHIP_ADDR2(addr));
5544}
5545
Dmitry Kravkovf2e08992010-10-06 03:28:26 +00005546static inline void bnx2x_igu_clear_sb(struct bnx2x *bp, u8 idu_sb_id)
5547{
5548 bnx2x_igu_clear_sb_gen(bp, idu_sb_id, true /*PF*/);
5549}
5550
5551static inline void bnx2x_clear_func_ilt(struct bnx2x *bp, u32 func)
5552{
5553 u32 i, base = FUNC_ILT_BASE(func);
5554 for (i = base; i < base + ILT_PER_FUNC; i++)
5555 bnx2x_ilt_wr(bp, i, 0);
5556}
5557
Dmitry Kravkov523224a2010-10-06 03:23:26 +00005558static int bnx2x_init_hw_func(struct bnx2x *bp)
Eilon Greenstein34f80b02008-06-23 20:33:01 -07005559{
5560 int port = BP_PORT(bp);
5561 int func = BP_FUNC(bp);
Dmitry Kravkov523224a2010-10-06 03:23:26 +00005562 struct bnx2x_ilt *ilt = BP_ILT(bp);
5563 u16 cdu_ilt_start;
Eilon Greenstein8badd272009-02-12 08:36:15 +00005564 u32 addr, val;
Vladislav Zolotarovf4a66892010-10-19 05:13:09 +00005565 u32 main_mem_base, main_mem_size, main_mem_prty_clr;
5566 int i, main_mem_width;
Eilon Greenstein34f80b02008-06-23 20:33:01 -07005567
Vladislav Zolotarovcdaa7cb2010-04-19 01:13:57 +00005568 DP(BNX2X_MSG_MCP, "starting func init func %d\n", func);
Eilon Greenstein34f80b02008-06-23 20:33:01 -07005569
Eilon Greenstein8badd272009-02-12 08:36:15 +00005570 /* set MSI reconfigure capability */
Dmitry Kravkovf2e08992010-10-06 03:28:26 +00005571 if (bp->common.int_block == INT_BLOCK_HC) {
5572 addr = (port ? HC_REG_CONFIG_1 : HC_REG_CONFIG_0);
5573 val = REG_RD(bp, addr);
5574 val |= HC_CONFIG_0_REG_MSI_ATTN_EN_0;
5575 REG_WR(bp, addr, val);
5576 }
Eilon Greenstein8badd272009-02-12 08:36:15 +00005577
Dmitry Kravkov523224a2010-10-06 03:23:26 +00005578 ilt = BP_ILT(bp);
5579 cdu_ilt_start = ilt->clients[ILT_CLIENT_CDU].start;
Eilon Greenstein34f80b02008-06-23 20:33:01 -07005580
Dmitry Kravkov523224a2010-10-06 03:23:26 +00005581 for (i = 0; i < L2_ILT_LINES(bp); i++) {
5582 ilt->lines[cdu_ilt_start + i].page =
5583 bp->context.vcxt + (ILT_PAGE_CIDS * i);
5584 ilt->lines[cdu_ilt_start + i].page_mapping =
5585 bp->context.cxt_mapping + (CDU_ILT_PAGE_SZ * i);
5586 /* cdu ilt pages are allocated manually so there's no need to
5587 set the size */
5588 }
5589 bnx2x_ilt_init_op(bp, INITOP_SET);
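/* Each CDU ILT line now points at the matching ILT_PAGE_CIDS-sized slice
 * of bp->context.vcxt (and of its DMA mapping); the INITOP_SET above
 * programs those lines into the hardware ILT.
 */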
Dmitry Kravkovf85582f2010-10-06 03:34:21 +00005590
Michael Chan37b091b2009-10-10 13:46:55 +00005591#ifdef BCM_CNIC
Dmitry Kravkov523224a2010-10-06 03:23:26 +00005592 bnx2x_src_init_t2(bp, bp->t2, bp->t2_mapping, SRC_CONN_NUM);
Michael Chan37b091b2009-10-10 13:46:55 +00005593
Dmitry Kravkov523224a2010-10-06 03:23:26 +00005594 /* T1 hash bits value determines the T1 number of entries */
5595 REG_WR(bp, SRC_REG_NUMBER_HASH_BITS0 + port*4, SRC_HASH_BITS);
Michael Chan37b091b2009-10-10 13:46:55 +00005596#endif
Eilon Greenstein34f80b02008-06-23 20:33:01 -07005597
Dmitry Kravkov523224a2010-10-06 03:23:26 +00005598#ifndef BCM_CNIC
5599 /* set NIC mode */
5600 REG_WR(bp, PRS_REG_NIC_MODE, 1);
5601#endif /* BCM_CNIC */
Eilon Greenstein34f80b02008-06-23 20:33:01 -07005602
Dmitry Kravkovf2e08992010-10-06 03:28:26 +00005603 if (CHIP_IS_E2(bp)) {
5604 u32 pf_conf = IGU_PF_CONF_FUNC_EN;
5605
5606 /* Turn on a single ISR mode in IGU if driver is going to use
5607 * INT#x or MSI
5608 */
5609 if (!(bp->flags & USING_MSIX_FLAG))
5610 pf_conf |= IGU_PF_CONF_SINGLE_ISR_EN;
5611 /*
5612 * Timers bug workaround: function init part.
5613 * We need to wait 20 msec after initializing the ILT to
5614 * make sure there are no requests in any of the PXP
5615 * internal queues with "old" ILT addresses.
5616 */
5617 msleep(20);
5618 /*
5619 * Master enable - Due to WB DMAE writes performed before this
5620 * register is re-initialized as part of the regular function
5621 * init
5622 */
5623 REG_WR(bp, PGLUE_B_REG_INTERNAL_PFID_ENABLE_MASTER, 1);
5624 /* Enable the function in IGU */
5625 REG_WR(bp, IGU_REG_PF_CONFIGURATION, pf_conf);
5626 }
5627
Dmitry Kravkov523224a2010-10-06 03:23:26 +00005628 bp->dmae_ready = 1;
5629
5630 bnx2x_init_block(bp, PGLUE_B_BLOCK, FUNC0_STAGE + func);
5631
Dmitry Kravkovf2e08992010-10-06 03:28:26 +00005632 if (CHIP_IS_E2(bp))
5633 REG_WR(bp, PGLUE_B_REG_WAS_ERROR_PF_7_0_CLR, func);
5634
Dmitry Kravkov523224a2010-10-06 03:23:26 +00005635 bnx2x_init_block(bp, MISC_BLOCK, FUNC0_STAGE + func);
5636 bnx2x_init_block(bp, TCM_BLOCK, FUNC0_STAGE + func);
5637 bnx2x_init_block(bp, UCM_BLOCK, FUNC0_STAGE + func);
5638 bnx2x_init_block(bp, CCM_BLOCK, FUNC0_STAGE + func);
5639 bnx2x_init_block(bp, XCM_BLOCK, FUNC0_STAGE + func);
5640 bnx2x_init_block(bp, TSEM_BLOCK, FUNC0_STAGE + func);
5641 bnx2x_init_block(bp, USEM_BLOCK, FUNC0_STAGE + func);
5642 bnx2x_init_block(bp, CSEM_BLOCK, FUNC0_STAGE + func);
5643 bnx2x_init_block(bp, XSEM_BLOCK, FUNC0_STAGE + func);
5644
Dmitry Kravkovf2e08992010-10-06 03:28:26 +00005645 if (CHIP_IS_E2(bp)) {
5646 REG_WR(bp, BAR_XSTRORM_INTMEM + XSTORM_PATH_ID_OFFSET,
5647 BP_PATH(bp));
5648 REG_WR(bp, BAR_CSTRORM_INTMEM + CSTORM_PATH_ID_OFFSET,
5649 BP_PATH(bp));
5650 }
5651
5652 if (CHIP_MODE_IS_4_PORT(bp))
5653 bnx2x_init_block(bp, XSEM_4PORT_BLOCK, FUNC0_STAGE + func);
5654
5655 if (CHIP_IS_E2(bp))
5656 REG_WR(bp, QM_REG_PF_EN, 1);
5657
Dmitry Kravkov523224a2010-10-06 03:23:26 +00005658 bnx2x_init_block(bp, QM_BLOCK, FUNC0_STAGE + func);
Dmitry Kravkovf2e08992010-10-06 03:28:26 +00005659
5660 if (CHIP_MODE_IS_4_PORT(bp))
5661 bnx2x_init_block(bp, QM_4PORT_BLOCK, FUNC0_STAGE + func);
5662
Dmitry Kravkov523224a2010-10-06 03:23:26 +00005663 bnx2x_init_block(bp, TIMERS_BLOCK, FUNC0_STAGE + func);
5664 bnx2x_init_block(bp, DQ_BLOCK, FUNC0_STAGE + func);
5665 bnx2x_init_block(bp, BRB1_BLOCK, FUNC0_STAGE + func);
5666 bnx2x_init_block(bp, PRS_BLOCK, FUNC0_STAGE + func);
5667 bnx2x_init_block(bp, TSDM_BLOCK, FUNC0_STAGE + func);
5668 bnx2x_init_block(bp, CSDM_BLOCK, FUNC0_STAGE + func);
5669 bnx2x_init_block(bp, USDM_BLOCK, FUNC0_STAGE + func);
5670 bnx2x_init_block(bp, XSDM_BLOCK, FUNC0_STAGE + func);
5671 bnx2x_init_block(bp, UPB_BLOCK, FUNC0_STAGE + func);
5672 bnx2x_init_block(bp, XPB_BLOCK, FUNC0_STAGE + func);
5673 bnx2x_init_block(bp, PBF_BLOCK, FUNC0_STAGE + func);
Dmitry Kravkovf2e08992010-10-06 03:28:26 +00005674 if (CHIP_IS_E2(bp))
5675 REG_WR(bp, PBF_REG_DISABLE_PF, 0);
5676
Dmitry Kravkov523224a2010-10-06 03:23:26 +00005677 bnx2x_init_block(bp, CDU_BLOCK, FUNC0_STAGE + func);
5678
5679 bnx2x_init_block(bp, CFC_BLOCK, FUNC0_STAGE + func);
5680
Dmitry Kravkovf2e08992010-10-06 03:28:26 +00005681 if (CHIP_IS_E2(bp))
5682 REG_WR(bp, CFC_REG_WEAK_ENABLE_PF, 1);
5683
Dmitry Kravkovfb3bff12010-10-06 03:26:40 +00005684 if (IS_MF(bp)) {
Eilon Greenstein34f80b02008-06-23 20:33:01 -07005685 REG_WR(bp, NIG_REG_LLH0_FUNC_EN + port*8, 1);
Dmitry Kravkovfb3bff12010-10-06 03:26:40 +00005686 REG_WR(bp, NIG_REG_LLH0_FUNC_VLAN_ID + port*8, bp->mf_ov);
Eilon Greenstein34f80b02008-06-23 20:33:01 -07005687 }
5688
Dmitry Kravkov523224a2010-10-06 03:23:26 +00005689 bnx2x_init_block(bp, MISC_AEU_BLOCK, FUNC0_STAGE + func);
5690
Eilon Greenstein34f80b02008-06-23 20:33:01 -07005691 /* HC init per function */
Dmitry Kravkovf2e08992010-10-06 03:28:26 +00005692 if (bp->common.int_block == INT_BLOCK_HC) {
5693 if (CHIP_IS_E1H(bp)) {
5694 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_12 + func*4, 0);
5695
5696 REG_WR(bp, HC_REG_LEADING_EDGE_0 + port*8, 0);
5697 REG_WR(bp, HC_REG_TRAILING_EDGE_0 + port*8, 0);
5698 }
5699 bnx2x_init_block(bp, HC_BLOCK, FUNC0_STAGE + func);
5700
5701 } else {
5702 int num_segs, sb_idx, prod_offset;
5703
Eilon Greenstein34f80b02008-06-23 20:33:01 -07005704 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_12 + func*4, 0);
5705
Dmitry Kravkovf2e08992010-10-06 03:28:26 +00005706 if (CHIP_IS_E2(bp)) {
5707 REG_WR(bp, IGU_REG_LEADING_EDGE_LATCH, 0);
5708 REG_WR(bp, IGU_REG_TRAILING_EDGE_LATCH, 0);
5709 }
5710
5711 bnx2x_init_block(bp, IGU_BLOCK, FUNC0_STAGE + func);
5712
5713 if (CHIP_IS_E2(bp)) {
5714 int dsb_idx = 0;
5715 /**
5716 * Producer memory:
5717 * E2 mode: address 0-135 match to the mapping memory;
5718 * 136 - PF0 default prod; 137 - PF1 default prod;
5719 * 138 - PF2 default prod; 139 - PF3 default prod;
5720 * 140 - PF0 attn prod; 141 - PF1 attn prod;
5721 * 142 - PF2 attn prod; 143 - PF3 attn prod;
5722 * 144-147 reserved.
5723 *
5724 * E1.5 mode - In backward compatible mode;
5725 * for non default SB; each even line in the memory
5726 * holds the U producer and each odd line hold
5727 * the C producer. The first 128 producers are for
5728 * NDSB (PF0 - 0-31; PF1 - 32-63 and so on). The last 20
5729 * producers are for the DSB for each PF.
5730 * Each PF has five segments: (the order inside each
5731 * segment is PF0; PF1; PF2; PF3) - 128-131 U prods;
5732 * 132-135 C prods; 136-139 X prods; 140-143 T prods;
5733 * 144-147 attn prods;
5734 */
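/* For example, per the map above: in E2 (non-BC) mode PF2's default
 * producer lives at entry 138 of the producer memory and its attention
 * producer at entry 142.
 */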
5735 /* non-default-status-blocks */
5736 num_segs = CHIP_INT_MODE_IS_BC(bp) ?
5737 IGU_BC_NDSB_NUM_SEGS : IGU_NORM_NDSB_NUM_SEGS;
5738 for (sb_idx = 0; sb_idx < bp->igu_sb_cnt; sb_idx++) {
5739 prod_offset = (bp->igu_base_sb + sb_idx) *
5740 num_segs;
5741
5742 for (i = 0; i < num_segs; i++) {
5743 addr = IGU_REG_PROD_CONS_MEMORY +
5744 (prod_offset + i) * 4;
5745 REG_WR(bp, addr, 0);
5746 }
5747 /* send consumer update with value 0 */
5748 bnx2x_ack_sb(bp, bp->igu_base_sb + sb_idx,
5749 USTORM_ID, 0, IGU_INT_NOP, 1);
5750 bnx2x_igu_clear_sb(bp,
5751 bp->igu_base_sb + sb_idx);
5752 }
5753
5754 /* default-status-blocks */
5755 num_segs = CHIP_INT_MODE_IS_BC(bp) ?
5756 IGU_BC_DSB_NUM_SEGS : IGU_NORM_DSB_NUM_SEGS;
5757
5758 if (CHIP_MODE_IS_4_PORT(bp))
5759 dsb_idx = BP_FUNC(bp);
5760 else
5761 dsb_idx = BP_E1HVN(bp);
5762
5763 prod_offset = (CHIP_INT_MODE_IS_BC(bp) ?
5764 IGU_BC_BASE_DSB_PROD + dsb_idx :
5765 IGU_NORM_BASE_DSB_PROD + dsb_idx);
5766
5767 for (i = 0; i < (num_segs * E1HVN_MAX);
5768 i += E1HVN_MAX) {
5769 addr = IGU_REG_PROD_CONS_MEMORY +
5770 (prod_offset + i)*4;
5771 REG_WR(bp, addr, 0);
5772 }
5773 /* send consumer update with 0 */
5774 if (CHIP_INT_MODE_IS_BC(bp)) {
5775 bnx2x_ack_sb(bp, bp->igu_dsb_id,
5776 USTORM_ID, 0, IGU_INT_NOP, 1);
5777 bnx2x_ack_sb(bp, bp->igu_dsb_id,
5778 CSTORM_ID, 0, IGU_INT_NOP, 1);
5779 bnx2x_ack_sb(bp, bp->igu_dsb_id,
5780 XSTORM_ID, 0, IGU_INT_NOP, 1);
5781 bnx2x_ack_sb(bp, bp->igu_dsb_id,
5782 TSTORM_ID, 0, IGU_INT_NOP, 1);
5783 bnx2x_ack_sb(bp, bp->igu_dsb_id,
5784 ATTENTION_ID, 0, IGU_INT_NOP, 1);
5785 } else {
5786 bnx2x_ack_sb(bp, bp->igu_dsb_id,
5787 USTORM_ID, 0, IGU_INT_NOP, 1);
5788 bnx2x_ack_sb(bp, bp->igu_dsb_id,
5789 ATTENTION_ID, 0, IGU_INT_NOP, 1);
5790 }
5791 bnx2x_igu_clear_sb(bp, bp->igu_dsb_id);
5792
5793 /* !!! these should become driver const once
5794 rf-tool supports split-68 const */
5795 REG_WR(bp, IGU_REG_SB_INT_BEFORE_MASK_LSB, 0);
5796 REG_WR(bp, IGU_REG_SB_INT_BEFORE_MASK_MSB, 0);
5797 REG_WR(bp, IGU_REG_SB_MASK_LSB, 0);
5798 REG_WR(bp, IGU_REG_SB_MASK_MSB, 0);
5799 REG_WR(bp, IGU_REG_PBA_STATUS_LSB, 0);
5800 REG_WR(bp, IGU_REG_PBA_STATUS_MSB, 0);
5801 }
Eilon Greenstein34f80b02008-06-23 20:33:01 -07005802 }
Eilon Greenstein34f80b02008-06-23 20:33:01 -07005803
Eliezer Tamirc14423f2008-02-28 11:49:42 -08005804 /* Reset PCIE errors for debug */
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02005805 REG_WR(bp, 0x2114, 0xffffffff);
5806 REG_WR(bp, 0x2120, 0xffffffff);
Dmitry Kravkov523224a2010-10-06 03:23:26 +00005807
5808 bnx2x_init_block(bp, EMAC0_BLOCK, FUNC0_STAGE + func);
5809 bnx2x_init_block(bp, EMAC1_BLOCK, FUNC0_STAGE + func);
5810 bnx2x_init_block(bp, DBU_BLOCK, FUNC0_STAGE + func);
5811 bnx2x_init_block(bp, DBG_BLOCK, FUNC0_STAGE + func);
5812 bnx2x_init_block(bp, MCP_BLOCK, FUNC0_STAGE + func);
5813 bnx2x_init_block(bp, DMAE_BLOCK, FUNC0_STAGE + func);
5814
Vladislav Zolotarovf4a66892010-10-19 05:13:09 +00005815 if (CHIP_IS_E1x(bp)) {
5816 main_mem_size = HC_REG_MAIN_MEMORY_SIZE / 2; /*dwords*/
5817 main_mem_base = HC_REG_MAIN_MEMORY +
5818 BP_PORT(bp) * (main_mem_size * 4);
5819 main_mem_prty_clr = HC_REG_HC_PRTY_STS_CLR;
5820 main_mem_width = 8;
5821
5822 val = REG_RD(bp, main_mem_prty_clr);
5823 if (val)
5824 DP(BNX2X_MSG_MCP, "Hmmm... Parity errors in HC "
5825 "block during "
5826 "function init (0x%x)!\n", val);
5827
5828 /* Clear "false" parity errors in MSI-X table */
5829 for (i = main_mem_base;
5830 i < main_mem_base + main_mem_size * 4;
5831 i += main_mem_width) {
5832 bnx2x_read_dmae(bp, i, main_mem_width / 4);
5833 bnx2x_write_dmae(bp, bnx2x_sp_mapping(bp, wb_data),
5834 i, main_mem_width / 4);
5835 }
5836 /* Clear HC parity attention */
5837 REG_RD(bp, main_mem_prty_clr);
5838 }
5839
Yaniv Rosnerb7737c92010-09-07 11:40:54 +00005840 bnx2x_phy_probe(&bp->link_params);
Dmitry Kravkovf85582f2010-10-06 03:34:21 +00005841
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02005842 return 0;
5843}
5844
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00005845int bnx2x_init_hw(struct bnx2x *bp, u32 load_code)
Eilon Greenstein34f80b02008-06-23 20:33:01 -07005846{
Dmitry Kravkov523224a2010-10-06 03:23:26 +00005847 int rc = 0;
Eilon Greenstein34f80b02008-06-23 20:33:01 -07005848
5849 DP(BNX2X_MSG_MCP, "function %d load_code %x\n",
Dmitry Kravkovf2e08992010-10-06 03:28:26 +00005850 BP_ABS_FUNC(bp), load_code);
Eilon Greenstein34f80b02008-06-23 20:33:01 -07005851
5852 bp->dmae_ready = 0;
5853 mutex_init(&bp->dmae_mutex);
Eilon Greenstein54016b22009-08-12 08:23:48 +00005854 rc = bnx2x_gunzip_init(bp);
5855 if (rc)
5856 return rc;
Eilon Greenstein34f80b02008-06-23 20:33:01 -07005857
5858 switch (load_code) {
5859 case FW_MSG_CODE_DRV_LOAD_COMMON:
Dmitry Kravkovf2e08992010-10-06 03:28:26 +00005860 case FW_MSG_CODE_DRV_LOAD_COMMON_CHIP:
Dmitry Kravkov523224a2010-10-06 03:23:26 +00005861 rc = bnx2x_init_hw_common(bp, load_code);
Eilon Greenstein34f80b02008-06-23 20:33:01 -07005862 if (rc)
5863 goto init_hw_err;
5864 /* no break */
5865
5866 case FW_MSG_CODE_DRV_LOAD_PORT:
Dmitry Kravkov523224a2010-10-06 03:23:26 +00005867 rc = bnx2x_init_hw_port(bp);
Eilon Greenstein34f80b02008-06-23 20:33:01 -07005868 if (rc)
5869 goto init_hw_err;
5870 /* no break */
5871
5872 case FW_MSG_CODE_DRV_LOAD_FUNCTION:
Dmitry Kravkov523224a2010-10-06 03:23:26 +00005873 rc = bnx2x_init_hw_func(bp);
Eilon Greenstein34f80b02008-06-23 20:33:01 -07005874 if (rc)
5875 goto init_hw_err;
5876 break;
5877
5878 default:
5879 BNX2X_ERR("Unknown load_code (0x%x) from MCP\n", load_code);
5880 break;
5881 }
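/* Note: the missing breaks above are deliberate - a COMMON load runs
 * common, port and function init; a PORT load runs port and function
 * init; a FUNCTION load runs function init only.
 */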
5882
5883 if (!BP_NOMCP(bp)) {
Dmitry Kravkovf2e08992010-10-06 03:28:26 +00005884 int mb_idx = BP_FW_MB_IDX(bp);
Eilon Greenstein34f80b02008-06-23 20:33:01 -07005885
5886 bp->fw_drv_pulse_wr_seq =
Dmitry Kravkovf2e08992010-10-06 03:28:26 +00005887 (SHMEM_RD(bp, func_mb[mb_idx].drv_pulse_mb) &
Eilon Greenstein34f80b02008-06-23 20:33:01 -07005888 DRV_PULSE_SEQ_MASK);
Eilon Greenstein6fe49bb2009-08-12 08:23:17 +00005889 DP(BNX2X_MSG_MCP, "drv_pulse 0x%x\n", bp->fw_drv_pulse_wr_seq);
5890 }
Eilon Greenstein34f80b02008-06-23 20:33:01 -07005891
Eilon Greenstein34f80b02008-06-23 20:33:01 -07005892init_hw_err:
5893 bnx2x_gunzip_end(bp);
5894
5895 return rc;
5896}
5897
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00005898void bnx2x_free_mem(struct bnx2x *bp)
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02005899{
5900
5901#define BNX2X_PCI_FREE(x, y, size) \
5902 do { \
5903 if (x) { \
Dmitry Kravkov523224a2010-10-06 03:23:26 +00005904 dma_free_coherent(&bp->pdev->dev, size, (void *)x, y); \
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02005905 x = NULL; \
5906 y = 0; \
5907 } \
5908 } while (0)
5909
5910#define BNX2X_FREE(x) \
5911 do { \
5912 if (x) { \
Dmitry Kravkov523224a2010-10-06 03:23:26 +00005913 kfree((void *)x); \
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02005914 x = NULL; \
5915 } \
5916 } while (0)
5917
5918 int i;
5919
5920 /* fastpath */
Eilon Greenstein555f6c72009-02-12 08:36:11 +00005921 /* Common */
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02005922 for_each_queue(bp, i) {
Vladislav Zolotarovec6ba942010-12-13 05:44:01 +00005923#ifdef BCM_CNIC
5924 /* FCoE client uses default status block */
5925 if (IS_FCOE_IDX(i)) {
5926 union host_hc_status_block *sb =
5927 &bnx2x_fp(bp, i, status_blk);
5928 memset(sb, 0, sizeof(union host_hc_status_block));
5929 bnx2x_fp(bp, i, status_blk_mapping) = 0;
5930 } else {
5931#endif
Eilon Greenstein555f6c72009-02-12 08:36:11 +00005932 /* status blocks */
Dmitry Kravkovf2e08992010-10-06 03:28:26 +00005933 if (CHIP_IS_E2(bp))
5934 BNX2X_PCI_FREE(bnx2x_fp(bp, i, status_blk.e2_sb),
5935 bnx2x_fp(bp, i, status_blk_mapping),
5936 sizeof(struct host_hc_status_block_e2));
5937 else
5938 BNX2X_PCI_FREE(bnx2x_fp(bp, i, status_blk.e1x_sb),
5939 bnx2x_fp(bp, i, status_blk_mapping),
5940 sizeof(struct host_hc_status_block_e1x));
Vladislav Zolotarovec6ba942010-12-13 05:44:01 +00005941#ifdef BCM_CNIC
5942 }
5943#endif
Eilon Greenstein555f6c72009-02-12 08:36:11 +00005944 }
5945 /* Rx */
Vladislav Zolotarovec6ba942010-12-13 05:44:01 +00005946 for_each_rx_queue(bp, i) {
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02005947
Eilon Greenstein555f6c72009-02-12 08:36:11 +00005948 /* fastpath rx rings: rx_buf rx_desc rx_comp */
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02005949 BNX2X_FREE(bnx2x_fp(bp, i, rx_buf_ring));
5950 BNX2X_PCI_FREE(bnx2x_fp(bp, i, rx_desc_ring),
5951 bnx2x_fp(bp, i, rx_desc_mapping),
5952 sizeof(struct eth_rx_bd) * NUM_RX_BD);
5953
5954 BNX2X_PCI_FREE(bnx2x_fp(bp, i, rx_comp_ring),
5955 bnx2x_fp(bp, i, rx_comp_mapping),
5956 sizeof(struct eth_fast_path_rx_cqe) *
5957 NUM_RCQ_BD);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02005958
Vladislav Zolotarov7a9b2552008-06-23 20:34:36 -07005959 /* SGE ring */
Eilon Greenstein32626232008-08-13 15:51:07 -07005960 BNX2X_FREE(bnx2x_fp(bp, i, rx_page_ring));
Vladislav Zolotarov7a9b2552008-06-23 20:34:36 -07005961 BNX2X_PCI_FREE(bnx2x_fp(bp, i, rx_sge_ring),
5962 bnx2x_fp(bp, i, rx_sge_mapping),
5963 BCM_PAGE_SIZE * NUM_RX_SGE_PAGES);
5964 }
Eilon Greenstein555f6c72009-02-12 08:36:11 +00005965 /* Tx */
Vladislav Zolotarovec6ba942010-12-13 05:44:01 +00005966 for_each_tx_queue(bp, i) {
Eilon Greenstein555f6c72009-02-12 08:36:11 +00005967
5968 /* fastpath tx rings: tx_buf tx_desc */
5969 BNX2X_FREE(bnx2x_fp(bp, i, tx_buf_ring));
5970 BNX2X_PCI_FREE(bnx2x_fp(bp, i, tx_desc_ring),
5971 bnx2x_fp(bp, i, tx_desc_mapping),
Eilon Greensteinca003922009-08-12 22:53:28 -07005972 sizeof(union eth_tx_bd_types) * NUM_TX_BD);
Eilon Greenstein555f6c72009-02-12 08:36:11 +00005973 }
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02005974 /* end of fastpath */
5975
5976 BNX2X_PCI_FREE(bp->def_status_blk, bp->def_status_blk_mapping,
Dmitry Kravkov523224a2010-10-06 03:23:26 +00005977 sizeof(struct host_sp_status_block));
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02005978
5979 BNX2X_PCI_FREE(bp->slowpath, bp->slowpath_mapping,
Eilon Greenstein34f80b02008-06-23 20:33:01 -07005980 sizeof(struct bnx2x_slowpath));
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02005981
Dmitry Kravkov523224a2010-10-06 03:23:26 +00005982 BNX2X_PCI_FREE(bp->context.vcxt, bp->context.cxt_mapping,
5983 bp->context.size);
5984
5985 bnx2x_ilt_mem_op(bp, ILT_MEMOP_FREE);
5986
5987 BNX2X_FREE(bp->ilt->lines);
Dmitry Kravkovf85582f2010-10-06 03:34:21 +00005988
Michael Chan37b091b2009-10-10 13:46:55 +00005989#ifdef BCM_CNIC
Dmitry Kravkovf2e08992010-10-06 03:28:26 +00005990 if (CHIP_IS_E2(bp))
5991 BNX2X_PCI_FREE(bp->cnic_sb.e2_sb, bp->cnic_sb_mapping,
5992 sizeof(struct host_hc_status_block_e2));
5993 else
5994 BNX2X_PCI_FREE(bp->cnic_sb.e1x_sb, bp->cnic_sb_mapping,
5995 sizeof(struct host_hc_status_block_e1x));
Dmitry Kravkovf85582f2010-10-06 03:34:21 +00005996
Dmitry Kravkov523224a2010-10-06 03:23:26 +00005997 BNX2X_PCI_FREE(bp->t2, bp->t2_mapping, SRC_T2_SZ);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02005998#endif
Dmitry Kravkovf85582f2010-10-06 03:34:21 +00005999
Vladislav Zolotarov7a9b2552008-06-23 20:34:36 -07006000 BNX2X_PCI_FREE(bp->spq, bp->spq_mapping, BCM_PAGE_SIZE);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02006001
Dmitry Kravkov523224a2010-10-06 03:23:26 +00006002 BNX2X_PCI_FREE(bp->eq_ring, bp->eq_mapping,
6003 BCM_PAGE_SIZE * NUM_EQ_PAGES);
6004
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02006005#undef BNX2X_PCI_FREE
6006#undef BNX2X_FREE
6007}
6008
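/* The shortcuts set below let the fastpath read the SB index values and
 * the running index through a single pointer, regardless of whether the
 * status block is the E2 or the E1x flavour.
 */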
Dmitry Kravkovf2e08992010-10-06 03:28:26 +00006009static inline void set_sb_shortcuts(struct bnx2x *bp, int index)
6010{
6011 union host_hc_status_block status_blk = bnx2x_fp(bp, index, status_blk);
6012 if (CHIP_IS_E2(bp)) {
6013 bnx2x_fp(bp, index, sb_index_values) =
6014 (__le16 *)status_blk.e2_sb->sb.index_values;
6015 bnx2x_fp(bp, index, sb_running_index) =
6016 (__le16 *)status_blk.e2_sb->sb.running_index;
6017 } else {
6018 bnx2x_fp(bp, index, sb_index_values) =
6019 (__le16 *)status_blk.e1x_sb->sb.index_values;
6020 bnx2x_fp(bp, index, sb_running_index) =
6021 (__le16 *)status_blk.e1x_sb->sb.running_index;
6022 }
6023}
6024
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00006025int bnx2x_alloc_mem(struct bnx2x *bp)
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02006026{
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02006027#define BNX2X_PCI_ALLOC(x, y, size) \
6028 do { \
FUJITA Tomonori1a983142010-04-04 01:51:03 +00006029 x = dma_alloc_coherent(&bp->pdev->dev, size, y, GFP_KERNEL); \
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02006030 if (x == NULL) \
6031 goto alloc_mem_err; \
6032 memset(x, 0, size); \
6033 } while (0)
6034
6035#define BNX2X_ALLOC(x, size) \
6036 do { \
Dmitry Kravkov523224a2010-10-06 03:23:26 +00006037 x = kzalloc(size, GFP_KERNEL); \
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02006038 if (x == NULL) \
6039 goto alloc_mem_err; \
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02006040 } while (0)
6041
6042 int i;
6043
6044 /* fastpath */
Eilon Greenstein555f6c72009-02-12 08:36:11 +00006045 /* Common */
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02006046 for_each_queue(bp, i) {
Dmitry Kravkovf2e08992010-10-06 03:28:26 +00006047 union host_hc_status_block *sb = &bnx2x_fp(bp, i, status_blk);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02006048 bnx2x_fp(bp, i, bp) = bp;
Eilon Greenstein555f6c72009-02-12 08:36:11 +00006049 /* status blocks */
Vladislav Zolotarovec6ba942010-12-13 05:44:01 +00006050#ifdef BCM_CNIC
6051 if (!IS_FCOE_IDX(i)) {
6052#endif
6053 if (CHIP_IS_E2(bp))
6054 BNX2X_PCI_ALLOC(sb->e2_sb,
6055 &bnx2x_fp(bp, i, status_blk_mapping),
6056 sizeof(struct host_hc_status_block_e2));
6057 else
6058 BNX2X_PCI_ALLOC(sb->e1x_sb,
6059 &bnx2x_fp(bp, i, status_blk_mapping),
6060 sizeof(struct host_hc_status_block_e1x));
6061#ifdef BCM_CNIC
6062 }
6063#endif
Dmitry Kravkovf2e08992010-10-06 03:28:26 +00006064 set_sb_shortcuts(bp, i);
Eilon Greenstein555f6c72009-02-12 08:36:11 +00006065 }
6066 /* Rx */
Vladislav Zolotarov54b9dda2009-11-16 06:05:58 +00006067 for_each_queue(bp, i) {
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02006068
Eilon Greenstein555f6c72009-02-12 08:36:11 +00006069 /* fastpath rx rings: rx_buf rx_desc rx_comp */
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02006070 BNX2X_ALLOC(bnx2x_fp(bp, i, rx_buf_ring),
6071 sizeof(struct sw_rx_bd) * NUM_RX_BD);
6072 BNX2X_PCI_ALLOC(bnx2x_fp(bp, i, rx_desc_ring),
6073 &bnx2x_fp(bp, i, rx_desc_mapping),
6074 sizeof(struct eth_rx_bd) * NUM_RX_BD);
6075
6076 BNX2X_PCI_ALLOC(bnx2x_fp(bp, i, rx_comp_ring),
6077 &bnx2x_fp(bp, i, rx_comp_mapping),
6078 sizeof(struct eth_fast_path_rx_cqe) *
6079 NUM_RCQ_BD);
6080
Vladislav Zolotarov7a9b2552008-06-23 20:34:36 -07006081 /* SGE ring */
6082 BNX2X_ALLOC(bnx2x_fp(bp, i, rx_page_ring),
6083 sizeof(struct sw_rx_page) * NUM_RX_SGE);
6084 BNX2X_PCI_ALLOC(bnx2x_fp(bp, i, rx_sge_ring),
6085 &bnx2x_fp(bp, i, rx_sge_mapping),
6086 BCM_PAGE_SIZE * NUM_RX_SGE_PAGES);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02006087 }
Eilon Greenstein555f6c72009-02-12 08:36:11 +00006088 /* Tx */
Vladislav Zolotarov54b9dda2009-11-16 06:05:58 +00006089 for_each_queue(bp, i) {
Eilon Greenstein555f6c72009-02-12 08:36:11 +00006090
Eilon Greenstein555f6c72009-02-12 08:36:11 +00006091 /* fastpath tx rings: tx_buf tx_desc */
6092 BNX2X_ALLOC(bnx2x_fp(bp, i, tx_buf_ring),
6093 sizeof(struct sw_tx_bd) * NUM_TX_BD);
6094 BNX2X_PCI_ALLOC(bnx2x_fp(bp, i, tx_desc_ring),
6095 &bnx2x_fp(bp, i, tx_desc_mapping),
Eilon Greensteinca003922009-08-12 22:53:28 -07006096 sizeof(union eth_tx_bd_types) * NUM_TX_BD);
Eilon Greenstein555f6c72009-02-12 08:36:11 +00006097 }
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02006098 /* end of fastpath */
6099
Dmitry Kravkov523224a2010-10-06 03:23:26 +00006100#ifdef BCM_CNIC
Dmitry Kravkovf2e08992010-10-06 03:28:26 +00006101 if (CHIP_IS_E2(bp))
6102 BNX2X_PCI_ALLOC(bp->cnic_sb.e2_sb, &bp->cnic_sb_mapping,
6103 sizeof(struct host_hc_status_block_e2));
6104 else
6105 BNX2X_PCI_ALLOC(bp->cnic_sb.e1x_sb, &bp->cnic_sb_mapping,
6106 sizeof(struct host_hc_status_block_e1x));
Dmitry Kravkov523224a2010-10-06 03:23:26 +00006107
6108 /* allocate searcher T2 table */
6109 BNX2X_PCI_ALLOC(bp->t2, &bp->t2_mapping, SRC_T2_SZ);
6110#endif
6111
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02006113 BNX2X_PCI_ALLOC(bp->def_status_blk, &bp->def_status_blk_mapping,
Dmitry Kravkov523224a2010-10-06 03:23:26 +00006114 sizeof(struct host_sp_status_block));
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02006115
6116 BNX2X_PCI_ALLOC(bp->slowpath, &bp->slowpath_mapping,
6117 sizeof(struct bnx2x_slowpath));
6118
Dmitry Kravkov523224a2010-10-06 03:23:26 +00006119 bp->context.size = sizeof(union cdu_context) * bp->l2_cid_count;
Dmitry Kravkovf85582f2010-10-06 03:34:21 +00006120
Dmitry Kravkov523224a2010-10-06 03:23:26 +00006121 BNX2X_PCI_ALLOC(bp->context.vcxt, &bp->context.cxt_mapping,
6122 bp->context.size);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02006123
Dmitry Kravkov523224a2010-10-06 03:23:26 +00006124 BNX2X_ALLOC(bp->ilt->lines, sizeof(struct ilt_line) * ILT_MAX_LINES);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02006125
Dmitry Kravkov523224a2010-10-06 03:23:26 +00006126 if (bnx2x_ilt_mem_op(bp, ILT_MEMOP_ALLOC))
6127 goto alloc_mem_err;
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02006128
6129 /* Slow path ring */
6130 BNX2X_PCI_ALLOC(bp->spq, &bp->spq_mapping, BCM_PAGE_SIZE);
6131
Dmitry Kravkov523224a2010-10-06 03:23:26 +00006132 /* EQ */
6133 BNX2X_PCI_ALLOC(bp->eq_ring, &bp->eq_mapping,
6134 BCM_PAGE_SIZE * NUM_EQ_PAGES);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02006135 return 0;
6136
6137alloc_mem_err:
6138 bnx2x_free_mem(bp);
6139 return -ENOMEM;
6140
6141#undef BNX2X_PCI_ALLOC
6142#undef BNX2X_ALLOC
6143}
6144
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02006145/*
6146 * Init service functions
6147 */
stephen hemminger8d962862010-10-21 07:50:56 +00006148static int bnx2x_wait_ramrod(struct bnx2x *bp, int state, int idx,
6149 int *state_p, int flags);
6150
Dmitry Kravkov523224a2010-10-06 03:23:26 +00006151int bnx2x_func_start(struct bnx2x *bp)
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02006152{
Dmitry Kravkov523224a2010-10-06 03:23:26 +00006153 bnx2x_sp_post(bp, RAMROD_CMD_ID_COMMON_FUNCTION_START, 0, 0, 0, 1);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02006154
Dmitry Kravkov523224a2010-10-06 03:23:26 +00006155 /* Wait for completion */
6156 return bnx2x_wait_ramrod(bp, BNX2X_STATE_FUNC_STARTED, 0, &(bp->state),
6157 WAIT_RAMROD_COMMON);
6158}
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02006159
stephen hemminger8d962862010-10-21 07:50:56 +00006160static int bnx2x_func_stop(struct bnx2x *bp)
Dmitry Kravkov523224a2010-10-06 03:23:26 +00006161{
6162 bnx2x_sp_post(bp, RAMROD_CMD_ID_COMMON_FUNCTION_STOP, 0, 0, 0, 1);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02006163
Dmitry Kravkov523224a2010-10-06 03:23:26 +00006164 /* Wait for completion */
6165 return bnx2x_wait_ramrod(bp, BNX2X_STATE_CLOSING_WAIT4_UNLOAD,
6166 0, &(bp->state), WAIT_RAMROD_COMMON);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02006167}
6168
Michael Chane665bfd2009-10-10 13:46:54 +00006169/**
Dmitry Kravkovf85582f2010-10-06 03:34:21 +00006170 * Sets a MAC in a CAM for a few L2 Clients for E1x chips
Michael Chane665bfd2009-10-10 13:46:54 +00006171 *
6172 * @param bp driver descriptor
6173 * @param set set or clear an entry (1 or 0)
6174 * @param mac pointer to a buffer containing a MAC
6175 * @param cl_bit_vec bit vector of clients to register a MAC for
6176 * @param cam_offset offset in a CAM to use
Dmitry Kravkov523224a2010-10-06 03:23:26 +00006177 * @param is_bcast is the set MAC a broadcast address (for E1 only)
Michael Chane665bfd2009-10-10 13:46:54 +00006178 */
Joe Perches215faf92010-12-21 02:16:10 -08006179static void bnx2x_set_mac_addr_gen(struct bnx2x *bp, int set, const u8 *mac,
Dmitry Kravkovf85582f2010-10-06 03:34:21 +00006180 u32 cl_bit_vec, u8 cam_offset,
6181 u8 is_bcast)
Eilon Greenstein34f80b02008-06-23 20:33:01 -07006182{
Dmitry Kravkov523224a2010-10-06 03:23:26 +00006183 struct mac_configuration_cmd *config =
6184 (struct mac_configuration_cmd *)bnx2x_sp(bp, mac_config);
6185 int ramrod_flags = WAIT_RAMROD_COMMON;
6186
6187 bp->set_mac_pending = 1;
6188 smp_wmb();
6189
Eilon Greenstein8d9c5f32009-02-15 23:24:08 -08006190 config->hdr.length = 1;
Michael Chane665bfd2009-10-10 13:46:54 +00006191 config->hdr.offset = cam_offset;
6192 config->hdr.client_id = 0xff;
Eilon Greenstein34f80b02008-06-23 20:33:01 -07006193 config->hdr.reserved1 = 0;
6194
6195 /* primary MAC */
6196 config->config_table[0].msb_mac_addr =
Michael Chane665bfd2009-10-10 13:46:54 +00006197 swab16(*(u16 *)&mac[0]);
Eilon Greenstein34f80b02008-06-23 20:33:01 -07006198 config->config_table[0].middle_mac_addr =
Michael Chane665bfd2009-10-10 13:46:54 +00006199 swab16(*(u16 *)&mac[2]);
Eilon Greenstein34f80b02008-06-23 20:33:01 -07006200 config->config_table[0].lsb_mac_addr =
Michael Chane665bfd2009-10-10 13:46:54 +00006201 swab16(*(u16 *)&mac[4]);
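/* e.g. (illustrative) for a MAC of 00:10:18:aa:bb:cc this yields
 * msb 0x0010, middle 0x18aa and lsb 0xbbcc: on a little-endian host the
 * u16 load reverses each byte pair and swab16() restores the on-wire
 * order.
 */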
Eilon Greensteinca003922009-08-12 22:53:28 -07006202 config->config_table[0].clients_bit_vector =
Michael Chane665bfd2009-10-10 13:46:54 +00006203 cpu_to_le32(cl_bit_vec);
Eilon Greenstein34f80b02008-06-23 20:33:01 -07006204 config->config_table[0].vlan_id = 0;
Dmitry Kravkov523224a2010-10-06 03:23:26 +00006205 config->config_table[0].pf_id = BP_FUNC(bp);
Yitchak Gertner3101c2b2008-08-13 15:52:28 -07006206 if (set)
Dmitry Kravkov523224a2010-10-06 03:23:26 +00006207 SET_FLAG(config->config_table[0].flags,
6208 MAC_CONFIGURATION_ENTRY_ACTION_TYPE,
6209 T_ETH_MAC_COMMAND_SET);
Yitchak Gertner3101c2b2008-08-13 15:52:28 -07006210 else
Dmitry Kravkov523224a2010-10-06 03:23:26 +00006211 SET_FLAG(config->config_table[0].flags,
6212 MAC_CONFIGURATION_ENTRY_ACTION_TYPE,
6213 T_ETH_MAC_COMMAND_INVALIDATE);
Eilon Greenstein34f80b02008-06-23 20:33:01 -07006214
Dmitry Kravkov523224a2010-10-06 03:23:26 +00006215 if (is_bcast)
6216 SET_FLAG(config->config_table[0].flags,
6217 MAC_CONFIGURATION_ENTRY_BROADCAST, 1);
6218
6219 DP(NETIF_MSG_IFUP, "%s MAC (%04x:%04x:%04x) PF_ID %d CLID mask %d\n",
Yitchak Gertner3101c2b2008-08-13 15:52:28 -07006220 (set ? "setting" : "clearing"),
Eilon Greenstein34f80b02008-06-23 20:33:01 -07006221 config->config_table[0].msb_mac_addr,
6222 config->config_table[0].middle_mac_addr,
Dmitry Kravkov523224a2010-10-06 03:23:26 +00006223 config->config_table[0].lsb_mac_addr, BP_FUNC(bp), cl_bit_vec);
Eilon Greenstein34f80b02008-06-23 20:33:01 -07006224
Dmitry Kravkov523224a2010-10-06 03:23:26 +00006225 bnx2x_sp_post(bp, RAMROD_CMD_ID_COMMON_SET_MAC, 0,
Eilon Greenstein34f80b02008-06-23 20:33:01 -07006226 U64_HI(bnx2x_sp_mapping(bp, mac_config)),
Dmitry Kravkov523224a2010-10-06 03:23:26 +00006227 U64_LO(bnx2x_sp_mapping(bp, mac_config)), 1);
6228
6229 /* Wait for a completion */
6230 bnx2x_wait_ramrod(bp, 0, 0, &bp->set_mac_pending, ramrod_flags);
Eilon Greenstein34f80b02008-06-23 20:33:01 -07006231}
6232
stephen hemminger8d962862010-10-21 07:50:56 +00006233static int bnx2x_wait_ramrod(struct bnx2x *bp, int state, int idx,
6234 int *state_p, int flags)
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02006235{
6236 /* can take a while if any port is running */
Eilon Greenstein8b3a0f02009-02-12 08:37:23 +00006237 int cnt = 5000;
Dmitry Kravkov523224a2010-10-06 03:23:26 +00006238 u8 poll = flags & WAIT_RAMROD_POLL;
6239 u8 common = flags & WAIT_RAMROD_COMMON;
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02006240
Eliezer Tamirc14423f2008-02-28 11:49:42 -08006241 DP(NETIF_MSG_IFUP, "%s for state to become %x on IDX [%d]\n",
6242 poll ? "polling" : "waiting", state, idx);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02006243
6244 might_sleep();
Eilon Greenstein34f80b02008-06-23 20:33:01 -07006245 while (cnt--) {
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02006246 if (poll) {
Dmitry Kravkov523224a2010-10-06 03:23:26 +00006247 if (common)
6248 bnx2x_eq_int(bp);
6249 else {
6250 bnx2x_rx_int(bp->fp, 10);
6251 /* if index is different from 0
6252 * the reply for some commands will
6253 * be on the non default queue
6254 */
6255 if (idx)
6256 bnx2x_rx_int(&bp->fp[idx], 10);
6257 }
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02006258 }
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02006259
Yitchak Gertner3101c2b2008-08-13 15:52:28 -07006260 mb(); /* state is changed by bnx2x_sp_event() */
Eilon Greenstein8b3a0f02009-02-12 08:37:23 +00006261 if (*state_p == state) {
6262#ifdef BNX2X_STOP_ON_ERROR
6263 DP(NETIF_MSG_IFUP, "exit (cnt %d)\n", 5000 - cnt);
6264#endif
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02006265 return 0;
Eilon Greenstein8b3a0f02009-02-12 08:37:23 +00006266 }
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02006267
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02006268 msleep(1);
Eilon Greensteine3553b22009-08-12 08:23:31 +00006269
6270 if (bp->panic)
6271 return -EIO;
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02006272 }
6273
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02006274 /* timeout! */
Eliezer Tamir49d66772008-02-28 11:53:13 -08006275 BNX2X_ERR("timeout %s for state %x on IDX [%d]\n",
6276 poll ? "polling" : "waiting", state, idx);
Eilon Greenstein34f80b02008-06-23 20:33:01 -07006277#ifdef BNX2X_STOP_ON_ERROR
6278 bnx2x_panic();
6279#endif
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02006280
Eliezer Tamir49d66772008-02-28 11:53:13 -08006281 return -EBUSY;
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02006282}
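/* Typical usage (as in bnx2x_func_start() above): post a ramrod and then
 * wait here until bnx2x_sp_event() flips the state, e.g.
 *
 *	bnx2x_wait_ramrod(bp, BNX2X_STATE_FUNC_STARTED, 0, &bp->state,
 *			  WAIT_RAMROD_COMMON);
 */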
6283
stephen hemminger8d962862010-10-21 07:50:56 +00006284static u8 bnx2x_e1h_cam_offset(struct bnx2x *bp, u8 rel_offset)
Michael Chane665bfd2009-10-10 13:46:54 +00006285{
Dmitry Kravkovf2e08992010-10-06 03:28:26 +00006286 if (CHIP_IS_E1H(bp))
6287 return E1H_FUNC_MAX * rel_offset + BP_FUNC(bp);
6288 else if (CHIP_MODE_IS_4_PORT(bp))
6289 return BP_FUNC(bp) * 32 + rel_offset;
6290 else
6291 return BP_VN(bp) * 32 + rel_offset;
Michael Chane665bfd2009-10-10 13:46:54 +00006292}
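/* An illustrative reading of the mapping above: on E1H, with E1H_FUNC_MAX
 * functions per logical line, rel_offset 1 for function 3 maps to CAM
 * entry E1H_FUNC_MAX * 1 + 3, i.e. each logical line groups the entries
 * of all functions together.
 */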
6293
Dmitry Kravkov0793f83f2010-12-01 12:39:28 -08006294/**
6295 * LLH CAM line allocations: currently only iSCSI and ETH macs are
6296 * relevant. In addition, current implementation is tuned for a
6297 * single ETH MAC.
6298 *
6299 * When PF configuration of multiple unicast ETH MACs in switch
6300 * independent mode is required (NetQ, multiple netdev MACs,
6301 * etc.), consider better utilisation of the 16 per-function MAC
6302 * entries in the LLH memory.
6303 */
6304enum {
6305 LLH_CAM_ISCSI_ETH_LINE = 0,
6306 LLH_CAM_ETH_LINE,
6307 LLH_CAM_MAX_PF_LINE = NIG_REG_LLH1_FUNC_MEM_SIZE
6308};
6309
6310static void bnx2x_set_mac_in_nig(struct bnx2x *bp,
6311 int set,
6312 unsigned char *dev_addr,
6313 int index)
6314{
6315 u32 wb_data[2];
6316 u32 mem_offset, ena_offset, mem_index;
6317 /**
6318 * indexes mapping:
6319 * 0..7 - go to MEM
6320 * 8..15 - go to MEM2
6321 */
6322
6323 if (!IS_MF_SI(bp) || index > LLH_CAM_MAX_PF_LINE)
6324 return;
6325
6326 /* calculate memory start offset according to the mapping
6327 * and index in the memory */
6328 if (index < NIG_LLH_FUNC_MEM_MAX_OFFSET) {
6329 mem_offset = BP_PORT(bp) ? NIG_REG_LLH1_FUNC_MEM :
6330 NIG_REG_LLH0_FUNC_MEM;
6331 ena_offset = BP_PORT(bp) ? NIG_REG_LLH1_FUNC_MEM_ENABLE :
6332 NIG_REG_LLH0_FUNC_MEM_ENABLE;
6333 mem_index = index;
6334 } else {
6335 mem_offset = BP_PORT(bp) ? NIG_REG_P1_LLH_FUNC_MEM2 :
6336 NIG_REG_P0_LLH_FUNC_MEM2;
6337 ena_offset = BP_PORT(bp) ? NIG_REG_P1_LLH_FUNC_MEM2_ENABLE :
6338 NIG_REG_P0_LLH_FUNC_MEM2_ENABLE;
6339 mem_index = index - NIG_LLH_FUNC_MEM_MAX_OFFSET;
6340 }
6341
6342 if (set) {
6343 /* LLH_FUNC_MEM is a u64 WB register */
6344 mem_offset += 8*mem_index;
6345
6346 wb_data[0] = ((dev_addr[2] << 24) | (dev_addr[3] << 16) |
6347 (dev_addr[4] << 8) | dev_addr[5]);
6348 wb_data[1] = ((dev_addr[0] << 8) | dev_addr[1]);
6349
6350 REG_WR_DMAE(bp, mem_offset, wb_data, 2);
6351 }
6352
6353 /* enable/disable the entry */
6354 REG_WR(bp, ena_offset + 4*mem_index, set);
6355
6356}
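/* A worked example (illustrative): for a dev_addr of 00:10:18:aa:bb:cc
 * the WB write above stores wb_data[0] = 0x18aabbcc (the lower four MAC
 * bytes) and wb_data[1] = 0x00000010 (the upper two bytes).
 */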
6357
Dmitry Kravkov523224a2010-10-06 03:23:26 +00006358void bnx2x_set_eth_mac(struct bnx2x *bp, int set)
Michael Chane665bfd2009-10-10 13:46:54 +00006359{
Dmitry Kravkov523224a2010-10-06 03:23:26 +00006360 u8 cam_offset = (CHIP_IS_E1(bp) ? (BP_PORT(bp) ? 32 : 0) :
6361 bnx2x_e1h_cam_offset(bp, CAM_ETH_LINE));
6362
6363 /* networking MAC */
6364 bnx2x_set_mac_addr_gen(bp, set, bp->dev->dev_addr,
6365 (1 << bp->fp->cl_id), cam_offset, 0);
6366
Dmitry Kravkov0793f83f2010-12-01 12:39:28 -08006367 bnx2x_set_mac_in_nig(bp, set, bp->dev->dev_addr, LLH_CAM_ETH_LINE);
6368
Dmitry Kravkov523224a2010-10-06 03:23:26 +00006369 if (CHIP_IS_E1(bp)) {
6370 /* broadcast MAC */
Joe Perches215faf92010-12-21 02:16:10 -08006371 static const u8 bcast[ETH_ALEN] = {
6372 0xff, 0xff, 0xff, 0xff, 0xff, 0xff
6373 };
Dmitry Kravkov523224a2010-10-06 03:23:26 +00006374 bnx2x_set_mac_addr_gen(bp, set, bcast, 0, cam_offset + 1, 1);
6375 }
6376}

6377static void bnx2x_set_e1_mc_list(struct bnx2x *bp, u8 offset)
6378{
6379 int i = 0, old;
6380 struct net_device *dev = bp->dev;
6381 struct netdev_hw_addr *ha;
6382 struct mac_configuration_cmd *config_cmd = bnx2x_sp(bp, mcast_config);
6383 dma_addr_t config_cmd_map = bnx2x_sp_mapping(bp, mcast_config);
6384
6385 netdev_for_each_mc_addr(ha, dev) {
6386 /* copy mac */
6387 config_cmd->config_table[i].msb_mac_addr =
6388 swab16(*(u16 *)&bnx2x_mc_addr(ha)[0]);
6389 config_cmd->config_table[i].middle_mac_addr =
6390 swab16(*(u16 *)&bnx2x_mc_addr(ha)[2]);
6391 config_cmd->config_table[i].lsb_mac_addr =
6392 swab16(*(u16 *)&bnx2x_mc_addr(ha)[4]);
6393
6394 config_cmd->config_table[i].vlan_id = 0;
6395 config_cmd->config_table[i].pf_id = BP_FUNC(bp);
6396 config_cmd->config_table[i].clients_bit_vector =
6397 cpu_to_le32(1 << BP_L_ID(bp));
6398
6399 SET_FLAG(config_cmd->config_table[i].flags,
6400 MAC_CONFIGURATION_ENTRY_ACTION_TYPE,
6401 T_ETH_MAC_COMMAND_SET);
6402
6403 DP(NETIF_MSG_IFUP,
6404 "setting MCAST[%d] (%04x:%04x:%04x)\n", i,
6405 config_cmd->config_table[i].msb_mac_addr,
6406 config_cmd->config_table[i].middle_mac_addr,
6407 config_cmd->config_table[i].lsb_mac_addr);
6408 i++;
6409 }
6410 old = config_cmd->hdr.length;
6411 if (old > i) {
6412 for (; i < old; i++) {
6413 if (CAM_IS_INVALID(config_cmd->
6414 config_table[i])) {
6415 /* already invalidated */
6416 break;
6417 }
6418 /* invalidate */
6419 SET_FLAG(config_cmd->config_table[i].flags,
6420 MAC_CONFIGURATION_ENTRY_ACTION_TYPE,
6421 T_ETH_MAC_COMMAND_INVALIDATE);
6422 }
6423 }
6424
6425 config_cmd->hdr.length = i;
6426 config_cmd->hdr.offset = offset;
6427 config_cmd->hdr.client_id = 0xff;
6428 config_cmd->hdr.reserved1 = 0;
6429
6430 bp->set_mac_pending = 1;
Michael Chane665bfd2009-10-10 13:46:54 +00006431 smp_wmb();
6432
Dmitry Kravkov523224a2010-10-06 03:23:26 +00006433 bnx2x_sp_post(bp, RAMROD_CMD_ID_COMMON_SET_MAC, 0,
6434 U64_HI(config_cmd_map), U64_LO(config_cmd_map), 1);
6435}

6436static void bnx2x_invlidate_e1_mc_list(struct bnx2x *bp)
6437{
6438 int i;
6439 struct mac_configuration_cmd *config_cmd = bnx2x_sp(bp, mcast_config);
6440 dma_addr_t config_cmd_map = bnx2x_sp_mapping(bp, mcast_config);
6441 int ramrod_flags = WAIT_RAMROD_COMMON;
6442
6443 bp->set_mac_pending = 1;
6444 smp_wmb();
6445
6446 for (i = 0; i < config_cmd->hdr.length; i++)
6447 SET_FLAG(config_cmd->config_table[i].flags,
6448 MAC_CONFIGURATION_ENTRY_ACTION_TYPE,
6449 T_ETH_MAC_COMMAND_INVALIDATE);
6450
6451 bnx2x_sp_post(bp, RAMROD_CMD_ID_COMMON_SET_MAC, 0,
6452 U64_HI(config_cmd_map), U64_LO(config_cmd_map), 1);
Michael Chane665bfd2009-10-10 13:46:54 +00006453
6454 /* Wait for a completion */
Dmitry Kravkov523224a2010-10-06 03:23:26 +00006455 bnx2x_wait_ramrod(bp, 0, 0, &bp->set_mac_pending,
6456 ramrod_flags);
6457
Michael Chane665bfd2009-10-10 13:46:54 +00006458}
6459
Michael Chan993ac7b2009-10-10 13:46:56 +00006460#ifdef BCM_CNIC
6461/**
6462 * Set iSCSI MAC(s) at the next entries in the CAM after the ETH
6463 * MAC(s). This function will wait until the ramrod completion
6464 * returns.
6465 *
6466 * @param bp driver handle
6467 * @param set set or clear the CAM entry
6468 *
6469 * @return 0 on success, -ENODEV if the ramrod doesn't return.
6470 */
stephen hemminger8d962862010-10-21 07:50:56 +00006471static int bnx2x_set_iscsi_eth_mac_addr(struct bnx2x *bp, int set)
Michael Chan993ac7b2009-10-10 13:46:56 +00006472{
Dmitry Kravkov523224a2010-10-06 03:23:26 +00006473 u8 cam_offset = (CHIP_IS_E1(bp) ? ((BP_PORT(bp) ? 32 : 0) + 2) :
6474 bnx2x_e1h_cam_offset(bp, CAM_ISCSI_ETH_LINE));
Vladislav Zolotarovec6ba942010-12-13 05:44:01 +00006475 u32 iscsi_l2_cl_id = BNX2X_ISCSI_ETH_CL_ID +
6476 BP_E1HVN(bp) * NONE_ETH_CONTEXT_USE;
Dmitry Kravkov523224a2010-10-06 03:23:26 +00006477 u32 cl_bit_vec = (1 << iscsi_l2_cl_id);
Michael Chan993ac7b2009-10-10 13:46:56 +00006478
6479 /* Send a SET_MAC ramrod */
Dmitry Kravkov523224a2010-10-06 03:23:26 +00006480 bnx2x_set_mac_addr_gen(bp, set, bp->iscsi_mac, cl_bit_vec,
6481 cam_offset, 0);
Dmitry Kravkov0793f83f2010-12-01 12:39:28 -08006482
6483 bnx2x_set_mac_in_nig(bp, set, bp->iscsi_mac, LLH_CAM_ISCSI_ETH_LINE);
Vladislav Zolotarovec6ba942010-12-13 05:44:01 +00006484
6485 return 0;
6486}
6487
6488/**
6489 * Set FCoE L2 MAC(s) at the next entries in the CAM after the
6490 * ETH MAC(s). This function will wait until the ramrod
6491 * completion returns.
6492 *
6493 * @param bp driver handle
6494 * @param set set or clear the CAM entry
6495 *
6496 * @return 0 on success, -ENODEV if the ramrod doesn't return.
6497 */
6498int bnx2x_set_fip_eth_mac_addr(struct bnx2x *bp, int set)
6499{
6500 u32 cl_bit_vec = (1 << bnx2x_fcoe(bp, cl_id));
6501 /**
6502 * CAM allocation for E1H
6503 * eth unicasts: by func number
6504 * iscsi: by func number
6505 * fip unicast: by func number
6506 * fip multicast: by func number
6507 */
6508 bnx2x_set_mac_addr_gen(bp, set, bp->fip_mac,
6509 cl_bit_vec, bnx2x_e1h_cam_offset(bp, CAM_FIP_ETH_LINE), 0);
6510
6511 return 0;
6512}
6513
6514int bnx2x_set_all_enode_macs(struct bnx2x *bp, int set)
6515{
6516 u32 cl_bit_vec = (1 << bnx2x_fcoe(bp, cl_id));
6517
6518 /**
6519 * CAM allocation for E1H
6520 * eth unicasts: by func number
6521 * iscsi: by func number
6522 * fip unicast: by func number
6523 * fip multicast: by func number
6524 */
6525 bnx2x_set_mac_addr_gen(bp, set, ALL_ENODE_MACS, cl_bit_vec,
6526 bnx2x_e1h_cam_offset(bp, CAM_FIP_MCAST_LINE), 0);
6527
Michael Chan993ac7b2009-10-10 13:46:56 +00006528 return 0;
6529}
6530#endif
6531
Dmitry Kravkov523224a2010-10-06 03:23:26 +00006532static void bnx2x_fill_cl_init_data(struct bnx2x *bp,
6533 struct bnx2x_client_init_params *params,
6534 u8 activate,
6535 struct client_init_ramrod_data *data)
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02006536{
Dmitry Kravkov523224a2010-10-06 03:23:26 +00006537 /* Clear the buffer */
6538 memset(data, 0, sizeof(*data));
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02006539
Dmitry Kravkov523224a2010-10-06 03:23:26 +00006540 /* general */
6541 data->general.client_id = params->rxq_params.cl_id;
6542 data->general.statistics_counter_id = params->rxq_params.stat_id;
6543 data->general.statistics_en_flg =
6544 (params->rxq_params.flags & QUEUE_FLG_STATS) ? 1 : 0;
Vladislav Zolotarovec6ba942010-12-13 05:44:01 +00006545 data->general.is_fcoe_flg =
6546 (params->ramrod_params.flags & CLIENT_IS_FCOE) ? 1 : 0;
Dmitry Kravkov523224a2010-10-06 03:23:26 +00006547 data->general.activate_flg = activate;
6548 data->general.sp_client_id = params->rxq_params.spcl_id;
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02006549
Dmitry Kravkov523224a2010-10-06 03:23:26 +00006550 /* Rx data */
6551 data->rx.tpa_en_flg =
6552 (params->rxq_params.flags & QUEUE_FLG_TPA) ? 1 : 0;
6553 data->rx.vmqueue_mode_en_flg = 0;
6554 data->rx.cache_line_alignment_log_size =
6555 params->rxq_params.cache_line_log;
6556 data->rx.enable_dynamic_hc =
6557 (params->rxq_params.flags & QUEUE_FLG_DHC) ? 1 : 0;
6558 data->rx.max_sges_for_packet = params->rxq_params.max_sges_pkt;
6559 data->rx.client_qzone_id = params->rxq_params.cl_qzone_id;
6560 data->rx.max_agg_size = params->rxq_params.tpa_agg_sz;
6561
6562 /* We don't set drop flags */
6563 data->rx.drop_ip_cs_err_flg = 0;
6564 data->rx.drop_tcp_cs_err_flg = 0;
6565 data->rx.drop_ttl0_flg = 0;
6566 data->rx.drop_udp_cs_err_flg = 0;
6567
6568 data->rx.inner_vlan_removal_enable_flg =
6569 (params->rxq_params.flags & QUEUE_FLG_VLAN) ? 1 : 0;
6570 data->rx.outer_vlan_removal_enable_flg =
6571 (params->rxq_params.flags & QUEUE_FLG_OV) ? 1 : 0;
6572 data->rx.status_block_id = params->rxq_params.fw_sb_id;
6573 data->rx.rx_sb_index_number = params->rxq_params.sb_cq_index;
6574 data->rx.bd_buff_size = cpu_to_le16(params->rxq_params.buf_sz);
6575 data->rx.sge_buff_size = cpu_to_le16(params->rxq_params.sge_buf_sz);
6576 data->rx.mtu = cpu_to_le16(params->rxq_params.mtu);
6577 data->rx.bd_page_base.lo =
6578 cpu_to_le32(U64_LO(params->rxq_params.dscr_map));
6579 data->rx.bd_page_base.hi =
6580 cpu_to_le32(U64_HI(params->rxq_params.dscr_map));
6581 data->rx.sge_page_base.lo =
6582 cpu_to_le32(U64_LO(params->rxq_params.sge_map));
6583 data->rx.sge_page_base.hi =
6584 cpu_to_le32(U64_HI(params->rxq_params.sge_map));
6585 data->rx.cqe_page_base.lo =
6586 cpu_to_le32(U64_LO(params->rxq_params.rcq_map));
6587 data->rx.cqe_page_base.hi =
6588 cpu_to_le32(U64_HI(params->rxq_params.rcq_map));
6589 data->rx.is_leading_rss =
6590 (params->ramrod_params.flags & CLIENT_IS_LEADING_RSS) ? 1 : 0;
6591 data->rx.is_approx_mcast = data->rx.is_leading_rss;
6592
6593 /* Tx data */
6594 data->tx.enforce_security_flg = 0; /* VF specific */
6595 data->tx.tx_status_block_id = params->txq_params.fw_sb_id;
6596 data->tx.tx_sb_index_number = params->txq_params.sb_cq_index;
6597 data->tx.mtu = 0; /* VF specific */
6598 data->tx.tx_bd_page_base.lo =
6599 cpu_to_le32(U64_LO(params->txq_params.dscr_map));
6600 data->tx.tx_bd_page_base.hi =
6601 cpu_to_le32(U64_HI(params->txq_params.dscr_map));
6602
6603 /* flow control data */
6604 data->fc.cqe_pause_thr_low = cpu_to_le16(params->pause.rcq_th_lo);
6605 data->fc.cqe_pause_thr_high = cpu_to_le16(params->pause.rcq_th_hi);
6606 data->fc.bd_pause_thr_low = cpu_to_le16(params->pause.bd_th_lo);
6607 data->fc.bd_pause_thr_high = cpu_to_le16(params->pause.bd_th_hi);
6608 data->fc.sge_pause_thr_low = cpu_to_le16(params->pause.sge_th_lo);
6609 data->fc.sge_pause_thr_high = cpu_to_le16(params->pause.sge_th_hi);
6610 data->fc.rx_cos_mask = cpu_to_le16(params->pause.pri_map);
6611
6612 data->fc.safc_group_num = params->txq_params.cos;
6613 data->fc.safc_group_en_flg =
6614 (params->txq_params.flags & QUEUE_FLG_COS) ? 1 : 0;
Vladislav Zolotarovec6ba942010-12-13 05:44:01 +00006615 data->fc.traffic_type =
6616 (params->ramrod_params.flags & CLIENT_IS_FCOE) ?
6617 LLFC_TRAFFIC_TYPE_FCOE : LLFC_TRAFFIC_TYPE_NW;
Dmitry Kravkov523224a2010-10-06 03:23:26 +00006618}
6619
6620static inline void bnx2x_set_ctx_validation(struct eth_context *cxt, u32 cid)
6621{
6622 /* ustorm cxt validation */
6623 cxt->ustorm_ag_context.cdu_usage =
6624 CDU_RSRVD_VALUE_TYPE_A(cid, CDU_REGION_NUMBER_UCM_AG,
6625 ETH_CONNECTION_TYPE);
6626 /* xcontext validation */
6627 cxt->xstorm_ag_context.cdu_reserved =
6628 CDU_RSRVD_VALUE_TYPE_A(cid, CDU_REGION_NUMBER_XCM_AG,
6629 ETH_CONNECTION_TYPE);
6630}
6631
stephen hemminger8d962862010-10-21 07:50:56 +00006632static int bnx2x_setup_fw_client(struct bnx2x *bp,
6633 struct bnx2x_client_init_params *params,
6634 u8 activate,
6635 struct client_init_ramrod_data *data,
6636 dma_addr_t data_mapping)
Dmitry Kravkov523224a2010-10-06 03:23:26 +00006637{
6638 u16 hc_usec;
6639 int ramrod = RAMROD_CMD_ID_ETH_CLIENT_SETUP;
6640 int ramrod_flags = 0, rc;
6641
6642 /* HC and context validation values */
6643 hc_usec = params->txq_params.hc_rate ?
6644 1000000 / params->txq_params.hc_rate : 0;
6645 bnx2x_update_coalesce_sb_index(bp,
6646 params->txq_params.fw_sb_id,
6647 params->txq_params.sb_cq_index,
6648 !(params->txq_params.flags & QUEUE_FLG_HC),
6649 hc_usec);
6650
6651 *(params->ramrod_params.pstate) = BNX2X_FP_STATE_OPENING;
6652
6653 hc_usec = params->rxq_params.hc_rate ?
6654 1000000 / params->rxq_params.hc_rate : 0;
6655 bnx2x_update_coalesce_sb_index(bp,
6656 params->rxq_params.fw_sb_id,
6657 params->rxq_params.sb_cq_index,
6658 !(params->rxq_params.flags & QUEUE_FLG_HC),
6659 hc_usec);
6660
6661 bnx2x_set_ctx_validation(params->rxq_params.cxt,
6662 params->rxq_params.cid);
6663
6664 /* zero stats */
6665 if (params->txq_params.flags & QUEUE_FLG_STATS)
6666 storm_memset_xstats_zero(bp, BP_PORT(bp),
6667 params->txq_params.stat_id);
6668
6669 if (params->rxq_params.flags & QUEUE_FLG_STATS) {
6670 storm_memset_ustats_zero(bp, BP_PORT(bp),
6671 params->rxq_params.stat_id);
6672 storm_memset_tstats_zero(bp, BP_PORT(bp),
6673 params->rxq_params.stat_id);
6674 }
6675
6676 /* Fill the ramrod data */
6677 bnx2x_fill_cl_init_data(bp, params, activate, data);
6678
6679 /* SETUP ramrod.
6680 *
6681 * bnx2x_sp_post() takes a spin_lock, thus no explicit memory
6682 * barrier other than mmiowb() is needed to impose a
6683 * proper ordering of memory operations.
6684 */
6685 mmiowb();
6686
6687
6688 bnx2x_sp_post(bp, ramrod, params->ramrod_params.cid,
6689 U64_HI(data_mapping), U64_LO(data_mapping), 0);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02006690
Eilon Greenstein34f80b02008-06-23 20:33:01 -07006691 /* Wait for completion */
Dmitry Kravkov523224a2010-10-06 03:23:26 +00006692 rc = bnx2x_wait_ramrod(bp, params->ramrod_params.state,
6693 params->ramrod_params.index,
6694 params->ramrod_params.pstate,
6695 ramrod_flags);
Eilon Greenstein34f80b02008-06-23 20:33:01 -07006696 return rc;
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02006697}
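
/*
 * Worked example (illustrative): the hc_usec values above convert a
 * host-coalescing rate given in interrupts/sec into a timeout in usec.
 * A (hypothetical) hc_rate of 50000 yields 1000000 / 50000 = 20 usec;
 * hc_rate == 0 disables the timeout altogether.
 */
static inline u16 example_hc_rate_to_usec(u32 hc_rate)
{
	return hc_rate ? 1000000 / hc_rate : 0;
}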
6698
Dmitry Kravkovd6214d72010-10-06 03:32:10 +00006699/**
6700 * Configure interrupt mode according to current configuration.
6701 * In case of MSI-X it will also try to enable MSI-X.
6702 *
6703 * @param bp
6704 *
6705 * @return int
6706 */
6707static int __devinit bnx2x_set_int_mode(struct bnx2x *bp)
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02006708{
Dmitry Kravkovd6214d72010-10-06 03:32:10 +00006709 int rc = 0;
Eilon Greensteinca003922009-08-12 22:53:28 -07006710
Dmitry Kravkovd6214d72010-10-06 03:32:10 +00006711 switch (bp->int_mode) {
6712 case INT_MODE_MSI:
6713 bnx2x_enable_msi(bp);
6714 /* falling through... */
6715 case INT_MODE_INTx:
Vladislav Zolotarovec6ba942010-12-13 05:44:01 +00006716 bp->num_queues = 1 + NONE_ETH_CONTEXT_USE;
Dmitry Kravkovd6214d72010-10-06 03:32:10 +00006717 DP(NETIF_MSG_IFUP, "set number of queues to 1\n");
Eilon Greensteinca003922009-08-12 22:53:28 -07006718 break;
Eilon Greensteinca003922009-08-12 22:53:28 -07006719 default:
Dmitry Kravkovd6214d72010-10-06 03:32:10 +00006720 /* Set number of queues according to bp->multi_mode value */
6721 bnx2x_set_num_queues(bp);
6722
6723 DP(NETIF_MSG_IFUP, "set number of queues to %d\n",
6724 bp->num_queues);
6725
6726 /* if we can't use MSI-X we only need one fp,
6727 * so try to enable MSI-X with the requested number of fp's
6728 * and fallback to MSI or legacy INTx with one fp
6729 */
6730 rc = bnx2x_enable_msix(bp);
6731 if (rc) {
6732 /* failed to enable MSI-X */
6733 if (bp->multi_mode)
6734 DP(NETIF_MSG_IFUP,
6735 "Multi requested but failed to "
6736 "enable MSI-X (%d), "
6737 "set number of queues to %d\n",
6738 bp->num_queues,
Vladislav Zolotarovec6ba942010-12-13 05:44:01 +00006739 1 + NONE_ETH_CONTEXT_USE);
6740 bp->num_queues = 1 + NONE_ETH_CONTEXT_USE;
Dmitry Kravkovd6214d72010-10-06 03:32:10 +00006741
6742 if (!(bp->flags & DISABLE_MSI_FLAG))
6743 bnx2x_enable_msi(bp);
6744 }
6745
Eilon Greensteinca003922009-08-12 22:53:28 -07006746 break;
6747 }
Dmitry Kravkovd6214d72010-10-06 03:32:10 +00006748
6749 return rc;
Eilon Greensteinca003922009-08-12 22:53:28 -07006750}
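
/*
 * Condensed view of the fallback ladder implemented above: MSI-X with
 * the full queue set is preferred; if it cannot be enabled, the driver
 * drops to a single ETH queue (plus any CNIC context) and tries MSI,
 * unless MSI was explicitly disabled, in which case it ends up in
 * legacy INTx mode.
 */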
6751
Dmitry Kravkovc2bff632010-10-06 03:33:18 +00006752/* must be called prior to any HW initialization */
6753static inline u16 bnx2x_cid_ilt_lines(struct bnx2x *bp)
6754{
6755 return L2_ILT_LINES(bp);
6756}
6757
Dmitry Kravkov523224a2010-10-06 03:23:26 +00006758void bnx2x_ilt_set_info(struct bnx2x *bp)
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02006759{
Dmitry Kravkov523224a2010-10-06 03:23:26 +00006760 struct ilt_client_info *ilt_client;
6761 struct bnx2x_ilt *ilt = BP_ILT(bp);
6762 u16 line = 0;
6763
6764 ilt->start_line = FUNC_ILT_BASE(BP_FUNC(bp));
6765 DP(BNX2X_MSG_SP, "ilt starts at line %d\n", ilt->start_line);
6766
6767 /* CDU */
6768 ilt_client = &ilt->clients[ILT_CLIENT_CDU];
6769 ilt_client->client_num = ILT_CLIENT_CDU;
6770 ilt_client->page_size = CDU_ILT_PAGE_SZ;
6771 ilt_client->flags = ILT_CLIENT_SKIP_MEM;
6772 ilt_client->start = line;
6773 line += L2_ILT_LINES(bp);
6774#ifdef BCM_CNIC
6775 line += CNIC_ILT_LINES;
6776#endif
6777 ilt_client->end = line - 1;
6778
6779 DP(BNX2X_MSG_SP, "ilt client[CDU]: start %d, end %d, psz 0x%x, "
6780 "flags 0x%x, hw psz %d\n",
6781 ilt_client->start,
6782 ilt_client->end,
6783 ilt_client->page_size,
6784 ilt_client->flags,
6785 ilog2(ilt_client->page_size >> 12));
6786
6787 /* QM */
6788 if (QM_INIT(bp->qm_cid_count)) {
6789 ilt_client = &ilt->clients[ILT_CLIENT_QM];
6790 ilt_client->client_num = ILT_CLIENT_QM;
6791 ilt_client->page_size = QM_ILT_PAGE_SZ;
6792 ilt_client->flags = 0;
6793 ilt_client->start = line;
6794
6795 /* 4 bytes for each cid */
6796 line += DIV_ROUND_UP(bp->qm_cid_count * QM_QUEUES_PER_FUNC * 4,
6797 QM_ILT_PAGE_SZ);
6798
6799 ilt_client->end = line - 1;
6800
6801 DP(BNX2X_MSG_SP, "ilt client[QM]: start %d, end %d, psz 0x%x, "
6802 "flags 0x%x, hw psz %d\n",
6803 ilt_client->start,
6804 ilt_client->end,
6805 ilt_client->page_size,
6806 ilt_client->flags,
6807 ilog2(ilt_client->page_size >> 12));
6808
6809 }
6810 /* SRC */
6811 ilt_client = &ilt->clients[ILT_CLIENT_SRC];
6812#ifdef BCM_CNIC
6813 ilt_client->client_num = ILT_CLIENT_SRC;
6814 ilt_client->page_size = SRC_ILT_PAGE_SZ;
6815 ilt_client->flags = 0;
6816 ilt_client->start = line;
6817 line += SRC_ILT_LINES;
6818 ilt_client->end = line - 1;
6819
6820 DP(BNX2X_MSG_SP, "ilt client[SRC]: start %d, end %d, psz 0x%x, "
6821 "flags 0x%x, hw psz %d\n",
6822 ilt_client->start,
6823 ilt_client->end,
6824 ilt_client->page_size,
6825 ilt_client->flags,
6826 ilog2(ilt_client->page_size >> 12));
6827
6828#else
6829 ilt_client->flags = (ILT_CLIENT_SKIP_INIT | ILT_CLIENT_SKIP_MEM);
6830#endif
6831
6832 /* TM */
6833 ilt_client = &ilt->clients[ILT_CLIENT_TM];
6834#ifdef BCM_CNIC
6835 ilt_client->client_num = ILT_CLIENT_TM;
6836 ilt_client->page_size = TM_ILT_PAGE_SZ;
6837 ilt_client->flags = 0;
6838 ilt_client->start = line;
6839 line += TM_ILT_LINES;
6840 ilt_client->end = line - 1;
6841
6842 DP(BNX2X_MSG_SP, "ilt client[TM]: start %d, end %d, psz 0x%x, "
6843 "flags 0x%x, hw psz %d\n",
6844 ilt_client->start,
6845 ilt_client->end,
6846 ilt_client->page_size,
6847 ilt_client->flags,
6848 ilog2(ilt_client->page_size >> 12));
6849
6850#else
6851 ilt_client->flags = (ILT_CLIENT_SKIP_INIT | ILT_CLIENT_SKIP_MEM);
6852#endif
6853}
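
/*
 * Worked example with hypothetical numbers: the QM client above needs
 * 4 bytes of context per cid per queue, so qm_cid_count = 1024 with
 * QM_QUEUES_PER_FUNC = 16 and a 4 KiB QM_ILT_PAGE_SZ would occupy
 * DIV_ROUND_UP(1024 * 16 * 4, 4096) = 16 ILT lines.
 */
static inline u16 example_qm_ilt_lines(u32 cid_count, u32 queues_per_func,
				       u32 page_sz)
{
	return DIV_ROUND_UP(cid_count * queues_per_func * 4, page_sz);
}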
Dmitry Kravkovf85582f2010-10-06 03:34:21 +00006854
Dmitry Kravkov523224a2010-10-06 03:23:26 +00006855int bnx2x_setup_client(struct bnx2x *bp, struct bnx2x_fastpath *fp,
6856 int is_leading)
6857{
6858 struct bnx2x_client_init_params params = { {0} };
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02006859 int rc;
6860
Vladislav Zolotarovec6ba942010-12-13 05:44:01 +00006861	/* reset IGU state; skip the FCoE L2 queue */
6862 if (!IS_FCOE_FP(fp))
6863 bnx2x_ack_sb(bp, fp->igu_sb_id, USTORM_ID, 0,
Dmitry Kravkov523224a2010-10-06 03:23:26 +00006864 IGU_INT_ENABLE, 0);
6865
6866 params.ramrod_params.pstate = &fp->state;
6867 params.ramrod_params.state = BNX2X_FP_STATE_OPEN;
6868 params.ramrod_params.index = fp->index;
6869 params.ramrod_params.cid = fp->cid;
6870
Vladislav Zolotarovec6ba942010-12-13 05:44:01 +00006871#ifdef BCM_CNIC
6872 if (IS_FCOE_FP(fp))
6873 params.ramrod_params.flags |= CLIENT_IS_FCOE;
6874
6875#endif
6876
Dmitry Kravkov523224a2010-10-06 03:23:26 +00006877 if (is_leading)
6878 params.ramrod_params.flags |= CLIENT_IS_LEADING_RSS;
6879
6880 bnx2x_pf_rx_cl_prep(bp, fp, &params.pause, &params.rxq_params);
6881
6882 bnx2x_pf_tx_cl_prep(bp, fp, &params.txq_params);
6883
6884 rc = bnx2x_setup_fw_client(bp, &params, 1,
6885 bnx2x_sp(bp, client_init_data),
6886 bnx2x_sp_mapping(bp, client_init_data));
6887 return rc;
6888}
6889
stephen hemminger8d962862010-10-21 07:50:56 +00006890static int bnx2x_stop_fw_client(struct bnx2x *bp,
6891 struct bnx2x_client_ramrod_params *p)
Dmitry Kravkov523224a2010-10-06 03:23:26 +00006892{
6893 int rc;
6894
6895 int poll_flag = p->poll ? WAIT_RAMROD_POLL : 0;
6896
Eliezer Tamirc14423f2008-02-28 11:49:42 -08006897 /* halt the connection */
Dmitry Kravkov523224a2010-10-06 03:23:26 +00006898 *p->pstate = BNX2X_FP_STATE_HALTING;
6899 bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_HALT, p->cid, 0,
6900 p->cl_id, 0);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02006901
Eilon Greenstein34f80b02008-06-23 20:33:01 -07006902 /* Wait for completion */
Dmitry Kravkov523224a2010-10-06 03:23:26 +00006903 rc = bnx2x_wait_ramrod(bp, BNX2X_FP_STATE_HALTED, p->index,
6904 p->pstate, poll_flag);
Eliezer Tamirc14423f2008-02-28 11:49:42 -08006905 if (rc) /* timeout */
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02006906 return rc;
6907
Dmitry Kravkov523224a2010-10-06 03:23:26 +00006908 *p->pstate = BNX2X_FP_STATE_TERMINATING;
6909 bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_TERMINATE, p->cid, 0,
6910 p->cl_id, 0);
6911 /* Wait for completion */
6912 rc = bnx2x_wait_ramrod(bp, BNX2X_FP_STATE_TERMINATED, p->index,
6913 p->pstate, poll_flag);
6914 if (rc) /* timeout */
6915 return rc;
6916
6917
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02006918 /* delete cfc entry */
Dmitry Kravkov523224a2010-10-06 03:23:26 +00006919 bnx2x_sp_post(bp, RAMROD_CMD_ID_COMMON_CFC_DEL, p->cid, 0, 0, 1);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02006920
Eilon Greenstein34f80b02008-06-23 20:33:01 -07006921 /* Wait for completion */
Dmitry Kravkov523224a2010-10-06 03:23:26 +00006922 rc = bnx2x_wait_ramrod(bp, BNX2X_FP_STATE_CLOSED, p->index,
6923 p->pstate, WAIT_RAMROD_COMMON);
Eilon Greenstein34f80b02008-06-23 20:33:01 -07006924 return rc;
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02006925}
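
/*
 * The teardown above walks the fastpath state machine in three ramrod
 * steps (condensed from the constants used in bnx2x_stop_fw_client()):
 *
 *   OPEN --ETH_HALT--> HALTING -> HALTED
 *        --ETH_TERMINATE--> TERMINATING -> TERMINATED
 *        --COMMON_CFC_DEL--> CLOSED
 *
 * Each arrow is a posted ramrod and each intermediate state is waited
 * on with bnx2x_wait_ramrod().
 */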
6926
Dmitry Kravkov523224a2010-10-06 03:23:26 +00006927static int bnx2x_stop_client(struct bnx2x *bp, int index)
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02006928{
Dmitry Kravkov523224a2010-10-06 03:23:26 +00006929 struct bnx2x_client_ramrod_params client_stop = {0};
6930 struct bnx2x_fastpath *fp = &bp->fp[index];
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02006931
Dmitry Kravkov523224a2010-10-06 03:23:26 +00006932 client_stop.index = index;
6933 client_stop.cid = fp->cid;
6934 client_stop.cl_id = fp->cl_id;
6935 client_stop.pstate = &(fp->state);
6936 client_stop.poll = 0;
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02006937
Dmitry Kravkov523224a2010-10-06 03:23:26 +00006938 return bnx2x_stop_fw_client(bp, &client_stop);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02006939}
6940
Dmitry Kravkov523224a2010-10-06 03:23:26 +00006941
Eilon Greenstein34f80b02008-06-23 20:33:01 -07006942static void bnx2x_reset_func(struct bnx2x *bp)
6943{
6944 int port = BP_PORT(bp);
6945 int func = BP_FUNC(bp);
Dmitry Kravkovf2e08992010-10-06 03:28:26 +00006946 int i;
Dmitry Kravkov523224a2010-10-06 03:23:26 +00006947 int pfunc_offset_fp = offsetof(struct hc_sb_data, p_func) +
Dmitry Kravkovf2e08992010-10-06 03:28:26 +00006948 (CHIP_IS_E2(bp) ?
6949 offsetof(struct hc_status_block_data_e2, common) :
6950 offsetof(struct hc_status_block_data_e1x, common));
Dmitry Kravkov523224a2010-10-06 03:23:26 +00006951 int pfunc_offset_sp = offsetof(struct hc_sp_status_block_data, p_func);
6952 int pfid_offset = offsetof(struct pci_entity, pf_id);
6953
6954 /* Disable the function in the FW */
6955 REG_WR8(bp, BAR_XSTRORM_INTMEM + XSTORM_FUNC_EN_OFFSET(func), 0);
6956 REG_WR8(bp, BAR_CSTRORM_INTMEM + CSTORM_FUNC_EN_OFFSET(func), 0);
6957 REG_WR8(bp, BAR_TSTRORM_INTMEM + TSTORM_FUNC_EN_OFFSET(func), 0);
6958 REG_WR8(bp, BAR_USTRORM_INTMEM + USTORM_FUNC_EN_OFFSET(func), 0);
6959
6960 /* FP SBs */
Vladislav Zolotarovec6ba942010-12-13 05:44:01 +00006961 for_each_eth_queue(bp, i) {
Dmitry Kravkov523224a2010-10-06 03:23:26 +00006962 struct bnx2x_fastpath *fp = &bp->fp[i];
6963 REG_WR8(bp,
6964 BAR_CSTRORM_INTMEM +
6965 CSTORM_STATUS_BLOCK_DATA_OFFSET(fp->fw_sb_id)
6966 + pfunc_offset_fp + pfid_offset,
6967 HC_FUNCTION_DISABLED);
6968 }
6969
6970 /* SP SB */
6971 REG_WR8(bp,
6972 BAR_CSTRORM_INTMEM +
6973 CSTORM_SP_STATUS_BLOCK_DATA_OFFSET(func) +
6974 pfunc_offset_sp + pfid_offset,
6975 HC_FUNCTION_DISABLED);
6976
6977
6978 for (i = 0; i < XSTORM_SPQ_DATA_SIZE / 4; i++)
6979 REG_WR(bp, BAR_XSTRORM_INTMEM + XSTORM_SPQ_DATA_OFFSET(func),
6980 0);
Eliezer Tamir49d66772008-02-28 11:53:13 -08006981
Eilon Greenstein34f80b02008-06-23 20:33:01 -07006982 /* Configure IGU */
Dmitry Kravkovf2e08992010-10-06 03:28:26 +00006983 if (bp->common.int_block == INT_BLOCK_HC) {
6984 REG_WR(bp, HC_REG_LEADING_EDGE_0 + port*8, 0);
6985 REG_WR(bp, HC_REG_TRAILING_EDGE_0 + port*8, 0);
6986 } else {
6987 REG_WR(bp, IGU_REG_LEADING_EDGE_LATCH, 0);
6988 REG_WR(bp, IGU_REG_TRAILING_EDGE_LATCH, 0);
6989 }
Eilon Greenstein34f80b02008-06-23 20:33:01 -07006990
Michael Chan37b091b2009-10-10 13:46:55 +00006991#ifdef BCM_CNIC
6992 /* Disable Timer scan */
6993 REG_WR(bp, TM_REG_EN_LINEAR0_TIMER + port*4, 0);
6994 /*
6995 * Wait for at least 10ms and up to 2 second for the timers scan to
6996 * complete
6997 */
6998 for (i = 0; i < 200; i++) {
6999 msleep(10);
7000 if (!REG_RD(bp, TM_REG_LIN0_SCAN_ON + port*4))
7001 break;
7002 }
7003#endif
Eilon Greenstein34f80b02008-06-23 20:33:01 -07007004 /* Clear ILT */
Dmitry Kravkovf2e08992010-10-06 03:28:26 +00007005 bnx2x_clear_func_ilt(bp, func);
7006
7007	/* Timers workaround for an E2 bug: if this is vnic-3,
7008	 * we need to set the entire ILT range for the timers.
7009 */
7010 if (CHIP_IS_E2(bp) && BP_VN(bp) == 3) {
7011 struct ilt_client_info ilt_cli;
7012 /* use dummy TM client */
7013 memset(&ilt_cli, 0, sizeof(struct ilt_client_info));
7014 ilt_cli.start = 0;
7015 ilt_cli.end = ILT_NUM_PAGE_ENTRIES - 1;
7016 ilt_cli.client_num = ILT_CLIENT_TM;
7017
7018 bnx2x_ilt_boundry_init_op(bp, &ilt_cli, 0, INITOP_CLEAR);
7019 }
7020
7021	/* this assumes that reset_port() was called before reset_func() */
7022 if (CHIP_IS_E2(bp))
7023 bnx2x_pf_disable(bp);
Dmitry Kravkov523224a2010-10-06 03:23:26 +00007024
7025 bp->dmae_ready = 0;
Eilon Greenstein34f80b02008-06-23 20:33:01 -07007026}
7027
7028static void bnx2x_reset_port(struct bnx2x *bp)
7029{
7030 int port = BP_PORT(bp);
7031 u32 val;
7032
7033 REG_WR(bp, NIG_REG_MASK_INTERRUPT_PORT0 + port*4, 0);
7034
7035 /* Do not rcv packets to BRB */
7036 REG_WR(bp, NIG_REG_LLH0_BRB1_DRV_MASK + port*4, 0x0);
7037 /* Do not direct rcv packets that are not for MCP to the BRB */
7038 REG_WR(bp, (port ? NIG_REG_LLH1_BRB1_NOT_MCP :
7039 NIG_REG_LLH0_BRB1_NOT_MCP), 0x0);
7040
7041 /* Configure AEU */
7042 REG_WR(bp, MISC_REG_AEU_MASK_ATTN_FUNC_0 + port*4, 0);
7043
7044 msleep(100);
7045 /* Check for BRB port occupancy */
7046 val = REG_RD(bp, BRB1_REG_PORT_NUM_OCC_BLOCKS_0 + port*4);
7047 if (val)
7048 DP(NETIF_MSG_IFDOWN,
Eilon Greenstein33471622008-08-13 15:59:08 -07007049 "BRB1 is not empty %d blocks are occupied\n", val);
Eilon Greenstein34f80b02008-06-23 20:33:01 -07007050
7051 /* TODO: Close Doorbell port? */
7052}
7053
Eilon Greenstein34f80b02008-06-23 20:33:01 -07007054static void bnx2x_reset_chip(struct bnx2x *bp, u32 reset_code)
7055{
7056 DP(BNX2X_MSG_MCP, "function %d reset_code %x\n",
Dmitry Kravkovf2e08992010-10-06 03:28:26 +00007057 BP_ABS_FUNC(bp), reset_code);
Eilon Greenstein34f80b02008-06-23 20:33:01 -07007058
7059 switch (reset_code) {
7060 case FW_MSG_CODE_DRV_UNLOAD_COMMON:
7061 bnx2x_reset_port(bp);
7062 bnx2x_reset_func(bp);
7063 bnx2x_reset_common(bp);
7064 break;
7065
7066 case FW_MSG_CODE_DRV_UNLOAD_PORT:
7067 bnx2x_reset_port(bp);
7068 bnx2x_reset_func(bp);
7069 break;
7070
7071 case FW_MSG_CODE_DRV_UNLOAD_FUNCTION:
7072 bnx2x_reset_func(bp);
7073 break;
7074
7075 default:
7076 BNX2X_ERR("Unknown reset_code (0x%x) from MCP\n", reset_code);
7077 break;
7078 }
7079}
7080
Vladislav Zolotarovec6ba942010-12-13 05:44:01 +00007081#ifdef BCM_CNIC
7082static inline void bnx2x_del_fcoe_eth_macs(struct bnx2x *bp)
7083{
7084 if (bp->flags & FCOE_MACS_SET) {
7085 if (!IS_MF_SD(bp))
7086 bnx2x_set_fip_eth_mac_addr(bp, 0);
7087
7088 bnx2x_set_all_enode_macs(bp, 0);
7089
7090 bp->flags &= ~FCOE_MACS_SET;
7091 }
7092}
7093#endif
7094
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00007095void bnx2x_chip_cleanup(struct bnx2x *bp, int unload_mode)
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02007096{
Vladislav Zolotarovda5a6622008-08-13 15:50:00 -07007097 int port = BP_PORT(bp);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02007098 u32 reset_code = 0;
Vladislav Zolotarovda5a6622008-08-13 15:50:00 -07007099 int i, cnt, rc;
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02007100
Eilon Greenstein555f6c72009-02-12 08:36:11 +00007101 /* Wait until tx fastpath tasks complete */
Vladislav Zolotarovec6ba942010-12-13 05:44:01 +00007102 for_each_tx_queue(bp, i) {
Eliezer Tamir228241e2008-02-28 11:56:57 -08007103 struct bnx2x_fastpath *fp = &bp->fp[i];
7104
Eilon Greenstein34f80b02008-06-23 20:33:01 -07007105 cnt = 1000;
Vladislav Zolotarove8b5fc52009-01-26 12:36:42 -08007106 while (bnx2x_has_tx_work_unload(fp)) {
Vladislav Zolotarovda5a6622008-08-13 15:50:00 -07007107
Eilon Greenstein34f80b02008-06-23 20:33:01 -07007108 if (!cnt) {
7109 BNX2X_ERR("timeout waiting for queue[%d]\n",
7110 i);
7111#ifdef BNX2X_STOP_ON_ERROR
7112 bnx2x_panic();
7113 return -EBUSY;
7114#else
7115 break;
7116#endif
7117 }
7118 cnt--;
Vladislav Zolotarovda5a6622008-08-13 15:50:00 -07007119 msleep(1);
Eilon Greenstein34f80b02008-06-23 20:33:01 -07007120 }
Eliezer Tamir228241e2008-02-28 11:56:57 -08007121 }
Vladislav Zolotarovda5a6622008-08-13 15:50:00 -07007122 /* Give HW time to discard old tx messages */
7123 msleep(1);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02007124
Yitchak Gertner65abd742008-08-25 15:26:24 -07007125 if (CHIP_IS_E1(bp)) {
Dmitry Kravkov523224a2010-10-06 03:23:26 +00007126 /* invalidate mc list,
7127 * wait and poll (interrupts are off)
7128 */
7129 bnx2x_invlidate_e1_mc_list(bp);
7130 bnx2x_set_eth_mac(bp, 0);
Yitchak Gertner65abd742008-08-25 15:26:24 -07007131
Dmitry Kravkov523224a2010-10-06 03:23:26 +00007132 } else {
Yitchak Gertner65abd742008-08-25 15:26:24 -07007133 REG_WR(bp, NIG_REG_LLH0_FUNC_EN + port*8, 0);
7134
Dmitry Kravkov523224a2010-10-06 03:23:26 +00007135 bnx2x_set_eth_mac(bp, 0);
Yitchak Gertner65abd742008-08-25 15:26:24 -07007136
7137 for (i = 0; i < MC_HASH_SIZE; i++)
7138 REG_WR(bp, MC_HASH_OFFSET(bp, i), 0);
7139 }
Dmitry Kravkov523224a2010-10-06 03:23:26 +00007140
Michael Chan993ac7b2009-10-10 13:46:56 +00007141#ifdef BCM_CNIC
Vladislav Zolotarovec6ba942010-12-13 05:44:01 +00007142 bnx2x_del_fcoe_eth_macs(bp);
Michael Chan993ac7b2009-10-10 13:46:56 +00007143#endif
Yitchak Gertner65abd742008-08-25 15:26:24 -07007144
Vladislav Zolotarovda5a6622008-08-13 15:50:00 -07007145 if (unload_mode == UNLOAD_NORMAL)
7146 reset_code = DRV_MSG_CODE_UNLOAD_REQ_WOL_DIS;
Eliezer Tamir228241e2008-02-28 11:56:57 -08007147
Eilon Greenstein7d0446c2009-07-29 00:20:10 +00007148 else if (bp->flags & NO_WOL_FLAG)
Vladislav Zolotarovda5a6622008-08-13 15:50:00 -07007149 reset_code = DRV_MSG_CODE_UNLOAD_REQ_WOL_MCP;
Vladislav Zolotarovda5a6622008-08-13 15:50:00 -07007150
Eilon Greenstein7d0446c2009-07-29 00:20:10 +00007151 else if (bp->wol) {
Vladislav Zolotarovda5a6622008-08-13 15:50:00 -07007152 u32 emac_base = port ? GRCBASE_EMAC1 : GRCBASE_EMAC0;
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02007153 u8 *mac_addr = bp->dev->dev_addr;
Eilon Greenstein34f80b02008-06-23 20:33:01 -07007154 u32 val;
Eilon Greenstein34f80b02008-06-23 20:33:01 -07007155 /* The mac address is written to entries 1-4 to
7156	 preserve entry 0, which is used by the PMF */
Vladislav Zolotarovda5a6622008-08-13 15:50:00 -07007157 u8 entry = (BP_E1HVN(bp) + 1)*8;
7158
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02007159 val = (mac_addr[0] << 8) | mac_addr[1];
Eilon Greenstein3196a882008-08-13 15:58:49 -07007160 EMAC_WR(bp, EMAC_REG_EMAC_MAC_MATCH + entry, val);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02007161
7162 val = (mac_addr[2] << 24) | (mac_addr[3] << 16) |
7163 (mac_addr[4] << 8) | mac_addr[5];
Eilon Greenstein3196a882008-08-13 15:58:49 -07007164 EMAC_WR(bp, EMAC_REG_EMAC_MAC_MATCH + entry + 4, val);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02007165
7166 reset_code = DRV_MSG_CODE_UNLOAD_REQ_WOL_EN;
Eliezer Tamir228241e2008-02-28 11:56:57 -08007167
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02007168 } else
7169 reset_code = DRV_MSG_CODE_UNLOAD_REQ_WOL_DIS;
7170
Eilon Greenstein34f80b02008-06-23 20:33:01 -07007171	/* Close multi and leading connections;
7172	   completions for ramrods are collected in a synchronous way */
Dmitry Kravkov523224a2010-10-06 03:23:26 +00007173 for_each_queue(bp, i)
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02007174
Dmitry Kravkov523224a2010-10-06 03:23:26 +00007175 if (bnx2x_stop_client(bp, i))
Vladislav Zolotarovda5a6622008-08-13 15:50:00 -07007176#ifdef BNX2X_STOP_ON_ERROR
Dmitry Kravkov523224a2010-10-06 03:23:26 +00007177 return;
7178#else
7179 goto unload_error;
7180#endif
7181
7182 rc = bnx2x_func_stop(bp);
7183 if (rc) {
7184 BNX2X_ERR("Function stop failed!\n");
7185#ifdef BNX2X_STOP_ON_ERROR
7186 return;
Vladislav Zolotarovda5a6622008-08-13 15:50:00 -07007187#else
7188 goto unload_error;
Eilon Greenstein34f80b02008-06-23 20:33:01 -07007189#endif
Eliezer Tamir228241e2008-02-28 11:56:57 -08007190 }
Dmitry Kravkov523224a2010-10-06 03:23:26 +00007191#ifndef BNX2X_STOP_ON_ERROR
Eliezer Tamir228241e2008-02-28 11:56:57 -08007192unload_error:
Dmitry Kravkov523224a2010-10-06 03:23:26 +00007193#endif
Eilon Greenstein34f80b02008-06-23 20:33:01 -07007194 if (!BP_NOMCP(bp))
Yaniv Rosnera22f0782010-09-07 11:41:20 +00007195 reset_code = bnx2x_fw_command(bp, reset_code, 0);
Eilon Greenstein34f80b02008-06-23 20:33:01 -07007196 else {
Dmitry Kravkovf2e08992010-10-06 03:28:26 +00007197 DP(NETIF_MSG_IFDOWN, "NO MCP - load counts[%d] "
7198 "%d, %d, %d\n", BP_PATH(bp),
7199 load_count[BP_PATH(bp)][0],
7200 load_count[BP_PATH(bp)][1],
7201 load_count[BP_PATH(bp)][2]);
7202 load_count[BP_PATH(bp)][0]--;
7203 load_count[BP_PATH(bp)][1 + port]--;
7204 DP(NETIF_MSG_IFDOWN, "NO MCP - new load counts[%d] "
7205 "%d, %d, %d\n", BP_PATH(bp),
7206 load_count[BP_PATH(bp)][0], load_count[BP_PATH(bp)][1],
7207 load_count[BP_PATH(bp)][2]);
7208 if (load_count[BP_PATH(bp)][0] == 0)
Eilon Greenstein34f80b02008-06-23 20:33:01 -07007209 reset_code = FW_MSG_CODE_DRV_UNLOAD_COMMON;
Dmitry Kravkovf2e08992010-10-06 03:28:26 +00007210 else if (load_count[BP_PATH(bp)][1 + port] == 0)
Eilon Greenstein34f80b02008-06-23 20:33:01 -07007211 reset_code = FW_MSG_CODE_DRV_UNLOAD_PORT;
7212 else
7213 reset_code = FW_MSG_CODE_DRV_UNLOAD_FUNCTION;
7214 }
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02007215
Eilon Greenstein34f80b02008-06-23 20:33:01 -07007216 if ((reset_code == FW_MSG_CODE_DRV_UNLOAD_COMMON) ||
7217 (reset_code == FW_MSG_CODE_DRV_UNLOAD_PORT))
7218 bnx2x__link_reset(bp);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02007219
Dmitry Kravkov523224a2010-10-06 03:23:26 +00007220 /* Disable HW interrupts, NAPI */
7221 bnx2x_netif_stop(bp, 1);
7222
7223 /* Release IRQs */
Dmitry Kravkovd6214d72010-10-06 03:32:10 +00007224 bnx2x_free_irq(bp);
Dmitry Kravkov523224a2010-10-06 03:23:26 +00007225
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02007226 /* Reset the chip */
Eliezer Tamir228241e2008-02-28 11:56:57 -08007227 bnx2x_reset_chip(bp, reset_code);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02007228
7229 /* Report UNLOAD_DONE to MCP */
Eilon Greenstein34f80b02008-06-23 20:33:01 -07007230 if (!BP_NOMCP(bp))
Yaniv Rosnera22f0782010-09-07 11:41:20 +00007231 bnx2x_fw_command(bp, DRV_MSG_CODE_UNLOAD_DONE, 0);
Eilon Greenstein356e2382009-02-12 08:38:32 +00007232
Vladislav Zolotarov72fd0712010-04-19 01:13:12 +00007233}
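
/*
 * Illustrative helper (not part of the driver): the WoL path above
 * packs the station MAC into the two EMAC_REG_EMAC_MAC_MATCH words.
 * For a hypothetical address 00:10:18:ab:cd:ef this yields
 * 0x00000010 and 0x18abcdef.
 */
static inline void example_pack_mac_match(const u8 *mac, u32 *hi, u32 *lo)
{
	*hi = ((u32)mac[0] << 8) | mac[1];
	*lo = ((u32)mac[2] << 24) | ((u32)mac[3] << 16) |
	      ((u32)mac[4] << 8) | mac[5];
}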
7234
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00007235void bnx2x_disable_close_the_gate(struct bnx2x *bp)
Vladislav Zolotarov72fd0712010-04-19 01:13:12 +00007236{
7237 u32 val;
7238
7239 DP(NETIF_MSG_HW, "Disabling \"close the gates\"\n");
7240
7241 if (CHIP_IS_E1(bp)) {
7242 int port = BP_PORT(bp);
7243 u32 addr = port ? MISC_REG_AEU_MASK_ATTN_FUNC_1 :
7244 MISC_REG_AEU_MASK_ATTN_FUNC_0;
7245
7246 val = REG_RD(bp, addr);
7247 val &= ~(0x300);
7248 REG_WR(bp, addr, val);
7249 } else if (CHIP_IS_E1H(bp)) {
7250 val = REG_RD(bp, MISC_REG_AEU_GENERAL_MASK);
7251 val &= ~(MISC_AEU_GENERAL_MASK_REG_AEU_PXP_CLOSE_MASK |
7252 MISC_AEU_GENERAL_MASK_REG_AEU_NIG_CLOSE_MASK);
7253 REG_WR(bp, MISC_REG_AEU_GENERAL_MASK, val);
7254 }
7255}
7256
Vladislav Zolotarov72fd0712010-04-19 01:13:12 +00007257/* Close gates #2, #3 and #4: */
7258static void bnx2x_set_234_gates(struct bnx2x *bp, bool close)
7259{
7260 u32 val, addr;
7261
7262 /* Gates #2 and #4a are closed/opened for "not E1" only */
7263 if (!CHIP_IS_E1(bp)) {
7264 /* #4 */
7265 val = REG_RD(bp, PXP_REG_HST_DISCARD_DOORBELLS);
7266 REG_WR(bp, PXP_REG_HST_DISCARD_DOORBELLS,
7267 close ? (val | 0x1) : (val & (~(u32)1)));
7268 /* #2 */
7269 val = REG_RD(bp, PXP_REG_HST_DISCARD_INTERNAL_WRITES);
7270 REG_WR(bp, PXP_REG_HST_DISCARD_INTERNAL_WRITES,
7271 close ? (val | 0x1) : (val & (~(u32)1)));
7272 }
7273
7274 /* #3 */
7275 addr = BP_PORT(bp) ? HC_REG_CONFIG_1 : HC_REG_CONFIG_0;
7276 val = REG_RD(bp, addr);
7277 REG_WR(bp, addr, (!close) ? (val | 0x1) : (val & (~(u32)1)));
7278
7279 DP(NETIF_MSG_HW, "%s gates #2, #3 and #4\n",
7280 close ? "closing" : "opening");
7281 mmiowb();
7282}
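
/*
 * A minimal sketch of the read-modify-write pattern used above; note
 * that gates #2 and #4 set bit 0 to close, while gate #3
 * (HC_REG_CONFIG_x) uses the opposite polarity and sets bit 0 when
 * opening.
 */
static inline u32 example_gate_rmw(u32 val, bool set_bit0)
{
	return set_bit0 ? (val | 0x1) : (val & ~(u32)1);
}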
7283
7284#define SHARED_MF_CLP_MAGIC 0x80000000 /* `magic' bit */
7285
7286static void bnx2x_clp_reset_prep(struct bnx2x *bp, u32 *magic_val)
7287{
7288 /* Do some magic... */
7289 u32 val = MF_CFG_RD(bp, shared_mf_config.clp_mb);
7290 *magic_val = val & SHARED_MF_CLP_MAGIC;
7291 MF_CFG_WR(bp, shared_mf_config.clp_mb, val | SHARED_MF_CLP_MAGIC);
7292}
7293
7294/* Restore the value of the `magic' bit.
7295 *
7296 * @param bp Driver handle.
7297 * @param magic_val Old value of the `magic' bit.
7298 */
7299static void bnx2x_clp_reset_done(struct bnx2x *bp, u32 magic_val)
7300{
7301 /* Restore the `magic' bit value... */
Vladislav Zolotarov72fd0712010-04-19 01:13:12 +00007302 u32 val = MF_CFG_RD(bp, shared_mf_config.clp_mb);
7303 MF_CFG_WR(bp, shared_mf_config.clp_mb,
7304 (val & (~SHARED_MF_CLP_MAGIC)) | magic_val);
7305}
7306
Dmitry Kravkovf85582f2010-10-06 03:34:21 +00007307/**
7308 * Prepares for MCP reset: takes care of CLP configurations.
Vladislav Zolotarov72fd0712010-04-19 01:13:12 +00007309 *
7310 * @param bp
7311 * @param magic_val Old value of 'magic' bit.
7312 */
7313static void bnx2x_reset_mcp_prep(struct bnx2x *bp, u32 *magic_val)
7314{
7315 u32 shmem;
7316 u32 validity_offset;
7317
7318 DP(NETIF_MSG_HW, "Starting\n");
7319
7320 /* Set `magic' bit in order to save MF config */
7321 if (!CHIP_IS_E1(bp))
7322 bnx2x_clp_reset_prep(bp, magic_val);
7323
7324 /* Get shmem offset */
7325 shmem = REG_RD(bp, MISC_REG_SHARED_MEM_ADDR);
7326 validity_offset = offsetof(struct shmem_region, validity_map[0]);
7327
7328 /* Clear validity map flags */
7329 if (shmem > 0)
7330 REG_WR(bp, shmem + validity_offset, 0);
7331}
7332
7333#define MCP_TIMEOUT 5000 /* 5 seconds (in ms) */
7334#define MCP_ONE_TIMEOUT 100 /* 100 ms */
7335
7336/* Waits for MCP_ONE_TIMEOUT or MCP_ONE_TIMEOUT*10,
7337 * depending on the HW type.
7338 *
7339 * @param bp
7340 */
7341static inline void bnx2x_mcp_wait_one(struct bnx2x *bp)
7342{
7343 /* special handling for emulation and FPGA,
7344 wait 10 times longer */
7345 if (CHIP_REV_IS_SLOW(bp))
7346 msleep(MCP_ONE_TIMEOUT*10);
7347 else
7348 msleep(MCP_ONE_TIMEOUT);
7349}
7350
7351static int bnx2x_reset_mcp_comp(struct bnx2x *bp, u32 magic_val)
7352{
7353 u32 shmem, cnt, validity_offset, val;
7354 int rc = 0;
7355
7356 msleep(100);
7357
7358 /* Get shmem offset */
7359 shmem = REG_RD(bp, MISC_REG_SHARED_MEM_ADDR);
7360 if (shmem == 0) {
7361 BNX2X_ERR("Shmem 0 return failure\n");
7362 rc = -ENOTTY;
7363 goto exit_lbl;
7364 }
7365
7366 validity_offset = offsetof(struct shmem_region, validity_map[0]);
7367
7368 /* Wait for MCP to come up */
7369 for (cnt = 0; cnt < (MCP_TIMEOUT / MCP_ONE_TIMEOUT); cnt++) {
7370		/* TBD: it's best to check the validity map of the last port;
7371		 * currently this checks port 0.
7372 */
7373 val = REG_RD(bp, shmem + validity_offset);
7374 DP(NETIF_MSG_HW, "shmem 0x%x validity map(0x%x)=0x%x\n", shmem,
7375 shmem + validity_offset, val);
7376
7377 /* check that shared memory is valid. */
7378 if ((val & (SHR_MEM_VALIDITY_DEV_INFO | SHR_MEM_VALIDITY_MB))
7379 == (SHR_MEM_VALIDITY_DEV_INFO | SHR_MEM_VALIDITY_MB))
7380 break;
7381
7382 bnx2x_mcp_wait_one(bp);
7383 }
7384
7385 DP(NETIF_MSG_HW, "Cnt=%d Shmem validity map 0x%x\n", cnt, val);
7386
7387 /* Check that shared memory is valid. This indicates that MCP is up. */
7388 if ((val & (SHR_MEM_VALIDITY_DEV_INFO | SHR_MEM_VALIDITY_MB)) !=
7389 (SHR_MEM_VALIDITY_DEV_INFO | SHR_MEM_VALIDITY_MB)) {
7390 BNX2X_ERR("Shmem signature not present. MCP is not up !!\n");
7391 rc = -ENOTTY;
7392 goto exit_lbl;
7393 }
7394
7395exit_lbl:
7396 /* Restore the `magic' bit value */
7397 if (!CHIP_IS_E1(bp))
7398 bnx2x_clp_reset_done(bp, magic_val);
7399
7400 return rc;
7401}
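
/*
 * Timing note derived from the defines above: the wait loop polls the
 * shmem validity map up to MCP_TIMEOUT / MCP_ONE_TIMEOUT = 50 times,
 * sleeping 100 ms per iteration (10 times longer on slow
 * emulation/FPGA), so the MCP gets roughly 5 seconds to come back up.
 */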
7402
7403static void bnx2x_pxp_prep(struct bnx2x *bp)
7404{
7405 if (!CHIP_IS_E1(bp)) {
7406 REG_WR(bp, PXP2_REG_RD_START_INIT, 0);
7407 REG_WR(bp, PXP2_REG_RQ_RBC_DONE, 0);
7408 REG_WR(bp, PXP2_REG_RQ_CFG_DONE, 0);
7409 mmiowb();
7410 }
7411}
7412
7413/*
7414 * Reset the whole chip except for:
7415 * - PCIE core
7416 * - PCI Glue, PSWHST, PXP/PXP2 RF (all controlled by
7417 * one reset bit)
7418 * - IGU
7419 * - MISC (including AEU)
7420 * - GRC
7421 * - RBCN, RBCP
7422 */
7423static void bnx2x_process_kill_chip_reset(struct bnx2x *bp)
7424{
7425 u32 not_reset_mask1, reset_mask1, not_reset_mask2, reset_mask2;
7426
7427 not_reset_mask1 =
7428 MISC_REGISTERS_RESET_REG_1_RST_HC |
7429 MISC_REGISTERS_RESET_REG_1_RST_PXPV |
7430 MISC_REGISTERS_RESET_REG_1_RST_PXP;
7431
7432 not_reset_mask2 =
7433 MISC_REGISTERS_RESET_REG_2_RST_MDIO |
7434 MISC_REGISTERS_RESET_REG_2_RST_EMAC0_HARD_CORE |
7435 MISC_REGISTERS_RESET_REG_2_RST_EMAC1_HARD_CORE |
7436 MISC_REGISTERS_RESET_REG_2_RST_MISC_CORE |
7437 MISC_REGISTERS_RESET_REG_2_RST_RBCN |
7438 MISC_REGISTERS_RESET_REG_2_RST_GRC |
7439 MISC_REGISTERS_RESET_REG_2_RST_MCP_N_RESET_REG_HARD_CORE |
7440 MISC_REGISTERS_RESET_REG_2_RST_MCP_N_HARD_CORE_RST_B;
7441
7442 reset_mask1 = 0xffffffff;
7443
7444 if (CHIP_IS_E1(bp))
7445 reset_mask2 = 0xffff;
7446 else
7447 reset_mask2 = 0x1ffff;
7448
7449 REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_CLEAR,
7450 reset_mask1 & (~not_reset_mask1));
7451 REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_2_CLEAR,
7452 reset_mask2 & (~not_reset_mask2));
7453
7454 barrier();
7455 mmiowb();
7456
7457 REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_SET, reset_mask1);
7458 REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_2_SET, reset_mask2);
7459 mmiowb();
7460}
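
/*
 * Register semantics, as also used in the UNDI unload path below:
 * writing a mask to MISC_REGISTERS_RESET_REG_x_CLEAR puts the selected
 * blocks into reset, and writing it to the matching _SET register
 * takes them back out.  The not_reset masks carve the blocks listed in
 * the comment above this function out of the CLEAR write so that they
 * survive the "process kill".
 */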
7461
7462static int bnx2x_process_kill(struct bnx2x *bp)
7463{
7464 int cnt = 1000;
7465 u32 val = 0;
7466 u32 sr_cnt, blk_cnt, port_is_idle_0, port_is_idle_1, pgl_exp_rom2;
7467
7468
7469 /* Empty the Tetris buffer, wait for 1s */
7470 do {
7471 sr_cnt = REG_RD(bp, PXP2_REG_RD_SR_CNT);
7472 blk_cnt = REG_RD(bp, PXP2_REG_RD_BLK_CNT);
7473 port_is_idle_0 = REG_RD(bp, PXP2_REG_RD_PORT_IS_IDLE_0);
7474 port_is_idle_1 = REG_RD(bp, PXP2_REG_RD_PORT_IS_IDLE_1);
7475 pgl_exp_rom2 = REG_RD(bp, PXP2_REG_PGL_EXP_ROM2);
7476 if ((sr_cnt == 0x7e) && (blk_cnt == 0xa0) &&
7477 ((port_is_idle_0 & 0x1) == 0x1) &&
7478 ((port_is_idle_1 & 0x1) == 0x1) &&
7479 (pgl_exp_rom2 == 0xffffffff))
7480 break;
7481 msleep(1);
7482 } while (cnt-- > 0);
7483
7484 if (cnt <= 0) {
7485 DP(NETIF_MSG_HW, "Tetris buffer didn't get empty or there"
7486 " are still"
7487 " outstanding read requests after 1s!\n");
7488 DP(NETIF_MSG_HW, "sr_cnt=0x%08x, blk_cnt=0x%08x,"
7489 " port_is_idle_0=0x%08x,"
7490 " port_is_idle_1=0x%08x, pgl_exp_rom2=0x%08x\n",
7491 sr_cnt, blk_cnt, port_is_idle_0, port_is_idle_1,
7492 pgl_exp_rom2);
7493 return -EAGAIN;
7494 }
7495
7496 barrier();
7497
7498 /* Close gates #2, #3 and #4 */
7499 bnx2x_set_234_gates(bp, true);
7500
7501 /* TBD: Indicate that "process kill" is in progress to MCP */
7502
7503 /* Clear "unprepared" bit */
7504 REG_WR(bp, MISC_REG_UNPREPARED, 0);
7505 barrier();
7506
7507 /* Make sure all is written to the chip before the reset */
7508 mmiowb();
7509
7510 /* Wait for 1ms to empty GLUE and PCI-E core queues,
7511 * PSWHST, GRC and PSWRD Tetris buffer.
7512 */
7513 msleep(1);
7514
7515 /* Prepare to chip reset: */
7516 /* MCP */
7517 bnx2x_reset_mcp_prep(bp, &val);
7518
7519 /* PXP */
7520 bnx2x_pxp_prep(bp);
7521 barrier();
7522
7523 /* reset the chip */
7524 bnx2x_process_kill_chip_reset(bp);
7525 barrier();
7526
7527 /* Recover after reset: */
7528 /* MCP */
7529 if (bnx2x_reset_mcp_comp(bp, val))
7530 return -EAGAIN;
7531
7532 /* PXP */
7533 bnx2x_pxp_prep(bp);
7534
7535 /* Open the gates #2, #3 and #4 */
7536 bnx2x_set_234_gates(bp, false);
7537
7538	/* TBD: IGU/AEU preparation: bring the AEU/IGU back to a
7539	 * reset state, re-enable attentions. */
7540
7541 return 0;
7542}
7543
7544static int bnx2x_leader_reset(struct bnx2x *bp)
7545{
7546 int rc = 0;
7547 /* Try to recover after the failure */
7548 if (bnx2x_process_kill(bp)) {
7549		printk(KERN_ERR "%s: Something bad has happened! Aii!\n",
7550 bp->dev->name);
7551 rc = -EAGAIN;
7552 goto exit_leader_reset;
7553 }
7554
7555 /* Clear "reset is in progress" bit and update the driver state */
7556 bnx2x_set_reset_done(bp);
7557 bp->recovery_state = BNX2X_RECOVERY_DONE;
7558
7559exit_leader_reset:
7560 bp->is_leader = 0;
7561 bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_RESERVED_08);
7562 smp_wmb();
7563 return rc;
7564}
7565
Vladislav Zolotarov72fd0712010-04-19 01:13:12 +00007566/* Assumption: runs under rtnl lock. This together with the fact
7567 * that it's called only from bnx2x_reset_task() ensures that it
7568 * will never be called when netif_running(bp->dev) is false.
7569 */
7570static void bnx2x_parity_recover(struct bnx2x *bp)
7571{
7572 DP(NETIF_MSG_HW, "Handling parity\n");
7573 while (1) {
7574 switch (bp->recovery_state) {
7575 case BNX2X_RECOVERY_INIT:
7576 DP(NETIF_MSG_HW, "State is BNX2X_RECOVERY_INIT\n");
7577 /* Try to get a LEADER_LOCK HW lock */
7578 if (bnx2x_trylock_hw_lock(bp,
7579 HW_LOCK_RESOURCE_RESERVED_08))
7580 bp->is_leader = 1;
7581
7582 /* Stop the driver */
7583 /* If interface has been removed - break */
7584 if (bnx2x_nic_unload(bp, UNLOAD_RECOVERY))
7585 return;
7586
7587 bp->recovery_state = BNX2X_RECOVERY_WAIT;
7588 /* Ensure "is_leader" and "recovery_state"
7589 * update values are seen on other CPUs
7590 */
7591 smp_wmb();
7592 break;
7593
7594 case BNX2X_RECOVERY_WAIT:
7595 DP(NETIF_MSG_HW, "State is BNX2X_RECOVERY_WAIT\n");
7596 if (bp->is_leader) {
7597 u32 load_counter = bnx2x_get_load_cnt(bp);
7598 if (load_counter) {
7599 /* Wait until all other functions get
7600 * down.
7601 */
7602 schedule_delayed_work(&bp->reset_task,
7603 HZ/10);
7604 return;
7605 } else {
7606 /* If all other functions got down -
7607 * try to bring the chip back to
7608 * normal. In any case it's an exit
7609 * point for a leader.
7610 */
7611 if (bnx2x_leader_reset(bp) ||
7612 bnx2x_nic_load(bp, LOAD_NORMAL)) {
7613 printk(KERN_ERR"%s: Recovery "
7614 "has failed. Power cycle is "
7615 "needed.\n", bp->dev->name);
7616 /* Disconnect this device */
7617 netif_device_detach(bp->dev);
7618 /* Block ifup for all function
7619 * of this ASIC until
7620 * "process kill" or power
7621 * cycle.
7622 */
7623 bnx2x_set_reset_in_progress(bp);
7624 /* Shut down the power */
7625 bnx2x_set_power_state(bp,
7626 PCI_D3hot);
7627 return;
7628 }
7629
7630 return;
7631 }
7632 } else { /* non-leader */
7633 if (!bnx2x_reset_is_done(bp)) {
7634 /* Try to get a LEADER_LOCK HW lock as
7635 * long as a former leader may have
7636 * been unloaded by the user or
7637 * released a leadership by another
7638 * reason.
7639 */
7640 if (bnx2x_trylock_hw_lock(bp,
7641 HW_LOCK_RESOURCE_RESERVED_08)) {
7642 /* I'm a leader now! Restart a
7643 * switch case.
7644 */
7645 bp->is_leader = 1;
7646 break;
7647 }
7648
7649 schedule_delayed_work(&bp->reset_task,
7650 HZ/10);
7651 return;
7652
7653 } else { /* A leader has completed
7654 * the "process kill". It's an exit
7655 * point for a non-leader.
7656 */
7657 bnx2x_nic_load(bp, LOAD_NORMAL);
7658 bp->recovery_state =
7659 BNX2X_RECOVERY_DONE;
7660 smp_wmb();
7661 return;
7662 }
7663 }
7664 default:
7665 return;
7666 }
7667 }
7668}
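
/*
 * Condensed recovery flow, mirroring the switch above: the first
 * function to grab HW lock 08 becomes the leader.  Every function
 * unloads; the leader then waits for the global load count to reach
 * zero, runs the "process kill" chip reset and reloads, while
 * non-leaders poll until the reset-in-progress bit clears (or inherit
 * leadership if the lock frees up) and then reload themselves.
 */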
7669
7670/* bnx2x_nic_unload() flushes the bnx2x_wq, thus the reset task is
7671 * scheduled on a general queue in order to prevent a deadlock.
7672 */
Eilon Greenstein34f80b02008-06-23 20:33:01 -07007673static void bnx2x_reset_task(struct work_struct *work)
7674{
Vladislav Zolotarov72fd0712010-04-19 01:13:12 +00007675 struct bnx2x *bp = container_of(work, struct bnx2x, reset_task.work);
Eilon Greenstein34f80b02008-06-23 20:33:01 -07007676
7677#ifdef BNX2X_STOP_ON_ERROR
7678 BNX2X_ERR("reset task called but STOP_ON_ERROR defined"
7679 " so reset not done to allow debug dump,\n"
Vladislav Zolotarov72fd0712010-04-19 01:13:12 +00007680 KERN_ERR " you will need to reboot when done\n");
Eilon Greenstein34f80b02008-06-23 20:33:01 -07007681 return;
7682#endif
7683
7684 rtnl_lock();
7685
7686 if (!netif_running(bp->dev))
7687 goto reset_task_exit;
7688
Vladislav Zolotarov72fd0712010-04-19 01:13:12 +00007689 if (unlikely(bp->recovery_state != BNX2X_RECOVERY_DONE))
7690 bnx2x_parity_recover(bp);
7691 else {
7692 bnx2x_nic_unload(bp, UNLOAD_NORMAL);
7693 bnx2x_nic_load(bp, LOAD_NORMAL);
7694 }
Eilon Greenstein34f80b02008-06-23 20:33:01 -07007695
7696reset_task_exit:
7697 rtnl_unlock();
7698}
7699
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02007700/* end of nic load/unload */
7701
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02007702/*
7703 * Init service functions
7704 */
7705
stephen hemminger8d962862010-10-21 07:50:56 +00007706static u32 bnx2x_get_pretend_reg(struct bnx2x *bp)
Eilon Greensteinf1ef27e2009-02-12 08:36:23 +00007707{
Dmitry Kravkovf2e08992010-10-06 03:28:26 +00007708 u32 base = PXP2_REG_PGL_PRETEND_FUNC_F0;
7709 u32 stride = PXP2_REG_PGL_PRETEND_FUNC_F1 - base;
7710 return base + (BP_ABS_FUNC(bp)) * stride;
Eilon Greensteinf1ef27e2009-02-12 08:36:23 +00007711}
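
/*
 * Address math note: the stride is the distance between the
 * per-function pretend registers F0 and F1, so e.g. absolute
 * function 3 resolves to PXP2_REG_PGL_PRETEND_FUNC_F0 + 3 * stride.
 */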
7712
Dmitry Kravkovf2e08992010-10-06 03:28:26 +00007713static void bnx2x_undi_int_disable_e1h(struct bnx2x *bp)
Eilon Greensteinf1ef27e2009-02-12 08:36:23 +00007714{
Dmitry Kravkovf2e08992010-10-06 03:28:26 +00007715 u32 reg = bnx2x_get_pretend_reg(bp);
Eilon Greensteinf1ef27e2009-02-12 08:36:23 +00007716
7717 /* Flush all outstanding writes */
7718 mmiowb();
7719
7720 /* Pretend to be function 0 */
7721 REG_WR(bp, reg, 0);
Dmitry Kravkovf2e08992010-10-06 03:28:26 +00007722 REG_RD(bp, reg); /* Flush the GRC transaction (in the chip) */
Eilon Greensteinf1ef27e2009-02-12 08:36:23 +00007723
7724 /* From now we are in the "like-E1" mode */
7725 bnx2x_int_disable(bp);
7726
7727 /* Flush all outstanding writes */
7728 mmiowb();
7729
Dmitry Kravkovf2e08992010-10-06 03:28:26 +00007730 /* Restore the original function */
7731 REG_WR(bp, reg, BP_ABS_FUNC(bp));
7732 REG_RD(bp, reg);
Eilon Greensteinf1ef27e2009-02-12 08:36:23 +00007733}
7734
Dmitry Kravkovf2e08992010-10-06 03:28:26 +00007735static inline void bnx2x_undi_int_disable(struct bnx2x *bp)
Eilon Greensteinf1ef27e2009-02-12 08:36:23 +00007736{
Dmitry Kravkovf2e08992010-10-06 03:28:26 +00007737 if (CHIP_IS_E1(bp))
Eilon Greensteinf1ef27e2009-02-12 08:36:23 +00007738 bnx2x_int_disable(bp);
Dmitry Kravkovf2e08992010-10-06 03:28:26 +00007739 else
7740 bnx2x_undi_int_disable_e1h(bp);
Eilon Greensteinf1ef27e2009-02-12 08:36:23 +00007741}
7742
Eilon Greenstein34f80b02008-06-23 20:33:01 -07007743static void __devinit bnx2x_undi_unload(struct bnx2x *bp)
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02007744{
Eilon Greenstein34f80b02008-06-23 20:33:01 -07007745 u32 val;
7746
7747 /* Check if there is any driver already loaded */
7748 val = REG_RD(bp, MISC_REG_UNPREPARED);
7749 if (val == 0x1) {
7750 /* Check if it is the UNDI driver
7751 * UNDI driver initializes CID offset for normal bell to 0x7
7752 */
Yitchak Gertner4a37fb62008-08-13 15:50:23 -07007753 bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_UNDI);
Eilon Greenstein34f80b02008-06-23 20:33:01 -07007754 val = REG_RD(bp, DORQ_REG_NORM_CID_OFST);
7755 if (val == 0x7) {
7756 u32 reset_code = DRV_MSG_CODE_UNLOAD_REQ_WOL_DIS;
Dmitry Kravkovf2e08992010-10-06 03:28:26 +00007757 /* save our pf_num */
7758 int orig_pf_num = bp->pf_num;
Vladislav Zolotarovda5a6622008-08-13 15:50:00 -07007759 u32 swap_en;
7760 u32 swap_val;
Eilon Greenstein34f80b02008-06-23 20:33:01 -07007761
Eilon Greensteinb4661732009-01-14 06:43:56 +00007762 /* clear the UNDI indication */
7763 REG_WR(bp, DORQ_REG_NORM_CID_OFST, 0);
7764
Eilon Greenstein34f80b02008-06-23 20:33:01 -07007765 BNX2X_DEV_INFO("UNDI is active! reset device\n");
7766
7767 /* try unload UNDI on port 0 */
Dmitry Kravkovf2e08992010-10-06 03:28:26 +00007768 bp->pf_num = 0;
Vladislav Zolotarovda5a6622008-08-13 15:50:00 -07007769 bp->fw_seq =
Dmitry Kravkovf2e08992010-10-06 03:28:26 +00007770 (SHMEM_RD(bp, func_mb[bp->pf_num].drv_mb_header) &
Vladislav Zolotarovda5a6622008-08-13 15:50:00 -07007771 DRV_MSG_SEQ_NUMBER_MASK);
Yaniv Rosnera22f0782010-09-07 11:41:20 +00007772 reset_code = bnx2x_fw_command(bp, reset_code, 0);
Eilon Greenstein34f80b02008-06-23 20:33:01 -07007773
7774 /* if UNDI is loaded on the other port */
7775 if (reset_code != FW_MSG_CODE_DRV_UNLOAD_COMMON) {
7776
Vladislav Zolotarovda5a6622008-08-13 15:50:00 -07007777 /* send "DONE" for previous unload */
Yaniv Rosnera22f0782010-09-07 11:41:20 +00007778 bnx2x_fw_command(bp,
7779 DRV_MSG_CODE_UNLOAD_DONE, 0);
Vladislav Zolotarovda5a6622008-08-13 15:50:00 -07007780
7781 /* unload UNDI on port 1 */
Dmitry Kravkovf2e08992010-10-06 03:28:26 +00007782 bp->pf_num = 1;
Vladislav Zolotarovda5a6622008-08-13 15:50:00 -07007783 bp->fw_seq =
Dmitry Kravkovf2e08992010-10-06 03:28:26 +00007784 (SHMEM_RD(bp, func_mb[bp->pf_num].drv_mb_header) &
Vladislav Zolotarovda5a6622008-08-13 15:50:00 -07007785 DRV_MSG_SEQ_NUMBER_MASK);
7786 reset_code = DRV_MSG_CODE_UNLOAD_REQ_WOL_DIS;
Eilon Greenstein34f80b02008-06-23 20:33:01 -07007787
Yaniv Rosnera22f0782010-09-07 11:41:20 +00007788 bnx2x_fw_command(bp, reset_code, 0);
Eilon Greenstein34f80b02008-06-23 20:33:01 -07007789 }
7790
Eilon Greensteinb4661732009-01-14 06:43:56 +00007791 /* now it's safe to release the lock */
7792 bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_UNDI);
7793
Dmitry Kravkovf2e08992010-10-06 03:28:26 +00007794 bnx2x_undi_int_disable(bp);
Vladislav Zolotarovda5a6622008-08-13 15:50:00 -07007795
7796 /* close input traffic and wait for it */
7797 /* Do not rcv packets to BRB */
7798 REG_WR(bp,
7799 (BP_PORT(bp) ? NIG_REG_LLH1_BRB1_DRV_MASK :
7800 NIG_REG_LLH0_BRB1_DRV_MASK), 0x0);
7801 /* Do not direct rcv packets that are not for MCP to
7802 * the BRB */
7803 REG_WR(bp,
7804 (BP_PORT(bp) ? NIG_REG_LLH1_BRB1_NOT_MCP :
7805 NIG_REG_LLH0_BRB1_NOT_MCP), 0x0);
7806 /* clear AEU */
7807 REG_WR(bp,
7808 (BP_PORT(bp) ? MISC_REG_AEU_MASK_ATTN_FUNC_1 :
7809 MISC_REG_AEU_MASK_ATTN_FUNC_0), 0);
7810 msleep(10);
7811
7812 /* save NIG port swap info */
7813 swap_val = REG_RD(bp, NIG_REG_PORT_SWAP);
7814 swap_en = REG_RD(bp, NIG_REG_STRAP_OVERRIDE);
Eilon Greenstein34f80b02008-06-23 20:33:01 -07007815 /* reset device */
7816 REG_WR(bp,
7817 GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_CLEAR,
Vladislav Zolotarovda5a6622008-08-13 15:50:00 -07007818 0xd3ffffff);
Eilon Greenstein34f80b02008-06-23 20:33:01 -07007819 REG_WR(bp,
7820 GRCBASE_MISC + MISC_REGISTERS_RESET_REG_2_CLEAR,
7821 0x1403);
Vladislav Zolotarovda5a6622008-08-13 15:50:00 -07007822 /* take the NIG out of reset and restore swap values */
7823 REG_WR(bp,
7824 GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_SET,
7825 MISC_REGISTERS_RESET_REG_1_RST_NIG);
7826 REG_WR(bp, NIG_REG_PORT_SWAP, swap_val);
7827 REG_WR(bp, NIG_REG_STRAP_OVERRIDE, swap_en);
7828
7829 /* send unload done to the MCP */
Yaniv Rosnera22f0782010-09-07 11:41:20 +00007830 bnx2x_fw_command(bp, DRV_MSG_CODE_UNLOAD_DONE, 0);
Vladislav Zolotarovda5a6622008-08-13 15:50:00 -07007831
7832 /* restore our func and fw_seq */
Dmitry Kravkovf2e08992010-10-06 03:28:26 +00007833 bp->pf_num = orig_pf_num;
Vladislav Zolotarovda5a6622008-08-13 15:50:00 -07007834 bp->fw_seq =
Dmitry Kravkovf2e08992010-10-06 03:28:26 +00007835 (SHMEM_RD(bp, func_mb[bp->pf_num].drv_mb_header) &
Vladislav Zolotarovda5a6622008-08-13 15:50:00 -07007836 DRV_MSG_SEQ_NUMBER_MASK);
Eilon Greensteinb4661732009-01-14 06:43:56 +00007837 } else
7838 bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_UNDI);
Eilon Greenstein34f80b02008-06-23 20:33:01 -07007839 }
7840}
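
/*
 * Summary of the UNDI unload flow above: UNDI presence is inferred
 * from MISC_REG_UNPREPARED plus a doorbell CID offset of 0x7; the
 * driver then fakes MCP unload requests for port 0 and, if needed,
 * port 1 (temporarily pretending to be PF 0 and PF 1), closes BRB
 * input traffic, resets the device while preserving the NIG port-swap
 * straps, and finally restores its own pf_num and firmware sequence
 * number.
 */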
7841
7842static void __devinit bnx2x_get_common_hwinfo(struct bnx2x *bp)
7843{
7844 u32 val, val2, val3, val4, id;
Eilon Greenstein72ce58c2008-08-13 15:52:46 -07007845 u16 pmc;
Eilon Greenstein34f80b02008-06-23 20:33:01 -07007846
7847 /* Get the chip revision id and number. */
7848 /* chip num:16-31, rev:12-15, metal:4-11, bond_id:0-3 */
7849 val = REG_RD(bp, MISC_REG_CHIP_NUM);
7850 id = ((val & 0xffff) << 16);
7851 val = REG_RD(bp, MISC_REG_CHIP_REV);
7852 id |= ((val & 0xf) << 12);
7853 val = REG_RD(bp, MISC_REG_CHIP_METAL);
7854 id |= ((val & 0xff) << 4);
Eilon Greenstein5a40e082009-01-14 06:44:04 +00007855 val = REG_RD(bp, MISC_REG_BOND_ID);
Eilon Greenstein34f80b02008-06-23 20:33:01 -07007856 id |= (val & 0xf);
7857 bp->common.chip_id = id;
Dmitry Kravkov523224a2010-10-06 03:23:26 +00007858
7859 /* Set doorbell size */
7860 bp->db_size = (1 << BNX2X_DB_SHIFT);
7861
Dmitry Kravkovf2e08992010-10-06 03:28:26 +00007862 if (CHIP_IS_E2(bp)) {
7863 val = REG_RD(bp, MISC_REG_PORT4MODE_EN_OVWR);
7864 if ((val & 1) == 0)
7865 val = REG_RD(bp, MISC_REG_PORT4MODE_EN);
7866 else
7867 val = (val >> 1) & 1;
7868 BNX2X_DEV_INFO("chip is in %s\n", val ? "4_PORT_MODE" :
7869 "2_PORT_MODE");
7870 bp->common.chip_port_mode = val ? CHIP_4_PORT_MODE :
7871 CHIP_2_PORT_MODE;
7872
7873 if (CHIP_MODE_IS_4_PORT(bp))
7874 bp->pfid = (bp->pf_num >> 1); /* 0..3 */
7875 else
7876 bp->pfid = (bp->pf_num & 0x6); /* 0, 2, 4, 6 */
7877 } else {
7878 bp->common.chip_port_mode = CHIP_PORT_MODE_NONE; /* N/A */
7879 bp->pfid = bp->pf_num; /* 0..7 */
7880 }
7881
Dmitry Kravkov523224a2010-10-06 03:23:26 +00007882 /*
7883 * set base FW non-default (fast path) status block id, this value is
7884 * used to initialize the fw_sb_id saved on the fp/queue structure to
7885 * determine the id used by the FW.
7886 */
Dmitry Kravkovf2e08992010-10-06 03:28:26 +00007887 if (CHIP_IS_E1x(bp))
7888 bp->base_fw_ndsb = BP_PORT(bp) * FP_SB_MAX_E1x;
7889 else /* E2 */
7890 bp->base_fw_ndsb = BP_PORT(bp) * FP_SB_MAX_E2;
7891
7892 bp->link_params.chip_id = bp->common.chip_id;
7893 BNX2X_DEV_INFO("chip ID is 0x%x\n", id);
Dmitry Kravkov523224a2010-10-06 03:23:26 +00007894
Eilon Greenstein1c063282009-02-12 08:36:43 +00007895 val = (REG_RD(bp, 0x2874) & 0x55);
7896 if ((bp->common.chip_id & 0x1) ||
7897 (CHIP_IS_E1(bp) && val) || (CHIP_IS_E1H(bp) && (val == 0x55))) {
7898 bp->flags |= ONE_PORT_FLAG;
7899 BNX2X_DEV_INFO("single port device\n");
7900 }
7901
Eilon Greenstein34f80b02008-06-23 20:33:01 -07007902 val = REG_RD(bp, MCP_REG_MCPR_NVM_CFG4);
7903 bp->common.flash_size = (NVRAM_1MB_SIZE <<
7904 (val & MCPR_NVM_CFG4_FLASH_SIZE));
7905 BNX2X_DEV_INFO("flash_size 0x%x (%d)\n",
7906 bp->common.flash_size, bp->common.flash_size);
7907
7908 bp->common.shmem_base = REG_RD(bp, MISC_REG_SHARED_MEM_ADDR);
Dmitry Kravkovf2e08992010-10-06 03:28:26 +00007909 bp->common.shmem2_base = REG_RD(bp, (BP_PATH(bp) ?
7910 MISC_REG_GENERIC_CR_1 :
7911 MISC_REG_GENERIC_CR_0));
Eilon Greenstein34f80b02008-06-23 20:33:01 -07007912 bp->link_params.shmem_base = bp->common.shmem_base;
Yaniv Rosnera22f0782010-09-07 11:41:20 +00007913 bp->link_params.shmem2_base = bp->common.shmem2_base;
Eilon Greenstein2691d512009-08-12 08:22:08 +00007914 BNX2X_DEV_INFO("shmem offset 0x%x shmem2 offset 0x%x\n",
7915 bp->common.shmem_base, bp->common.shmem2_base);
Eilon Greenstein34f80b02008-06-23 20:33:01 -07007916
Dmitry Kravkovf2e08992010-10-06 03:28:26 +00007917 if (!bp->common.shmem_base) {
Eilon Greenstein34f80b02008-06-23 20:33:01 -07007918 BNX2X_DEV_INFO("MCP not active\n");
7919 bp->flags |= NO_MCP_FLAG;
7920 return;
7921 }
7922
7923 val = SHMEM_RD(bp, validity_map[BP_PORT(bp)]);
7924 if ((val & (SHR_MEM_VALIDITY_DEV_INFO | SHR_MEM_VALIDITY_MB))
7925 != (SHR_MEM_VALIDITY_DEV_INFO | SHR_MEM_VALIDITY_MB))
Dmitry Kravkovf2e08992010-10-06 03:28:26 +00007926 BNX2X_ERR("BAD MCP validity signature\n");
Eilon Greenstein34f80b02008-06-23 20:33:01 -07007927
7928 bp->common.hw_config = SHMEM_RD(bp, dev_info.shared_hw_config.config);
Eilon Greenstein35b19ba2009-02-12 08:36:47 +00007929 BNX2X_DEV_INFO("hw_config 0x%08x\n", bp->common.hw_config);
Eilon Greenstein34f80b02008-06-23 20:33:01 -07007930
7931 bp->link_params.hw_led_mode = ((bp->common.hw_config &
7932 SHARED_HW_CFG_LED_MODE_MASK) >>
7933 SHARED_HW_CFG_LED_MODE_SHIFT);
7934
Eilon Greensteinc2c8b032009-02-12 08:37:14 +00007935 bp->link_params.feature_config_flags = 0;
7936 val = SHMEM_RD(bp, dev_info.shared_feature_config.config);
7937 if (val & SHARED_FEAT_CFG_OVERRIDE_PREEMPHASIS_CFG_ENABLED)
7938 bp->link_params.feature_config_flags |=
7939 FEATURE_CONFIG_OVERRIDE_PREEMPHASIS_ENABLED;
7940 else
7941 bp->link_params.feature_config_flags &=
7942 ~FEATURE_CONFIG_OVERRIDE_PREEMPHASIS_ENABLED;
7943
Eilon Greenstein34f80b02008-06-23 20:33:01 -07007944 val = SHMEM_RD(bp, dev_info.bc_rev) >> 8;
7945 bp->common.bc_ver = val;
7946 BNX2X_DEV_INFO("bc_ver %X\n", val);
7947 if (val < BNX2X_BC_VER) {
7948		/* for now, only warn;
7949		 * later we might need to enforce this */
Dmitry Kravkovf2e08992010-10-06 03:28:26 +00007950 BNX2X_ERR("This driver needs bc_ver %X but found %X, "
7951 "please upgrade BC\n", BNX2X_BC_VER, val);
Eilon Greenstein34f80b02008-06-23 20:33:01 -07007952 }
Eilon Greenstein4d295db2009-07-21 05:47:47 +00007953 bp->link_params.feature_config_flags |=
Yaniv Rosnera22f0782010-09-07 11:41:20 +00007954 (val >= REQ_BC_VER_4_VRFY_FIRST_PHY_OPT_MDL) ?
Dmitry Kravkovf85582f2010-10-06 03:34:21 +00007955 FEATURE_CONFIG_BC_SUPPORTS_OPT_MDL_VRFY : 0;
7956
Yaniv Rosnera22f0782010-09-07 11:41:20 +00007957 bp->link_params.feature_config_flags |=
7958 (val >= REQ_BC_VER_4_VRFY_SPECIFIC_PHY_OPT_MDL) ?
7959 FEATURE_CONFIG_BC_SUPPORTS_DUAL_PHY_OPT_MDL_VRFY : 0;
Eilon Greenstein72ce58c2008-08-13 15:52:46 -07007960
7961 if (BP_E1HVN(bp) == 0) {
7962 pci_read_config_word(bp->pdev, bp->pm_cap + PCI_PM_PMC, &pmc);
7963 bp->flags |= (pmc & PCI_PM_CAP_PME_D3cold) ? 0 : NO_WOL_FLAG;
7964 } else {
7965 /* no WOL capability for E1HVN != 0 */
7966 bp->flags |= NO_WOL_FLAG;
7967 }
7968 BNX2X_DEV_INFO("%sWoL capable\n",
Eilon Greensteinf5372252009-02-12 08:38:30 +00007969 (bp->flags & NO_WOL_FLAG) ? "not " : "");
Eilon Greenstein34f80b02008-06-23 20:33:01 -07007970
7971 val = SHMEM_RD(bp, dev_info.shared_hw_config.part_num);
7972 val2 = SHMEM_RD(bp, dev_info.shared_hw_config.part_num[4]);
7973 val3 = SHMEM_RD(bp, dev_info.shared_hw_config.part_num[8]);
7974 val4 = SHMEM_RD(bp, dev_info.shared_hw_config.part_num[12]);
7975
Vladislav Zolotarovcdaa7cb2010-04-19 01:13:57 +00007976 dev_info(&bp->pdev->dev, "part number %X-%X-%X-%X\n",
7977 val, val2, val3, val4);
Eilon Greenstein34f80b02008-06-23 20:33:01 -07007978}
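
/*
 * Illustrative decoder (not part of the driver) for the chip id
 * assembled above; the field layout is the one given in the comment
 * at the top of bnx2x_get_common_hwinfo():
 */
static inline void example_decode_chip_id(u32 id, u16 *num, u8 *rev,
					  u8 *metal, u8 *bond)
{
	*num   = id >> 16;		/* chip num: bits 16-31 */
	*rev   = (id >> 12) & 0xf;	/* rev:      bits 12-15 */
	*metal = (id >> 4) & 0xff;	/* metal:    bits 4-11  */
	*bond  = id & 0xf;		/* bond_id:  bits 0-3   */
}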
7979
Dmitry Kravkovf2e08992010-10-06 03:28:26 +00007980#define IGU_FID(val) GET_FIELD((val), IGU_REG_MAPPING_MEMORY_FID)
7981#define IGU_VEC(val) GET_FIELD((val), IGU_REG_MAPPING_MEMORY_VECTOR)
7982
static void __devinit bnx2x_get_igu_cam_info(struct bnx2x *bp)
{
	int pfid = BP_FUNC(bp);
	int vn = BP_E1HVN(bp);
	int igu_sb_id;
	u32 val;
	u8 fid;

	bp->igu_base_sb = 0xff;
	bp->igu_sb_cnt = 0;
	if (CHIP_INT_MODE_IS_BC(bp)) {
		bp->igu_sb_cnt = min_t(u8, FP_SB_MAX_E1x,
				       NUM_IGU_SB_REQUIRED(bp->l2_cid_count));

		bp->igu_base_sb = (CHIP_MODE_IS_4_PORT(bp) ? pfid : vn) *
			FP_SB_MAX_E1x;

		bp->igu_dsb_id = E1HVN_MAX * FP_SB_MAX_E1x +
			(CHIP_MODE_IS_4_PORT(bp) ? pfid : vn);

		return;
	}

	/* IGU in normal mode - read CAM */
	for (igu_sb_id = 0; igu_sb_id < IGU_REG_MAPPING_MEMORY_SIZE;
	     igu_sb_id++) {
		val = REG_RD(bp, IGU_REG_MAPPING_MEMORY + igu_sb_id * 4);
		if (!(val & IGU_REG_MAPPING_MEMORY_VALID))
			continue;
		fid = IGU_FID(val);
		if ((fid & IGU_FID_ENCODE_IS_PF)) {
			if ((fid & IGU_FID_PF_NUM_MASK) != pfid)
				continue;
			if (IGU_VEC(val) == 0)
				/* default status block */
				bp->igu_dsb_id = igu_sb_id;
			else {
				if (bp->igu_base_sb == 0xff)
					bp->igu_base_sb = igu_sb_id;
				bp->igu_sb_cnt++;
			}
		}
	}
	bp->igu_sb_cnt = min_t(u8, bp->igu_sb_cnt,
			       NUM_IGU_SB_REQUIRED(bp->l2_cid_count));
	if (bp->igu_sb_cnt == 0)
		BNX2X_ERR("CAM configuration error\n");
}

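/* Build the per-port 'supported' masks by aggregating the capabilities of
 * all configured PHYs (internal only, one external, or two external PHYs
 * that may be swapped), then prune each mask against the NVRAM
 * speed_cap_mask of the corresponding configuration.
 */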
static void __devinit bnx2x_link_settings_supported(struct bnx2x *bp,
						    u32 switch_cfg)
{
	int cfg_size = 0, idx, port = BP_PORT(bp);

	/* Aggregation of supported attributes of all external phys */
	bp->port.supported[0] = 0;
	bp->port.supported[1] = 0;
	switch (bp->link_params.num_phys) {
	case 1:
		bp->port.supported[0] = bp->link_params.phy[INT_PHY].supported;
		cfg_size = 1;
		break;
	case 2:
		bp->port.supported[0] = bp->link_params.phy[EXT_PHY1].supported;
		cfg_size = 1;
		break;
	case 3:
		if (bp->link_params.multi_phy_config &
		    PORT_HW_CFG_PHY_SWAPPED_ENABLED) {
			bp->port.supported[1] =
				bp->link_params.phy[EXT_PHY1].supported;
			bp->port.supported[0] =
				bp->link_params.phy[EXT_PHY2].supported;
		} else {
			bp->port.supported[0] =
				bp->link_params.phy[EXT_PHY1].supported;
			bp->port.supported[1] =
				bp->link_params.phy[EXT_PHY2].supported;
		}
		cfg_size = 2;
		break;
	}

	if (!(bp->port.supported[0] || bp->port.supported[1])) {
		BNX2X_ERR("NVRAM config error. BAD phy config. "
			  "PHY1 config 0x%x, PHY2 config 0x%x\n",
			  SHMEM_RD(bp,
			   dev_info.port_hw_config[port].external_phy_config),
			  SHMEM_RD(bp,
			   dev_info.port_hw_config[port].external_phy_config2));
		return;
	}

	switch (switch_cfg) {
	case SWITCH_CFG_1G:
		bp->port.phy_addr = REG_RD(bp, NIG_REG_SERDES0_CTRL_PHY_ADDR +
					   port*0x10);
		BNX2X_DEV_INFO("phy_addr 0x%x\n", bp->port.phy_addr);
		break;

	case SWITCH_CFG_10G:
		bp->port.phy_addr = REG_RD(bp, NIG_REG_XGXS0_CTRL_PHY_ADDR +
					   port*0x18);
		BNX2X_DEV_INFO("phy_addr 0x%x\n", bp->port.phy_addr);
		break;

	default:
		BNX2X_ERR("BAD switch_cfg link_config 0x%x\n",
			  bp->port.link_config[0]);
		return;
	}
	/* mask what we support according to speed_cap_mask per configuration */
	for (idx = 0; idx < cfg_size; idx++) {
		if (!(bp->link_params.speed_cap_mask[idx] &
		      PORT_HW_CFG_SPEED_CAPABILITY_D0_10M_HALF))
			bp->port.supported[idx] &= ~SUPPORTED_10baseT_Half;

		if (!(bp->link_params.speed_cap_mask[idx] &
		      PORT_HW_CFG_SPEED_CAPABILITY_D0_10M_FULL))
			bp->port.supported[idx] &= ~SUPPORTED_10baseT_Full;

		if (!(bp->link_params.speed_cap_mask[idx] &
		      PORT_HW_CFG_SPEED_CAPABILITY_D0_100M_HALF))
			bp->port.supported[idx] &= ~SUPPORTED_100baseT_Half;

		if (!(bp->link_params.speed_cap_mask[idx] &
		      PORT_HW_CFG_SPEED_CAPABILITY_D0_100M_FULL))
			bp->port.supported[idx] &= ~SUPPORTED_100baseT_Full;

		if (!(bp->link_params.speed_cap_mask[idx] &
		      PORT_HW_CFG_SPEED_CAPABILITY_D0_1G))
			bp->port.supported[idx] &= ~(SUPPORTED_1000baseT_Half |
						     SUPPORTED_1000baseT_Full);

		if (!(bp->link_params.speed_cap_mask[idx] &
		      PORT_HW_CFG_SPEED_CAPABILITY_D0_2_5G))
			bp->port.supported[idx] &= ~SUPPORTED_2500baseX_Full;

		if (!(bp->link_params.speed_cap_mask[idx] &
		      PORT_HW_CFG_SPEED_CAPABILITY_D0_10G))
			bp->port.supported[idx] &= ~SUPPORTED_10000baseT_Full;
	}

	BNX2X_DEV_INFO("supported 0x%x 0x%x\n", bp->port.supported[0],
		       bp->port.supported[1]);
}

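/* Translate each NVRAM link_config entry into req_line_speed, req_duplex,
 * req_flow_ctrl and the ethtool advertising mask, falling back to autoneg
 * with the full supported mask when the configured speed is invalid.
 */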
static void __devinit bnx2x_link_settings_requested(struct bnx2x *bp)
{
	u32 link_config, idx, cfg_size = 0;

	bp->port.advertising[0] = 0;
	bp->port.advertising[1] = 0;
	switch (bp->link_params.num_phys) {
	case 1:
	case 2:
		cfg_size = 1;
		break;
	case 3:
		cfg_size = 2;
		break;
	}
	for (idx = 0; idx < cfg_size; idx++) {
		bp->link_params.req_duplex[idx] = DUPLEX_FULL;
		link_config = bp->port.link_config[idx];
		switch (link_config & PORT_FEATURE_LINK_SPEED_MASK) {
		case PORT_FEATURE_LINK_SPEED_AUTO:
			if (bp->port.supported[idx] & SUPPORTED_Autoneg) {
				bp->link_params.req_line_speed[idx] =
					SPEED_AUTO_NEG;
				bp->port.advertising[idx] |=
					bp->port.supported[idx];
			} else {
				/* force 10G, no AN */
				bp->link_params.req_line_speed[idx] =
					SPEED_10000;
				bp->port.advertising[idx] |=
					(ADVERTISED_10000baseT_Full |
					 ADVERTISED_FIBRE);
				continue;
			}
			break;

		case PORT_FEATURE_LINK_SPEED_10M_FULL:
			if (bp->port.supported[idx] & SUPPORTED_10baseT_Full) {
				bp->link_params.req_line_speed[idx] =
					SPEED_10;
				bp->port.advertising[idx] |=
					(ADVERTISED_10baseT_Full |
					 ADVERTISED_TP);
			} else {
				BNX2X_ERROR("NVRAM config error. "
					    "Invalid link_config 0x%x"
					    " speed_cap_mask 0x%x\n",
					    link_config,
					    bp->link_params.speed_cap_mask[idx]);
				return;
			}
			break;

		case PORT_FEATURE_LINK_SPEED_10M_HALF:
			if (bp->port.supported[idx] & SUPPORTED_10baseT_Half) {
				bp->link_params.req_line_speed[idx] =
					SPEED_10;
				bp->link_params.req_duplex[idx] =
					DUPLEX_HALF;
				bp->port.advertising[idx] |=
					(ADVERTISED_10baseT_Half |
					 ADVERTISED_TP);
			} else {
				BNX2X_ERROR("NVRAM config error. "
					    "Invalid link_config 0x%x"
					    " speed_cap_mask 0x%x\n",
					    link_config,
					    bp->link_params.speed_cap_mask[idx]);
				return;
			}
			break;

		case PORT_FEATURE_LINK_SPEED_100M_FULL:
			if (bp->port.supported[idx] &
			    SUPPORTED_100baseT_Full) {
				bp->link_params.req_line_speed[idx] =
					SPEED_100;
				bp->port.advertising[idx] |=
					(ADVERTISED_100baseT_Full |
					 ADVERTISED_TP);
			} else {
				BNX2X_ERROR("NVRAM config error. "
					    "Invalid link_config 0x%x"
					    " speed_cap_mask 0x%x\n",
					    link_config,
					    bp->link_params.speed_cap_mask[idx]);
				return;
			}
			break;

		case PORT_FEATURE_LINK_SPEED_100M_HALF:
			if (bp->port.supported[idx] &
			    SUPPORTED_100baseT_Half) {
				bp->link_params.req_line_speed[idx] =
					SPEED_100;
				bp->link_params.req_duplex[idx] =
					DUPLEX_HALF;
				bp->port.advertising[idx] |=
					(ADVERTISED_100baseT_Half |
					 ADVERTISED_TP);
			} else {
				BNX2X_ERROR("NVRAM config error. "
					    "Invalid link_config 0x%x"
					    " speed_cap_mask 0x%x\n",
					    link_config,
					    bp->link_params.speed_cap_mask[idx]);
				return;
			}
			break;

		case PORT_FEATURE_LINK_SPEED_1G:
			if (bp->port.supported[idx] &
			    SUPPORTED_1000baseT_Full) {
				bp->link_params.req_line_speed[idx] =
					SPEED_1000;
				bp->port.advertising[idx] |=
					(ADVERTISED_1000baseT_Full |
					 ADVERTISED_TP);
			} else {
				BNX2X_ERROR("NVRAM config error. "
					    "Invalid link_config 0x%x"
					    " speed_cap_mask 0x%x\n",
					    link_config,
					    bp->link_params.speed_cap_mask[idx]);
				return;
			}
			break;

		case PORT_FEATURE_LINK_SPEED_2_5G:
			if (bp->port.supported[idx] &
			    SUPPORTED_2500baseX_Full) {
				bp->link_params.req_line_speed[idx] =
					SPEED_2500;
				bp->port.advertising[idx] |=
					(ADVERTISED_2500baseX_Full |
					 ADVERTISED_TP);
			} else {
				BNX2X_ERROR("NVRAM config error. "
					    "Invalid link_config 0x%x"
					    " speed_cap_mask 0x%x\n",
					    link_config,
					    bp->link_params.speed_cap_mask[idx]);
				return;
			}
			break;

		case PORT_FEATURE_LINK_SPEED_10G_CX4:
		case PORT_FEATURE_LINK_SPEED_10G_KX4:
		case PORT_FEATURE_LINK_SPEED_10G_KR:
			if (bp->port.supported[idx] &
			    SUPPORTED_10000baseT_Full) {
				bp->link_params.req_line_speed[idx] =
					SPEED_10000;
				bp->port.advertising[idx] |=
					(ADVERTISED_10000baseT_Full |
					 ADVERTISED_FIBRE);
			} else {
				BNX2X_ERROR("NVRAM config error. "
					    "Invalid link_config 0x%x"
					    " speed_cap_mask 0x%x\n",
					    link_config,
					    bp->link_params.speed_cap_mask[idx]);
				return;
			}
			break;

		default:
			BNX2X_ERROR("NVRAM config error. "
				    "BAD link speed link_config 0x%x\n",
				    link_config);
			bp->link_params.req_line_speed[idx] =
				SPEED_AUTO_NEG;
			bp->port.advertising[idx] =
				bp->port.supported[idx];
			break;
		}

		bp->link_params.req_flow_ctrl[idx] = (link_config &
					 PORT_FEATURE_FLOW_CONTROL_MASK);
		if ((bp->link_params.req_flow_ctrl[idx] ==
		     BNX2X_FLOW_CTRL_AUTO) &&
		    !(bp->port.supported[idx] & SUPPORTED_Autoneg)) {
			bp->link_params.req_flow_ctrl[idx] =
				BNX2X_FLOW_CTRL_NONE;
		}

		BNX2X_DEV_INFO("req_line_speed %d req_duplex %d req_flow_ctrl"
			       " 0x%x advertising 0x%x\n",
			       bp->link_params.req_line_speed[idx],
			       bp->link_params.req_duplex[idx],
			       bp->link_params.req_flow_ctrl[idx],
			       bp->port.advertising[idx]);
	}
}

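/* Build a 6-byte MAC address from the two words kept in shmem: the upper
 * 16 bits and the lower 32 bits are each converted to big endian so the
 * bytes land in network order. E.g. (illustrative values only) mac_hi
 * 0x0010 and mac_lo 0x18012345 produce 00:10:18:01:23:45.
 */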
static void __devinit bnx2x_set_mac_buf(u8 *mac_buf, u32 mac_lo, u16 mac_hi)
{
	mac_hi = cpu_to_be16(mac_hi);
	mac_lo = cpu_to_be32(mac_lo);
	memcpy(mac_buf, &mac_hi, sizeof(mac_hi));
	memcpy(mac_buf + sizeof(mac_hi), &mac_lo, sizeof(mac_lo));
}

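/* Read the per-port link configuration out of shmem: lane config, the
 * speed capability masks and link_config words for both PHY
 * configurations, the WoL default, and the MDIO address to use for
 * external PHY access.
 */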
static void __devinit bnx2x_get_port_hwinfo(struct bnx2x *bp)
{
	int port = BP_PORT(bp);
	u32 config;
	u32 ext_phy_type, ext_phy_config;

	bp->link_params.bp = bp;
	bp->link_params.port = port;

	bp->link_params.lane_config =
		SHMEM_RD(bp, dev_info.port_hw_config[port].lane_config);

	bp->link_params.speed_cap_mask[0] =
		SHMEM_RD(bp,
			 dev_info.port_hw_config[port].speed_capability_mask);
	bp->link_params.speed_cap_mask[1] =
		SHMEM_RD(bp,
			 dev_info.port_hw_config[port].speed_capability_mask2);
	bp->port.link_config[0] =
		SHMEM_RD(bp, dev_info.port_feature_config[port].link_config);

	bp->port.link_config[1] =
		SHMEM_RD(bp, dev_info.port_feature_config[port].link_config2);

	bp->link_params.multi_phy_config =
		SHMEM_RD(bp, dev_info.port_hw_config[port].multi_phy_config);
	/* If the device is capable of WoL, set the default state according
	 * to the HW
	 */
	config = SHMEM_RD(bp, dev_info.port_feature_config[port].config);
	bp->wol = (!(bp->flags & NO_WOL_FLAG) &&
		   (config & PORT_FEATURE_WOL_ENABLED));

	BNX2X_DEV_INFO("lane_config 0x%08x "
		       "speed_cap_mask0 0x%08x link_config0 0x%08x\n",
		       bp->link_params.lane_config,
		       bp->link_params.speed_cap_mask[0],
		       bp->port.link_config[0]);

	bp->link_params.switch_cfg = (bp->port.link_config[0] &
				      PORT_FEATURE_CONNECTED_SWITCH_MASK);
	bnx2x_phy_probe(&bp->link_params);
	bnx2x_link_settings_supported(bp, bp->link_params.switch_cfg);

	bnx2x_link_settings_requested(bp);

	/*
	 * If connected directly, work with the internal PHY, otherwise, work
	 * with the external PHY
	 */
	ext_phy_config =
		SHMEM_RD(bp,
			 dev_info.port_hw_config[port].external_phy_config);
	ext_phy_type = XGXS_EXT_PHY_TYPE(ext_phy_config);
	if (ext_phy_type == PORT_HW_CFG_XGXS_EXT_PHY_TYPE_DIRECT)
		bp->mdio.prtad = bp->port.phy_addr;

	else if ((ext_phy_type != PORT_HW_CFG_XGXS_EXT_PHY_TYPE_FAILURE) &&
		 (ext_phy_type != PORT_HW_CFG_XGXS_EXT_PHY_TYPE_NOT_CONN))
		bp->mdio.prtad =
			XGXS_EXT_PHY_ADDR(ext_phy_config);

	/*
	 * Check if hw lock is required to access MDC/MDIO bus to the PHY(s)
	 * In MF mode, it is set to cover self test cases
	 */
	if (IS_MF(bp))
		bp->port.need_hw_lock = 1;
	else
		bp->port.need_hw_lock = bnx2x_hw_lock_required(bp,
						       bp->common.shmem_base,
						       bp->common.shmem2_base);
}

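/* Pick up the MAC addresses: from the MF function configuration when in
 * multi-function mode (including the iSCSI NPAR MAC for
 * switch-independent mode), otherwise from the port configuration in
 * shmem; with no MCP a random MAC is used as a workaround.
 */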
static void __devinit bnx2x_get_mac_hwinfo(struct bnx2x *bp)
{
	u32 val, val2;
	int func = BP_ABS_FUNC(bp);
	int port = BP_PORT(bp);

	if (BP_NOMCP(bp)) {
		BNX2X_ERROR("warning: random MAC workaround active\n");
		random_ether_addr(bp->dev->dev_addr);
	} else if (IS_MF(bp)) {
		val2 = MF_CFG_RD(bp, func_mf_config[func].mac_upper);
		val = MF_CFG_RD(bp, func_mf_config[func].mac_lower);
		if ((val2 != FUNC_MF_CFG_UPPERMAC_DEFAULT) &&
		    (val != FUNC_MF_CFG_LOWERMAC_DEFAULT))
			bnx2x_set_mac_buf(bp->dev->dev_addr, val, val2);

#ifdef BCM_CNIC
		/* iSCSI NPAR MAC */
		if (IS_MF_SI(bp)) {
			u32 cfg = MF_CFG_RD(bp, func_ext_config[func].func_cfg);
			if (cfg & MACP_FUNC_CFG_FLAGS_ISCSI_OFFLOAD) {
				val2 = MF_CFG_RD(bp, func_ext_config[func].
						 iscsi_mac_addr_upper);
				val = MF_CFG_RD(bp, func_ext_config[func].
						iscsi_mac_addr_lower);
				bnx2x_set_mac_buf(bp->iscsi_mac, val, val2);
			}
		}
#endif
	} else {
		/* in SF read MACs from port configuration */
		val2 = SHMEM_RD(bp, dev_info.port_hw_config[port].mac_upper);
		val = SHMEM_RD(bp, dev_info.port_hw_config[port].mac_lower);
		bnx2x_set_mac_buf(bp->dev->dev_addr, val, val2);

#ifdef BCM_CNIC
		val2 = SHMEM_RD(bp, dev_info.port_hw_config[port].
				iscsi_mac_upper);
		val = SHMEM_RD(bp, dev_info.port_hw_config[port].
			       iscsi_mac_lower);
		bnx2x_set_mac_buf(bp->iscsi_mac, val, val2);
#endif
	}

	memcpy(bp->link_params.mac_addr, bp->dev->dev_addr, ETH_ALEN);
	memcpy(bp->dev->perm_addr, bp->dev->dev_addr, ETH_ALEN);

#ifdef BCM_CNIC
	/* Inform the upper layers about FCoE MAC */
	if (!CHIP_IS_E1x(bp)) {
		if (IS_MF_SD(bp))
			memcpy(bp->fip_mac, bp->dev->dev_addr,
			       sizeof(bp->fip_mac));
		else
			memcpy(bp->fip_mac, bp->iscsi_mac,
			       sizeof(bp->fip_mac));
	}
#endif
}

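/* Top-level HW info probe: common chip info, interrupt block (HC vs. IGU)
 * and status block budget, multi-function mode discovery from the MF
 * configuration block, then port and MAC specifics.
 */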
static int __devinit bnx2x_get_hwinfo(struct bnx2x *bp)
{
	int /*abs*/func = BP_ABS_FUNC(bp);
	int vn, port;
	u32 val = 0;
	int rc = 0;

	bnx2x_get_common_hwinfo(bp);

	if (CHIP_IS_E1x(bp)) {
		bp->common.int_block = INT_BLOCK_HC;

		bp->igu_dsb_id = DEF_SB_IGU_ID;
		bp->igu_base_sb = 0;
		bp->igu_sb_cnt = min_t(u8, FP_SB_MAX_E1x,
				       NUM_IGU_SB_REQUIRED(bp->l2_cid_count));
	} else {
		bp->common.int_block = INT_BLOCK_IGU;
		val = REG_RD(bp, IGU_REG_BLOCK_CONFIGURATION);
		if (val & IGU_BLOCK_CONFIGURATION_REG_BACKWARD_COMP_EN) {
			DP(NETIF_MSG_PROBE, "IGU Backward Compatible Mode\n");
			bp->common.int_block |= INT_BLOCK_MODE_BW_COMP;
		} else
			DP(NETIF_MSG_PROBE, "IGU Normal Mode\n");

		bnx2x_get_igu_cam_info(bp);
	}
	DP(NETIF_MSG_PROBE, "igu_dsb_id %d igu_base_sb %d igu_sb_cnt %d\n",
	   bp->igu_dsb_id, bp->igu_base_sb, bp->igu_sb_cnt);

	/*
	 * Initialize MF configuration
	 */

	bp->mf_ov = 0;
	bp->mf_mode = 0;
	vn = BP_E1HVN(bp);
	port = BP_PORT(bp);

	if (!CHIP_IS_E1(bp) && !BP_NOMCP(bp)) {
		DP(NETIF_MSG_PROBE,
		   "shmem2base 0x%x, size %d, mfcfg offset %d\n",
		   bp->common.shmem2_base, SHMEM2_RD(bp, size),
		   (u32)offsetof(struct shmem2_region, mf_cfg_addr));
		if (SHMEM2_HAS(bp, mf_cfg_addr))
			bp->common.mf_cfg_base = SHMEM2_RD(bp, mf_cfg_addr);
		else
			bp->common.mf_cfg_base = bp->common.shmem_base +
				offsetof(struct shmem_region, func_mb) +
				E1H_FUNC_MAX * sizeof(struct drv_func_mb);
		/*
		 * get mf configuration:
		 * 1. existence of MF configuration
		 * 2. MAC address must be legal (check only upper bytes)
		 *    for Switch-Independent mode;
		 *    OVLAN must be legal for Switch-Dependent mode
		 * 3. SF_MODE configures specific MF mode
		 */
		if (bp->common.mf_cfg_base != SHMEM_MF_CFG_ADDR_NONE) {
			/* get mf configuration */
			val = SHMEM_RD(bp,
				       dev_info.shared_feature_config.config);
			val &= SHARED_FEAT_CFG_FORCE_SF_MODE_MASK;

			switch (val) {
			case SHARED_FEAT_CFG_FORCE_SF_MODE_SWITCH_INDEPT:
				val = MF_CFG_RD(bp, func_mf_config[func].
						mac_upper);
				/* check for legal mac (upper bytes)*/
				if (val != 0xffff) {
					bp->mf_mode = MULTI_FUNCTION_SI;
					bp->mf_config[vn] = MF_CFG_RD(bp,
						   func_mf_config[func].config);
				} else
					DP(NETIF_MSG_PROBE, "illegal MAC "
						"address for SI\n");
				break;
			case SHARED_FEAT_CFG_FORCE_SF_MODE_MF_ALLOWED:
				/* get OV configuration */
				val = MF_CFG_RD(bp,
					func_mf_config[FUNC_0].e1hov_tag);
				val &= FUNC_MF_CFG_E1HOV_TAG_MASK;

				if (val != FUNC_MF_CFG_E1HOV_TAG_DEFAULT) {
					bp->mf_mode = MULTI_FUNCTION_SD;
					bp->mf_config[vn] = MF_CFG_RD(bp,
						func_mf_config[func].config);
				} else
					DP(NETIF_MSG_PROBE, "illegal OV for "
						"SD\n");
				break;
			default:
				/* Unknown configuration: reset mf_config */
				bp->mf_config[vn] = 0;
				DP(NETIF_MSG_PROBE, "Unknown MF mode 0x%x\n",
				   val);
			}
		}

		BNX2X_DEV_INFO("%s function mode\n",
			       IS_MF(bp) ? "multi" : "single");

		switch (bp->mf_mode) {
		case MULTI_FUNCTION_SD:
			val = MF_CFG_RD(bp, func_mf_config[func].e1hov_tag) &
			      FUNC_MF_CFG_E1HOV_TAG_MASK;
			if (val != FUNC_MF_CFG_E1HOV_TAG_DEFAULT) {
				bp->mf_ov = val;
				BNX2X_DEV_INFO("MF OV for func %d is %d"
					       " (0x%04x)\n", func,
					       bp->mf_ov, bp->mf_ov);
			} else {
				BNX2X_ERR("No valid MF OV for func %d,"
					  " aborting\n", func);
				rc = -EPERM;
			}
			break;
		case MULTI_FUNCTION_SI:
			BNX2X_DEV_INFO("func %d is in MF "
				       "switch-independent mode\n", func);
			break;
		default:
			if (vn) {
				BNX2X_ERR("VN %d in single function mode,"
					  " aborting\n", vn);
				rc = -EPERM;
			}
			break;
		}
	}

	/* adjust igu_sb_cnt to MF for E1x */
	if (CHIP_IS_E1x(bp) && IS_MF(bp))
		bp->igu_sb_cnt /= E1HVN_MAX;

	/*
	 * adjust E2 sb count: to be removed when FW will support
	 * more than 16 L2 clients
	 */
#define MAX_L2_CLIENTS				16
	if (CHIP_IS_E2(bp))
		bp->igu_sb_cnt = min_t(u8, bp->igu_sb_cnt,
				       MAX_L2_CLIENTS / (IS_MF(bp) ? 4 : 1));

	if (!BP_NOMCP(bp)) {
		bnx2x_get_port_hwinfo(bp);

		bp->fw_seq =
			(SHMEM_RD(bp, func_mb[BP_FW_MB_IDX(bp)].drv_mb_header) &
			 DRV_MSG_SEQ_NUMBER_MASK);
		BNX2X_DEV_INFO("fw_seq 0x%08x\n", bp->fw_seq);
	}

	/* Get MAC addresses */
	bnx2x_get_mac_hwinfo(bp);

	return rc;
}

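/* Extract the vendor-specific firmware version string from the PCI VPD
 * read-only section. Only boards whose MFR_ID keyword matches
 * PCI_VENDOR_ID_DELL carry the V0 keyword that is copied into bp->fw_ver;
 * everything else leaves fw_ver zeroed.
 */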
static void __devinit bnx2x_read_fwinfo(struct bnx2x *bp)
{
	int cnt, i, block_end, rodi;
	char vpd_data[BNX2X_VPD_LEN+1];
	char str_id_reg[VENDOR_ID_LEN+1];
	char str_id_cap[VENDOR_ID_LEN+1];
	u8 len;

	cnt = pci_read_vpd(bp->pdev, 0, BNX2X_VPD_LEN, vpd_data);
	memset(bp->fw_ver, 0, sizeof(bp->fw_ver));

	if (cnt < BNX2X_VPD_LEN)
		goto out_not_found;

	i = pci_vpd_find_tag(vpd_data, 0, BNX2X_VPD_LEN,
			     PCI_VPD_LRDT_RO_DATA);
	if (i < 0)
		goto out_not_found;

	block_end = i + PCI_VPD_LRDT_TAG_SIZE +
		    pci_vpd_lrdt_size(&vpd_data[i]);

	i += PCI_VPD_LRDT_TAG_SIZE;

	if (block_end > BNX2X_VPD_LEN)
		goto out_not_found;

	rodi = pci_vpd_find_info_keyword(vpd_data, i, block_end,
					 PCI_VPD_RO_KEYWORD_MFR_ID);
	if (rodi < 0)
		goto out_not_found;

	len = pci_vpd_info_field_size(&vpd_data[rodi]);

	if (len != VENDOR_ID_LEN)
		goto out_not_found;

	rodi += PCI_VPD_INFO_FLD_HDR_SIZE;

	/* vendor specific info */
	snprintf(str_id_reg, VENDOR_ID_LEN + 1, "%04x", PCI_VENDOR_ID_DELL);
	snprintf(str_id_cap, VENDOR_ID_LEN + 1, "%04X", PCI_VENDOR_ID_DELL);
	if (!strncmp(str_id_reg, &vpd_data[rodi], VENDOR_ID_LEN) ||
	    !strncmp(str_id_cap, &vpd_data[rodi], VENDOR_ID_LEN)) {

		rodi = pci_vpd_find_info_keyword(vpd_data, i, block_end,
						 PCI_VPD_RO_KEYWORD_VENDOR0);
		if (rodi >= 0) {
			len = pci_vpd_info_field_size(&vpd_data[rodi]);

			rodi += PCI_VPD_INFO_FLD_HDR_SIZE;

			if (len < 32 && (len + rodi) <= BNX2X_VPD_LEN) {
				memcpy(bp->fw_ver, &vpd_data[rodi], len);
				bp->fw_ver[len] = ' ';
			}
		}
		return;
	}
out_not_found:
	return;
}

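/* One-time driver state init: locks, deferred work, HW info probe and BP
 * memory allocation, TPA/coalescing/timer defaults, and DCBX setup.
 */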
static int __devinit bnx2x_init_bp(struct bnx2x *bp)
{
	int func;
	int timer_interval;
	int rc;

	/* Disable interrupt handling until HW is initialized */
	atomic_set(&bp->intr_sem, 1);
	smp_wmb(); /* Ensure that bp->intr_sem update is SMP-safe */

	mutex_init(&bp->port.phy_mutex);
	mutex_init(&bp->fw_mb_mutex);
	spin_lock_init(&bp->stats_lock);
#ifdef BCM_CNIC
	mutex_init(&bp->cnic_mutex);
#endif

	INIT_DELAYED_WORK(&bp->sp_task, bnx2x_sp_task);
	INIT_DELAYED_WORK(&bp->reset_task, bnx2x_reset_task);

	rc = bnx2x_get_hwinfo(bp);

	if (!rc)
		rc = bnx2x_alloc_mem_bp(bp);

	bnx2x_read_fwinfo(bp);

	func = BP_FUNC(bp);

	/* need to reset chip if undi was active */
	if (!BP_NOMCP(bp))
		bnx2x_undi_unload(bp);

	if (CHIP_REV_IS_FPGA(bp))
		dev_err(&bp->pdev->dev, "FPGA detected\n");

	if (BP_NOMCP(bp) && (func == 0))
		dev_err(&bp->pdev->dev, "MCP disabled, "
					"must load devices in order!\n");

	bp->multi_mode = multi_mode;
	bp->int_mode = int_mode;

	bp->dev->features |= NETIF_F_GRO;

	/* Set TPA flags */
	if (disable_tpa) {
		bp->flags &= ~TPA_ENABLE_FLAG;
		bp->dev->features &= ~NETIF_F_LRO;
	} else {
		bp->flags |= TPA_ENABLE_FLAG;
		bp->dev->features |= NETIF_F_LRO;
	}
	bp->disable_tpa = disable_tpa;

	if (CHIP_IS_E1(bp))
		bp->dropless_fc = 0;
	else
		bp->dropless_fc = dropless_fc;

	bp->mrrs = mrrs;

	bp->tx_ring_size = MAX_TX_AVAIL;

	bp->rx_csum = 1;

	/* make sure that the numbers are in the right granularity */
	bp->tx_ticks = (50 / BNX2X_BTR) * BNX2X_BTR;
	bp->rx_ticks = (25 / BNX2X_BTR) * BNX2X_BTR;

	timer_interval = (CHIP_REV_IS_SLOW(bp) ? 5*HZ : HZ);
	bp->current_interval = (poll ? poll : timer_interval);

	init_timer(&bp->timer);
	bp->timer.expires = jiffies + bp->current_interval;
	bp->timer.data = (unsigned long) bp;
	bp->timer.function = bnx2x_timer;

	bnx2x_dcbx_set_state(bp, true, BNX2X_DCBX_ENABLED_ON_NEG_ON);
	bnx2x_dcbx_init_params(bp);

	return rc;
}


/****************************************************************************
* General service functions
****************************************************************************/

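/* On open, if a previous recovery has not completed, the first function to
 * load may become the "leader" and perform the global "process kill" reset
 * itself; otherwise the open fails with -EAGAIN so the caller can retry
 * once recovery finishes.
 */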
/* called with rtnl_lock */
static int bnx2x_open(struct net_device *dev)
{
	struct bnx2x *bp = netdev_priv(dev);

	netif_carrier_off(dev);

	bnx2x_set_power_state(bp, PCI_D0);

	if (!bnx2x_reset_is_done(bp)) {
		do {
			/* Reset MCP mailbox sequence if there is ongoing
			 * recovery
			 */
			bp->fw_seq = 0;

			/* If it's the first function to load and reset done
			 * is still not cleared, recovery is likely needed.
			 * We don't check the attention state here because it
			 * may have already been cleared by a "common" reset
			 * but we shall proceed with "process kill" anyway.
			 */
			if ((bnx2x_get_load_cnt(bp) == 0) &&
			    bnx2x_trylock_hw_lock(bp,
			    HW_LOCK_RESOURCE_RESERVED_08) &&
			    (!bnx2x_leader_reset(bp))) {
				DP(NETIF_MSG_HW, "Recovered in open\n");
				break;
			}

			bnx2x_set_power_state(bp, PCI_D3hot);

			printk(KERN_ERR "%s: Recovery flow hasn't been properly"
			" completed yet. Try again later. If you still see this"
			" message after a few retries then power cycle is"
			" required.\n", bp->dev->name);

			return -EAGAIN;
		} while (0);
	}

	bp->recovery_state = BNX2X_RECOVERY_DONE;

	return bnx2x_nic_load(bp, LOAD_OPEN);
}

/* called with rtnl_lock */
static int bnx2x_close(struct net_device *dev)
{
	struct bnx2x *bp = netdev_priv(dev);

	/* Unload the driver, release IRQs */
	bnx2x_nic_unload(bp, UNLOAD_CLOSE);
	bnx2x_set_power_state(bp, PCI_D3hot);

	return 0;
}

/* called with netif_tx_lock from dev_mcast.c */
void bnx2x_set_rx_mode(struct net_device *dev)
{
	struct bnx2x *bp = netdev_priv(dev);
	u32 rx_mode = BNX2X_RX_MODE_NORMAL;
	int port = BP_PORT(bp);

	if (bp->state != BNX2X_STATE_OPEN) {
		DP(NETIF_MSG_IFUP, "state is %x, returning\n", bp->state);
		return;
	}

	DP(NETIF_MSG_IFUP, "dev->flags = %x\n", dev->flags);

	if (dev->flags & IFF_PROMISC)
		rx_mode = BNX2X_RX_MODE_PROMISC;
	else if ((dev->flags & IFF_ALLMULTI) ||
		 ((netdev_mc_count(dev) > BNX2X_MAX_MULTICAST) &&
		  CHIP_IS_E1(bp)))
		rx_mode = BNX2X_RX_MODE_ALLMULTI;
	else { /* some multicasts */
		if (CHIP_IS_E1(bp)) {
			/*
			 * set mc list, do not wait as wait implies sleep
			 * and set_rx_mode can be invoked from non-sleepable
			 * context
			 */
			u8 offset = (CHIP_REV_IS_SLOW(bp) ?
				     BNX2X_MAX_EMUL_MULTI*(1 + port) :
				     BNX2X_MAX_MULTICAST*(1 + port));

			bnx2x_set_e1_mc_list(bp, offset);
		} else { /* E1H */
			/* Accept one or more multicasts */
			struct netdev_hw_addr *ha;
			u32 mc_filter[MC_HASH_SIZE];
			u32 crc, bit, regidx;
			int i;

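			/* 256-bit approximate-match multicast filter: the
			 * top 8 bits of the CRC32c of each address pick one
			 * of 256 filter bits; the upper 3 of those bits
			 * select the MC_HASH register and the lower 5 the
			 * bit within it.
			 */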
			memset(mc_filter, 0, 4 * MC_HASH_SIZE);

			netdev_for_each_mc_addr(ha, dev) {
				DP(NETIF_MSG_IFUP, "Adding mcast MAC: %pM\n",
				   bnx2x_mc_addr(ha));

				crc = crc32c_le(0, bnx2x_mc_addr(ha),
						ETH_ALEN);
				bit = (crc >> 24) & 0xff;
				regidx = bit >> 5;
				bit &= 0x1f;
				mc_filter[regidx] |= (1 << bit);
			}

			for (i = 0; i < MC_HASH_SIZE; i++)
				REG_WR(bp, MC_HASH_OFFSET(bp, i),
				       mc_filter[i]);
		}
	}

	bp->rx_mode = rx_mode;
	bnx2x_set_storm_rx_mode(bp);
}

/* called with rtnl_lock */
static int bnx2x_mdio_read(struct net_device *netdev, int prtad,
			   int devad, u16 addr)
{
	struct bnx2x *bp = netdev_priv(netdev);
	u16 value;
	int rc;

	DP(NETIF_MSG_LINK, "mdio_read: prtad 0x%x, devad 0x%x, addr 0x%x\n",
	   prtad, devad, addr);

	/* The HW expects different devad if CL22 is used */
	devad = (devad == MDIO_DEVAD_NONE) ? DEFAULT_PHY_DEV_ADDR : devad;

	bnx2x_acquire_phy_lock(bp);
	rc = bnx2x_phy_read(&bp->link_params, prtad, devad, addr, &value);
	bnx2x_release_phy_lock(bp);
	DP(NETIF_MSG_LINK, "mdio_read_val 0x%x rc = 0x%x\n", value, rc);

	if (!rc)
		rc = value;
	return rc;
}

/* called with rtnl_lock */
static int bnx2x_mdio_write(struct net_device *netdev, int prtad, int devad,
			    u16 addr, u16 value)
{
	struct bnx2x *bp = netdev_priv(netdev);
	int rc;

	DP(NETIF_MSG_LINK, "mdio_write: prtad 0x%x, devad 0x%x, addr 0x%x,"
	   " value 0x%x\n", prtad, devad, addr, value);

	/* The HW expects different devad if CL22 is used */
	devad = (devad == MDIO_DEVAD_NONE) ? DEFAULT_PHY_DEV_ADDR : devad;

	bnx2x_acquire_phy_lock(bp);
	rc = bnx2x_phy_write(&bp->link_params, prtad, devad, addr, value);
	bnx2x_release_phy_lock(bp);
	return rc;
}

/* called with rtnl_lock */
static int bnx2x_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
{
	struct bnx2x *bp = netdev_priv(dev);
	struct mii_ioctl_data *mdio = if_mii(ifr);

	DP(NETIF_MSG_LINK, "ioctl: phy id 0x%x, reg 0x%x, val_in 0x%x\n",
	   mdio->phy_id, mdio->reg_num, mdio->val_in);

	if (!netif_running(dev))
		return -EAGAIN;

	return mdio_mii_ioctl(&bp->mdio, mdio, cmd);
}

#ifdef CONFIG_NET_POLL_CONTROLLER
static void poll_bnx2x(struct net_device *dev)
{
	struct bnx2x *bp = netdev_priv(dev);

	disable_irq(bp->pdev->irq);
	bnx2x_interrupt(bp->pdev->irq, dev);
	enable_irq(bp->pdev->irq);
}
#endif

static const struct net_device_ops bnx2x_netdev_ops = {
	.ndo_open		= bnx2x_open,
	.ndo_stop		= bnx2x_close,
	.ndo_start_xmit		= bnx2x_start_xmit,
	.ndo_select_queue	= bnx2x_select_queue,
	.ndo_set_multicast_list	= bnx2x_set_rx_mode,
	.ndo_set_mac_address	= bnx2x_change_mac_addr,
	.ndo_validate_addr	= eth_validate_addr,
	.ndo_do_ioctl		= bnx2x_ioctl,
	.ndo_change_mtu		= bnx2x_change_mtu,
	.ndo_tx_timeout		= bnx2x_tx_timeout,
#ifdef CONFIG_NET_POLL_CONTROLLER
	.ndo_poll_controller	= poll_bnx2x,
#endif
};

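/* PCI-level device init: enable the device, map BAR0 (registers) and BAR2
 * (doorbells), validate PM/PCIe capabilities and DMA masks, clean the PXP2
 * indirect address registers, and wire up netdev ops, feature flags and
 * MDIO callbacks.
 */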
static int __devinit bnx2x_init_dev(struct pci_dev *pdev,
				    struct net_device *dev)
{
	struct bnx2x *bp;
	int rc;

	SET_NETDEV_DEV(dev, &pdev->dev);
	bp = netdev_priv(dev);

	bp->dev = dev;
	bp->pdev = pdev;
	bp->flags = 0;
	bp->pf_num = PCI_FUNC(pdev->devfn);

	rc = pci_enable_device(pdev);
	if (rc) {
		dev_err(&bp->pdev->dev,
			"Cannot enable PCI device, aborting\n");
		goto err_out;
	}

	if (!(pci_resource_flags(pdev, 0) & IORESOURCE_MEM)) {
		dev_err(&bp->pdev->dev,
			"Cannot find PCI device base address, aborting\n");
		rc = -ENODEV;
		goto err_out_disable;
	}

	if (!(pci_resource_flags(pdev, 2) & IORESOURCE_MEM)) {
		dev_err(&bp->pdev->dev, "Cannot find second PCI device"
			" base address, aborting\n");
		rc = -ENODEV;
		goto err_out_disable;
	}

	if (atomic_read(&pdev->enable_cnt) == 1) {
		rc = pci_request_regions(pdev, DRV_MODULE_NAME);
		if (rc) {
			dev_err(&bp->pdev->dev,
				"Cannot obtain PCI resources, aborting\n");
			goto err_out_disable;
		}

		pci_set_master(pdev);
		pci_save_state(pdev);
	}

	bp->pm_cap = pci_find_capability(pdev, PCI_CAP_ID_PM);
	if (bp->pm_cap == 0) {
		dev_err(&bp->pdev->dev,
			"Cannot find power management capability, aborting\n");
		rc = -EIO;
		goto err_out_release;
	}

	bp->pcie_cap = pci_find_capability(pdev, PCI_CAP_ID_EXP);
	if (bp->pcie_cap == 0) {
		dev_err(&bp->pdev->dev,
			"Cannot find PCI Express capability, aborting\n");
		rc = -EIO;
		goto err_out_release;
	}

	if (dma_set_mask(&pdev->dev, DMA_BIT_MASK(64)) == 0) {
		bp->flags |= USING_DAC_FLAG;
		if (dma_set_coherent_mask(&pdev->dev, DMA_BIT_MASK(64)) != 0) {
			dev_err(&bp->pdev->dev, "dma_set_coherent_mask"
				" failed, aborting\n");
			rc = -EIO;
			goto err_out_release;
		}

	} else if (dma_set_mask(&pdev->dev, DMA_BIT_MASK(32)) != 0) {
		dev_err(&bp->pdev->dev,
			"System does not support DMA, aborting\n");
		rc = -EIO;
		goto err_out_release;
	}

	dev->mem_start = pci_resource_start(pdev, 0);
	dev->base_addr = dev->mem_start;
	dev->mem_end = pci_resource_end(pdev, 0);

	dev->irq = pdev->irq;

	bp->regview = pci_ioremap_bar(pdev, 0);
	if (!bp->regview) {
		dev_err(&bp->pdev->dev,
			"Cannot map register space, aborting\n");
		rc = -ENOMEM;
		goto err_out_release;
	}

	bp->doorbells = ioremap_nocache(pci_resource_start(pdev, 2),
					min_t(u64, BNX2X_DB_SIZE(bp),
					      pci_resource_len(pdev, 2)));
	if (!bp->doorbells) {
		dev_err(&bp->pdev->dev,
			"Cannot map doorbell space, aborting\n");
		rc = -ENOMEM;
		goto err_out_unmap;
	}

	bnx2x_set_power_state(bp, PCI_D0);

	/* clean indirect addresses */
	pci_write_config_dword(bp->pdev, PCICFG_GRC_ADDRESS,
			       PCICFG_VENDOR_ID_OFFSET);
	REG_WR(bp, PXP2_REG_PGL_ADDR_88_F0 + BP_PORT(bp)*16, 0);
	REG_WR(bp, PXP2_REG_PGL_ADDR_8C_F0 + BP_PORT(bp)*16, 0);
	REG_WR(bp, PXP2_REG_PGL_ADDR_90_F0 + BP_PORT(bp)*16, 0);
	REG_WR(bp, PXP2_REG_PGL_ADDR_94_F0 + BP_PORT(bp)*16, 0);

	/* Reset the load counter */
	bnx2x_clear_load_cnt(bp);

	dev->watchdog_timeo = TX_TIMEOUT;

	dev->netdev_ops = &bnx2x_netdev_ops;
	bnx2x_set_ethtool_ops(dev);
	dev->features |= NETIF_F_SG;
	dev->features |= NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM;
	if (bp->flags & USING_DAC_FLAG)
		dev->features |= NETIF_F_HIGHDMA;
	dev->features |= (NETIF_F_TSO | NETIF_F_TSO_ECN);
	dev->features |= NETIF_F_TSO6;
	dev->features |= (NETIF_F_HW_VLAN_TX | NETIF_F_HW_VLAN_RX);

	dev->vlan_features |= NETIF_F_SG;
	dev->vlan_features |= NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM;
	if (bp->flags & USING_DAC_FLAG)
		dev->vlan_features |= NETIF_F_HIGHDMA;
	dev->vlan_features |= (NETIF_F_TSO | NETIF_F_TSO_ECN);
	dev->vlan_features |= NETIF_F_TSO6;

#ifdef BCM_DCB
	dev->dcbnl_ops = &bnx2x_dcbnl_ops;
#endif

	/* get_port_hwinfo() will set prtad and mmds properly */
	bp->mdio.prtad = MDIO_PRTAD_NONE;
	bp->mdio.mmds = 0;
	bp->mdio.mode_support = MDIO_SUPPORTS_C45 | MDIO_EMULATE_C22;
	bp->mdio.dev = dev;
	bp->mdio.mdio_read = bnx2x_mdio_read;
	bp->mdio.mdio_write = bnx2x_mdio_write;

	return 0;

err_out_unmap:
	if (bp->regview) {
		iounmap(bp->regview);
		bp->regview = NULL;
	}
	if (bp->doorbells) {
		iounmap(bp->doorbells);
		bp->doorbells = NULL;
	}

err_out_release:
	if (atomic_read(&pdev->enable_cnt) == 1)
		pci_release_regions(pdev);

err_out_disable:
	pci_disable_device(pdev);
	pci_set_drvdata(pdev, NULL);

err_out:
	return rc;
}

static void __devinit bnx2x_get_pcie_width_speed(struct bnx2x *bp,
						 int *width, int *speed)
{
	u32 val = REG_RD(bp, PCICFG_OFFSET + PCICFG_LINK_CONTROL);

	*width = (val & PCICFG_LINK_WIDTH) >> PCICFG_LINK_WIDTH_SHIFT;

	/* return value of 1=2.5GHz 2=5GHz */
	*speed = (val & PCICFG_LINK_SPEED) >> PCICFG_LINK_SPEED_SHIFT;
}

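/* Sanity-check a loaded firmware image: every section and every init_ops
 * offset must stay within the blob, and the embedded version must match
 * the version this driver was built against.
 */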
static int bnx2x_check_firmware(struct bnx2x *bp)
{
	const struct firmware *firmware = bp->firmware;
	struct bnx2x_fw_file_hdr *fw_hdr;
	struct bnx2x_fw_file_section *sections;
	u32 offset, len, num_ops;
	u16 *ops_offsets;
	int i;
	const u8 *fw_ver;

	if (firmware->size < sizeof(struct bnx2x_fw_file_hdr))
		return -EINVAL;

	fw_hdr = (struct bnx2x_fw_file_hdr *)firmware->data;
	sections = (struct bnx2x_fw_file_section *)fw_hdr;

	/* Make sure none of the offsets and sizes make us read beyond
	 * the end of the firmware data */
	for (i = 0; i < sizeof(*fw_hdr) / sizeof(*sections); i++) {
		offset = be32_to_cpu(sections[i].offset);
		len = be32_to_cpu(sections[i].len);
		if (offset + len > firmware->size) {
			dev_err(&bp->pdev->dev,
				"Section %d length is out of bounds\n", i);
			return -EINVAL;
		}
	}

	/* Likewise for the init_ops offsets */
	offset = be32_to_cpu(fw_hdr->init_ops_offsets.offset);
	ops_offsets = (u16 *)(firmware->data + offset);
	num_ops = be32_to_cpu(fw_hdr->init_ops.len) / sizeof(struct raw_op);

	for (i = 0; i < be32_to_cpu(fw_hdr->init_ops_offsets.len) / 2; i++) {
		if (be16_to_cpu(ops_offsets[i]) > num_ops) {
			dev_err(&bp->pdev->dev,
				"Section offset %d is out of bounds\n", i);
			return -EINVAL;
		}
	}

	/* Check FW version */
	offset = be32_to_cpu(fw_hdr->fw_version.offset);
	fw_ver = firmware->data + offset;
	if ((fw_ver[0] != BCM_5710_FW_MAJOR_VERSION) ||
	    (fw_ver[1] != BCM_5710_FW_MINOR_VERSION) ||
	    (fw_ver[2] != BCM_5710_FW_REVISION_VERSION) ||
	    (fw_ver[3] != BCM_5710_FW_ENGINEERING_VERSION)) {
		dev_err(&bp->pdev->dev,
			"Bad FW version:%d.%d.%d.%d. Should be %d.%d.%d.%d\n",
			fw_ver[0], fw_ver[1], fw_ver[2], fw_ver[3],
			BCM_5710_FW_MAJOR_VERSION,
			BCM_5710_FW_MINOR_VERSION,
			BCM_5710_FW_REVISION_VERSION,
			BCM_5710_FW_ENGINEERING_VERSION);
		return -EINVAL;
	}

	return 0;
}

static inline void be32_to_cpu_n(const u8 *_source, u8 *_target, u32 n)
{
	const __be32 *source = (const __be32 *)_source;
	u32 *target = (u32 *)_target;
	u32 i;

	for (i = 0; i < n/4; i++)
		target[i] = be32_to_cpu(source[i]);
}

/*
   Ops array is stored in the following format:
   {op(8bit), offset(24bit, big endian), data(32bit, big endian)}
 */
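/* For example (illustrative values only): the raw big-endian pair
 * {0x05123456, 0x0000abcd} decodes below to op 0x05, offset 0x123456 and
 * raw_data 0x0000abcd.
 */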
Eilon Greensteinab6ad5a2009-08-12 08:24:29 +00009244static inline void bnx2x_prep_ops(const u8 *_source, u8 *_target, u32 n)
Vladislav Zolotarov94a78b72009-04-27 03:27:43 -07009245{
Eilon Greensteinab6ad5a2009-08-12 08:24:29 +00009246 const __be32 *source = (const __be32 *)_source;
9247 struct raw_op *target = (struct raw_op *)_target;
Vladislav Zolotarov94a78b72009-04-27 03:27:43 -07009248 u32 i, j, tmp;
Vladislav Zolotarov94a78b72009-04-27 03:27:43 -07009249
Eilon Greensteinab6ad5a2009-08-12 08:24:29 +00009250 for (i = 0, j = 0; i < n/8; i++, j += 2) {
Vladislav Zolotarov94a78b72009-04-27 03:27:43 -07009251 tmp = be32_to_cpu(source[j]);
9252 target[i].op = (tmp >> 24) & 0xff;
Vladislav Zolotarovcdaa7cb2010-04-19 01:13:57 +00009253 target[i].offset = tmp & 0xffffff;
9254 target[i].raw_data = be32_to_cpu(source[j + 1]);
Vladislav Zolotarov94a78b72009-04-27 03:27:43 -07009255 }
9256}
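/* Illustrative decode (hypothetical values, not taken from a real
 * firmware blob): the big-endian word pair {0x0200000c, 0x00000001}
 * unpacks above to op = 0x02, offset = 0x00000c and
 * raw_data = 0x00000001.
 */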
Eilon Greensteinab6ad5a2009-08-12 08:24:29 +00009257
Dmitry Kravkov523224a2010-10-06 03:23:26 +00009258/*
9259 * IRO array is stored in the following format:
9260 * {base(24bit), m1(16bit), m2(16bit), m3(16bit), size(16bit)}
9261 */
9262static inline void bnx2x_prep_iro(const u8 *_source, u8 *_target, u32 n)
9263{
9264 const __be32 *source = (const __be32 *)_source;
9265 struct iro *target = (struct iro *)_target;
9266 u32 i, j, tmp;
9267
9268 for (i = 0, j = 0; i < n/sizeof(struct iro); i++) {
9269 target[i].base = be32_to_cpu(source[j]);
9270 j++;
9271 tmp = be32_to_cpu(source[j]);
9272 target[i].m1 = (tmp >> 16) & 0xffff;
9273 target[i].m2 = tmp & 0xffff;
9274 j++;
9275 tmp = be32_to_cpu(source[j]);
9276 target[i].m3 = (tmp >> 16) & 0xffff;
9277 target[i].size = tmp & 0xffff;
9278 j++;
9279 }
9280}
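/* Each IRO entry consumes three big-endian words. As a hypothetical
 * example (not real firmware data), {0x00005000, 0x00100002,
 * 0x00080040} unpacks to base = 0x5000, m1 = 0x0010, m2 = 0x0002,
 * m3 = 0x0008 and size = 0x0040.
 */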
9281
Eilon Greensteinab6ad5a2009-08-12 08:24:29 +00009282static inline void be16_to_cpu_n(const u8 *_source, u8 *_target, u32 n)
Vladislav Zolotarov94a78b72009-04-27 03:27:43 -07009283{
Eilon Greensteinab6ad5a2009-08-12 08:24:29 +00009284 const __be16 *source = (const __be16 *)_source;
9285 u16 *target = (u16 *)_target;
Vladislav Zolotarov94a78b72009-04-27 03:27:43 -07009286 u32 i;
Vladislav Zolotarov94a78b72009-04-27 03:27:43 -07009287
9288 for (i = 0; i < n/2; i++)
9289 target[i] = be16_to_cpu(source[i]);
9290}
9291
Joe Perches7995c642010-02-17 15:01:52 +00009292#define BNX2X_ALLOC_AND_SET(arr, lbl, func) \
9293do { \
9294 u32 len = be32_to_cpu(fw_hdr->arr.len); \
9295 bp->arr = kmalloc(len, GFP_KERNEL); \
9296 if (!bp->arr) { \
9297 pr_err("Failed to allocate %d bytes for "#arr"\n", len); \
9298 goto lbl; \
9299 } \
9300 func(bp->firmware->data + be32_to_cpu(fw_hdr->arr.offset), \
9301 (u8 *)bp->arr, len); \
9302} while (0)
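/* For reference, a call such as
 *	BNX2X_ALLOC_AND_SET(init_data, request_firmware_exit, be32_to_cpu_n);
 * expands to: allocate bp->init_data with the length taken from
 * fw_hdr->init_data.len, byte-swap the matching region of
 * bp->firmware->data into it via be32_to_cpu_n(), and jump to the
 * request_firmware_exit label if the allocation fails.
 */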
Vladislav Zolotarov94a78b72009-04-27 03:27:43 -07009303
Dmitry Kravkov6891dd22010-08-03 21:49:40 +00009304int bnx2x_init_firmware(struct bnx2x *bp)
Vladislav Zolotarov94a78b72009-04-27 03:27:43 -07009305{
Ben Hutchings45229b42009-11-07 11:53:39 +00009306 const char *fw_file_name;
Vladislav Zolotarov94a78b72009-04-27 03:27:43 -07009307 struct bnx2x_fw_file_hdr *fw_hdr;
Ben Hutchings45229b42009-11-07 11:53:39 +00009308 int rc;
Vladislav Zolotarov94a78b72009-04-27 03:27:43 -07009309
Vladislav Zolotarov94a78b72009-04-27 03:27:43 -07009310 if (CHIP_IS_E1(bp))
Ben Hutchings45229b42009-11-07 11:53:39 +00009311 fw_file_name = FW_FILE_NAME_E1;
Vladislav Zolotarovcdaa7cb2010-04-19 01:13:57 +00009312 else if (CHIP_IS_E1H(bp))
Ben Hutchings45229b42009-11-07 11:53:39 +00009313 fw_file_name = FW_FILE_NAME_E1H;
Dmitry Kravkovf2e08992010-10-06 03:28:26 +00009314 else if (CHIP_IS_E2(bp))
9315 fw_file_name = FW_FILE_NAME_E2;
Vladislav Zolotarovcdaa7cb2010-04-19 01:13:57 +00009316 else {
Dmitry Kravkov6891dd22010-08-03 21:49:40 +00009317 BNX2X_ERR("Unsupported chip revision\n");
Vladislav Zolotarovcdaa7cb2010-04-19 01:13:57 +00009318 return -EINVAL;
9319 }
Vladislav Zolotarov94a78b72009-04-27 03:27:43 -07009320
Dmitry Kravkov6891dd22010-08-03 21:49:40 +00009321 BNX2X_DEV_INFO("Loading %s\n", fw_file_name);
Vladislav Zolotarov94a78b72009-04-27 03:27:43 -07009322
Dmitry Kravkov6891dd22010-08-03 21:49:40 +00009323 rc = request_firmware(&bp->firmware, fw_file_name, &bp->pdev->dev);
Vladislav Zolotarov94a78b72009-04-27 03:27:43 -07009324 if (rc) {
Dmitry Kravkov6891dd22010-08-03 21:49:40 +00009325 BNX2X_ERR("Can't load firmware file %s\n", fw_file_name);
Vladislav Zolotarov94a78b72009-04-27 03:27:43 -07009326 goto request_firmware_exit;
9327 }
9328
9329 rc = bnx2x_check_firmware(bp);
9330 if (rc) {
Dmitry Kravkov6891dd22010-08-03 21:49:40 +00009331 BNX2X_ERR("Corrupt firmware file %s\n", fw_file_name);
Vladislav Zolotarov94a78b72009-04-27 03:27:43 -07009332 goto request_firmware_exit;
9333 }
9334
9335 fw_hdr = (struct bnx2x_fw_file_hdr *)bp->firmware->data;
9336
9337 /* Initialize the pointers to the init arrays */
9338 /* Blob */
9339 BNX2X_ALLOC_AND_SET(init_data, request_firmware_exit, be32_to_cpu_n);
9340
9341 /* Opcodes */
9342 BNX2X_ALLOC_AND_SET(init_ops, init_ops_alloc_err, bnx2x_prep_ops);
9343
9344 /* Offsets */
Eilon Greensteinab6ad5a2009-08-12 08:24:29 +00009345 BNX2X_ALLOC_AND_SET(init_ops_offsets, init_offsets_alloc_err,
9346 be16_to_cpu_n);
Vladislav Zolotarov94a78b72009-04-27 03:27:43 -07009347
9348 /* STORMs firmware */
Eilon Greenstein573f2032009-08-12 08:24:14 +00009349 INIT_TSEM_INT_TABLE_DATA(bp) = bp->firmware->data +
9350 be32_to_cpu(fw_hdr->tsem_int_table_data.offset);
9351 INIT_TSEM_PRAM_DATA(bp) = bp->firmware->data +
9352 be32_to_cpu(fw_hdr->tsem_pram_data.offset);
9353 INIT_USEM_INT_TABLE_DATA(bp) = bp->firmware->data +
9354 be32_to_cpu(fw_hdr->usem_int_table_data.offset);
9355 INIT_USEM_PRAM_DATA(bp) = bp->firmware->data +
9356 be32_to_cpu(fw_hdr->usem_pram_data.offset);
9357 INIT_XSEM_INT_TABLE_DATA(bp) = bp->firmware->data +
9358 be32_to_cpu(fw_hdr->xsem_int_table_data.offset);
9359 INIT_XSEM_PRAM_DATA(bp) = bp->firmware->data +
9360 be32_to_cpu(fw_hdr->xsem_pram_data.offset);
9361 INIT_CSEM_INT_TABLE_DATA(bp) = bp->firmware->data +
9362 be32_to_cpu(fw_hdr->csem_int_table_data.offset);
9363 INIT_CSEM_PRAM_DATA(bp) = bp->firmware->data +
9364 be32_to_cpu(fw_hdr->csem_pram_data.offset);
Dmitry Kravkov523224a2010-10-06 03:23:26 +00009365 /* IRO */
9366 BNX2X_ALLOC_AND_SET(iro_arr, iro_alloc_err, bnx2x_prep_iro);
Vladislav Zolotarov94a78b72009-04-27 03:27:43 -07009367
9368 return 0;
Eilon Greensteinab6ad5a2009-08-12 08:24:29 +00009369
Dmitry Kravkov523224a2010-10-06 03:23:26 +00009370iro_alloc_err:
9371 kfree(bp->init_ops_offsets);
Vladislav Zolotarov94a78b72009-04-27 03:27:43 -07009372init_offsets_alloc_err:
9373 kfree(bp->init_ops);
9374init_ops_alloc_err:
9375 kfree(bp->init_data);
9376request_firmware_exit:
9377 release_firmware(bp->firmware);
9378
9379 return rc;
9380}
9381
Dmitry Kravkov523224a2010-10-06 03:23:26 +00009382static inline int bnx2x_set_qm_cid_count(struct bnx2x *bp, int l2_cid_count)
9383{
9384 int cid_count = L2_FP_COUNT(l2_cid_count);
Vladislav Zolotarov94a78b72009-04-27 03:27:43 -07009385
Dmitry Kravkov523224a2010-10-06 03:23:26 +00009386#ifdef BCM_CNIC
9387 cid_count += CNIC_CID_MAX;
9388#endif
9389 return roundup(cid_count, QM_CID_ROUND);
9390}
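/* Rough sketch of the rounding: if, say, QM_CID_ROUND were 1024
 * (illustrative value only), a cid_count of 1050 would be returned
 * as roundup(1050, 1024) = 2048.
 */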
Dmitry Kravkovf85582f2010-10-06 03:34:21 +00009391
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02009392static int __devinit bnx2x_init_one(struct pci_dev *pdev,
9393 const struct pci_device_id *ent)
9394{
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02009395 struct net_device *dev = NULL;
9396 struct bnx2x *bp;
Eilon Greenstein37f9ce62009-08-12 08:23:34 +00009397 int pcie_width, pcie_speed;
Dmitry Kravkov523224a2010-10-06 03:23:26 +00009398 int rc, cid_count;
9399
Dmitry Kravkovf2e08992010-10-06 03:28:26 +00009400 switch (ent->driver_data) {
9401 case BCM57710:
9402 case BCM57711:
9403 case BCM57711E:
9404 cid_count = FP_SB_MAX_E1x;
9405 break;
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02009406
Dmitry Kravkovf2e08992010-10-06 03:28:26 +00009407 case BCM57712:
9408 case BCM57712E:
9409 cid_count = FP_SB_MAX_E2;
9410 break;
9411
9412 default:
9413 pr_err("Unknown board_type (%ld), aborting\n",
9414 ent->driver_data);
Vasiliy Kulikov870634b2010-11-14 10:08:34 +00009415 return -ENODEV;
Dmitry Kravkovf2e08992010-10-06 03:28:26 +00009416 }
9417
Vladislav Zolotarovec6ba942010-12-13 05:44:01 +00009418 cid_count += NONE_ETH_CONTEXT_USE + CNIC_CONTEXT_USE;
Dmitry Kravkovf85582f2010-10-06 03:34:21 +00009419
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02009420 /* dev zeroed in init_etherdev */
Dmitry Kravkov523224a2010-10-06 03:23:26 +00009421 dev = alloc_etherdev_mq(sizeof(*bp), cid_count);
Eilon Greenstein34f80b02008-06-23 20:33:01 -07009422 if (!dev) {
Vladislav Zolotarovcdaa7cb2010-04-19 01:13:57 +00009423 dev_err(&pdev->dev, "Cannot allocate net device\n");
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02009424 return -ENOMEM;
Eilon Greenstein34f80b02008-06-23 20:33:01 -07009425 }
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02009426
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02009427 bp = netdev_priv(dev);
Joe Perches7995c642010-02-17 15:01:52 +00009428 bp->msg_enable = debug;
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02009429
Eilon Greensteindf4770de2009-08-12 08:23:28 +00009430 pci_set_drvdata(pdev, dev);
9431
Dmitry Kravkov523224a2010-10-06 03:23:26 +00009432 bp->l2_cid_count = cid_count;
9433
Eilon Greenstein34f80b02008-06-23 20:33:01 -07009434 rc = bnx2x_init_dev(pdev, dev);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02009435 if (rc < 0) {
9436 free_netdev(dev);
9437 return rc;
9438 }
9439
Eilon Greenstein34f80b02008-06-23 20:33:01 -07009440 rc = bnx2x_init_bp(bp);
Eilon Greenstein693fc0d2009-01-14 06:43:52 +00009441 if (rc)
9442 goto init_one_exit;
9443
Dmitry Kravkov523224a2010-10-06 03:23:26 +00009444 /* calc qm_cid_count */
9445 bp->qm_cid_count = bnx2x_set_qm_cid_count(bp, cid_count);
9446
Vladislav Zolotarovec6ba942010-12-13 05:44:01 +00009447#ifdef BCM_CNIC
9448 /* disable FCOE L2 queue for E1x*/
9449 if (CHIP_IS_E1x(bp))
9450 bp->flags |= NO_FCOE_FLAG;
9451
9452#endif
9453
Dmitry Kravkovd6214d72010-10-06 03:32:10 +00009454	/* Configure interrupt mode: try to enable MSI-X/MSI if
9455 * needed, set bp->num_queues appropriately.
9456 */
9457 bnx2x_set_int_mode(bp);
9458
9459 /* Add all NAPI objects */
9460 bnx2x_add_all_napi(bp);
9461
Vladislav Zolotarovb3400072010-11-24 11:09:50 -08009462 rc = register_netdev(dev);
9463 if (rc) {
9464 dev_err(&pdev->dev, "Cannot register net device\n");
9465 goto init_one_exit;
9466 }
9467
Vladislav Zolotarovec6ba942010-12-13 05:44:01 +00009468#ifdef BCM_CNIC
9469 if (!NO_FCOE(bp)) {
9470 /* Add storage MAC address */
9471 rtnl_lock();
9472 dev_addr_add(bp->dev, bp->fip_mac, NETDEV_HW_ADDR_T_SAN);
9473 rtnl_unlock();
9474 }
9475#endif
9476
Eilon Greenstein37f9ce62009-08-12 08:23:34 +00009477 bnx2x_get_pcie_width_speed(bp, &pcie_width, &pcie_speed);
Dmitry Kravkovd6214d72010-10-06 03:32:10 +00009478
Vladislav Zolotarovcdaa7cb2010-04-19 01:13:57 +00009479 netdev_info(dev, "%s (%c%d) PCI-E x%d %s found at mem %lx,"
9480 " IRQ %d, ", board_info[ent->driver_data].name,
9481 (CHIP_REV(bp) >> 12) + 'A', (CHIP_METAL(bp) >> 4),
Dmitry Kravkovf2e08992010-10-06 03:28:26 +00009482 pcie_width,
9483 ((!CHIP_IS_E2(bp) && pcie_speed == 2) ||
9484 (CHIP_IS_E2(bp) && pcie_speed == 1)) ?
9485 "5GHz (Gen2)" : "2.5GHz",
Vladislav Zolotarovcdaa7cb2010-04-19 01:13:57 +00009486 dev->base_addr, bp->pdev->irq);
9487 pr_cont("node addr %pM\n", dev->dev_addr);
Eilon Greensteinc0162012009-03-02 08:01:05 +00009488
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02009489 return 0;
Eilon Greenstein34f80b02008-06-23 20:33:01 -07009490
9491init_one_exit:
9492 if (bp->regview)
9493 iounmap(bp->regview);
9494
9495 if (bp->doorbells)
9496 iounmap(bp->doorbells);
9497
9498 free_netdev(dev);
9499
9500 if (atomic_read(&pdev->enable_cnt) == 1)
9501 pci_release_regions(pdev);
9502
9503 pci_disable_device(pdev);
9504 pci_set_drvdata(pdev, NULL);
9505
9506 return rc;
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02009507}
9508
9509static void __devexit bnx2x_remove_one(struct pci_dev *pdev)
9510{
9511 struct net_device *dev = pci_get_drvdata(pdev);
Eliezer Tamir228241e2008-02-28 11:56:57 -08009512 struct bnx2x *bp;
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02009513
Eliezer Tamir228241e2008-02-28 11:56:57 -08009514 if (!dev) {
Vladislav Zolotarovcdaa7cb2010-04-19 01:13:57 +00009515 dev_err(&pdev->dev, "BAD net device from bnx2x_init_one\n");
Eliezer Tamir228241e2008-02-28 11:56:57 -08009516 return;
9517 }
Eliezer Tamir228241e2008-02-28 11:56:57 -08009518 bp = netdev_priv(dev);
9519
Vladislav Zolotarovec6ba942010-12-13 05:44:01 +00009520#ifdef BCM_CNIC
9521 /* Delete storage MAC address */
9522 if (!NO_FCOE(bp)) {
9523 rtnl_lock();
9524 dev_addr_del(bp->dev, bp->fip_mac, NETDEV_HW_ADDR_T_SAN);
9525 rtnl_unlock();
9526 }
9527#endif
9528
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02009529 unregister_netdev(dev);
9530
Dmitry Kravkovd6214d72010-10-06 03:32:10 +00009531 /* Delete all NAPI objects */
9532 bnx2x_del_all_napi(bp);
9533
Vladislav Zolotarov084d6cb2011-01-09 02:20:19 +00009534	/* Power on: we can't let the PCI layer write to us while we are in D3 */
9535 bnx2x_set_power_state(bp, PCI_D0);
9536
Dmitry Kravkovd6214d72010-10-06 03:32:10 +00009537 /* Disable MSI/MSI-X */
9538 bnx2x_disable_msi(bp);
Dmitry Kravkovf85582f2010-10-06 03:34:21 +00009539
Vladislav Zolotarov084d6cb2011-01-09 02:20:19 +00009540 /* Power off */
9541 bnx2x_set_power_state(bp, PCI_D3hot);
9542
Vladislav Zolotarov72fd0712010-04-19 01:13:12 +00009543 /* Make sure RESET task is not scheduled before continuing */
9544 cancel_delayed_work_sync(&bp->reset_task);
9545
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02009546 if (bp->regview)
9547 iounmap(bp->regview);
9548
9549 if (bp->doorbells)
9550 iounmap(bp->doorbells);
9551
Dmitry Kravkov523224a2010-10-06 03:23:26 +00009552 bnx2x_free_mem_bp(bp);
9553
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02009554 free_netdev(dev);
Eilon Greenstein34f80b02008-06-23 20:33:01 -07009555
9556 if (atomic_read(&pdev->enable_cnt) == 1)
9557 pci_release_regions(pdev);
9558
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02009559 pci_disable_device(pdev);
9560 pci_set_drvdata(pdev, NULL);
9561}
9562
Yitchak Gertnerf8ef6e42008-09-09 05:07:25 -07009563static int bnx2x_eeh_nic_unload(struct bnx2x *bp)
9564{
9565 int i;
9566
9567 bp->state = BNX2X_STATE_ERROR;
9568
9569 bp->rx_mode = BNX2X_RX_MODE_NONE;
9570
9571 bnx2x_netif_stop(bp, 0);
Stanislaw Gruszkac89af1a2010-05-17 17:35:38 -07009572 netif_carrier_off(bp->dev);
Yitchak Gertnerf8ef6e42008-09-09 05:07:25 -07009573
9574 del_timer_sync(&bp->timer);
9575 bp->stats_state = STATS_STATE_DISABLED;
9576 DP(BNX2X_MSG_STATS, "stats_state - DISABLED\n");
9577
9578 /* Release IRQs */
Dmitry Kravkovd6214d72010-10-06 03:32:10 +00009579 bnx2x_free_irq(bp);
Yitchak Gertnerf8ef6e42008-09-09 05:07:25 -07009580
Yitchak Gertnerf8ef6e42008-09-09 05:07:25 -07009581 /* Free SKBs, SGEs, TPA pool and driver internals */
9582 bnx2x_free_skbs(bp);
Dmitry Kravkov523224a2010-10-06 03:23:26 +00009583
Vladislav Zolotarovec6ba942010-12-13 05:44:01 +00009584 for_each_rx_queue(bp, i)
Yitchak Gertnerf8ef6e42008-09-09 05:07:25 -07009585 bnx2x_free_rx_sge_range(bp, bp->fp + i, NUM_RX_SGE);
Dmitry Kravkovd6214d72010-10-06 03:32:10 +00009586
Yitchak Gertnerf8ef6e42008-09-09 05:07:25 -07009587 bnx2x_free_mem(bp);
9588
9589 bp->state = BNX2X_STATE_CLOSED;
9590
Yitchak Gertnerf8ef6e42008-09-09 05:07:25 -07009591 return 0;
9592}
9593
9594static void bnx2x_eeh_recover(struct bnx2x *bp)
9595{
9596 u32 val;
9597
9598 mutex_init(&bp->port.phy_mutex);
9599
9600 bp->common.shmem_base = REG_RD(bp, MISC_REG_SHARED_MEM_ADDR);
9601 bp->link_params.shmem_base = bp->common.shmem_base;
9602 BNX2X_DEV_INFO("shmem offset is 0x%x\n", bp->common.shmem_base);
9603
9604 if (!bp->common.shmem_base ||
9605 (bp->common.shmem_base < 0xA0000) ||
9606 (bp->common.shmem_base >= 0xC0000)) {
9607 BNX2X_DEV_INFO("MCP not active\n");
9608 bp->flags |= NO_MCP_FLAG;
9609 return;
9610 }
9611
9612 val = SHMEM_RD(bp, validity_map[BP_PORT(bp)]);
9613 if ((val & (SHR_MEM_VALIDITY_DEV_INFO | SHR_MEM_VALIDITY_MB))
9614 != (SHR_MEM_VALIDITY_DEV_INFO | SHR_MEM_VALIDITY_MB))
9615 BNX2X_ERR("BAD MCP validity signature\n");
9616
9617 if (!BP_NOMCP(bp)) {
Dmitry Kravkovf2e08992010-10-06 03:28:26 +00009618 bp->fw_seq =
9619 (SHMEM_RD(bp, func_mb[BP_FW_MB_IDX(bp)].drv_mb_header) &
9620 DRV_MSG_SEQ_NUMBER_MASK);
Yitchak Gertnerf8ef6e42008-09-09 05:07:25 -07009621 BNX2X_DEV_INFO("fw_seq 0x%08x\n", bp->fw_seq);
9622 }
9623}
9624
Wendy Xiong493adb12008-06-23 20:36:22 -07009625/**
9626 * bnx2x_io_error_detected - called when PCI error is detected
9627 * @pdev: Pointer to PCI device
9628 * @state: The current pci connection state
9629 *
9630 * This function is called after a PCI bus error affecting
9631 * this device has been detected.
9632 */
9633static pci_ers_result_t bnx2x_io_error_detected(struct pci_dev *pdev,
9634 pci_channel_state_t state)
9635{
9636 struct net_device *dev = pci_get_drvdata(pdev);
9637 struct bnx2x *bp = netdev_priv(dev);
9638
9639 rtnl_lock();
9640
9641 netif_device_detach(dev);
9642
Dean Nelson07ce50e42009-07-31 09:13:25 +00009643 if (state == pci_channel_io_perm_failure) {
9644 rtnl_unlock();
9645 return PCI_ERS_RESULT_DISCONNECT;
9646 }
9647
Wendy Xiong493adb12008-06-23 20:36:22 -07009648 if (netif_running(dev))
Yitchak Gertnerf8ef6e42008-09-09 05:07:25 -07009649 bnx2x_eeh_nic_unload(bp);
Wendy Xiong493adb12008-06-23 20:36:22 -07009650
9651 pci_disable_device(pdev);
9652
9653 rtnl_unlock();
9654
9655 /* Request a slot reset */
9656 return PCI_ERS_RESULT_NEED_RESET;
9657}
9658
9659/**
9660 * bnx2x_io_slot_reset - called after the PCI bus has been reset
9661 * @pdev: Pointer to PCI device
9662 *
9663 * Restart the card from scratch, as if from a cold-boot.
9664 */
9665static pci_ers_result_t bnx2x_io_slot_reset(struct pci_dev *pdev)
9666{
9667 struct net_device *dev = pci_get_drvdata(pdev);
9668 struct bnx2x *bp = netdev_priv(dev);
9669
9670 rtnl_lock();
9671
9672 if (pci_enable_device(pdev)) {
9673 dev_err(&pdev->dev,
9674 "Cannot re-enable PCI device after reset\n");
9675 rtnl_unlock();
9676 return PCI_ERS_RESULT_DISCONNECT;
9677 }
9678
9679 pci_set_master(pdev);
9680 pci_restore_state(pdev);
9681
9682 if (netif_running(dev))
9683 bnx2x_set_power_state(bp, PCI_D0);
9684
9685 rtnl_unlock();
9686
9687 return PCI_ERS_RESULT_RECOVERED;
9688}
9689
9690/**
9691 * bnx2x_io_resume - called when traffic can start flowing again
9692 * @pdev: Pointer to PCI device
9693 *
9694 * This callback is called when the error recovery driver tells us that
9695 * it's OK to resume normal operation.
9696 */
9697static void bnx2x_io_resume(struct pci_dev *pdev)
9698{
9699 struct net_device *dev = pci_get_drvdata(pdev);
9700 struct bnx2x *bp = netdev_priv(dev);
9701
Vladislav Zolotarov72fd0712010-04-19 01:13:12 +00009702 if (bp->recovery_state != BNX2X_RECOVERY_DONE) {
Dmitry Kravkovf2e08992010-10-06 03:28:26 +00009703 printk(KERN_ERR "Handling parity error recovery. "
9704 "Try again later\n");
Vladislav Zolotarov72fd0712010-04-19 01:13:12 +00009705 return;
9706 }
9707
Wendy Xiong493adb12008-06-23 20:36:22 -07009708 rtnl_lock();
9709
Yitchak Gertnerf8ef6e42008-09-09 05:07:25 -07009710 bnx2x_eeh_recover(bp);
9711
Wendy Xiong493adb12008-06-23 20:36:22 -07009712 if (netif_running(dev))
Yitchak Gertnerf8ef6e42008-09-09 05:07:25 -07009713 bnx2x_nic_load(bp, LOAD_NORMAL);
Wendy Xiong493adb12008-06-23 20:36:22 -07009714
9715 netif_device_attach(dev);
9716
9717 rtnl_unlock();
9718}
9719
9720static struct pci_error_handlers bnx2x_err_handler = {
9721 .error_detected = bnx2x_io_error_detected,
Eilon Greenstein356e2382009-02-12 08:38:32 +00009722 .slot_reset = bnx2x_io_slot_reset,
9723 .resume = bnx2x_io_resume,
Wendy Xiong493adb12008-06-23 20:36:22 -07009724};
9725
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02009726static struct pci_driver bnx2x_pci_driver = {
Wendy Xiong493adb12008-06-23 20:36:22 -07009727 .name = DRV_MODULE_NAME,
9728 .id_table = bnx2x_pci_tbl,
9729 .probe = bnx2x_init_one,
9730 .remove = __devexit_p(bnx2x_remove_one),
9731 .suspend = bnx2x_suspend,
9732 .resume = bnx2x_resume,
9733 .err_handler = &bnx2x_err_handler,
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02009734};
9735
9736static int __init bnx2x_init(void)
9737{
Stanislaw Gruszkadd21ca62009-05-05 23:22:01 +00009738 int ret;
9739
Joe Perches7995c642010-02-17 15:01:52 +00009740 pr_info("%s", version);
Eilon Greenstein938cf542009-08-12 08:23:37 +00009741
Eilon Greenstein1cf167f2009-01-14 21:22:18 -08009742 bnx2x_wq = create_singlethread_workqueue("bnx2x");
9743 if (bnx2x_wq == NULL) {
Joe Perches7995c642010-02-17 15:01:52 +00009744 pr_err("Cannot create workqueue\n");
Eilon Greenstein1cf167f2009-01-14 21:22:18 -08009745 return -ENOMEM;
9746 }
9747
Stanislaw Gruszkadd21ca62009-05-05 23:22:01 +00009748 ret = pci_register_driver(&bnx2x_pci_driver);
9749 if (ret) {
Joe Perches7995c642010-02-17 15:01:52 +00009750 pr_err("Cannot register driver\n");
Stanislaw Gruszkadd21ca62009-05-05 23:22:01 +00009751 destroy_workqueue(bnx2x_wq);
9752 }
9753 return ret;
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02009754}
9755
9756static void __exit bnx2x_cleanup(void)
9757{
9758 pci_unregister_driver(&bnx2x_pci_driver);
Eilon Greenstein1cf167f2009-01-14 21:22:18 -08009759
9760 destroy_workqueue(bnx2x_wq);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02009761}
9762
9763module_init(bnx2x_init);
9764module_exit(bnx2x_cleanup);
9765
Michael Chan993ac7b2009-10-10 13:46:56 +00009766#ifdef BCM_CNIC
9767
9768/* count denotes the number of new completions we have seen */
9769static void bnx2x_cnic_sp_post(struct bnx2x *bp, int count)
9770{
9771 struct eth_spe *spe;
9772
9773#ifdef BNX2X_STOP_ON_ERROR
9774 if (unlikely(bp->panic))
9775 return;
9776#endif
9777
9778 spin_lock_bh(&bp->spq_lock);
Dmitry Kravkovc2bff632010-10-06 03:33:18 +00009779 BUG_ON(bp->cnic_spq_pending < count);
Michael Chan993ac7b2009-10-10 13:46:56 +00009780 bp->cnic_spq_pending -= count;
9781
Michael Chan993ac7b2009-10-10 13:46:56 +00009782
Dmitry Kravkovc2bff632010-10-06 03:33:18 +00009783 for (; bp->cnic_kwq_pending; bp->cnic_kwq_pending--) {
9784 u16 type = (le16_to_cpu(bp->cnic_kwq_cons->hdr.type)
9785 & SPE_HDR_CONN_TYPE) >>
9786 SPE_HDR_CONN_TYPE_SHIFT;
9787
9788 /* Set validation for iSCSI L2 client before sending SETUP
9789 * ramrod
9790 */
9791 if (type == ETH_CONNECTION_TYPE) {
9792 u8 cmd = (le32_to_cpu(bp->cnic_kwq_cons->
9793 hdr.conn_and_cmd_data) >>
9794 SPE_HDR_CMD_ID_SHIFT) & 0xff;
9795
9796 if (cmd == RAMROD_CMD_ID_ETH_CLIENT_SETUP)
9797 bnx2x_set_ctx_validation(&bp->context.
9798 vcxt[BNX2X_ISCSI_ETH_CID].eth,
9799 HW_CID(bp, BNX2X_ISCSI_ETH_CID));
9800 }
9801
9802		/* There may be no more than 8 L2 and COMMON SPEs and no more
9803		 * than 8 L5 SPEs in flight.
9804		 */
9805 if ((type == NONE_CONNECTION_TYPE) ||
9806 (type == ETH_CONNECTION_TYPE)) {
9807 if (!atomic_read(&bp->spq_left))
9808 break;
9809 else
9810 atomic_dec(&bp->spq_left);
Vladislav Zolotarovec6ba942010-12-13 05:44:01 +00009811 } else if ((type == ISCSI_CONNECTION_TYPE) ||
9812 (type == FCOE_CONNECTION_TYPE)) {
Dmitry Kravkovc2bff632010-10-06 03:33:18 +00009813 if (bp->cnic_spq_pending >=
9814 bp->cnic_eth_dev.max_kwqe_pending)
9815 break;
9816 else
9817 bp->cnic_spq_pending++;
9818 } else {
9819 BNX2X_ERR("Unknown SPE type: %d\n", type);
9820 bnx2x_panic();
Michael Chan993ac7b2009-10-10 13:46:56 +00009821 break;
Dmitry Kravkovc2bff632010-10-06 03:33:18 +00009822 }
Michael Chan993ac7b2009-10-10 13:46:56 +00009823
9824 spe = bnx2x_sp_get_next(bp);
9825 *spe = *bp->cnic_kwq_cons;
9826
Michael Chan993ac7b2009-10-10 13:46:56 +00009827 DP(NETIF_MSG_TIMER, "pending on SPQ %d, on KWQ %d count %d\n",
9828 bp->cnic_spq_pending, bp->cnic_kwq_pending, count);
9829
9830 if (bp->cnic_kwq_cons == bp->cnic_kwq_last)
9831 bp->cnic_kwq_cons = bp->cnic_kwq;
9832 else
9833 bp->cnic_kwq_cons++;
9834 }
9835 bnx2x_sp_prod_update(bp);
9836 spin_unlock_bh(&bp->spq_lock);
9837}
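/* Note that count == 0 is a valid argument: the callers below use
 * bnx2x_cnic_sp_post(bp, 0) purely to drain pending kwqes without
 * crediting any new completions.
 */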
9838
9839static int bnx2x_cnic_sp_queue(struct net_device *dev,
9840 struct kwqe_16 *kwqes[], u32 count)
9841{
9842 struct bnx2x *bp = netdev_priv(dev);
9843 int i;
9844
9845#ifdef BNX2X_STOP_ON_ERROR
9846 if (unlikely(bp->panic))
9847 return -EIO;
9848#endif
9849
9850 spin_lock_bh(&bp->spq_lock);
9851
9852 for (i = 0; i < count; i++) {
9853 struct eth_spe *spe = (struct eth_spe *)kwqes[i];
9854
9855 if (bp->cnic_kwq_pending == MAX_SP_DESC_CNT)
9856 break;
9857
9858 *bp->cnic_kwq_prod = *spe;
9859
9860 bp->cnic_kwq_pending++;
9861
9862 DP(NETIF_MSG_TIMER, "L5 SPQE %x %x %x:%x pos %d\n",
9863 spe->hdr.conn_and_cmd_data, spe->hdr.type,
Dmitry Kravkov523224a2010-10-06 03:23:26 +00009864 spe->data.update_data_addr.hi,
9865 spe->data.update_data_addr.lo,
Michael Chan993ac7b2009-10-10 13:46:56 +00009866 bp->cnic_kwq_pending);
9867
9868 if (bp->cnic_kwq_prod == bp->cnic_kwq_last)
9869 bp->cnic_kwq_prod = bp->cnic_kwq;
9870 else
9871 bp->cnic_kwq_prod++;
9872 }
9873
9874 spin_unlock_bh(&bp->spq_lock);
9875
9876 if (bp->cnic_spq_pending < bp->cnic_eth_dev.max_kwqe_pending)
9877 bnx2x_cnic_sp_post(bp, 0);
9878
9879 return i;
9880}
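/* Returns the number of kwqes actually queued: i may be smaller than
 * count when the kwq fills up at MAX_SP_DESC_CNT entries.
 */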
9881
9882static int bnx2x_cnic_ctl_send(struct bnx2x *bp, struct cnic_ctl_info *ctl)
9883{
9884 struct cnic_ops *c_ops;
9885 int rc = 0;
9886
9887 mutex_lock(&bp->cnic_mutex);
9888 c_ops = bp->cnic_ops;
9889 if (c_ops)
9890 rc = c_ops->cnic_ctl(bp->cnic_data, ctl);
9891 mutex_unlock(&bp->cnic_mutex);
9892
9893 return rc;
9894}
9895
9896static int bnx2x_cnic_ctl_send_bh(struct bnx2x *bp, struct cnic_ctl_info *ctl)
9897{
9898 struct cnic_ops *c_ops;
9899 int rc = 0;
9900
9901 rcu_read_lock();
9902 c_ops = rcu_dereference(bp->cnic_ops);
9903 if (c_ops)
9904 rc = c_ops->cnic_ctl(bp->cnic_data, ctl);
9905 rcu_read_unlock();
9906
9907 return rc;
9908}
9909
9910/*
9911 * for commands that have no data
9912 */
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00009913int bnx2x_cnic_notify(struct bnx2x *bp, int cmd)
Michael Chan993ac7b2009-10-10 13:46:56 +00009914{
9915 struct cnic_ctl_info ctl = {0};
9916
9917 ctl.cmd = cmd;
9918
9919 return bnx2x_cnic_ctl_send(bp, &ctl);
9920}
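/* Illustrative usage for a data-less command (assuming the standard
 * cnic_if.h command codes):
 *	rc = bnx2x_cnic_notify(bp, CNIC_CTL_STOP_CMD);
 * which simply wraps the command code in a zeroed cnic_ctl_info.
 */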
9921
9922static void bnx2x_cnic_cfc_comp(struct bnx2x *bp, int cid)
9923{
9924 struct cnic_ctl_info ctl;
9925
9926 /* first we tell CNIC and only then we count this as a completion */
9927 ctl.cmd = CNIC_CTL_COMPLETION_CMD;
9928 ctl.data.comp.cid = cid;
9929
9930 bnx2x_cnic_ctl_send_bh(bp, &ctl);
Dmitry Kravkovc2bff632010-10-06 03:33:18 +00009931 bnx2x_cnic_sp_post(bp, 0);
Michael Chan993ac7b2009-10-10 13:46:56 +00009932}
9933
9934static int bnx2x_drv_ctl(struct net_device *dev, struct drv_ctl_info *ctl)
9935{
9936 struct bnx2x *bp = netdev_priv(dev);
9937 int rc = 0;
9938
9939 switch (ctl->cmd) {
9940 case DRV_CTL_CTXTBL_WR_CMD: {
9941 u32 index = ctl->data.io.offset;
9942 dma_addr_t addr = ctl->data.io.dma_addr;
9943
9944 bnx2x_ilt_wr(bp, index, addr);
9945 break;
9946 }
9947
Dmitry Kravkovc2bff632010-10-06 03:33:18 +00009948 case DRV_CTL_RET_L5_SPQ_CREDIT_CMD: {
9949 int count = ctl->data.credit.credit_count;
Michael Chan993ac7b2009-10-10 13:46:56 +00009950
9951 bnx2x_cnic_sp_post(bp, count);
9952 break;
9953 }
9954
9955 /* rtnl_lock is held. */
9956 case DRV_CTL_START_L2_CMD: {
9957 u32 cli = ctl->data.ring.client_id;
9958
Vladislav Zolotarovec6ba942010-12-13 05:44:01 +00009959		/* Clear the FCoE FIP and ALL ENODE MAC addresses first */
9960 bnx2x_del_fcoe_eth_macs(bp);
9961
Dmitry Kravkov523224a2010-10-06 03:23:26 +00009962 /* Set iSCSI MAC address */
9963 bnx2x_set_iscsi_eth_mac_addr(bp, 1);
9964
9965 mmiowb();
9966 barrier();
9967
9968		/* Start accepting on the iSCSI L2 ring. Accept all multicasts,
9969		 * because that is the only way for the UIO client to receive
9970		 * them: in non-promiscuous mode only one client per function
9971		 * (the leading one in our case) will receive multicast
9972		 * packets.
9973		 */
9974 bnx2x_rxq_set_mac_filters(bp, cli,
9975 BNX2X_ACCEPT_UNICAST |
9976 BNX2X_ACCEPT_BROADCAST |
9977 BNX2X_ACCEPT_ALL_MULTICAST);
9978 storm_memset_mac_filters(bp, &bp->mac_filters, BP_FUNC(bp));
9979
Michael Chan993ac7b2009-10-10 13:46:56 +00009980 break;
9981 }
9982
9983 /* rtnl_lock is held. */
9984 case DRV_CTL_STOP_L2_CMD: {
9985 u32 cli = ctl->data.ring.client_id;
9986
Dmitry Kravkov523224a2010-10-06 03:23:26 +00009987 /* Stop accepting on iSCSI L2 ring */
9988 bnx2x_rxq_set_mac_filters(bp, cli, BNX2X_ACCEPT_NONE);
9989 storm_memset_mac_filters(bp, &bp->mac_filters, BP_FUNC(bp));
9990
9991 mmiowb();
9992 barrier();
9993
9994 /* Unset iSCSI L2 MAC */
9995 bnx2x_set_iscsi_eth_mac_addr(bp, 0);
Michael Chan993ac7b2009-10-10 13:46:56 +00009996 break;
9997 }
Dmitry Kravkovc2bff632010-10-06 03:33:18 +00009998 case DRV_CTL_RET_L2_SPQ_CREDIT_CMD: {
9999 int count = ctl->data.credit.credit_count;
10000
10001 smp_mb__before_atomic_inc();
10002 atomic_add(count, &bp->spq_left);
10003 smp_mb__after_atomic_inc();
10004 break;
10005 }
Michael Chan993ac7b2009-10-10 13:46:56 +000010006
10007 default:
10008 BNX2X_ERR("unknown command %x\n", ctl->cmd);
10009 rc = -EINVAL;
10010 }
10011
10012 return rc;
10013}
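/* Hypothetical caller-side sketch (illustrative only; cnic reaches
 * this handler through the cp->drv_ctl hook registered below):
 *	struct drv_ctl_info info = { .cmd = DRV_CTL_CTXTBL_WR_CMD };
 *	info.data.io.offset = index;
 *	info.data.io.dma_addr = mapping;
 *	rc = cp->drv_ctl(bp->dev, &info);
 */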
10014
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +000010015void bnx2x_setup_cnic_irq_info(struct bnx2x *bp)
Michael Chan993ac7b2009-10-10 13:46:56 +000010016{
10017 struct cnic_eth_dev *cp = &bp->cnic_eth_dev;
10018
10019 if (bp->flags & USING_MSIX_FLAG) {
10020 cp->drv_state |= CNIC_DRV_STATE_USING_MSIX;
10021 cp->irq_arr[0].irq_flags |= CNIC_IRQ_FL_MSIX;
10022 cp->irq_arr[0].vector = bp->msix_table[1].vector;
10023 } else {
10024 cp->drv_state &= ~CNIC_DRV_STATE_USING_MSIX;
10025 cp->irq_arr[0].irq_flags &= ~CNIC_IRQ_FL_MSIX;
10026 }
Dmitry Kravkovf2e08992010-10-06 03:28:26 +000010027 if (CHIP_IS_E2(bp))
10028 cp->irq_arr[0].status_blk = (void *)bp->cnic_sb.e2_sb;
10029 else
10030 cp->irq_arr[0].status_blk = (void *)bp->cnic_sb.e1x_sb;
10031
Michael Chan993ac7b2009-10-10 13:46:56 +000010032 cp->irq_arr[0].status_blk_num = CNIC_SB_ID(bp);
Dmitry Kravkov523224a2010-10-06 03:23:26 +000010033 cp->irq_arr[0].status_blk_num2 = CNIC_IGU_SB_ID(bp);
Michael Chan993ac7b2009-10-10 13:46:56 +000010034 cp->irq_arr[1].status_blk = bp->def_status_blk;
10035 cp->irq_arr[1].status_blk_num = DEF_SB_ID;
Dmitry Kravkov523224a2010-10-06 03:23:26 +000010036 cp->irq_arr[1].status_blk_num2 = DEF_SB_IGU_ID;
Michael Chan993ac7b2009-10-10 13:46:56 +000010037
10038 cp->num_irq = 2;
10039}
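/* Layout set up above: irq_arr[0] carries the CNIC vector
 * (bp->msix_table[1], on the assumption that entry 0 is the driver's
 * own slowpath vector) plus the CNIC status block, while irq_arr[1]
 * always points at the default status block.
 */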
10040
10041static int bnx2x_register_cnic(struct net_device *dev, struct cnic_ops *ops,
10042 void *data)
10043{
10044 struct bnx2x *bp = netdev_priv(dev);
10045 struct cnic_eth_dev *cp = &bp->cnic_eth_dev;
10046
10047 if (ops == NULL)
10048 return -EINVAL;
10049
10050 if (atomic_read(&bp->intr_sem) != 0)
10051 return -EBUSY;
10052
10053 bp->cnic_kwq = kzalloc(PAGE_SIZE, GFP_KERNEL);
10054 if (!bp->cnic_kwq)
10055 return -ENOMEM;
10056
10057 bp->cnic_kwq_cons = bp->cnic_kwq;
10058 bp->cnic_kwq_prod = bp->cnic_kwq;
10059 bp->cnic_kwq_last = bp->cnic_kwq + MAX_SP_DESC_CNT;
10060
10061 bp->cnic_spq_pending = 0;
10062 bp->cnic_kwq_pending = 0;
10063
10064 bp->cnic_data = data;
10065
10066 cp->num_irq = 0;
10067 cp->drv_state = CNIC_DRV_STATE_REGD;
Dmitry Kravkov523224a2010-10-06 03:23:26 +000010068 cp->iro_arr = bp->iro_arr;
Michael Chan993ac7b2009-10-10 13:46:56 +000010069
Michael Chan993ac7b2009-10-10 13:46:56 +000010070 bnx2x_setup_cnic_irq_info(bp);
Dmitry Kravkovc2bff632010-10-06 03:33:18 +000010071
Michael Chan993ac7b2009-10-10 13:46:56 +000010072 rcu_assign_pointer(bp->cnic_ops, ops);
10073
10074 return 0;
10075}
10076
10077static int bnx2x_unregister_cnic(struct net_device *dev)
10078{
10079 struct bnx2x *bp = netdev_priv(dev);
10080 struct cnic_eth_dev *cp = &bp->cnic_eth_dev;
10081
10082 mutex_lock(&bp->cnic_mutex);
Michael Chan993ac7b2009-10-10 13:46:56 +000010083 cp->drv_state = 0;
10084 rcu_assign_pointer(bp->cnic_ops, NULL);
10085 mutex_unlock(&bp->cnic_mutex);
10086 synchronize_rcu();
10087 kfree(bp->cnic_kwq);
10088 bp->cnic_kwq = NULL;
10089
10090 return 0;
10091}
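/* The synchronize_rcu() above ensures that every reader that picked up
 * bp->cnic_ops via rcu_dereference() (e.g. bnx2x_cnic_ctl_send_bh())
 * has finished before the kwq memory is freed.
 */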
10092
10093struct cnic_eth_dev *bnx2x_cnic_probe(struct net_device *dev)
10094{
10095 struct bnx2x *bp = netdev_priv(dev);
10096 struct cnic_eth_dev *cp = &bp->cnic_eth_dev;
10097
10098 cp->drv_owner = THIS_MODULE;
10099 cp->chip_id = CHIP_ID(bp);
10100 cp->pdev = bp->pdev;
10101 cp->io_base = bp->regview;
10102 cp->io_base2 = bp->doorbells;
10103 cp->max_kwqe_pending = 8;
Dmitry Kravkov523224a2010-10-06 03:23:26 +000010104 cp->ctx_blk_size = CDU_ILT_PAGE_SZ;
Dmitry Kravkovc2bff632010-10-06 03:33:18 +000010105 cp->ctx_tbl_offset = FUNC_ILT_BASE(BP_FUNC(bp)) +
10106 bnx2x_cid_ilt_lines(bp);
Michael Chan993ac7b2009-10-10 13:46:56 +000010107 cp->ctx_tbl_len = CNIC_ILT_LINES;
Dmitry Kravkovc2bff632010-10-06 03:33:18 +000010108 cp->starting_cid = bnx2x_cid_ilt_lines(bp) * ILT_PAGE_CIDS;
Michael Chan993ac7b2009-10-10 13:46:56 +000010109 cp->drv_submit_kwqes_16 = bnx2x_cnic_sp_queue;
10110 cp->drv_ctl = bnx2x_drv_ctl;
10111 cp->drv_register_cnic = bnx2x_register_cnic;
10112 cp->drv_unregister_cnic = bnx2x_unregister_cnic;
Vladislav Zolotarovec6ba942010-12-13 05:44:01 +000010113 cp->fcoe_init_cid = BNX2X_FCOE_ETH_CID;
10114 cp->iscsi_l2_client_id = BNX2X_ISCSI_ETH_CL_ID +
10115 BP_E1HVN(bp) * NONE_ETH_CONTEXT_USE;
Dmitry Kravkovc2bff632010-10-06 03:33:18 +000010116 cp->iscsi_l2_cid = BNX2X_ISCSI_ETH_CID;
Michael Chan993ac7b2009-10-10 13:46:56 +000010117
Dmitry Kravkovc2bff632010-10-06 03:33:18 +000010118 DP(BNX2X_MSG_SP, "page_size %d, tbl_offset %d, tbl_lines %d, "
10119 "starting cid %d\n",
10120 cp->ctx_blk_size,
10121 cp->ctx_tbl_offset,
10122 cp->ctx_tbl_len,
10123 cp->starting_cid);
Michael Chan993ac7b2009-10-10 13:46:56 +000010124 return cp;
10125}
10126EXPORT_SYMBOL(bnx2x_cnic_probe);
10127
10128#endif /* BCM_CNIC */
Vladislav Zolotarov94a78b72009-04-27 03:27:43 -070010129