blob: 119ca871f016fc534ccad54afdebec39b7ae0c74 [file] [log] [blame]
Eilon Greenstein34f80b02008-06-23 20:33:01 -07001/* bnx2x_main.c: Broadcom Everest network driver.
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02002 *
Vladislav Zolotarov3359fce2010-02-17 13:35:01 -08003 * Copyright (c) 2007-2010 Broadcom Corporation
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02004 *
5 * This program is free software; you can redistribute it and/or modify
6 * it under the terms of the GNU General Public License as published by
7 * the Free Software Foundation.
8 *
Eilon Greenstein24e3fce2008-06-12 14:30:28 -07009 * Maintained by: Eilon Greenstein <eilong@broadcom.com>
10 * Written by: Eliezer Tamir
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +020011 * Based on code from Michael Chan's bnx2 driver
12 * UDP CSUM errata workaround by Arik Gendelman
Eilon Greensteinca003922009-08-12 22:53:28 -070013 * Slowpath and fastpath rework by Vladislav Zolotarov
Eliezer Tamirc14423f2008-02-28 11:49:42 -080014 * Statistics and Link management by Yitchak Gertner
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +020015 *
16 */
17
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +020018#include <linux/module.h>
19#include <linux/moduleparam.h>
20#include <linux/kernel.h>
21#include <linux/device.h> /* for dev_info() */
22#include <linux/timer.h>
23#include <linux/errno.h>
24#include <linux/ioport.h>
25#include <linux/slab.h>
26#include <linux/vmalloc.h>
27#include <linux/interrupt.h>
28#include <linux/pci.h>
29#include <linux/init.h>
30#include <linux/netdevice.h>
31#include <linux/etherdevice.h>
32#include <linux/skbuff.h>
33#include <linux/dma-mapping.h>
34#include <linux/bitops.h>
35#include <linux/irq.h>
36#include <linux/delay.h>
37#include <asm/byteorder.h>
38#include <linux/time.h>
39#include <linux/ethtool.h>
40#include <linux/mii.h>
Eilon Greenstein0c6671b2009-01-14 21:26:51 -080041#include <linux/if_vlan.h>
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +020042#include <net/ip.h>
43#include <net/tcp.h>
44#include <net/checksum.h>
Eilon Greenstein34f80b02008-06-23 20:33:01 -070045#include <net/ip6_checksum.h>
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +020046#include <linux/workqueue.h>
47#include <linux/crc32.h>
Eilon Greenstein34f80b02008-06-23 20:33:01 -070048#include <linux/crc32c.h>
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +020049#include <linux/prefetch.h>
50#include <linux/zlib.h>
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +020051#include <linux/io.h>
Ben Hutchings45229b42009-11-07 11:53:39 +000052#include <linux/stringify.h>
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +020053
Dmitry Kravkovb0efbb92010-07-27 12:33:43 +000054#define BNX2X_MAIN
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +020055#include "bnx2x.h"
56#include "bnx2x_init.h"
Vladislav Zolotarov94a78b72009-04-27 03:27:43 -070057#include "bnx2x_init_ops.h"
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +000058#include "bnx2x_cmn.h"
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +020059
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +020060
Vladislav Zolotarov94a78b72009-04-27 03:27:43 -070061#include <linux/firmware.h>
62#include "bnx2x_fw_file_hdr.h"
63/* FW files */
Ben Hutchings45229b42009-11-07 11:53:39 +000064#define FW_FILE_VERSION \
65 __stringify(BCM_5710_FW_MAJOR_VERSION) "." \
66 __stringify(BCM_5710_FW_MINOR_VERSION) "." \
67 __stringify(BCM_5710_FW_REVISION_VERSION) "." \
68 __stringify(BCM_5710_FW_ENGINEERING_VERSION)
Dmitry Kravkov560131f2010-10-06 03:18:47 +000069#define FW_FILE_NAME_E1 "bnx2x/bnx2x-e1-" FW_FILE_VERSION ".fw"
70#define FW_FILE_NAME_E1H "bnx2x/bnx2x-e1h-" FW_FILE_VERSION ".fw"
Vladislav Zolotarov94a78b72009-04-27 03:27:43 -070071
Eilon Greenstein34f80b02008-06-23 20:33:01 -070072/* Time in jiffies before concluding the transmitter is hung */
73#define TX_TIMEOUT (5*HZ)
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +020074
Andrew Morton53a10562008-02-09 23:16:41 -080075static char version[] __devinitdata =
Eilon Greenstein34f80b02008-06-23 20:33:01 -070076 "Broadcom NetXtreme II 5771x 10Gigabit Ethernet Driver "
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +020077 DRV_MODULE_NAME " " DRV_MODULE_VERSION " (" DRV_MODULE_RELDATE ")\n";
78
Eilon Greenstein24e3fce2008-06-12 14:30:28 -070079MODULE_AUTHOR("Eliezer Tamir");
Eilon Greensteine47d7e62009-01-14 06:44:28 +000080MODULE_DESCRIPTION("Broadcom NetXtreme II BCM57710/57711/57711E Driver");
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +020081MODULE_LICENSE("GPL");
82MODULE_VERSION(DRV_MODULE_VERSION);
Ben Hutchings45229b42009-11-07 11:53:39 +000083MODULE_FIRMWARE(FW_FILE_NAME_E1);
84MODULE_FIRMWARE(FW_FILE_NAME_E1H);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +020085
Eilon Greenstein555f6c72009-02-12 08:36:11 +000086static int multi_mode = 1;
87module_param(multi_mode, int, 0);
Eilon Greensteinca003922009-08-12 22:53:28 -070088MODULE_PARM_DESC(multi_mode, " Multi queue mode "
89 "(0 Disable; 1 Enable (default))");
90
Vladislav Zolotarov54b9dda2009-11-16 06:05:58 +000091static int num_queues;
92module_param(num_queues, int, 0);
93MODULE_PARM_DESC(num_queues, " Number of queues for multi_mode=1"
94 " (default is as a number of CPUs)");
Eilon Greenstein555f6c72009-02-12 08:36:11 +000095
Eilon Greenstein19680c42008-08-13 15:47:33 -070096static int disable_tpa;
Eilon Greenstein19680c42008-08-13 15:47:33 -070097module_param(disable_tpa, int, 0);
Eilon Greenstein9898f862009-02-12 08:38:27 +000098MODULE_PARM_DESC(disable_tpa, " Disable the TPA (LRO) feature");
Eilon Greenstein8badd272009-02-12 08:36:15 +000099
100static int int_mode;
101module_param(int_mode, int, 0);
Vladislav Zolotarovcdaa7cb2010-04-19 01:13:57 +0000102MODULE_PARM_DESC(int_mode, " Force interrupt mode other then MSI-X "
103 "(1 INT#x; 2 MSI)");
Eilon Greenstein8badd272009-02-12 08:36:15 +0000104
Eilon Greensteina18f5122009-08-12 08:23:26 +0000105static int dropless_fc;
106module_param(dropless_fc, int, 0);
107MODULE_PARM_DESC(dropless_fc, " Pause on exhausted host ring");
108
Eilon Greenstein9898f862009-02-12 08:38:27 +0000109static int poll;
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +0200110module_param(poll, int, 0);
Eilon Greenstein9898f862009-02-12 08:38:27 +0000111MODULE_PARM_DESC(poll, " Use polling (for debug)");
Eilon Greenstein8d5726c2009-02-12 08:37:19 +0000112
113static int mrrs = -1;
114module_param(mrrs, int, 0);
115MODULE_PARM_DESC(mrrs, " Force Max Read Req Size (0..3) (for debug)");
116
Eilon Greenstein9898f862009-02-12 08:38:27 +0000117static int debug;
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +0200118module_param(debug, int, 0);
Eilon Greenstein9898f862009-02-12 08:38:27 +0000119MODULE_PARM_DESC(debug, " Default debug msglevel");
120
Eilon Greenstein1cf167f2009-01-14 21:22:18 -0800121static struct workqueue_struct *bnx2x_wq;
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +0200122
/* Board variants; the values double as indices into board_info[]
 * and as driver_data in the PCI device table.
 */
enum bnx2x_board_type {
	BCM57710 = 0,
	BCM57711 = 1,
	BCM57711E = 2,
};
128
/* indexed by board_type, above */
static struct {
	char *name;
} board_info[] __devinitdata = {
	{ "Broadcom NetXtreme II BCM57710 XGb" },
	{ "Broadcom NetXtreme II BCM57711 XGb" },
	{ "Broadcom NetXtreme II BCM57711E XGb" }
};
137
Eilon Greenstein34f80b02008-06-23 20:33:01 -0700138
Alexey Dobriyana3aa1882010-01-07 11:58:11 +0000139static DEFINE_PCI_DEVICE_TABLE(bnx2x_pci_tbl) = {
Eilon Greensteine4ed7112009-08-12 08:24:10 +0000140 { PCI_VDEVICE(BROADCOM, PCI_DEVICE_ID_NX2_57710), BCM57710 },
141 { PCI_VDEVICE(BROADCOM, PCI_DEVICE_ID_NX2_57711), BCM57711 },
142 { PCI_VDEVICE(BROADCOM, PCI_DEVICE_ID_NX2_57711E), BCM57711E },
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +0200143 { 0 }
144};
145
146MODULE_DEVICE_TABLE(pci, bnx2x_pci_tbl);
147
148/****************************************************************************
149* General service functions
150****************************************************************************/
151
Dmitry Kravkov523224a2010-10-06 03:23:26 +0000152static inline void __storm_memset_dma_mapping(struct bnx2x *bp,
153 u32 addr, dma_addr_t mapping)
154{
155 REG_WR(bp, addr, U64_LO(mapping));
156 REG_WR(bp, addr + 4, U64_HI(mapping));
157}
158
159static inline void __storm_memset_fill(struct bnx2x *bp,
160 u32 addr, size_t size, u32 val)
161{
162 int i;
163 for (i = 0; i < size/4; i++)
164 REG_WR(bp, addr + (i * 4), val);
165}
166
167static inline void storm_memset_ustats_zero(struct bnx2x *bp,
168 u8 port, u16 stat_id)
169{
170 size_t size = sizeof(struct ustorm_per_client_stats);
171
172 u32 addr = BAR_USTRORM_INTMEM +
173 USTORM_PER_COUNTER_ID_STATS_OFFSET(port, stat_id);
174
175 __storm_memset_fill(bp, addr, size, 0);
176}
177
178static inline void storm_memset_tstats_zero(struct bnx2x *bp,
179 u8 port, u16 stat_id)
180{
181 size_t size = sizeof(struct tstorm_per_client_stats);
182
183 u32 addr = BAR_TSTRORM_INTMEM +
184 TSTORM_PER_COUNTER_ID_STATS_OFFSET(port, stat_id);
185
186 __storm_memset_fill(bp, addr, size, 0);
187}
188
189static inline void storm_memset_xstats_zero(struct bnx2x *bp,
190 u8 port, u16 stat_id)
191{
192 size_t size = sizeof(struct xstorm_per_client_stats);
193
194 u32 addr = BAR_XSTRORM_INTMEM +
195 XSTORM_PER_COUNTER_ID_STATS_OFFSET(port, stat_id);
196
197 __storm_memset_fill(bp, addr, size, 0);
198}
199
200
201static inline void storm_memset_spq_addr(struct bnx2x *bp,
202 dma_addr_t mapping, u16 abs_fid)
203{
204 u32 addr = XSEM_REG_FAST_MEMORY +
205 XSTORM_SPQ_PAGE_BASE_OFFSET(abs_fid);
206
207 __storm_memset_dma_mapping(bp, addr, mapping);
208}
209
210static inline void storm_memset_ov(struct bnx2x *bp, u16 ov, u16 abs_fid)
211{
212 REG_WR16(bp, BAR_XSTRORM_INTMEM + XSTORM_E1HOV_OFFSET(abs_fid), ov);
213}
214
215static inline void storm_memset_func_cfg(struct bnx2x *bp,
216 struct tstorm_eth_function_common_config *tcfg,
217 u16 abs_fid)
218{
219 size_t size = sizeof(struct tstorm_eth_function_common_config);
220
221 u32 addr = BAR_TSTRORM_INTMEM +
222 TSTORM_FUNCTION_COMMON_CONFIG_OFFSET(abs_fid);
223
224 __storm_memset_struct(bp, addr, size, (u32 *)tcfg);
225}
226
227static inline void storm_memset_xstats_flags(struct bnx2x *bp,
228 struct stats_indication_flags *flags,
229 u16 abs_fid)
230{
231 size_t size = sizeof(struct stats_indication_flags);
232
233 u32 addr = BAR_XSTRORM_INTMEM + XSTORM_STATS_FLAGS_OFFSET(abs_fid);
234
235 __storm_memset_struct(bp, addr, size, (u32 *)flags);
236}
237
238static inline void storm_memset_tstats_flags(struct bnx2x *bp,
239 struct stats_indication_flags *flags,
240 u16 abs_fid)
241{
242 size_t size = sizeof(struct stats_indication_flags);
243
244 u32 addr = BAR_TSTRORM_INTMEM + TSTORM_STATS_FLAGS_OFFSET(abs_fid);
245
246 __storm_memset_struct(bp, addr, size, (u32 *)flags);
247}
248
249static inline void storm_memset_ustats_flags(struct bnx2x *bp,
250 struct stats_indication_flags *flags,
251 u16 abs_fid)
252{
253 size_t size = sizeof(struct stats_indication_flags);
254
255 u32 addr = BAR_USTRORM_INTMEM + USTORM_STATS_FLAGS_OFFSET(abs_fid);
256
257 __storm_memset_struct(bp, addr, size, (u32 *)flags);
258}
259
260static inline void storm_memset_cstats_flags(struct bnx2x *bp,
261 struct stats_indication_flags *flags,
262 u16 abs_fid)
263{
264 size_t size = sizeof(struct stats_indication_flags);
265
266 u32 addr = BAR_CSTRORM_INTMEM + CSTORM_STATS_FLAGS_OFFSET(abs_fid);
267
268 __storm_memset_struct(bp, addr, size, (u32 *)flags);
269}
270
271static inline void storm_memset_xstats_addr(struct bnx2x *bp,
272 dma_addr_t mapping, u16 abs_fid)
273{
274 u32 addr = BAR_XSTRORM_INTMEM +
275 XSTORM_ETH_STATS_QUERY_ADDR_OFFSET(abs_fid);
276
277 __storm_memset_dma_mapping(bp, addr, mapping);
278}
279
280static inline void storm_memset_tstats_addr(struct bnx2x *bp,
281 dma_addr_t mapping, u16 abs_fid)
282{
283 u32 addr = BAR_TSTRORM_INTMEM +
284 TSTORM_ETH_STATS_QUERY_ADDR_OFFSET(abs_fid);
285
286 __storm_memset_dma_mapping(bp, addr, mapping);
287}
288
289static inline void storm_memset_ustats_addr(struct bnx2x *bp,
290 dma_addr_t mapping, u16 abs_fid)
291{
292 u32 addr = BAR_USTRORM_INTMEM +
293 USTORM_ETH_STATS_QUERY_ADDR_OFFSET(abs_fid);
294
295 __storm_memset_dma_mapping(bp, addr, mapping);
296}
297
298static inline void storm_memset_cstats_addr(struct bnx2x *bp,
299 dma_addr_t mapping, u16 abs_fid)
300{
301 u32 addr = BAR_CSTRORM_INTMEM +
302 CSTORM_ETH_STATS_QUERY_ADDR_OFFSET(abs_fid);
303
304 __storm_memset_dma_mapping(bp, addr, mapping);
305}
306
307static inline void storm_memset_vf_to_pf(struct bnx2x *bp, u16 abs_fid,
308 u16 pf_id)
309{
310 REG_WR8(bp, BAR_XSTRORM_INTMEM + XSTORM_VF_TO_PF_OFFSET(abs_fid),
311 pf_id);
312 REG_WR8(bp, BAR_CSTRORM_INTMEM + CSTORM_VF_TO_PF_OFFSET(abs_fid),
313 pf_id);
314 REG_WR8(bp, BAR_TSTRORM_INTMEM + TSTORM_VF_TO_PF_OFFSET(abs_fid),
315 pf_id);
316 REG_WR8(bp, BAR_USTRORM_INTMEM + USTORM_VF_TO_PF_OFFSET(abs_fid),
317 pf_id);
318}
319
320static inline void storm_memset_func_en(struct bnx2x *bp, u16 abs_fid,
321 u8 enable)
322{
323 REG_WR8(bp, BAR_XSTRORM_INTMEM + XSTORM_FUNC_EN_OFFSET(abs_fid),
324 enable);
325 REG_WR8(bp, BAR_CSTRORM_INTMEM + CSTORM_FUNC_EN_OFFSET(abs_fid),
326 enable);
327 REG_WR8(bp, BAR_TSTRORM_INTMEM + TSTORM_FUNC_EN_OFFSET(abs_fid),
328 enable);
329 REG_WR8(bp, BAR_USTRORM_INTMEM + USTORM_FUNC_EN_OFFSET(abs_fid),
330 enable);
331}
332
333static inline void storm_memset_eq_data(struct bnx2x *bp,
334 struct event_ring_data *eq_data,
335 u16 pfid)
336{
337 size_t size = sizeof(struct event_ring_data);
338
339 u32 addr = BAR_CSTRORM_INTMEM + CSTORM_EVENT_RING_DATA_OFFSET(pfid);
340
341 __storm_memset_struct(bp, addr, size, (u32 *)eq_data);
342}
343
344static inline void storm_memset_eq_prod(struct bnx2x *bp, u16 eq_prod,
345 u16 pfid)
346{
347 u32 addr = BAR_CSTRORM_INTMEM + CSTORM_EVENT_RING_PROD_OFFSET(pfid);
348 REG_WR16(bp, addr, eq_prod);
349}
350
351static inline void storm_memset_hc_timeout(struct bnx2x *bp, u8 port,
352 u16 fw_sb_id, u8 sb_index,
353 u8 ticks)
354{
355
356 int index_offset =
357 offsetof(struct hc_status_block_data_e1x, index_data);
358 u32 addr = BAR_CSTRORM_INTMEM +
359 CSTORM_STATUS_BLOCK_DATA_OFFSET(fw_sb_id) +
360 index_offset +
361 sizeof(struct hc_index_data)*sb_index +
362 offsetof(struct hc_index_data, timeout);
363 REG_WR8(bp, addr, ticks);
364 DP(NETIF_MSG_HW, "port %x fw_sb_id %d sb_index %d ticks %d\n",
365 port, fw_sb_id, sb_index, ticks);
366}
367static inline void storm_memset_hc_disable(struct bnx2x *bp, u8 port,
368 u16 fw_sb_id, u8 sb_index,
369 u8 disable)
370{
371 u32 enable_flag = disable ? 0 : (1 << HC_INDEX_DATA_HC_ENABLED_SHIFT);
372 int index_offset =
373 offsetof(struct hc_status_block_data_e1x, index_data);
374 u32 addr = BAR_CSTRORM_INTMEM +
375 CSTORM_STATUS_BLOCK_DATA_OFFSET(fw_sb_id) +
376 index_offset +
377 sizeof(struct hc_index_data)*sb_index +
378 offsetof(struct hc_index_data, flags);
379 u16 flags = REG_RD16(bp, addr);
380 /* clear and set */
381 flags &= ~HC_INDEX_DATA_HC_ENABLED;
382 flags |= enable_flag;
383 REG_WR16(bp, addr, flags);
384 DP(NETIF_MSG_HW, "port %x fw_sb_id %d sb_index %d disable %d\n",
385 port, fw_sb_id, sb_index, disable);
386}
387
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +0200388/* used only at init
389 * locking is done by mcp
390 */
Eilon Greenstein573f2032009-08-12 08:24:14 +0000391void bnx2x_reg_wr_ind(struct bnx2x *bp, u32 addr, u32 val)
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +0200392{
393 pci_write_config_dword(bp->pdev, PCICFG_GRC_ADDRESS, addr);
394 pci_write_config_dword(bp->pdev, PCICFG_GRC_DATA, val);
395 pci_write_config_dword(bp->pdev, PCICFG_GRC_ADDRESS,
396 PCICFG_VENDOR_ID_OFFSET);
397}
398
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +0200399static u32 bnx2x_reg_rd_ind(struct bnx2x *bp, u32 addr)
400{
401 u32 val;
402
403 pci_write_config_dword(bp->pdev, PCICFG_GRC_ADDRESS, addr);
404 pci_read_config_dword(bp->pdev, PCICFG_GRC_DATA, &val);
405 pci_write_config_dword(bp->pdev, PCICFG_GRC_ADDRESS,
406 PCICFG_VENDOR_ID_OFFSET);
407
408 return val;
409}
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +0200410
Dmitry Kravkov6c719d02010-07-27 12:36:15 +0000411const u32 dmae_reg_go_c[] = {
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +0200412 DMAE_REG_GO_C0, DMAE_REG_GO_C1, DMAE_REG_GO_C2, DMAE_REG_GO_C3,
413 DMAE_REG_GO_C4, DMAE_REG_GO_C5, DMAE_REG_GO_C6, DMAE_REG_GO_C7,
414 DMAE_REG_GO_C8, DMAE_REG_GO_C9, DMAE_REG_GO_C10, DMAE_REG_GO_C11,
415 DMAE_REG_GO_C12, DMAE_REG_GO_C13, DMAE_REG_GO_C14, DMAE_REG_GO_C15
416};
417
418/* copy command into DMAE command memory and set DMAE command go */
Dmitry Kravkov6c719d02010-07-27 12:36:15 +0000419void bnx2x_post_dmae(struct bnx2x *bp, struct dmae_command *dmae, int idx)
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +0200420{
421 u32 cmd_offset;
422 int i;
423
424 cmd_offset = (DMAE_REG_CMD_MEM + sizeof(struct dmae_command) * idx);
425 for (i = 0; i < (sizeof(struct dmae_command)/4); i++) {
426 REG_WR(bp, cmd_offset + i*4, *(((u32 *)dmae) + i));
427
Eilon Greensteinad8d3942008-06-23 20:29:02 -0700428 DP(BNX2X_MSG_OFF, "DMAE cmd[%d].%d (0x%08x) : 0x%08x\n",
429 idx, i, cmd_offset + i*4, *(((u32 *)dmae) + i));
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +0200430 }
431 REG_WR(bp, dmae_reg_go_c[idx], 1);
432}
433
Eilon Greensteinad8d3942008-06-23 20:29:02 -0700434void bnx2x_write_dmae(struct bnx2x *bp, dma_addr_t dma_addr, u32 dst_addr,
435 u32 len32)
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +0200436{
Eilon Greenstein5ff7b6d2009-08-12 08:23:44 +0000437 struct dmae_command dmae;
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +0200438 u32 *wb_comp = bnx2x_sp(bp, wb_comp);
Eilon Greensteinad8d3942008-06-23 20:29:02 -0700439 int cnt = 200;
440
441 if (!bp->dmae_ready) {
442 u32 *data = bnx2x_sp(bp, wb_data[0]);
443
444 DP(BNX2X_MSG_OFF, "DMAE is not ready (dst_addr %08x len32 %d)"
445 " using indirect\n", dst_addr, len32);
446 bnx2x_init_ind_wr(bp, dst_addr, data, len32);
447 return;
448 }
449
Eilon Greenstein5ff7b6d2009-08-12 08:23:44 +0000450 memset(&dmae, 0, sizeof(struct dmae_command));
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +0200451
Eilon Greenstein5ff7b6d2009-08-12 08:23:44 +0000452 dmae.opcode = (DMAE_CMD_SRC_PCI | DMAE_CMD_DST_GRC |
453 DMAE_CMD_C_DST_PCI | DMAE_CMD_C_ENABLE |
454 DMAE_CMD_SRC_RESET | DMAE_CMD_DST_RESET |
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +0200455#ifdef __BIG_ENDIAN
Eilon Greenstein5ff7b6d2009-08-12 08:23:44 +0000456 DMAE_CMD_ENDIANITY_B_DW_SWAP |
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +0200457#else
Eilon Greenstein5ff7b6d2009-08-12 08:23:44 +0000458 DMAE_CMD_ENDIANITY_DW_SWAP |
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +0200459#endif
Eilon Greenstein5ff7b6d2009-08-12 08:23:44 +0000460 (BP_PORT(bp) ? DMAE_CMD_PORT_1 : DMAE_CMD_PORT_0) |
461 (BP_E1HVN(bp) << DMAE_CMD_E1HVN_SHIFT));
462 dmae.src_addr_lo = U64_LO(dma_addr);
463 dmae.src_addr_hi = U64_HI(dma_addr);
464 dmae.dst_addr_lo = dst_addr >> 2;
465 dmae.dst_addr_hi = 0;
466 dmae.len = len32;
467 dmae.comp_addr_lo = U64_LO(bnx2x_sp_mapping(bp, wb_comp));
468 dmae.comp_addr_hi = U64_HI(bnx2x_sp_mapping(bp, wb_comp));
469 dmae.comp_val = DMAE_COMP_VAL;
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +0200470
Eilon Greensteinc3eefaf2009-03-02 08:01:09 +0000471 DP(BNX2X_MSG_OFF, "DMAE: opcode 0x%08x\n"
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +0200472 DP_LEVEL "src_addr [%x:%08x] len [%d *4] "
473 "dst_addr [%x:%08x (%08x)]\n"
474 DP_LEVEL "comp_addr [%x:%08x] comp_val 0x%08x\n",
Eilon Greenstein5ff7b6d2009-08-12 08:23:44 +0000475 dmae.opcode, dmae.src_addr_hi, dmae.src_addr_lo,
476 dmae.len, dmae.dst_addr_hi, dmae.dst_addr_lo, dst_addr,
477 dmae.comp_addr_hi, dmae.comp_addr_lo, dmae.comp_val);
Eilon Greensteinad8d3942008-06-23 20:29:02 -0700478 DP(BNX2X_MSG_OFF, "data [0x%08x 0x%08x 0x%08x 0x%08x]\n",
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +0200479 bp->slowpath->wb_data[0], bp->slowpath->wb_data[1],
480 bp->slowpath->wb_data[2], bp->slowpath->wb_data[3]);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +0200481
Eilon Greenstein5ff7b6d2009-08-12 08:23:44 +0000482 mutex_lock(&bp->dmae_mutex);
483
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +0200484 *wb_comp = 0;
485
Eilon Greenstein5ff7b6d2009-08-12 08:23:44 +0000486 bnx2x_post_dmae(bp, &dmae, INIT_DMAE_C(bp));
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +0200487
488 udelay(5);
Eilon Greensteinad8d3942008-06-23 20:29:02 -0700489
490 while (*wb_comp != DMAE_COMP_VAL) {
491 DP(BNX2X_MSG_OFF, "wb_comp 0x%08x\n", *wb_comp);
492
Eilon Greensteinad8d3942008-06-23 20:29:02 -0700493 if (!cnt) {
Eilon Greensteinc3eefaf2009-03-02 08:01:09 +0000494 BNX2X_ERR("DMAE timeout!\n");
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +0200495 break;
496 }
Eilon Greensteinad8d3942008-06-23 20:29:02 -0700497 cnt--;
Yitchak Gertner12469402008-08-13 15:52:08 -0700498 /* adjust delay for emulation/FPGA */
499 if (CHIP_REV_IS_SLOW(bp))
500 msleep(100);
501 else
502 udelay(5);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +0200503 }
Eilon Greensteinad8d3942008-06-23 20:29:02 -0700504
505 mutex_unlock(&bp->dmae_mutex);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +0200506}
507
Yaniv Rosnerc18487e2008-06-23 20:27:52 -0700508void bnx2x_read_dmae(struct bnx2x *bp, u32 src_addr, u32 len32)
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +0200509{
Eilon Greenstein5ff7b6d2009-08-12 08:23:44 +0000510 struct dmae_command dmae;
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +0200511 u32 *wb_comp = bnx2x_sp(bp, wb_comp);
Eilon Greensteinad8d3942008-06-23 20:29:02 -0700512 int cnt = 200;
513
514 if (!bp->dmae_ready) {
515 u32 *data = bnx2x_sp(bp, wb_data[0]);
516 int i;
517
518 DP(BNX2X_MSG_OFF, "DMAE is not ready (src_addr %08x len32 %d)"
519 " using indirect\n", src_addr, len32);
520 for (i = 0; i < len32; i++)
521 data[i] = bnx2x_reg_rd_ind(bp, src_addr + i*4);
522 return;
523 }
524
Eilon Greenstein5ff7b6d2009-08-12 08:23:44 +0000525 memset(&dmae, 0, sizeof(struct dmae_command));
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +0200526
Eilon Greenstein5ff7b6d2009-08-12 08:23:44 +0000527 dmae.opcode = (DMAE_CMD_SRC_GRC | DMAE_CMD_DST_PCI |
528 DMAE_CMD_C_DST_PCI | DMAE_CMD_C_ENABLE |
529 DMAE_CMD_SRC_RESET | DMAE_CMD_DST_RESET |
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +0200530#ifdef __BIG_ENDIAN
Eilon Greenstein5ff7b6d2009-08-12 08:23:44 +0000531 DMAE_CMD_ENDIANITY_B_DW_SWAP |
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +0200532#else
Eilon Greenstein5ff7b6d2009-08-12 08:23:44 +0000533 DMAE_CMD_ENDIANITY_DW_SWAP |
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +0200534#endif
Eilon Greenstein5ff7b6d2009-08-12 08:23:44 +0000535 (BP_PORT(bp) ? DMAE_CMD_PORT_1 : DMAE_CMD_PORT_0) |
536 (BP_E1HVN(bp) << DMAE_CMD_E1HVN_SHIFT));
537 dmae.src_addr_lo = src_addr >> 2;
538 dmae.src_addr_hi = 0;
539 dmae.dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, wb_data));
540 dmae.dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, wb_data));
541 dmae.len = len32;
542 dmae.comp_addr_lo = U64_LO(bnx2x_sp_mapping(bp, wb_comp));
543 dmae.comp_addr_hi = U64_HI(bnx2x_sp_mapping(bp, wb_comp));
544 dmae.comp_val = DMAE_COMP_VAL;
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +0200545
Eilon Greensteinc3eefaf2009-03-02 08:01:09 +0000546 DP(BNX2X_MSG_OFF, "DMAE: opcode 0x%08x\n"
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +0200547 DP_LEVEL "src_addr [%x:%08x] len [%d *4] "
548 "dst_addr [%x:%08x (%08x)]\n"
549 DP_LEVEL "comp_addr [%x:%08x] comp_val 0x%08x\n",
Eilon Greenstein5ff7b6d2009-08-12 08:23:44 +0000550 dmae.opcode, dmae.src_addr_hi, dmae.src_addr_lo,
551 dmae.len, dmae.dst_addr_hi, dmae.dst_addr_lo, src_addr,
552 dmae.comp_addr_hi, dmae.comp_addr_lo, dmae.comp_val);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +0200553
Eilon Greenstein5ff7b6d2009-08-12 08:23:44 +0000554 mutex_lock(&bp->dmae_mutex);
555
556 memset(bnx2x_sp(bp, wb_data[0]), 0, sizeof(u32) * 4);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +0200557 *wb_comp = 0;
558
Eilon Greenstein5ff7b6d2009-08-12 08:23:44 +0000559 bnx2x_post_dmae(bp, &dmae, INIT_DMAE_C(bp));
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +0200560
561 udelay(5);
Eilon Greensteinad8d3942008-06-23 20:29:02 -0700562
563 while (*wb_comp != DMAE_COMP_VAL) {
564
Eilon Greensteinad8d3942008-06-23 20:29:02 -0700565 if (!cnt) {
Eilon Greensteinc3eefaf2009-03-02 08:01:09 +0000566 BNX2X_ERR("DMAE timeout!\n");
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +0200567 break;
568 }
Eilon Greensteinad8d3942008-06-23 20:29:02 -0700569 cnt--;
Yitchak Gertner12469402008-08-13 15:52:08 -0700570 /* adjust delay for emulation/FPGA */
571 if (CHIP_REV_IS_SLOW(bp))
572 msleep(100);
573 else
574 udelay(5);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +0200575 }
Eilon Greensteinad8d3942008-06-23 20:29:02 -0700576 DP(BNX2X_MSG_OFF, "data [0x%08x 0x%08x 0x%08x 0x%08x]\n",
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +0200577 bp->slowpath->wb_data[0], bp->slowpath->wb_data[1],
578 bp->slowpath->wb_data[2], bp->slowpath->wb_data[3]);
Eilon Greensteinad8d3942008-06-23 20:29:02 -0700579
580 mutex_unlock(&bp->dmae_mutex);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +0200581}
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +0200582
Eilon Greenstein573f2032009-08-12 08:24:14 +0000583void bnx2x_write_dmae_phys_len(struct bnx2x *bp, dma_addr_t phys_addr,
584 u32 addr, u32 len)
585{
Vladislav Zolotarov02e3c6c2010-04-19 01:13:33 +0000586 int dmae_wr_max = DMAE_LEN32_WR_MAX(bp);
Eilon Greenstein573f2032009-08-12 08:24:14 +0000587 int offset = 0;
588
Vladislav Zolotarov02e3c6c2010-04-19 01:13:33 +0000589 while (len > dmae_wr_max) {
Eilon Greenstein573f2032009-08-12 08:24:14 +0000590 bnx2x_write_dmae(bp, phys_addr + offset,
Vladislav Zolotarov02e3c6c2010-04-19 01:13:33 +0000591 addr + offset, dmae_wr_max);
592 offset += dmae_wr_max * 4;
593 len -= dmae_wr_max;
Eilon Greenstein573f2032009-08-12 08:24:14 +0000594 }
595
596 bnx2x_write_dmae(bp, phys_addr + offset, addr + offset, len);
597}
598
Eilon Greensteinad8d3942008-06-23 20:29:02 -0700599/* used only for slowpath so not inlined */
600static void bnx2x_wb_wr(struct bnx2x *bp, int reg, u32 val_hi, u32 val_lo)
601{
602 u32 wb_write[2];
603
604 wb_write[0] = val_hi;
605 wb_write[1] = val_lo;
606 REG_WR_DMAE(bp, reg, wb_write, 2);
607}
608
#ifdef USE_WB_RD
/* Read a 64-bit write-block register pair via DMAE. */
static u64 bnx2x_wb_rd(struct bnx2x *bp, int reg)
{
	u32 wb_data[2];

	REG_RD_DMAE(bp, reg, wb_data, 2);

	return HILO_U64(wb_data[0], wb_data[1]);
}
#endif
619
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +0200620static int bnx2x_mc_assert(struct bnx2x *bp)
621{
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +0200622 char last_idx;
Eilon Greenstein34f80b02008-06-23 20:33:01 -0700623 int i, rc = 0;
624 u32 row0, row1, row2, row3;
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +0200625
Eilon Greenstein34f80b02008-06-23 20:33:01 -0700626 /* XSTORM */
627 last_idx = REG_RD8(bp, BAR_XSTRORM_INTMEM +
628 XSTORM_ASSERT_LIST_INDEX_OFFSET);
629 if (last_idx)
630 BNX2X_ERR("XSTORM_ASSERT_LIST_INDEX 0x%x\n", last_idx);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +0200631
Eilon Greenstein34f80b02008-06-23 20:33:01 -0700632 /* print the asserts */
633 for (i = 0; i < STROM_ASSERT_ARRAY_SIZE; i++) {
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +0200634
Eilon Greenstein34f80b02008-06-23 20:33:01 -0700635 row0 = REG_RD(bp, BAR_XSTRORM_INTMEM +
636 XSTORM_ASSERT_LIST_OFFSET(i));
637 row1 = REG_RD(bp, BAR_XSTRORM_INTMEM +
638 XSTORM_ASSERT_LIST_OFFSET(i) + 4);
639 row2 = REG_RD(bp, BAR_XSTRORM_INTMEM +
640 XSTORM_ASSERT_LIST_OFFSET(i) + 8);
641 row3 = REG_RD(bp, BAR_XSTRORM_INTMEM +
642 XSTORM_ASSERT_LIST_OFFSET(i) + 12);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +0200643
Eilon Greenstein34f80b02008-06-23 20:33:01 -0700644 if (row0 != COMMON_ASM_INVALID_ASSERT_OPCODE) {
645 BNX2X_ERR("XSTORM_ASSERT_INDEX 0x%x = 0x%08x"
646 " 0x%08x 0x%08x 0x%08x\n",
647 i, row3, row2, row1, row0);
648 rc++;
649 } else {
650 break;
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +0200651 }
652 }
Eilon Greenstein34f80b02008-06-23 20:33:01 -0700653
654 /* TSTORM */
655 last_idx = REG_RD8(bp, BAR_TSTRORM_INTMEM +
656 TSTORM_ASSERT_LIST_INDEX_OFFSET);
657 if (last_idx)
658 BNX2X_ERR("TSTORM_ASSERT_LIST_INDEX 0x%x\n", last_idx);
659
660 /* print the asserts */
661 for (i = 0; i < STROM_ASSERT_ARRAY_SIZE; i++) {
662
663 row0 = REG_RD(bp, BAR_TSTRORM_INTMEM +
664 TSTORM_ASSERT_LIST_OFFSET(i));
665 row1 = REG_RD(bp, BAR_TSTRORM_INTMEM +
666 TSTORM_ASSERT_LIST_OFFSET(i) + 4);
667 row2 = REG_RD(bp, BAR_TSTRORM_INTMEM +
668 TSTORM_ASSERT_LIST_OFFSET(i) + 8);
669 row3 = REG_RD(bp, BAR_TSTRORM_INTMEM +
670 TSTORM_ASSERT_LIST_OFFSET(i) + 12);
671
672 if (row0 != COMMON_ASM_INVALID_ASSERT_OPCODE) {
673 BNX2X_ERR("TSTORM_ASSERT_INDEX 0x%x = 0x%08x"
674 " 0x%08x 0x%08x 0x%08x\n",
675 i, row3, row2, row1, row0);
676 rc++;
677 } else {
678 break;
679 }
680 }
681
682 /* CSTORM */
683 last_idx = REG_RD8(bp, BAR_CSTRORM_INTMEM +
684 CSTORM_ASSERT_LIST_INDEX_OFFSET);
685 if (last_idx)
686 BNX2X_ERR("CSTORM_ASSERT_LIST_INDEX 0x%x\n", last_idx);
687
688 /* print the asserts */
689 for (i = 0; i < STROM_ASSERT_ARRAY_SIZE; i++) {
690
691 row0 = REG_RD(bp, BAR_CSTRORM_INTMEM +
692 CSTORM_ASSERT_LIST_OFFSET(i));
693 row1 = REG_RD(bp, BAR_CSTRORM_INTMEM +
694 CSTORM_ASSERT_LIST_OFFSET(i) + 4);
695 row2 = REG_RD(bp, BAR_CSTRORM_INTMEM +
696 CSTORM_ASSERT_LIST_OFFSET(i) + 8);
697 row3 = REG_RD(bp, BAR_CSTRORM_INTMEM +
698 CSTORM_ASSERT_LIST_OFFSET(i) + 12);
699
700 if (row0 != COMMON_ASM_INVALID_ASSERT_OPCODE) {
701 BNX2X_ERR("CSTORM_ASSERT_INDEX 0x%x = 0x%08x"
702 " 0x%08x 0x%08x 0x%08x\n",
703 i, row3, row2, row1, row0);
704 rc++;
705 } else {
706 break;
707 }
708 }
709
710 /* USTORM */
711 last_idx = REG_RD8(bp, BAR_USTRORM_INTMEM +
712 USTORM_ASSERT_LIST_INDEX_OFFSET);
713 if (last_idx)
714 BNX2X_ERR("USTORM_ASSERT_LIST_INDEX 0x%x\n", last_idx);
715
716 /* print the asserts */
717 for (i = 0; i < STROM_ASSERT_ARRAY_SIZE; i++) {
718
719 row0 = REG_RD(bp, BAR_USTRORM_INTMEM +
720 USTORM_ASSERT_LIST_OFFSET(i));
721 row1 = REG_RD(bp, BAR_USTRORM_INTMEM +
722 USTORM_ASSERT_LIST_OFFSET(i) + 4);
723 row2 = REG_RD(bp, BAR_USTRORM_INTMEM +
724 USTORM_ASSERT_LIST_OFFSET(i) + 8);
725 row3 = REG_RD(bp, BAR_USTRORM_INTMEM +
726 USTORM_ASSERT_LIST_OFFSET(i) + 12);
727
728 if (row0 != COMMON_ASM_INVALID_ASSERT_OPCODE) {
729 BNX2X_ERR("USTORM_ASSERT_INDEX 0x%x = 0x%08x"
730 " 0x%08x 0x%08x 0x%08x\n",
731 i, row3, row2, row1, row0);
732 rc++;
733 } else {
734 break;
735 }
736 }
737
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +0200738 return rc;
739}
Eliezer Tamirc14423f2008-02-28 11:49:42 -0800740
/*
 * bnx2x_fw_dump - print the MCP firmware trace from the scratchpad.
 *
 * Reads a trace "mark" pointer from shmem, then prints the scratchpad
 * contents in two passes - from the mark to the end of the region and
 * then from the region start up to the mark (presumably a cyclic trace
 * buffer - TODO confirm against MCP firmware docs).  The data is emitted
 * as NUL-terminated text via pr_cont.  No-op when there is no MCP.
 */
static void bnx2x_fw_dump(struct bnx2x *bp)
{
	u32 addr;
	u32 mark, offset;
	__be32 data[9];		/* 8 words of trace data + NUL terminator */
	int word;

	if (BP_NOMCP(bp)) {
		BNX2X_ERR("NO MCP - can not dump\n");
		return;
	}

	/* The trace mark lives just below shmem_base */
	addr = bp->common.shmem_base - 0x0800 + 4;
	mark = REG_RD(bp, addr);
	/* Round the mark up to a dword and rebase it into the scratchpad */
	mark = MCP_REG_MCPR_SCRATCH + ((mark + 0x3) & ~0x3) - 0x08000000;
	pr_err("begin fw dump (mark 0x%x)\n", mark);

	pr_err("");
	/* First half: from the mark to the end of the trace region */
	for (offset = mark; offset <= bp->common.shmem_base; offset += 0x8*4) {
		for (word = 0; word < 8; word++)
			data[word] = htonl(REG_RD(bp, offset + 4*word));
		data[8] = 0x0;	/* ensure NUL termination for "%s" */
		pr_cont("%s", (char *)data);
	}
	/* Second half: from the start of the region up to the mark */
	for (offset = addr + 4; offset <= mark; offset += 0x8*4) {
		for (word = 0; word < 8; word++)
			data[word] = htonl(REG_RD(bp, offset + 4*word));
		data[8] = 0x0;
		pr_cont("%s", (char *)data);
	}
	pr_err("end of fw dump\n");
}
773
/*
 * bnx2x_panic_dump - dump driver and chip state to the kernel log.
 *
 * Called on a fatal driver error.  Disables statistics collection, then
 * prints: the common/default status block indices, the slowpath status
 * block data read back from CSTORM internal memory, per-queue fastpath
 * Rx/Tx indices and firmware status block data, and (when built with
 * BNX2X_STOP_ON_ERROR) the raw contents of the Rx/Tx rings around the
 * current consumer positions.  Finishes with the MCP firmware trace and
 * the STORM assert lists.
 */
void bnx2x_panic_dump(struct bnx2x *bp)
{
	int i;
	u16 j;
	struct hc_sp_status_block_data sp_sb_data;
	int func = BP_FUNC(bp);
#ifdef BNX2X_STOP_ON_ERROR
	u16 start = 0, end = 0;
#endif

	/* Stop further statistics activity while the state is dumped */
	bp->stats_state = STATS_STATE_DISABLED;
	DP(BNX2X_MSG_STATS, "stats_state - DISABLED\n");

	BNX2X_ERR("begin crash dump -----------------\n");

	/* Indices */
	/* Common */
	BNX2X_ERR("def_idx(0x%x) def_att_idx(0x%x) attn_state(0x%x)"
		  " spq_prod_idx(0x%x)\n",
		  bp->def_idx, bp->def_att_idx,
		  bp->attn_state, bp->spq_prod_idx);
	BNX2X_ERR("DSB: attn bits(0x%x) ack(0x%x) id(0x%x) idx(0x%x)\n",
		  bp->def_status_blk->atten_status_block.attn_bits,
		  bp->def_status_blk->atten_status_block.attn_bits_ack,
		  bp->def_status_blk->atten_status_block.status_block_id,
		  bp->def_status_blk->atten_status_block.attn_bits_index);
	BNX2X_ERR(" def (");
	for (i = 0; i < HC_SP_SB_MAX_INDICES; i++)
		pr_cont("0x%x%s",
			bp->def_status_blk->sp_sb.index_values[i],
			(i == HC_SP_SB_MAX_INDICES - 1) ? ") " : " ");

	/* Read the slowpath status block data dword by dword from CSTORM
	 * internal memory (it is not host-resident) */
	for (i = 0; i < sizeof(struct hc_sp_status_block_data)/sizeof(u32); i++)
		*((u32 *)&sp_sb_data + i) = REG_RD(bp, BAR_CSTRORM_INTMEM +
			CSTORM_SP_STATUS_BLOCK_DATA_OFFSET(func) +
			i*sizeof(u32));

	pr_cont("igu_sb_id(0x%x) igu_seg_id (0x%x) "
		"pf_id(0x%x) vnic_id(0x%x) "
		"vf_id(0x%x) vf_valid (0x%x)\n",
		sp_sb_data.igu_sb_id,
		sp_sb_data.igu_seg_id,
		sp_sb_data.p_func.pf_id,
		sp_sb_data.p_func.vnic_id,
		sp_sb_data.p_func.vf_id,
		sp_sb_data.p_func.vf_valid);


	/* Per-queue fastpath state */
	for_each_queue(bp, i) {
		struct bnx2x_fastpath *fp = &bp->fp[i];
		int loop;
		struct hc_status_block_data_e1x sb_data_e1x;
		struct hc_status_block_sm  *hc_sm_p =
			sb_data_e1x.common.state_machine;
		struct hc_index_data *hc_index_p =
			sb_data_e1x.index_data;
		int data_size;
		u32 *sb_data_p;

		/* Rx */
		BNX2X_ERR("fp%d: rx_bd_prod(0x%x) rx_bd_cons(0x%x)"
			  " rx_comp_prod(0x%x)"
			  " rx_comp_cons(0x%x) *rx_cons_sb(0x%x)\n",
			  i, fp->rx_bd_prod, fp->rx_bd_cons,
			  fp->rx_comp_prod,
			  fp->rx_comp_cons, le16_to_cpu(*fp->rx_cons_sb));
		BNX2X_ERR(" rx_sge_prod(0x%x) last_max_sge(0x%x)"
			  " fp_hc_idx(0x%x)\n",
			  fp->rx_sge_prod, fp->last_max_sge,
			  le16_to_cpu(fp->fp_hc_idx));

		/* Tx */
		BNX2X_ERR("fp%d: tx_pkt_prod(0x%x) tx_pkt_cons(0x%x)"
			  " tx_bd_prod(0x%x) tx_bd_cons(0x%x)"
			  " *tx_cons_sb(0x%x)\n",
			  i, fp->tx_pkt_prod, fp->tx_pkt_cons, fp->tx_bd_prod,
			  fp->tx_bd_cons, le16_to_cpu(*fp->tx_cons_sb));

		loop = HC_SB_MAX_INDICES_E1X;

		/* host sb data */

		BNX2X_ERR(" run indexes (");
		for (j = 0; j < HC_SB_MAX_SM; j++)
			pr_cont("0x%x%s",
				fp->sb_running_index[j],
				(j == HC_SB_MAX_SM - 1) ? ")" : " ");

		BNX2X_ERR(" indexes (");
		for (j = 0; j < loop; j++)
			pr_cont("0x%x%s",
				fp->sb_index_values[j],
				(j == loop - 1) ? ")" : " ");
		/* fw sb data */
		data_size =
			sizeof(struct hc_status_block_data_e1x);
		data_size /= sizeof(u32);
		sb_data_p = (u32 *)&sb_data_e1x;
		/* copy sb data in here - dword reads from CSTORM memory */
		for (j = 0; j < data_size; j++)
			*(sb_data_p + j) = REG_RD(bp, BAR_CSTRORM_INTMEM +
				CSTORM_STATUS_BLOCK_DATA_OFFSET(fp->fw_sb_id) +
				j * sizeof(u32));

		pr_cont("pf_id(0x%x) vf_id (0x%x) vf_valid(0x%x) "
			"vnic_id(0x%x) same_igu_sb_1b(0x%x)\n",
			sb_data_e1x.common.p_func.pf_id,
			sb_data_e1x.common.p_func.vf_id,
			sb_data_e1x.common.p_func.vf_valid,
			sb_data_e1x.common.p_func.vnic_id,
			sb_data_e1x.common.same_igu_sb_1b);

		/* SB_SMs data */
		for (j = 0; j < HC_SB_MAX_SM; j++) {
			pr_cont("SM[%d] __flags (0x%x) "
				"igu_sb_id (0x%x) igu_seg_id(0x%x) "
				"time_to_expire (0x%x) "
				"timer_value(0x%x)\n", j,
				hc_sm_p[j].__flags,
				hc_sm_p[j].igu_sb_id,
				hc_sm_p[j].igu_seg_id,
				hc_sm_p[j].time_to_expire,
				hc_sm_p[j].timer_value);
		}

		/* Indices data */
		for (j = 0; j < loop; j++) {
			pr_cont("INDEX[%d] flags (0x%x) "
				"timeout (0x%x)\n", j,
				hc_index_p[j].flags,
				hc_index_p[j].timeout);
		}
	}

#ifdef BNX2X_STOP_ON_ERROR
	/* Rings */
	/* Rx */
	for_each_queue(bp, i) {
		struct bnx2x_fastpath *fp = &bp->fp[i];

		/* Dump a window of BDs around the current consumer */
		start = RX_BD(le16_to_cpu(*fp->rx_cons_sb) - 10);
		end = RX_BD(le16_to_cpu(*fp->rx_cons_sb) + 503);
		for (j = start; j != end; j = RX_BD(j + 1)) {
			u32 *rx_bd = (u32 *)&fp->rx_desc_ring[j];
			struct sw_rx_bd *sw_bd = &fp->rx_buf_ring[j];

			BNX2X_ERR("fp%d: rx_bd[%x]=[%x:%x] sw_bd=[%p]\n",
				  i, j, rx_bd[1], rx_bd[0], sw_bd->skb);
		}

		start = RX_SGE(fp->rx_sge_prod);
		end = RX_SGE(fp->last_max_sge);
		for (j = start; j != end; j = RX_SGE(j + 1)) {
			u32 *rx_sge = (u32 *)&fp->rx_sge_ring[j];
			struct sw_rx_page *sw_page = &fp->rx_page_ring[j];

			BNX2X_ERR("fp%d: rx_sge[%x]=[%x:%x] sw_page=[%p]\n",
				  i, j, rx_sge[1], rx_sge[0], sw_page->page);
		}

		start = RCQ_BD(fp->rx_comp_cons - 10);
		end = RCQ_BD(fp->rx_comp_cons + 503);
		for (j = start; j != end; j = RCQ_BD(j + 1)) {
			u32 *cqe = (u32 *)&fp->rx_comp_ring[j];

			BNX2X_ERR("fp%d: cqe[%x]=[%x:%x:%x:%x]\n",
				  i, j, cqe[0], cqe[1], cqe[2], cqe[3]);
		}
	}

	/* Tx */
	for_each_queue(bp, i) {
		struct bnx2x_fastpath *fp = &bp->fp[i];

		start = TX_BD(le16_to_cpu(*fp->tx_cons_sb) - 10);
		end = TX_BD(le16_to_cpu(*fp->tx_cons_sb) + 245);
		for (j = start; j != end; j = TX_BD(j + 1)) {
			struct sw_tx_bd *sw_bd = &fp->tx_buf_ring[j];

			BNX2X_ERR("fp%d: packet[%x]=[%p,%x]\n",
				  i, j, sw_bd->skb, sw_bd->first_bd);
		}

		start = TX_BD(fp->tx_bd_cons - 10);
		end = TX_BD(fp->tx_bd_cons + 254);
		for (j = start; j != end; j = TX_BD(j + 1)) {
			u32 *tx_bd = (u32 *)&fp->tx_desc_ring[j];

			BNX2X_ERR("fp%d: tx_bd[%x]=[%x:%x:%x:%x]\n",
				  i, j, tx_bd[0], tx_bd[1], tx_bd[2], tx_bd[3]);
		}
	}
#endif
	bnx2x_fw_dump(bp);
	bnx2x_mc_assert(bp);
	BNX2X_ERR("end crash dump -----------------\n");
}
971
/*
 * bnx2x_int_enable - enable HC interrupt generation for this port.
 *
 * Programs the per-port HC_CONFIG register according to the interrupt
 * mode in use (MSI-X, MSI or legacy INTx) and, on E1H chips, sets the
 * leading/trailing attention edge masks.  The statement ordering and
 * the mmiowb()/barrier() calls are significant - do not reorder.
 */
void bnx2x_int_enable(struct bnx2x *bp)
{
	int port = BP_PORT(bp);
	u32 addr = port ? HC_REG_CONFIG_1 : HC_REG_CONFIG_0;
	u32 val = REG_RD(bp, addr);
	int msix = (bp->flags & USING_MSIX_FLAG) ? 1 : 0;
	int msi = (bp->flags & USING_MSI_FLAG) ? 1 : 0;

	if (msix) {
		/* MSI-X: no single-ISR mode, no INTx line */
		val &= ~(HC_CONFIG_0_REG_SINGLE_ISR_EN_0 |
			 HC_CONFIG_0_REG_INT_LINE_EN_0);
		val |= (HC_CONFIG_0_REG_MSI_MSIX_INT_EN_0 |
			HC_CONFIG_0_REG_ATTN_BIT_EN_0);
	} else if (msi) {
		/* MSI: single ISR, no INTx line */
		val &= ~HC_CONFIG_0_REG_INT_LINE_EN_0;
		val |= (HC_CONFIG_0_REG_SINGLE_ISR_EN_0 |
			HC_CONFIG_0_REG_MSI_MSIX_INT_EN_0 |
			HC_CONFIG_0_REG_ATTN_BIT_EN_0);
	} else {
		/* INTx: first write with all enables set ... */
		val |= (HC_CONFIG_0_REG_SINGLE_ISR_EN_0 |
			HC_CONFIG_0_REG_MSI_MSIX_INT_EN_0 |
			HC_CONFIG_0_REG_INT_LINE_EN_0 |
			HC_CONFIG_0_REG_ATTN_BIT_EN_0);

		DP(NETIF_MSG_INTR, "write %x to HC %d (addr 0x%x)\n",
		   val, port, addr);

		REG_WR(bp, addr, val);

		/* ... then drop MSI/MSI-X enable for the final write below */
		val &= ~HC_CONFIG_0_REG_MSI_MSIX_INT_EN_0;
	}

	DP(NETIF_MSG_INTR, "write %x to HC %d (addr 0x%x) mode %s\n",
	   val, port, addr, (msix ? "MSI-X" : (msi ? "MSI" : "INTx")));

	REG_WR(bp, addr, val);
	/*
	 * Ensure that HC_CONFIG is written before leading/trailing edge config
	 */
	mmiowb();
	barrier();

	if (CHIP_IS_E1H(bp)) {
		/* init leading/trailing edge */
		if (IS_E1HMF(bp)) {
			/* MF mode: base mask plus this VN's attention bit */
			val = (0xee0f | (1 << (BP_E1HVN(bp) + 4)));
			if (bp->port.pmf)
				/* enable nig and gpio3 attention */
				val |= 0x1100;
		} else
			val = 0xffff;

		REG_WR(bp, HC_REG_TRAILING_EDGE_0 + port*8, val);
		REG_WR(bp, HC_REG_LEADING_EDGE_0 + port*8, val);
	}

	/* Make sure that interrupts are indeed enabled from here on */
	mmiowb();
}
1031
/*
 * bnx2x_int_disable - disable HC interrupt generation for this port.
 *
 * Clears all interrupt-enable bits in the per-port HC_CONFIG register
 * and verifies the write by reading the value back.
 */
void bnx2x_int_disable(struct bnx2x *bp)
{
	int port = BP_PORT(bp);
	u32 addr = port ? HC_REG_CONFIG_1 : HC_REG_CONFIG_0;
	u32 val = REG_RD(bp, addr);

	/* clear single-ISR, MSI/MSI-X, INTx line and attention enables */
	val &= ~(HC_CONFIG_0_REG_SINGLE_ISR_EN_0 |
		 HC_CONFIG_0_REG_MSI_MSIX_INT_EN_0 |
		 HC_CONFIG_0_REG_INT_LINE_EN_0 |
		 HC_CONFIG_0_REG_ATTN_BIT_EN_0);

	DP(NETIF_MSG_INTR, "write %x to HC %d (addr 0x%x)\n",
	   val, port, addr);

	/* flush all outstanding writes */
	mmiowb();

	REG_WR(bp, addr, val);
	/* read back to ensure the disable actually reached the chip */
	if (REG_RD(bp, addr) != val)
		BNX2X_ERR("BUG! proper val not read from IGU!\n");
}
1053
/*
 * bnx2x_int_disable_sync - disable interrupts and wait for handlers.
 *
 * @bp:		driver handle
 * @disable_hw:	when set, also program the chip to stop generating
 *		interrupts (via bnx2x_int_disable)
 *
 * Raises intr_sem so the ISR bails out early, optionally disables the
 * HW, then synchronizes against every in-flight interrupt handler and
 * the slowpath work item.
 */
void bnx2x_int_disable_sync(struct bnx2x *bp, int disable_hw)
{
	int msix = (bp->flags & USING_MSIX_FLAG) ? 1 : 0;
	int i, offset;

	/* disable interrupt handling */
	atomic_inc(&bp->intr_sem);
	smp_wmb(); /* Ensure that bp->intr_sem update is SMP-safe */

	if (disable_hw)
		/* prevent the HW from sending interrupts */
		bnx2x_int_disable(bp);

	/* make sure all ISRs are done */
	if (msix) {
		/* vector 0 is the slowpath; queue vectors follow (plus one
		 * extra slot for CNIC when it is compiled in) */
		synchronize_irq(bp->msix_table[0].vector);
		offset = 1;
#ifdef BCM_CNIC
		offset++;
#endif
		for_each_queue(bp, i)
			synchronize_irq(bp->msix_table[i + offset].vector);
	} else
		synchronize_irq(bp->pdev->irq);

	/* make sure sp_task is not running */
	cancel_delayed_work(&bp->sp_task);
	flush_workqueue(bnx2x_wq);
}
1083
Eilon Greenstein34f80b02008-06-23 20:33:01 -07001084/* fast path */
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02001085
1086/*
Eilon Greenstein34f80b02008-06-23 20:33:01 -07001087 * General service functions
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02001088 */
1089
Vladislav Zolotarov72fd0712010-04-19 01:13:12 +00001090/* Return true if succeeded to acquire the lock */
1091static bool bnx2x_trylock_hw_lock(struct bnx2x *bp, u32 resource)
1092{
1093 u32 lock_status;
1094 u32 resource_bit = (1 << resource);
1095 int func = BP_FUNC(bp);
1096 u32 hw_lock_control_reg;
1097
1098 DP(NETIF_MSG_HW, "Trying to take a lock on resource %d\n", resource);
1099
1100 /* Validating that the resource is within range */
1101 if (resource > HW_LOCK_MAX_RESOURCE_VALUE) {
1102 DP(NETIF_MSG_HW,
1103 "resource(0x%x) > HW_LOCK_MAX_RESOURCE_VALUE(0x%x)\n",
1104 resource, HW_LOCK_MAX_RESOURCE_VALUE);
Eric Dumazet0fdf4d02010-08-26 22:03:53 -07001105 return false;
Vladislav Zolotarov72fd0712010-04-19 01:13:12 +00001106 }
1107
1108 if (func <= 5)
1109 hw_lock_control_reg = (MISC_REG_DRIVER_CONTROL_1 + func*8);
1110 else
1111 hw_lock_control_reg =
1112 (MISC_REG_DRIVER_CONTROL_7 + (func - 6)*8);
1113
1114 /* Try to acquire the lock */
1115 REG_WR(bp, hw_lock_control_reg + 4, resource_bit);
1116 lock_status = REG_RD(bp, hw_lock_control_reg);
1117 if (lock_status & resource_bit)
1118 return true;
1119
1120 DP(NETIF_MSG_HW, "Failed to get a lock on resource %d\n", resource);
1121 return false;
1122}
1123
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02001124
Michael Chan993ac7b2009-10-10 13:46:56 +00001125#ifdef BCM_CNIC
1126static void bnx2x_cnic_cfc_comp(struct bnx2x *bp, int cid);
1127#endif
Eilon Greenstein3196a882008-08-13 15:58:49 -07001128
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00001129void bnx2x_sp_event(struct bnx2x_fastpath *fp,
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02001130 union eth_rx_cqe *rr_cqe)
1131{
1132 struct bnx2x *bp = fp->bp;
1133 int cid = SW_CID(rr_cqe->ramrod_cqe.conn_and_cmd_data);
1134 int command = CQE_CMD(rr_cqe->ramrod_cqe.conn_and_cmd_data);
1135
Eilon Greenstein34f80b02008-06-23 20:33:01 -07001136 DP(BNX2X_MSG_SP,
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02001137 "fp %d cid %d got ramrod #%d state is %x type is %d\n",
Eilon Greenstein0626b892009-02-12 08:38:14 +00001138 fp->index, cid, command, bp->state,
Eilon Greenstein34f80b02008-06-23 20:33:01 -07001139 rr_cqe->ramrod_cqe.ramrod_type);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02001140
Dmitry Kravkov523224a2010-10-06 03:23:26 +00001141 switch (command | fp->state) {
1142 case (RAMROD_CMD_ID_ETH_CLIENT_SETUP | BNX2X_FP_STATE_OPENING):
1143 DP(NETIF_MSG_IFUP, "got MULTI[%d] setup ramrod\n", cid);
1144 fp->state = BNX2X_FP_STATE_OPEN;
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02001145 break;
1146
Dmitry Kravkov523224a2010-10-06 03:23:26 +00001147 case (RAMROD_CMD_ID_ETH_HALT | BNX2X_FP_STATE_HALTING):
1148 DP(NETIF_MSG_IFDOWN, "got MULTI[%d] halt ramrod\n", cid);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02001149 fp->state = BNX2X_FP_STATE_HALTED;
1150 break;
1151
Dmitry Kravkov523224a2010-10-06 03:23:26 +00001152 case (RAMROD_CMD_ID_ETH_TERMINATE | BNX2X_FP_STATE_TERMINATING):
1153 DP(NETIF_MSG_IFDOWN, "got MULTI[%d] teminate ramrod\n", cid);
1154 fp->state = BNX2X_FP_STATE_TERMINATED;
Eliezer Tamir49d66772008-02-28 11:53:13 -08001155 break;
1156
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02001157 default:
Dmitry Kravkov523224a2010-10-06 03:23:26 +00001158 BNX2X_ERR("unexpected MC reply (%d) "
1159 "fp[%d] state is %x\n",
1160 command, fp->index, fp->state);
Eilon Greenstein34f80b02008-06-23 20:33:01 -07001161 break;
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02001162 }
Dmitry Kravkov523224a2010-10-06 03:23:26 +00001163
1164 bp->spq_left++;
1165
1166 /* push the change in fp->state and towards the memory */
1167 smp_wmb();
1168
1169 return;
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02001170}
1171
/*
 * bnx2x_interrupt - legacy INTx / MSI interrupt handler.
 *
 * Acks the interrupt to get the pending status bits, then hands each
 * pending fastpath queue to NAPI, lets CNIC (if built in) handle its
 * bits, and schedules the slowpath work item for bit 0.  Bits are
 * cleared from 'status' as they are claimed; anything left over is
 * reported as unknown.
 */
irqreturn_t bnx2x_interrupt(int irq, void *dev_instance)
{
	struct bnx2x *bp = netdev_priv(dev_instance);
	u16 status = bnx2x_ack_int(bp);
	u16 mask;
	int i;

	/* Return here if interrupt is shared and it's not for us */
	if (unlikely(status == 0)) {
		DP(NETIF_MSG_INTR, "not our interrupt!\n");
		return IRQ_NONE;
	}
	DP(NETIF_MSG_INTR, "got an interrupt status 0x%x\n", status);

	/* Return here if interrupt is disabled */
	if (unlikely(atomic_read(&bp->intr_sem) != 0)) {
		DP(NETIF_MSG_INTR, "called but intr_sem not 0, returning\n");
		return IRQ_HANDLED;
	}

#ifdef BNX2X_STOP_ON_ERROR
	if (unlikely(bp->panic))
		return IRQ_HANDLED;
#endif

	/* Fastpath queues: one status bit per queue */
	for (i = 0; i < BNX2X_NUM_QUEUES(bp); i++) {
		struct bnx2x_fastpath *fp = &bp->fp[i];

		mask = 0x2 << (fp->index + CNIC_CONTEXT_USE);
		if (status & mask) {
			/* Handle Rx and Tx according to SB id */
			prefetch(fp->rx_cons_sb);
			prefetch(fp->tx_cons_sb);
			prefetch(&fp->sb_running_index[SM_RX_ID]);
			napi_schedule(&bnx2x_fp(bp, fp->index, napi));
			status &= ~mask;
		}
	}

#ifdef BCM_CNIC
	mask = 0x2;
	if (status & (mask | 0x1)) {
		struct cnic_ops *c_ops = NULL;

		/* cnic_ops is RCU-protected against module unload */
		rcu_read_lock();
		c_ops = rcu_dereference(bp->cnic_ops);
		if (c_ops)
			c_ops->cnic_handler(bp->cnic_data, NULL);
		rcu_read_unlock();

		status &= ~mask;
	}
#endif

	/* Bit 0: slowpath - deferred to the workqueue */
	if (unlikely(status & 0x1)) {
		queue_delayed_work(bnx2x_wq, &bp->sp_task, 0);

		status &= ~0x1;
		if (!status)
			return IRQ_HANDLED;
	}

	if (unlikely(status))
		DP(NETIF_MSG_INTR, "got an unknown interrupt! (status 0x%x)\n",
		   status);

	return IRQ_HANDLED;
}
1240
1241/* end of fast path */
1242
Yaniv Rosnerc18487e2008-06-23 20:27:52 -07001243
1244/* Link */
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02001245
1246/*
1247 * General service functions
1248 */
1249
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00001250int bnx2x_acquire_hw_lock(struct bnx2x *bp, u32 resource)
Eliezer Tamirf1410642008-02-28 11:51:50 -08001251{
Eliezer Tamirf1410642008-02-28 11:51:50 -08001252 u32 lock_status;
1253 u32 resource_bit = (1 << resource);
Yitchak Gertner4a37fb62008-08-13 15:50:23 -07001254 int func = BP_FUNC(bp);
1255 u32 hw_lock_control_reg;
Yaniv Rosnerc18487e2008-06-23 20:27:52 -07001256 int cnt;
Eliezer Tamirf1410642008-02-28 11:51:50 -08001257
1258 /* Validating that the resource is within range */
1259 if (resource > HW_LOCK_MAX_RESOURCE_VALUE) {
1260 DP(NETIF_MSG_HW,
1261 "resource(0x%x) > HW_LOCK_MAX_RESOURCE_VALUE(0x%x)\n",
1262 resource, HW_LOCK_MAX_RESOURCE_VALUE);
1263 return -EINVAL;
1264 }
1265
Yitchak Gertner4a37fb62008-08-13 15:50:23 -07001266 if (func <= 5) {
1267 hw_lock_control_reg = (MISC_REG_DRIVER_CONTROL_1 + func*8);
1268 } else {
1269 hw_lock_control_reg =
1270 (MISC_REG_DRIVER_CONTROL_7 + (func - 6)*8);
1271 }
1272
Eliezer Tamirf1410642008-02-28 11:51:50 -08001273 /* Validating that the resource is not already taken */
Yitchak Gertner4a37fb62008-08-13 15:50:23 -07001274 lock_status = REG_RD(bp, hw_lock_control_reg);
Eliezer Tamirf1410642008-02-28 11:51:50 -08001275 if (lock_status & resource_bit) {
1276 DP(NETIF_MSG_HW, "lock_status 0x%x resource_bit 0x%x\n",
1277 lock_status, resource_bit);
1278 return -EEXIST;
1279 }
1280
Eilon Greenstein46230476b2008-08-25 15:23:30 -07001281 /* Try for 5 second every 5ms */
1282 for (cnt = 0; cnt < 1000; cnt++) {
Eliezer Tamirf1410642008-02-28 11:51:50 -08001283 /* Try to acquire the lock */
Yitchak Gertner4a37fb62008-08-13 15:50:23 -07001284 REG_WR(bp, hw_lock_control_reg + 4, resource_bit);
1285 lock_status = REG_RD(bp, hw_lock_control_reg);
Eliezer Tamirf1410642008-02-28 11:51:50 -08001286 if (lock_status & resource_bit)
1287 return 0;
1288
1289 msleep(5);
1290 }
1291 DP(NETIF_MSG_HW, "Timeout\n");
1292 return -EAGAIN;
1293}
1294
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00001295int bnx2x_release_hw_lock(struct bnx2x *bp, u32 resource)
Eliezer Tamirf1410642008-02-28 11:51:50 -08001296{
1297 u32 lock_status;
1298 u32 resource_bit = (1 << resource);
Yitchak Gertner4a37fb62008-08-13 15:50:23 -07001299 int func = BP_FUNC(bp);
1300 u32 hw_lock_control_reg;
Eliezer Tamirf1410642008-02-28 11:51:50 -08001301
Vladislav Zolotarov72fd0712010-04-19 01:13:12 +00001302 DP(NETIF_MSG_HW, "Releasing a lock on resource %d\n", resource);
1303
Eliezer Tamirf1410642008-02-28 11:51:50 -08001304 /* Validating that the resource is within range */
1305 if (resource > HW_LOCK_MAX_RESOURCE_VALUE) {
1306 DP(NETIF_MSG_HW,
1307 "resource(0x%x) > HW_LOCK_MAX_RESOURCE_VALUE(0x%x)\n",
1308 resource, HW_LOCK_MAX_RESOURCE_VALUE);
1309 return -EINVAL;
1310 }
1311
Yitchak Gertner4a37fb62008-08-13 15:50:23 -07001312 if (func <= 5) {
1313 hw_lock_control_reg = (MISC_REG_DRIVER_CONTROL_1 + func*8);
1314 } else {
1315 hw_lock_control_reg =
1316 (MISC_REG_DRIVER_CONTROL_7 + (func - 6)*8);
1317 }
1318
Eliezer Tamirf1410642008-02-28 11:51:50 -08001319 /* Validating that the resource is currently taken */
Yitchak Gertner4a37fb62008-08-13 15:50:23 -07001320 lock_status = REG_RD(bp, hw_lock_control_reg);
Eliezer Tamirf1410642008-02-28 11:51:50 -08001321 if (!(lock_status & resource_bit)) {
1322 DP(NETIF_MSG_HW, "lock_status 0x%x resource_bit 0x%x\n",
1323 lock_status, resource_bit);
1324 return -EFAULT;
1325 }
1326
Yitchak Gertner4a37fb62008-08-13 15:50:23 -07001327 REG_WR(bp, hw_lock_control_reg, resource_bit);
Eliezer Tamirf1410642008-02-28 11:51:50 -08001328 return 0;
1329}
1330
Yaniv Rosnerc18487e2008-06-23 20:27:52 -07001331
Eilon Greenstein4acac6a2009-02-12 08:36:52 +00001332int bnx2x_get_gpio(struct bnx2x *bp, int gpio_num, u8 port)
1333{
1334 /* The GPIO should be swapped if swap register is set and active */
1335 int gpio_port = (REG_RD(bp, NIG_REG_PORT_SWAP) &&
1336 REG_RD(bp, NIG_REG_STRAP_OVERRIDE)) ^ port;
1337 int gpio_shift = gpio_num +
1338 (gpio_port ? MISC_REGISTERS_GPIO_PORT_SHIFT : 0);
1339 u32 gpio_mask = (1 << gpio_shift);
1340 u32 gpio_reg;
1341 int value;
1342
1343 if (gpio_num > MISC_REGISTERS_GPIO_3) {
1344 BNX2X_ERR("Invalid GPIO %d\n", gpio_num);
1345 return -EINVAL;
1346 }
1347
1348 /* read GPIO value */
1349 gpio_reg = REG_RD(bp, MISC_REG_GPIO);
1350
1351 /* get the requested pin value */
1352 if ((gpio_reg & gpio_mask) == gpio_mask)
1353 value = 1;
1354 else
1355 value = 0;
1356
1357 DP(NETIF_MSG_LINK, "pin %d value 0x%x\n", gpio_num, value);
1358
1359 return value;
1360}
1361
/*
 * bnx2x_set_gpio - drive a GPIO pin low/high or float it.
 *
 * @bp:		driver handle
 * @gpio_num:	pin number (0..MISC_REGISTERS_GPIO_3)
 * @mode:	MISC_REGISTERS_GPIO_OUTPUT_LOW/OUTPUT_HIGH/INPUT_HI_Z
 * @port:	logical port; swapped when the port-swap strap is active
 *
 * Performs a read-modify-write of MISC_REG_GPIO under the GPIO HW lock.
 * Returns 0 on success (an unrecognized mode writes the read value back
 * unchanged), -EINVAL for an invalid pin number.
 */
int bnx2x_set_gpio(struct bnx2x *bp, int gpio_num, u32 mode, u8 port)
{
	/* The GPIO should be swapped if swap register is set and active */
	int gpio_port = (REG_RD(bp, NIG_REG_PORT_SWAP) &&
			 REG_RD(bp, NIG_REG_STRAP_OVERRIDE)) ^ port;
	int gpio_shift = gpio_num +
			(gpio_port ? MISC_REGISTERS_GPIO_PORT_SHIFT : 0);
	u32 gpio_mask = (1 << gpio_shift);
	u32 gpio_reg;

	if (gpio_num > MISC_REGISTERS_GPIO_3) {
		BNX2X_ERR("Invalid GPIO %d\n", gpio_num);
		return -EINVAL;
	}

	bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_GPIO);
	/* read GPIO and mask except the float bits */
	gpio_reg = (REG_RD(bp, MISC_REG_GPIO) & MISC_REGISTERS_GPIO_FLOAT);

	switch (mode) {
	case MISC_REGISTERS_GPIO_OUTPUT_LOW:
		DP(NETIF_MSG_LINK, "Set GPIO %d (shift %d) -> output low\n",
		   gpio_num, gpio_shift);
		/* clear FLOAT and set CLR */
		gpio_reg &= ~(gpio_mask << MISC_REGISTERS_GPIO_FLOAT_POS);
		gpio_reg |= (gpio_mask << MISC_REGISTERS_GPIO_CLR_POS);
		break;

	case MISC_REGISTERS_GPIO_OUTPUT_HIGH:
		DP(NETIF_MSG_LINK, "Set GPIO %d (shift %d) -> output high\n",
		   gpio_num, gpio_shift);
		/* clear FLOAT and set SET */
		gpio_reg &= ~(gpio_mask << MISC_REGISTERS_GPIO_FLOAT_POS);
		gpio_reg |= (gpio_mask << MISC_REGISTERS_GPIO_SET_POS);
		break;

	case MISC_REGISTERS_GPIO_INPUT_HI_Z:
		DP(NETIF_MSG_LINK, "Set GPIO %d (shift %d) -> input\n",
		   gpio_num, gpio_shift);
		/* set FLOAT */
		gpio_reg |= (gpio_mask << MISC_REGISTERS_GPIO_FLOAT_POS);
		break;

	default:
		break;
	}

	REG_WR(bp, MISC_REG_GPIO, gpio_reg);
	bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_GPIO);

	return 0;
}
1414
Eilon Greenstein4acac6a2009-02-12 08:36:52 +00001415int bnx2x_set_gpio_int(struct bnx2x *bp, int gpio_num, u32 mode, u8 port)
1416{
1417 /* The GPIO should be swapped if swap register is set and active */
1418 int gpio_port = (REG_RD(bp, NIG_REG_PORT_SWAP) &&
1419 REG_RD(bp, NIG_REG_STRAP_OVERRIDE)) ^ port;
1420 int gpio_shift = gpio_num +
1421 (gpio_port ? MISC_REGISTERS_GPIO_PORT_SHIFT : 0);
1422 u32 gpio_mask = (1 << gpio_shift);
1423 u32 gpio_reg;
1424
1425 if (gpio_num > MISC_REGISTERS_GPIO_3) {
1426 BNX2X_ERR("Invalid GPIO %d\n", gpio_num);
1427 return -EINVAL;
1428 }
1429
1430 bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_GPIO);
1431 /* read GPIO int */
1432 gpio_reg = REG_RD(bp, MISC_REG_GPIO_INT);
1433
1434 switch (mode) {
1435 case MISC_REGISTERS_GPIO_INT_OUTPUT_CLR:
1436 DP(NETIF_MSG_LINK, "Clear GPIO INT %d (shift %d) -> "
1437 "output low\n", gpio_num, gpio_shift);
1438 /* clear SET and set CLR */
1439 gpio_reg &= ~(gpio_mask << MISC_REGISTERS_GPIO_INT_SET_POS);
1440 gpio_reg |= (gpio_mask << MISC_REGISTERS_GPIO_INT_CLR_POS);
1441 break;
1442
1443 case MISC_REGISTERS_GPIO_INT_OUTPUT_SET:
1444 DP(NETIF_MSG_LINK, "Set GPIO INT %d (shift %d) -> "
1445 "output high\n", gpio_num, gpio_shift);
1446 /* clear CLR and set SET */
1447 gpio_reg &= ~(gpio_mask << MISC_REGISTERS_GPIO_INT_CLR_POS);
1448 gpio_reg |= (gpio_mask << MISC_REGISTERS_GPIO_INT_SET_POS);
1449 break;
1450
1451 default:
1452 break;
1453 }
1454
1455 REG_WR(bp, MISC_REG_GPIO_INT, gpio_reg);
1456 bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_GPIO);
1457
1458 return 0;
1459}
1460
/*
 * bnx2x_set_spio - drive a shared (SPIO) pin low/high or float it.
 *
 * @bp:		driver handle
 * @spio_num:	pin number (MISC_REGISTERS_SPIO_4..MISC_REGISTERS_SPIO_7)
 * @mode:	MISC_REGISTERS_SPIO_OUTPUT_LOW/OUTPUT_HIGH/INPUT_HI_Z
 *
 * Performs a read-modify-write of MISC_REG_SPIO under the SPIO HW lock.
 * Returns 0 on success (an unrecognized mode writes the read value back
 * unchanged), -EINVAL for an out-of-range pin number.
 */
static int bnx2x_set_spio(struct bnx2x *bp, int spio_num, u32 mode)
{
	u32 spio_mask = (1 << spio_num);
	u32 spio_reg;

	if ((spio_num < MISC_REGISTERS_SPIO_4) ||
	    (spio_num > MISC_REGISTERS_SPIO_7)) {
		BNX2X_ERR("Invalid SPIO %d\n", spio_num);
		return -EINVAL;
	}

	bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_SPIO);
	/* read SPIO and mask except the float bits */
	spio_reg = (REG_RD(bp, MISC_REG_SPIO) & MISC_REGISTERS_SPIO_FLOAT);

	switch (mode) {
	case MISC_REGISTERS_SPIO_OUTPUT_LOW:
		DP(NETIF_MSG_LINK, "Set SPIO %d -> output low\n", spio_num);
		/* clear FLOAT and set CLR */
		spio_reg &= ~(spio_mask << MISC_REGISTERS_SPIO_FLOAT_POS);
		spio_reg |= (spio_mask << MISC_REGISTERS_SPIO_CLR_POS);
		break;

	case MISC_REGISTERS_SPIO_OUTPUT_HIGH:
		DP(NETIF_MSG_LINK, "Set SPIO %d -> output high\n", spio_num);
		/* clear FLOAT and set SET */
		spio_reg &= ~(spio_mask << MISC_REGISTERS_SPIO_FLOAT_POS);
		spio_reg |= (spio_mask << MISC_REGISTERS_SPIO_SET_POS);
		break;

	case MISC_REGISTERS_SPIO_INPUT_HI_Z:
		DP(NETIF_MSG_LINK, "Set SPIO %d -> input\n", spio_num);
		/* set FLOAT */
		spio_reg |= (spio_mask << MISC_REGISTERS_SPIO_FLOAT_POS);
		break;

	default:
		break;
	}

	REG_WR(bp, MISC_REG_SPIO, spio_reg);
	bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_SPIO);

	return 0;
}
1506
Yaniv Rosnera22f0782010-09-07 11:41:20 +00001507int bnx2x_get_link_cfg_idx(struct bnx2x *bp)
1508{
1509 u32 sel_phy_idx = 0;
1510 if (bp->link_vars.link_up) {
1511 sel_phy_idx = EXT_PHY1;
1512 /* In case link is SERDES, check if the EXT_PHY2 is the one */
1513 if ((bp->link_vars.link_status & LINK_STATUS_SERDES_LINK) &&
1514 (bp->link_params.phy[EXT_PHY2].supported & SUPPORTED_FIBRE))
1515 sel_phy_idx = EXT_PHY2;
1516 } else {
1517
1518 switch (bnx2x_phy_selection(&bp->link_params)) {
1519 case PORT_HW_CFG_PHY_SELECTION_HARDWARE_DEFAULT:
1520 case PORT_HW_CFG_PHY_SELECTION_FIRST_PHY:
1521 case PORT_HW_CFG_PHY_SELECTION_FIRST_PHY_PRIORITY:
1522 sel_phy_idx = EXT_PHY1;
1523 break;
1524 case PORT_HW_CFG_PHY_SELECTION_SECOND_PHY:
1525 case PORT_HW_CFG_PHY_SELECTION_SECOND_PHY_PRIORITY:
1526 sel_phy_idx = EXT_PHY2;
1527 break;
1528 }
1529 }
1530 /*
1531 * The selected actived PHY is always after swapping (in case PHY
1532 * swapping is enabled). So when swapping is enabled, we need to reverse
1533 * the configuration
1534 */
1535
1536 if (bp->link_params.multi_phy_config &
1537 PORT_HW_CFG_PHY_SWAPPED_ENABLED) {
1538 if (sel_phy_idx == EXT_PHY1)
1539 sel_phy_idx = EXT_PHY2;
1540 else if (sel_phy_idx == EXT_PHY2)
1541 sel_phy_idx = EXT_PHY1;
1542 }
1543 return LINK_CONFIG_IDX(sel_phy_idx);
1544}
1545
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00001546void bnx2x_calc_fc_adv(struct bnx2x *bp)
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02001547{
Yaniv Rosnera22f0782010-09-07 11:41:20 +00001548 u8 cfg_idx = bnx2x_get_link_cfg_idx(bp);
Eilon Greensteinad33ea32009-01-14 21:24:57 -08001549 switch (bp->link_vars.ieee_fc &
1550 MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_MASK) {
Yaniv Rosnerc18487e2008-06-23 20:27:52 -07001551 case MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_NONE:
Yaniv Rosnera22f0782010-09-07 11:41:20 +00001552 bp->port.advertising[cfg_idx] &= ~(ADVERTISED_Asym_Pause |
Yaniv Rosnerc18487e2008-06-23 20:27:52 -07001553 ADVERTISED_Pause);
Eliezer Tamirf1410642008-02-28 11:51:50 -08001554 break;
Eilon Greenstein356e2382009-02-12 08:38:32 +00001555
Yaniv Rosnerc18487e2008-06-23 20:27:52 -07001556 case MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_BOTH:
Yaniv Rosnera22f0782010-09-07 11:41:20 +00001557 bp->port.advertising[cfg_idx] |= (ADVERTISED_Asym_Pause |
Yaniv Rosnerc18487e2008-06-23 20:27:52 -07001558 ADVERTISED_Pause);
Eliezer Tamirf1410642008-02-28 11:51:50 -08001559 break;
Eilon Greenstein356e2382009-02-12 08:38:32 +00001560
Yaniv Rosnerc18487e2008-06-23 20:27:52 -07001561 case MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_ASYMMETRIC:
Yaniv Rosnera22f0782010-09-07 11:41:20 +00001562 bp->port.advertising[cfg_idx] |= ADVERTISED_Asym_Pause;
Eliezer Tamirf1410642008-02-28 11:51:50 -08001563 break;
Eilon Greenstein356e2382009-02-12 08:38:32 +00001564
Eliezer Tamirf1410642008-02-28 11:51:50 -08001565 default:
Yaniv Rosnera22f0782010-09-07 11:41:20 +00001566 bp->port.advertising[cfg_idx] &= ~(ADVERTISED_Asym_Pause |
Yaniv Rosnerc18487e2008-06-23 20:27:52 -07001567 ADVERTISED_Pause);
Eliezer Tamirf1410642008-02-28 11:51:50 -08001568 break;
1569 }
1570}
1571
Eilon Greenstein2691d512009-08-12 08:22:08 +00001572
/* Bring the link up for the first time (device load / self-test).
 *
 * Returns the bnx2x_phy_init() status, or -EINVAL when no bootcode (MCP)
 * is present.  NOTE(review): the return type is u8, so -EINVAL is
 * truncated to a non-zero byte - callers may only test for "!= 0".
 */
u8 bnx2x_initial_phy_init(struct bnx2x *bp, int load_mode)
{
	if (!BP_NOMCP(bp)) {
		u8 rc;
		int cfx_idx = bnx2x_get_link_cfg_idx(bp);
		/* remember the requested speed so it can be restored below */
		u16 req_line_speed = bp->link_params.req_line_speed[cfx_idx];
		/* Initialize link parameters structure variables */
		/* It is recommended to turn off RX FC for jumbo frames
		   for better performance */
		if (bp->dev->mtu > 5000)
			bp->link_params.req_fc_auto_adv = BNX2X_FLOW_CTRL_TX;
		else
			bp->link_params.req_fc_auto_adv = BNX2X_FLOW_CTRL_BOTH;

		bnx2x_acquire_phy_lock(bp);

		if (load_mode == LOAD_DIAG) {
			/* diagnostics mode: force XGXS loopback at 10G */
			bp->link_params.loopback_mode = LOOPBACK_XGXS;
			bp->link_params.req_line_speed[cfx_idx] = SPEED_10000;
		}

		rc = bnx2x_phy_init(&bp->link_params, &bp->link_vars);

		bnx2x_release_phy_lock(bp);

		bnx2x_calc_fc_adv(bp);

		/* on emulation/FPGA the link may already be up - report it */
		if (CHIP_REV_IS_SLOW(bp) && bp->link_vars.link_up) {
			bnx2x_stats_handle(bp, STATS_EVENT_LINK_UP);
			bnx2x_link_report(bp);
		}
		/* undo the possible LOAD_DIAG speed override */
		bp->link_params.req_line_speed[cfx_idx] = req_line_speed;
		return rc;
	}
	BNX2X_ERR("Bootcode is missing - can not initialize link\n");
	return -EINVAL;
}
1610
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00001611void bnx2x_link_set(struct bnx2x *bp)
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02001612{
Eilon Greenstein19680c42008-08-13 15:47:33 -07001613 if (!BP_NOMCP(bp)) {
Yitchak Gertner4a37fb62008-08-13 15:50:23 -07001614 bnx2x_acquire_phy_lock(bp);
Yaniv Rosner54c2fb72010-09-01 09:51:23 +00001615 bnx2x_link_reset(&bp->link_params, &bp->link_vars, 1);
Eilon Greenstein19680c42008-08-13 15:47:33 -07001616 bnx2x_phy_init(&bp->link_params, &bp->link_vars);
Yitchak Gertner4a37fb62008-08-13 15:50:23 -07001617 bnx2x_release_phy_lock(bp);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02001618
Eilon Greenstein19680c42008-08-13 15:47:33 -07001619 bnx2x_calc_fc_adv(bp);
1620 } else
Eilon Greensteinf5372252009-02-12 08:38:30 +00001621 BNX2X_ERR("Bootcode is missing - can not set link\n");
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02001622}
1623
Yaniv Rosnerc18487e2008-06-23 20:27:52 -07001624static void bnx2x__link_reset(struct bnx2x *bp)
1625{
Eilon Greenstein19680c42008-08-13 15:47:33 -07001626 if (!BP_NOMCP(bp)) {
Yitchak Gertner4a37fb62008-08-13 15:50:23 -07001627 bnx2x_acquire_phy_lock(bp);
Eilon Greenstein589abe32009-02-12 08:36:55 +00001628 bnx2x_link_reset(&bp->link_params, &bp->link_vars, 1);
Yitchak Gertner4a37fb62008-08-13 15:50:23 -07001629 bnx2x_release_phy_lock(bp);
Eilon Greenstein19680c42008-08-13 15:47:33 -07001630 } else
Eilon Greensteinf5372252009-02-12 08:38:30 +00001631 BNX2X_ERR("Bootcode is missing - can not reset link\n");
Yaniv Rosnerc18487e2008-06-23 20:27:52 -07001632}
1633
Yaniv Rosnera22f0782010-09-07 11:41:20 +00001634u8 bnx2x_link_test(struct bnx2x *bp, u8 is_serdes)
Yaniv Rosnerc18487e2008-06-23 20:27:52 -07001635{
Vladislav Zolotarov2145a922010-04-19 01:13:49 +00001636 u8 rc = 0;
Yaniv Rosnerc18487e2008-06-23 20:27:52 -07001637
Vladislav Zolotarov2145a922010-04-19 01:13:49 +00001638 if (!BP_NOMCP(bp)) {
1639 bnx2x_acquire_phy_lock(bp);
Yaniv Rosnera22f0782010-09-07 11:41:20 +00001640 rc = bnx2x_test_link(&bp->link_params, &bp->link_vars,
1641 is_serdes);
Vladislav Zolotarov2145a922010-04-19 01:13:49 +00001642 bnx2x_release_phy_lock(bp);
1643 } else
1644 BNX2X_ERR("Bootcode is missing - can not test link\n");
Yaniv Rosnerc18487e2008-06-23 20:27:52 -07001645
1646 return rc;
1647}
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02001648
/* Initialize the per-port rate-shaping and fairness variables in bp->cmng.
 * Assumes bp->link_vars.line_speed is non-zero (callers set it first,
 * e.g. to SPEED_10000 when there is no link yet) as it is used as a
 * divisor below.
 */
static void bnx2x_init_port_minmax(struct bnx2x *bp)
{
	/* line_speed / 8: presumably Mbps -> bytes/usec - confirm units */
	u32 r_param = bp->link_vars.line_speed / 8;
	u32 fair_periodic_timeout_usec;
	u32 t_fair;

	memset(&(bp->cmng.rs_vars), 0,
	       sizeof(struct rate_shaping_vars_per_port));
	memset(&(bp->cmng.fair_vars), 0, sizeof(struct fairness_vars_per_port));

	/* 100 usec in SDM ticks = 25 since each tick is 4 usec */
	bp->cmng.rs_vars.rs_periodic_timeout = RS_PERIODIC_TIMEOUT_USEC / 4;

	/* this is the threshold below which no timer arming will occur
	   1.25 coefficient is for the threshold to be a little bigger
	   than the real time, to compensate for timer inaccuracy */
	bp->cmng.rs_vars.rs_threshold =
				(RS_PERIODIC_TIMEOUT_USEC * r_param * 5) / 4;

	/* resolution of fairness timer */
	fair_periodic_timeout_usec = QM_ARB_BYTES / r_param;
	/* for 10G it is 1000usec. for 1G it is 10000usec. */
	t_fair = T_FAIR_COEF / bp->link_vars.line_speed;

	/* this is the threshold below which we won't arm the timer anymore */
	bp->cmng.fair_vars.fair_threshold = QM_ARB_BYTES;

	/* we multiply by 1e3/8 to get bytes/msec.
	   We don't want the credits to pass a credit
	   of the t_fair*FAIR_MEM (algorithm resolution) */
	bp->cmng.fair_vars.upper_bound = r_param * t_fair * FAIR_MEM;
	/* since each tick is 4 usec */
	bp->cmng.fair_vars.fairness_timeout = fair_periodic_timeout_usec / 4;
}
1683
/* Calculates the sum of vn_min_rates and stores it in bp->vn_weight_sum.
   It's needed for further normalizing of the min_rates.
   Result:
     sum of vn_min_rates.
     or
     0 - if all the min_rates are 0.
   In the latter case the fairness algorithm should be deactivated.
   If not all min_rates are zero then those that are zeroes will be set to 1.
 */
static void bnx2x_calc_vn_weight_sum(struct bnx2x *bp)
{
	int all_zero = 1;
	int port = BP_PORT(bp);
	int vn;

	bp->vn_weight_sum = 0;
	for (vn = VN_0; vn < E1HVN_MAX; vn++) {
		int func = 2*vn + port;	/* absolute function number */
		u32 vn_cfg = MF_CFG_RD(bp, func_mf_config[func].config);
		/* shmem stores min BW scaled down by 100 - rescale it */
		u32 vn_min_rate = ((vn_cfg & FUNC_MF_CFG_MIN_BW_MASK) >>
				   FUNC_MF_CFG_MIN_BW_SHIFT) * 100;

		/* Skip hidden vns */
		if (vn_cfg & FUNC_MF_CFG_FUNC_HIDE)
			continue;

		/* If min rate is zero - set it to 1 */
		if (!vn_min_rate)
			vn_min_rate = DEF_MIN_RATE;
		else
			all_zero = 0;

		bp->vn_weight_sum += vn_min_rate;
	}

	/* ... only if all min rates are zeros - disable fairness */
	if (all_zero) {
		bp->cmng.flags.cmng_enables &=
					~CMNG_FLAGS_PER_PORT_FAIRNESS_VN;
		DP(NETIF_MSG_IFUP, "All MIN values are zeroes"
		   " fairness will be disabled\n");
	} else
		bp->cmng.flags.cmng_enables |=
					CMNG_FLAGS_PER_PORT_FAIRNESS_VN;
}
1729
/* Compute the rate-shaping and fairness parameters for one VN (function)
 * and program them into XSTORM internal memory, where the firmware's
 * congestion-management code consumes them.
 */
static void bnx2x_init_vn_minmax(struct bnx2x *bp, int func)
{
	struct rate_shaping_vars_per_vn m_rs_vn;
	struct fairness_vars_per_vn m_fair_vn;
	u32 vn_cfg = MF_CFG_RD(bp, func_mf_config[func].config);
	u16 vn_min_rate, vn_max_rate;
	int i;

	/* If function is hidden - set min and max to zeroes */
	if (vn_cfg & FUNC_MF_CFG_FUNC_HIDE) {
		vn_min_rate = 0;
		vn_max_rate = 0;

	} else {
		/* shmem stores min/max BW scaled down by 100 - rescale */
		vn_min_rate = ((vn_cfg & FUNC_MF_CFG_MIN_BW_MASK) >>
				FUNC_MF_CFG_MIN_BW_SHIFT) * 100;
		/* If min rate is zero - set it to 1 */
		if (!vn_min_rate)
			vn_min_rate = DEF_MIN_RATE;
		vn_max_rate = ((vn_cfg & FUNC_MF_CFG_MAX_BW_MASK) >>
				FUNC_MF_CFG_MAX_BW_SHIFT) * 100;
	}
	DP(NETIF_MSG_IFUP,
	   "func %d: vn_min_rate %d vn_max_rate %d vn_weight_sum %d\n",
	   func, vn_min_rate, vn_max_rate, bp->vn_weight_sum);

	memset(&m_rs_vn, 0, sizeof(struct rate_shaping_vars_per_vn));
	memset(&m_fair_vn, 0, sizeof(struct fairness_vars_per_vn));

	/* global vn counter - maximal Mbps for this vn */
	m_rs_vn.vn_counter.rate = vn_max_rate;

	/* quota - number of bytes transmitted in this period */
	m_rs_vn.vn_counter.quota =
				(vn_max_rate * RS_PERIODIC_TIMEOUT_USEC) / 8;

	/* fairness credit only applies when fairness is active
	   (vn_weight_sum != 0, see bnx2x_calc_vn_weight_sum) */
	if (bp->vn_weight_sum) {
		/* credit for each period of the fairness algorithm:
		   number of bytes in T_FAIR (the vn share the port rate).
		   vn_weight_sum should not be larger than 10000, thus
		   T_FAIR_COEF / (8 * vn_weight_sum) will always be greater
		   than zero */
		m_fair_vn.vn_credit_delta =
			max_t(u32, (vn_min_rate * (T_FAIR_COEF /
						   (8 * bp->vn_weight_sum))),
			      (bp->cmng.fair_vars.fair_threshold * 2));
		DP(NETIF_MSG_IFUP, "m_fair_vn.vn_credit_delta %d\n",
		   m_fair_vn.vn_credit_delta);
	}

	/* Store it to internal memory (word by word) */
	for (i = 0; i < sizeof(struct rate_shaping_vars_per_vn)/4; i++)
		REG_WR(bp, BAR_XSTRORM_INTMEM +
		       XSTORM_RATE_SHAPING_PER_VN_VARS_OFFSET(func) + i * 4,
		       ((u32 *)(&m_rs_vn))[i]);

	for (i = 0; i < sizeof(struct fairness_vars_per_vn)/4; i++)
		REG_WR(bp, BAR_XSTRORM_INTMEM +
		       XSTORM_FAIRNESS_PER_VN_VARS_OFFSET(func) + i * 4,
		       ((u32 *)(&m_fair_vn))[i]);
}
Dmitry Kravkov523224a2010-10-06 03:23:26 +00001791static int bnx2x_get_cmng_fns_mode(struct bnx2x *bp)
1792{
1793 if (CHIP_REV_IS_SLOW(bp))
1794 return CMNG_FNS_NONE;
1795 if (IS_E1HMF(bp))
1796 return CMNG_FNS_MINMAX;
Eilon Greenstein34f80b02008-06-23 20:33:01 -07001797
Dmitry Kravkov523224a2010-10-06 03:23:26 +00001798 return CMNG_FNS_NONE;
1799}
1800
/* Re-read the multi-function configuration from shmem.
 *
 * NOTE(review): bp->mf_config is overwritten on every loop iteration, so
 * only the last VN's (func's) configuration survives the loop - confirm
 * whether this is intended or whether a per-VN array was meant here.
 */
static void bnx2x_read_mf_cfg(struct bnx2x *bp)
{
	int vn;

	if (BP_NOMCP(bp))
		return; /* what should be the default value in this case */

	for (vn = VN_0; vn < E1HVN_MAX; vn++) {
		int /*abs*/func = 2*vn + BP_PORT(bp);
		bp->mf_config =
			MF_CFG_RD(bp, func_mf_config[func].config);
	}
}
1814
1815static void bnx2x_cmng_fns_init(struct bnx2x *bp, u8 read_cfg, u8 cmng_type)
1816{
1817
1818 if (cmng_type == CMNG_FNS_MINMAX) {
1819 int vn;
1820
1821 /* clear cmng_enables */
1822 bp->cmng.flags.cmng_enables = 0;
1823
1824 /* read mf conf from shmem */
1825 if (read_cfg)
1826 bnx2x_read_mf_cfg(bp);
1827
1828 /* Init rate shaping and fairness contexts */
1829 bnx2x_init_port_minmax(bp);
1830
1831 /* vn_weight_sum and enable fairness if not 0 */
1832 bnx2x_calc_vn_weight_sum(bp);
1833
1834 /* calculate and set min-max rate for each vn */
1835 for (vn = VN_0; vn < E1HVN_MAX; vn++)
1836 bnx2x_init_vn_minmax(bp, vn);
1837
1838 /* always enable rate shaping and fairness */
1839 bp->cmng.flags.cmng_enables |=
1840 CMNG_FLAGS_PER_PORT_RATE_SHAPING_VN;
1841 if (!bp->vn_weight_sum)
1842 DP(NETIF_MSG_IFUP, "All MIN values are zeroes"
1843 " fairness will be disabled\n");
1844 return;
1845 }
1846
1847 /* rate shaping and fairness are disabled */
1848 DP(NETIF_MSG_IFUP,
1849 "rate shaping and fairness are disabled\n");
1850}
1851
1852static inline void bnx2x_link_sync_notify(struct bnx2x *bp)
1853{
1854 int port = BP_PORT(bp);
1855 int func;
1856 int vn;
1857
1858 /* Set the attention towards other drivers on the same port */
1859 for (vn = VN_0; vn < E1HVN_MAX; vn++) {
1860 if (vn == BP_E1HVN(bp))
1861 continue;
1862
1863 func = ((vn << 1) | port);
1864 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_0 +
1865 (LINK_SYNC_ATTENTION_BIT_FUNC_0 + func)*4, 1);
1866 }
1867}
Eilon Greenstein8a1c38d2009-02-12 08:36:40 +00001868
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02001869/* This function is called upon link interrupt */
Yaniv Rosnerc18487e2008-06-23 20:27:52 -07001870static void bnx2x_link_attn(struct bnx2x *bp)
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02001871{
Vladislav Zolotarovd9e8b182010-04-19 01:15:08 +00001872 u32 prev_link_status = bp->link_vars.link_status;
Yitchak Gertnerbb2a0f72008-06-23 20:33:36 -07001873 /* Make sure that we are synced with the current statistics */
1874 bnx2x_stats_handle(bp, STATS_EVENT_STOP);
1875
Yaniv Rosnerc18487e2008-06-23 20:27:52 -07001876 bnx2x_link_update(&bp->link_params, &bp->link_vars);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02001877
Yitchak Gertnerbb2a0f72008-06-23 20:33:36 -07001878 if (bp->link_vars.link_up) {
1879
Eilon Greenstein1c063282009-02-12 08:36:43 +00001880 /* dropless flow control */
Eilon Greensteina18f5122009-08-12 08:23:26 +00001881 if (CHIP_IS_E1H(bp) && bp->dropless_fc) {
Eilon Greenstein1c063282009-02-12 08:36:43 +00001882 int port = BP_PORT(bp);
1883 u32 pause_enabled = 0;
1884
1885 if (bp->link_vars.flow_ctrl & BNX2X_FLOW_CTRL_TX)
1886 pause_enabled = 1;
1887
1888 REG_WR(bp, BAR_USTRORM_INTMEM +
Eilon Greensteinca003922009-08-12 22:53:28 -07001889 USTORM_ETH_PAUSE_ENABLED_OFFSET(port),
Eilon Greenstein1c063282009-02-12 08:36:43 +00001890 pause_enabled);
1891 }
1892
Yitchak Gertnerbb2a0f72008-06-23 20:33:36 -07001893 if (bp->link_vars.mac_type == MAC_TYPE_BMAC) {
1894 struct host_port_stats *pstats;
1895
1896 pstats = bnx2x_sp(bp, port_stats);
1897 /* reset old bmac stats */
1898 memset(&(pstats->mac_stx[0]), 0,
1899 sizeof(struct mac_stx));
1900 }
Eilon Greensteinf34d28e2009-10-15 00:18:08 -07001901 if (bp->state == BNX2X_STATE_OPEN)
Yitchak Gertnerbb2a0f72008-06-23 20:33:36 -07001902 bnx2x_stats_handle(bp, STATS_EVENT_LINK_UP);
1903 }
1904
Vladislav Zolotarovd9e8b182010-04-19 01:15:08 +00001905 /* indicate link status only if link status actually changed */
1906 if (prev_link_status != bp->link_vars.link_status)
1907 bnx2x_link_report(bp);
Eilon Greenstein34f80b02008-06-23 20:33:01 -07001908
1909 if (IS_E1HMF(bp)) {
Eilon Greenstein8a1c38d2009-02-12 08:36:40 +00001910 int port = BP_PORT(bp);
Eilon Greenstein34f80b02008-06-23 20:33:01 -07001911 int func;
Eilon Greenstein8a1c38d2009-02-12 08:36:40 +00001912 int vn;
Eilon Greenstein34f80b02008-06-23 20:33:01 -07001913
Eilon Greensteinab6ad5a2009-08-12 08:24:29 +00001914 /* Set the attention towards other drivers on the same port */
Eilon Greenstein34f80b02008-06-23 20:33:01 -07001915 for (vn = VN_0; vn < E1HVN_MAX; vn++) {
1916 if (vn == BP_E1HVN(bp))
1917 continue;
1918
Eilon Greenstein8a1c38d2009-02-12 08:36:40 +00001919 func = ((vn << 1) | port);
Eilon Greenstein34f80b02008-06-23 20:33:01 -07001920 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_0 +
1921 (LINK_SYNC_ATTENTION_BIT_FUNC_0 + func)*4, 1);
1922 }
Eilon Greenstein34f80b02008-06-23 20:33:01 -07001923
Eilon Greenstein8a1c38d2009-02-12 08:36:40 +00001924 if (bp->link_vars.link_up) {
1925 int i;
Eilon Greenstein34f80b02008-06-23 20:33:01 -07001926
Eilon Greenstein8a1c38d2009-02-12 08:36:40 +00001927 /* Init rate shaping and fairness contexts */
1928 bnx2x_init_port_minmax(bp);
1929
Eilon Greenstein34f80b02008-06-23 20:33:01 -07001930 for (vn = VN_0; vn < E1HVN_MAX; vn++)
Eilon Greenstein8a1c38d2009-02-12 08:36:40 +00001931 bnx2x_init_vn_minmax(bp, 2*vn + port);
1932
1933 /* Store it to internal memory */
1934 for (i = 0;
1935 i < sizeof(struct cmng_struct_per_port) / 4; i++)
1936 REG_WR(bp, BAR_XSTRORM_INTMEM +
1937 XSTORM_CMNG_PER_PORT_VARS_OFFSET(port) + i*4,
1938 ((u32 *)(&bp->cmng))[i]);
1939 }
Eilon Greenstein34f80b02008-06-23 20:33:01 -07001940 }
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02001941}
1942
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00001943void bnx2x__link_status_update(struct bnx2x *bp)
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02001944{
Eilon Greensteinf34d28e2009-10-15 00:18:08 -07001945 if ((bp->state != BNX2X_STATE_OPEN) || (bp->flags & MF_FUNC_DIS))
Yaniv Rosnerc18487e2008-06-23 20:27:52 -07001946 return;
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02001947
Yaniv Rosnerc18487e2008-06-23 20:27:52 -07001948 bnx2x_link_status_update(&bp->link_params, &bp->link_vars);
1949
Yitchak Gertnerbb2a0f72008-06-23 20:33:36 -07001950 if (bp->link_vars.link_up)
1951 bnx2x_stats_handle(bp, STATS_EVENT_LINK_UP);
1952 else
1953 bnx2x_stats_handle(bp, STATS_EVENT_STOP);
1954
Eilon Greenstein2691d512009-08-12 08:22:08 +00001955 bnx2x_calc_vn_weight_sum(bp);
1956
Yaniv Rosnerc18487e2008-06-23 20:27:52 -07001957 /* indicate link status */
1958 bnx2x_link_report(bp);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02001959}
1960
/* Take over the Port Management Function (PMF) role on this port:
 * mark ourselves PMF, enable NIG attentions for this function and
 * kick the statistics state machine.
 */
static void bnx2x_pmf_update(struct bnx2x *bp)
{
	int port = BP_PORT(bp);
	u32 val;

	bp->port.pmf = 1;
	DP(NETIF_MSG_LINK, "pmf %d\n", bp->port.pmf);

	/* enable nig attention */
	/* base mask 0xff0f plus the bit for this function's VN */
	val = (0xff0f | (1 << (BP_E1HVN(bp) + 4)));
	REG_WR(bp, HC_REG_TRAILING_EDGE_0 + port*8, val);
	REG_WR(bp, HC_REG_LEADING_EDGE_0 + port*8, val);

	bnx2x_stats_handle(bp, STATS_EVENT_PMF);
}
1976
Yaniv Rosnerc18487e2008-06-23 20:27:52 -07001977/* end of Link */
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02001978
1979/* slow path */
1980
1981/*
1982 * General service functions
1983 */
1984
/* send the MCP a request, block until there is a reply.
 *
 * Returns the firmware response code (FW_MSG_CODE_MASK bits) on success,
 * or 0 when the firmware failed to respond within ~5 seconds.
 *
 * NOTE(review): fw_seq is incremented before fw_mb_mutex is taken -
 * confirm that all callers are otherwise serialized.
 */
u32 bnx2x_fw_command(struct bnx2x *bp, u32 command, u32 param)
{
	int func = BP_FUNC(bp);
	u32 seq = ++bp->fw_seq;	/* sequence number pairs request and reply */
	u32 rc = 0;
	u32 cnt = 1;
	u8 delay = CHIP_REV_IS_SLOW(bp) ? 100 : 10;	/* poll interval, ms */

	mutex_lock(&bp->fw_mb_mutex);
	/* write the parameter first, then the header which triggers the MCP */
	SHMEM_WR(bp, func_mb[func].drv_mb_param, param);
	SHMEM_WR(bp, func_mb[func].drv_mb_header, (command | seq));
	DP(BNX2X_MSG_MCP, "wrote command (%x) to FW MB\n", (command | seq));

	do {
		/* let the FW do it's magic ... */
		msleep(delay);

		rc = SHMEM_RD(bp, func_mb[func].fw_mb_header);

		/* Give the FW up to 5 second (500*10ms) */
	} while ((seq != (rc & FW_MSG_SEQ_NUMBER_MASK)) && (cnt++ < 500));

	DP(BNX2X_MSG_MCP, "[after %d ms] read (%x) seq is (%x) from FW MB\n",
	   cnt*delay, rc, seq);

	/* is this a reply to our command? */
	if (seq == (rc & FW_MSG_SEQ_NUMBER_MASK))
		rc &= FW_MSG_CODE_MASK;
	else {
		/* FW BUG! */
		BNX2X_ERR("FW failed to respond!\n");
		bnx2x_fw_dump(bp);
		rc = 0;	/* 0 signals failure to the caller */
	}
	mutex_unlock(&bp->fw_mb_mutex);

	return rc;
}
2024
/* must be called under rtnl_lock.
 * Translate the BNX2X_ACCEPT_* / BNX2X_PROMISCUOUS_MODE filter flags for
 * client cl_id into the per-class drop/accept bit masks in bp->mac_filters
 * (each client owns one bit, selected by "mask").
 */
void bnx2x_rxq_set_mac_filters(struct bnx2x *bp, u16 cl_id, u32 filters)
{
	u32 mask = (1 << cl_id);

	/* initial setting is BNX2X_ACCEPT_NONE */
	u8 drop_all_ucast = 1, drop_all_bcast = 1, drop_all_mcast = 1;
	u8 accp_all_ucast = 0, accp_all_bcast = 0, accp_all_mcast = 0;
	/* NOTE(review): never set anywhere below, so the unmatched-unicast
	   bit is always cleared for this client - confirm intended */
	u8 unmatched_unicast = 0;

	if (filters & BNX2X_PROMISCUOUS_MODE) {
		/* promiscuous - accept all, drop none */
		drop_all_ucast = drop_all_bcast = drop_all_mcast = 0;
		accp_all_ucast = accp_all_bcast = accp_all_mcast = 1;
	}
	if (filters & BNX2X_ACCEPT_UNICAST) {
		/* accept matched ucast */
		drop_all_ucast = 0;
	}
	if (filters & BNX2X_ACCEPT_MULTICAST) {
		/* accept matched mcast */
		drop_all_mcast = 0;
	}
	if (filters & BNX2X_ACCEPT_ALL_UNICAST) {
		/* accept all ucast */
		drop_all_ucast = 0;
		accp_all_ucast = 1;
	}
	if (filters & BNX2X_ACCEPT_ALL_MULTICAST) {
		/* accept all mcast */
		drop_all_mcast = 0;
		accp_all_mcast = 1;
	}
	if (filters & BNX2X_ACCEPT_BROADCAST) {
		/* accept (all) bcast */
		drop_all_bcast = 0;
		accp_all_bcast = 1;
	}

	/* for each class, set or clear this client's bit in the mask */
	bp->mac_filters.ucast_drop_all = drop_all_ucast ?
		bp->mac_filters.ucast_drop_all | mask :
		bp->mac_filters.ucast_drop_all & ~mask;

	bp->mac_filters.mcast_drop_all = drop_all_mcast ?
		bp->mac_filters.mcast_drop_all | mask :
		bp->mac_filters.mcast_drop_all & ~mask;

	bp->mac_filters.bcast_drop_all = drop_all_bcast ?
		bp->mac_filters.bcast_drop_all | mask :
		bp->mac_filters.bcast_drop_all & ~mask;

	bp->mac_filters.ucast_accept_all = accp_all_ucast ?
		bp->mac_filters.ucast_accept_all | mask :
		bp->mac_filters.ucast_accept_all & ~mask;

	bp->mac_filters.mcast_accept_all = accp_all_mcast ?
		bp->mac_filters.mcast_accept_all | mask :
		bp->mac_filters.mcast_accept_all & ~mask;

	bp->mac_filters.bcast_accept_all = accp_all_bcast ?
		bp->mac_filters.bcast_accept_all | mask :
		bp->mac_filters.bcast_accept_all & ~mask;

	bp->mac_filters.unmatched_unicast = unmatched_unicast ?
		bp->mac_filters.unmatched_unicast | mask :
		bp->mac_filters.unmatched_unicast & ~mask;
}
2092
/* Program the per-function configuration into the storm RAMs: TPA/RSS
 * common config, function enable, statistics collection addresses and
 * the slow-path queue (SPQ).  All values come from the caller-prepared
 * bnx2x_func_init_params, gated by its func_flgs bits.
 */
void bnx2x_func_init(struct bnx2x *bp, struct bnx2x_func_init_params *p)
{
	if (FUNC_CONFIG(p->func_flgs)) {
		struct tstorm_eth_function_common_config tcfg = {0};

		/* tpa */
		if (p->func_flgs & FUNC_FLG_TPA)
			tcfg.config_flags |=
			TSTORM_ETH_FUNCTION_COMMON_CONFIG_ENABLE_TPA;

		/* set rss flags */
		if (p->func_flgs & FUNC_FLG_RSS) {
			u16 rss_flgs = (p->rss->mode <<
			TSTORM_ETH_FUNCTION_COMMON_CONFIG_RSS_MODE_SHIFT);

			if (p->rss->cap & RSS_IPV4_CAP)
				rss_flgs |= RSS_IPV4_CAP_MASK;
			if (p->rss->cap & RSS_IPV4_TCP_CAP)
				rss_flgs |= RSS_IPV4_TCP_CAP_MASK;
			if (p->rss->cap & RSS_IPV6_CAP)
				rss_flgs |= RSS_IPV6_CAP_MASK;
			if (p->rss->cap & RSS_IPV6_TCP_CAP)
				rss_flgs |= RSS_IPV6_TCP_CAP_MASK;

			tcfg.config_flags |= rss_flgs;
			tcfg.rss_result_mask = p->rss->result_mask;

		}

		storm_memset_func_cfg(bp, &tcfg, p->func_id);
	}

	/* Enable the function in the FW */
	storm_memset_vf_to_pf(bp, p->func_id, p->pf_id);
	storm_memset_func_en(bp, p->func_id, 1);

	/* statistics */
	if (p->func_flgs & FUNC_FLG_STATS) {
		struct stats_indication_flags stats_flags = {0};
		stats_flags.collect_eth = 1;

		/* the same stats buffer (fw_stat_map) serves all 4 storms */
		storm_memset_xstats_flags(bp, &stats_flags, p->func_id);
		storm_memset_xstats_addr(bp, p->fw_stat_map, p->func_id);

		storm_memset_tstats_flags(bp, &stats_flags, p->func_id);
		storm_memset_tstats_addr(bp, p->fw_stat_map, p->func_id);

		storm_memset_ustats_flags(bp, &stats_flags, p->func_id);
		storm_memset_ustats_addr(bp, p->fw_stat_map, p->func_id);

		storm_memset_cstats_flags(bp, &stats_flags, p->func_id);
		storm_memset_cstats_addr(bp, p->fw_stat_map, p->func_id);
	}

	/* spq */
	if (p->func_flgs & FUNC_FLG_SPQ) {
		storm_memset_spq_addr(bp, p->spq_map, p->func_id);
		REG_WR(bp, XSEM_REG_FAST_MEMORY +
		       XSTORM_SPQ_PROD_OFFSET(p->func_id), p->spq_prod);
	}
}
2154
2155static inline u16 bnx2x_get_cl_flags(struct bnx2x *bp,
2156 struct bnx2x_fastpath *fp)
2157{
2158 u16 flags = 0;
2159
2160 /* calculate queue flags */
2161 flags |= QUEUE_FLG_CACHE_ALIGN;
2162 flags |= QUEUE_FLG_HC;
2163 flags |= IS_E1HMF(bp) ? QUEUE_FLG_OV : 0;
2164
2165#ifdef BCM_VLAN
2166 flags |= QUEUE_FLG_VLAN;
2167 DP(NETIF_MSG_IFUP, "vlan removal enabled\n");
2168#endif
2169
2170 if (!fp->disable_tpa)
2171 flags |= QUEUE_FLG_TPA;
2172
2173 flags |= QUEUE_FLG_STATS;
2174
2175 return flags;
2176}
2177
/* Fill the Rx-queue init parameters and flow-control pause thresholds
 * for one fastpath, ahead of the client/queue setup ramrod.
 */
static void bnx2x_pf_rx_cl_prep(struct bnx2x *bp,
	struct bnx2x_fastpath *fp, struct rxq_pause_params *pause,
	struct bnx2x_rxq_init_params *rxq_init)
{
	u16 max_sge = 0;
	u16 sge_sz = 0;
	u16 tpa_agg_size = 0;

	/* calculate queue flags */
	u16 flags = bnx2x_get_cl_flags(bp, fp);

	if (!fp->disable_tpa) {
		pause->sge_th_hi = 250;
		pause->sge_th_lo = 150;
		/* cap TPA aggregation at min(8, MAX_SKB_FRAGS) SGE pages,
		   bounded to 64K-1 */
		tpa_agg_size = min_t(u32,
			(min_t(u32, 8, MAX_SKB_FRAGS) *
			SGE_PAGE_SIZE * PAGES_PER_SGE), 0xffff);
		/* SGE pages needed for an MTU-sized frame, rounded up to a
		   whole SGE */
		max_sge = SGE_PAGE_ALIGN(bp->dev->mtu) >>
			SGE_PAGE_SHIFT;
		max_sge = ((max_sge + PAGES_PER_SGE - 1) &
			  (~(PAGES_PER_SGE-1))) >> PAGES_PER_SGE_SHIFT;
		sge_sz = (u16)min_t(u32, SGE_PAGE_SIZE * PAGES_PER_SGE,
				    0xffff);
	}

	/* pause - not for e1 */
	if (!CHIP_IS_E1(bp)) {
		pause->bd_th_hi = 350;
		pause->bd_th_lo = 250;
		pause->rcq_th_hi = 350;
		pause->rcq_th_lo = 250;
		/* NOTE(review): this zeroes the sge_th_hi/lo values set in
		   the TPA branch above - confirm this ordering is intended */
		pause->sge_th_hi = 0;
		pause->sge_th_lo = 0;
		pause->pri_map = 1;
	}

	/* rxq setup */
	rxq_init->flags = flags;
	rxq_init->cxt = &bp->context.vcxt[fp->cid].eth;
	rxq_init->dscr_map = fp->rx_desc_mapping;
	rxq_init->sge_map = fp->rx_sge_mapping;
	rxq_init->rcq_map = fp->rx_comp_mapping;
	/* "next page" half of the RCQ lives one HW page after its base */
	rxq_init->rcq_np_map = fp->rx_comp_mapping + BCM_PAGE_SIZE;
	rxq_init->mtu = bp->dev->mtu;
	rxq_init->buf_sz = bp->rx_buf_size;
	rxq_init->cl_qzone_id = fp->cl_qzone_id;
	rxq_init->cl_id = fp->cl_id;
	rxq_init->spcl_id = fp->cl_id;
	rxq_init->stat_id = fp->cl_id;
	rxq_init->tpa_agg_sz = tpa_agg_size;
	rxq_init->sge_buf_sz = sge_sz;
	rxq_init->max_sges_pkt = max_sge;
	rxq_init->cache_line_log = BNX2X_RX_ALIGN_SHIFT;
	rxq_init->fw_sb_id = fp->fw_sb_id;

	rxq_init->sb_cq_index = U_SB_ETH_RX_CQ_INDEX;

	rxq_init->cid = HW_CID(bp, fp->cid);

	/* coalescing rate; presumably events/sec if rx_ticks is in usec */
	rxq_init->hc_rate = bp->rx_ticks ? (1000000 / bp->rx_ticks) : 0;
}
2239
2240static void bnx2x_pf_tx_cl_prep(struct bnx2x *bp,
2241 struct bnx2x_fastpath *fp, struct bnx2x_txq_init_params *txq_init)
2242{
2243 u16 flags = bnx2x_get_cl_flags(bp, fp);
2244
2245 txq_init->flags = flags;
2246 txq_init->cxt = &bp->context.vcxt[fp->cid].eth;
2247 txq_init->dscr_map = fp->tx_desc_mapping;
2248 txq_init->stat_id = fp->cl_id;
2249 txq_init->cid = HW_CID(bp, fp->cid);
2250 txq_init->sb_cq_index = C_SB_ETH_TX_CQ_INDEX;
2251 txq_init->traffic_type = LLFC_TRAFFIC_TYPE_NW;
2252 txq_init->fw_sb_id = fp->fw_sb_id;
2253 txq_init->hc_rate = bp->tx_ticks ? (1000000 / bp->tx_ticks) : 0;
2254}
2255
/**
 * bnx2x_pf_init - per-PF slow-path initialization.
 *
 * Programs the function-level setup (flags, RSS, SPQ/stats mappings),
 * initializes congestion management with a default 10G link rate,
 * disables Rx until the link comes up and initializes the event queue.
 */
void bnx2x_pf_init(struct bnx2x *bp)
{
	struct bnx2x_func_init_params func_init = {0};
	struct bnx2x_rss_params rss = {0};
	struct event_ring_data eq_data = { {0} };
	u16 flags;

	/* pf specific setups */
	if (!CHIP_IS_E1(bp))
		storm_memset_ov(bp, bp->e1hov, BP_FUNC(bp));

	/* function setup flags */
	flags = (FUNC_FLG_STATS | FUNC_FLG_LEADING | FUNC_FLG_SPQ);

	flags |= (bp->flags & TPA_ENABLE_FLAG) ? FUNC_FLG_TPA : 0;

	/**
	 * Although RSS is meaningless when there is a single HW queue we
	 * still need it enabled in order to have HW Rx hash generated.
	 *
	 * if (is_eth_multi(bp))
	 *	flags |= FUNC_FLG_RSS;
	 */

	/* function setup */
	if (flags & FUNC_FLG_RSS) {
		rss.cap = (RSS_IPV4_CAP | RSS_IPV4_TCP_CAP |
			   RSS_IPV6_CAP | RSS_IPV6_TCP_CAP);
		rss.mode = bp->multi_mode;
		rss.result_mask = MULTI_MASK;
		func_init.rss = &rss;
	}

	func_init.func_flgs = flags;
	func_init.pf_id = BP_FUNC(bp);
	func_init.func_id = BP_FUNC(bp);
	func_init.fw_stat_map = bnx2x_sp_mapping(bp, fw_stats);
	func_init.spq_map = bp->spq_mapping;
	func_init.spq_prod = bp->spq_prod_idx;

	bnx2x_func_init(bp, &func_init);

	memset(&(bp->cmng), 0, sizeof(struct cmng_struct_per_port));

	/*
	   Congestion management values depend on the link rate
	   There is no active link so initial link rate is set to 10 Gbps.
	   When the link comes up The congestion management values are
	   re-calculated according to the actual link rate.
	 */
	bp->link_vars.line_speed = SPEED_10000;
	bnx2x_cmng_fns_init(bp, true, bnx2x_get_cmng_fns_mode(bp));

	/* Only the PMF sets the HW */
	if (bp->port.pmf)
		storm_memset_cmng(bp, &bp->cmng, BP_PORT(bp));

	/* no rx until link is up */
	bp->rx_mode = BNX2X_RX_MODE_NONE;
	bnx2x_set_storm_rx_mode(bp);

	/* init Event Queue */
	eq_data.base_addr.hi = U64_HI(bp->eq_mapping);
	eq_data.base_addr.lo = U64_LO(bp->eq_mapping);
	eq_data.producer = bp->eq_prod;
	eq_data.index_id = HC_SP_INDEX_EQ_CONS;
	eq_data.sb_id = DEF_SB_ID;
	storm_memset_eq_data(bp, &eq_data, BP_FUNC(bp));
}
2325
2326
Eilon Greenstein2691d512009-08-12 08:22:08 +00002327static void bnx2x_e1h_disable(struct bnx2x *bp)
2328{
2329 int port = BP_PORT(bp);
Eilon Greenstein2691d512009-08-12 08:22:08 +00002330
2331 netif_tx_disable(bp->dev);
Eilon Greenstein2691d512009-08-12 08:22:08 +00002332
2333 REG_WR(bp, NIG_REG_LLH0_FUNC_EN + port*8, 0);
2334
Eilon Greenstein2691d512009-08-12 08:22:08 +00002335 netif_carrier_off(bp->dev);
2336}
2337
2338static void bnx2x_e1h_enable(struct bnx2x *bp)
2339{
2340 int port = BP_PORT(bp);
2341
2342 REG_WR(bp, NIG_REG_LLH0_FUNC_EN + port*8, 1);
2343
Eilon Greenstein2691d512009-08-12 08:22:08 +00002344 /* Tx queue should be only reenabled */
2345 netif_tx_wake_all_queues(bp->dev);
2346
Eilon Greenstein061bc702009-10-15 00:18:47 -07002347 /*
2348 * Should not call netif_carrier_on since it will be called if the link
2349 * is up when checking for link state
2350 */
Eilon Greenstein2691d512009-08-12 08:22:08 +00002351}
2352
/*
 * bnx2x_dcc_event - service a DCC notification from the MCP firmware.
 *
 * Handles function disable/enable and bandwidth re-allocation requests,
 * clearing each handled bit from @dcc_event, then acknowledges to the
 * MCP: OK when everything was handled, FAILURE if any bits remain.
 */
static void bnx2x_dcc_event(struct bnx2x *bp, u32 dcc_event)
{
	DP(BNX2X_MSG_MCP, "dcc_event 0x%x\n", dcc_event);

	if (dcc_event & DRV_STATUS_DCC_DISABLE_ENABLE_PF) {

		/*
		 * This is the only place besides the function initialization
		 * where the bp->flags can change so it is done without any
		 * locks
		 */
		if (bp->mf_config & FUNC_MF_CFG_FUNC_DISABLED) {
			DP(NETIF_MSG_IFDOWN, "mf_cfg function disabled\n");
			bp->flags |= MF_FUNC_DIS;

			bnx2x_e1h_disable(bp);
		} else {
			DP(NETIF_MSG_IFUP, "mf_cfg function enabled\n");
			bp->flags &= ~MF_FUNC_DIS;

			bnx2x_e1h_enable(bp);
		}
		dcc_event &= ~DRV_STATUS_DCC_DISABLE_ENABLE_PF;
	}
	if (dcc_event & DRV_STATUS_DCC_BANDWIDTH_ALLOCATION) {

		/* Recompute min/max rate config and propagate it to the HW */
		bnx2x_cmng_fns_init(bp, true, CMNG_FNS_MINMAX);
		bnx2x_link_sync_notify(bp);
		storm_memset_cmng(bp, &bp->cmng, BP_PORT(bp));
		dcc_event &= ~DRV_STATUS_DCC_BANDWIDTH_ALLOCATION;
	}

	/* Report results to MCP */
	if (dcc_event)
		bnx2x_fw_command(bp, DRV_MSG_CODE_DCC_FAILURE, 0);
	else
		bnx2x_fw_command(bp, DRV_MSG_CODE_DCC_OK, 0);
}
2391
Michael Chan28912902009-10-10 13:46:53 +00002392/* must be called under the spq lock */
2393static inline struct eth_spe *bnx2x_sp_get_next(struct bnx2x *bp)
2394{
2395 struct eth_spe *next_spe = bp->spq_prod_bd;
2396
2397 if (bp->spq_prod_bd == bp->spq_last_bd) {
2398 bp->spq_prod_bd = bp->spq;
2399 bp->spq_prod_idx = 0;
2400 DP(NETIF_MSG_TIMER, "end of spq\n");
2401 } else {
2402 bp->spq_prod_bd++;
2403 bp->spq_prod_idx++;
2404 }
2405 return next_spe;
2406}
2407
/* Publish the SPQ producer index to the XSTORM.
 * must be called under the spq lock
 */
static inline void bnx2x_sp_prod_update(struct bnx2x *bp)
{
	int func = BP_FUNC(bp);

	/* Make sure that BD data is updated before writing the producer */
	wmb();

	REG_WR16(bp, BAR_XSTRORM_INTMEM + XSTORM_SPQ_PROD_OFFSET(func),
		 bp->spq_prod_idx);
	/* order this MMIO write before any subsequent MMIO */
	mmiowb();
}
2420
/* the slow path queue is odd since completions arrive on the fastpath ring */
/**
 * bnx2x_sp_post - post a slow-path element (ramrod) on the SPQ.
 * @bp:		driver handle
 * @command:	ramrod command id
 * @cid:	connection id the ramrod applies to
 * @data_hi:	upper 32 bits of the ramrod data (update_data_addr)
 * @data_lo:	lower 32 bits of the ramrod data (update_data_addr)
 * @common:	non-zero for common (non-ETH connection) ramrods
 *
 * Returns 0 on success, -EBUSY if the SPQ is full (and panics),
 * -EIO if the driver already panicked (BNX2X_STOP_ON_ERROR).
 */
int bnx2x_sp_post(struct bnx2x *bp, int command, int cid,
		  u32 data_hi, u32 data_lo, int common)
{
	struct eth_spe *spe;
	u16 type;

#ifdef BNX2X_STOP_ON_ERROR
	if (unlikely(bp->panic))
		return -EIO;
#endif

	spin_lock_bh(&bp->spq_lock);

	if (!bp->spq_left) {
		BNX2X_ERR("BUG! SPQ ring full!\n");
		spin_unlock_bh(&bp->spq_lock);
		bnx2x_panic();
		return -EBUSY;
	}

	spe = bnx2x_sp_get_next(bp);

	/* CID needs port number to be encoded in it */
	spe->hdr.conn_and_cmd_data =
			cpu_to_le32((command << SPE_HDR_CMD_ID_SHIFT) |
				    HW_CID(bp, cid));

	if (common)
		/* Common ramrods:
		 * FUNC_START, FUNC_STOP, CFC_DEL, STATS, SET_MAC
		 * TRAFFIC_STOP, TRAFFIC_START
		 */
		type = (NONE_CONNECTION_TYPE << SPE_HDR_CONN_TYPE_SHIFT)
			& SPE_HDR_CONN_TYPE;
	else
		/* ETH ramrods: SETUP, HALT */
		type = (ETH_CONNECTION_TYPE << SPE_HDR_CONN_TYPE_SHIFT)
			& SPE_HDR_CONN_TYPE;

	type |= ((BP_FUNC(bp) << SPE_HDR_FUNCTION_ID_SHIFT) &
		 SPE_HDR_FUNCTION_ID);

	spe->hdr.type = cpu_to_le16(type);

	spe->data.update_data_addr.hi = cpu_to_le32(data_hi);
	spe->data.update_data_addr.lo = cpu_to_le32(data_lo);

	/* stats ramrod has its own slot on the spq */
	if (command != RAMROD_CMD_ID_COMMON_STAT_QUERY)
		/* It's ok if the actual decrement is issued towards the memory
		 * somewhere between the spin_lock and spin_unlock. Thus no
		 * more explicit memory barrier is needed.
		 */
		bp->spq_left--;

	DP(BNX2X_MSG_SP/*NETIF_MSG_TIMER*/,
	   "SPQE[%x] (%x:%x) command %d hw_cid %x data (%x:%x) "
	   "type(0x%x) left %x\n",
	   bp->spq_prod_idx, (u32)U64_HI(bp->spq_mapping),
	   (u32)(U64_LO(bp->spq_mapping) +
	   (void *)bp->spq_prod_bd - (void *)bp->spq), command,
	   HW_CID(bp, cid), data_hi, data_lo, type, bp->spq_left);

	bnx2x_sp_prod_update(bp);
	spin_unlock_bh(&bp->spq_lock);
	return 0;
}
2489
2490/* acquire split MCP access lock register */
Yitchak Gertner4a37fb62008-08-13 15:50:23 -07002491static int bnx2x_acquire_alr(struct bnx2x *bp)
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02002492{
Vladislav Zolotarov72fd0712010-04-19 01:13:12 +00002493 u32 j, val;
Eilon Greenstein34f80b02008-06-23 20:33:01 -07002494 int rc = 0;
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02002495
2496 might_sleep();
Vladislav Zolotarov72fd0712010-04-19 01:13:12 +00002497 for (j = 0; j < 1000; j++) {
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02002498 val = (1UL << 31);
2499 REG_WR(bp, GRCBASE_MCP + 0x9c, val);
2500 val = REG_RD(bp, GRCBASE_MCP + 0x9c);
2501 if (val & (1L << 31))
2502 break;
2503
2504 msleep(5);
2505 }
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02002506 if (!(val & (1L << 31))) {
Eilon Greenstein19680c42008-08-13 15:47:33 -07002507 BNX2X_ERR("Cannot acquire MCP access lock register\n");
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02002508 rc = -EBUSY;
2509 }
2510
2511 return rc;
2512}
2513
/* release split MCP access lock register */
static void bnx2x_release_alr(struct bnx2x *bp)
{
	/* writing 0 drops the lock bit taken in bnx2x_acquire_alr() */
	REG_WR(bp, GRCBASE_MCP + 0x9c, 0);
}
2519
Dmitry Kravkov523224a2010-10-06 03:23:26 +00002520#define BNX2X_DEF_SB_ATT_IDX 0x0001
2521#define BNX2X_DEF_SB_IDX 0x0002
2522
/* Sample the default status block indices written by the chip; latch any
 * that advanced and return a mask of BNX2X_DEF_SB_ATT_IDX/BNX2X_DEF_SB_IDX
 * bits telling the caller which ones need servicing.
 */
static inline u16 bnx2x_update_dsb_idx(struct bnx2x *bp)
{
	struct host_sp_status_block *def_sb = bp->def_status_blk;
	u16 rc = 0;

	barrier(); /* status block is written to by the chip */
	if (bp->def_att_idx != def_sb->atten_status_block.attn_bits_index) {
		bp->def_att_idx = def_sb->atten_status_block.attn_bits_index;
		rc |= BNX2X_DEF_SB_ATT_IDX;
	}

	if (bp->def_idx != def_sb->sp_sb.running_index) {
		bp->def_idx = def_sb->sp_sb.running_index;
		rc |= BNX2X_DEF_SB_IDX;
	}

	/* Do not reorder: indices reading should complete before handling */
	barrier();
	return rc;
}
2543
2544/*
2545 * slow path service functions
2546 */
2547
/*
 * Handle newly asserted attention bits: mask them in the AEU (under the
 * per-port HW lock), record them in bp->attn_state, service the
 * hard-wired sources (NIG/link, GPIOs, general attentions) and finally
 * mask them at the HC. The NIG interrupt mask is saved/restored around
 * the link-attention handling.
 */
static void bnx2x_attn_int_asserted(struct bnx2x *bp, u32 asserted)
{
	int port = BP_PORT(bp);
	u32 hc_addr = (HC_REG_COMMAND_REG + port*32 +
		       COMMAND_REG_ATTN_BITS_SET);
	u32 aeu_addr = port ? MISC_REG_AEU_MASK_ATTN_FUNC_1 :
			      MISC_REG_AEU_MASK_ATTN_FUNC_0;
	u32 nig_int_mask_addr = port ? NIG_REG_MASK_INTERRUPT_PORT1 :
				       NIG_REG_MASK_INTERRUPT_PORT0;
	u32 aeu_mask;
	u32 nig_mask = 0;

	/* a bit should never be asserted twice without being deasserted */
	if (bp->attn_state & asserted)
		BNX2X_ERR("IGU ERROR\n");

	bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_PORT0_ATT_MASK + port);
	aeu_mask = REG_RD(bp, aeu_addr);

	DP(NETIF_MSG_HW, "aeu_mask %x newly asserted %x\n",
	   aeu_mask, asserted);
	aeu_mask &= ~(asserted & 0x3ff);
	DP(NETIF_MSG_HW, "new mask %x\n", aeu_mask);

	REG_WR(bp, aeu_addr, aeu_mask);
	bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_PORT0_ATT_MASK + port);

	DP(NETIF_MSG_HW, "attn_state %x\n", bp->attn_state);
	bp->attn_state |= asserted;
	DP(NETIF_MSG_HW, "new state %x\n", bp->attn_state);

	if (asserted & ATTN_HARD_WIRED_MASK) {
		if (asserted & ATTN_NIG_FOR_FUNC) {

			bnx2x_acquire_phy_lock(bp);

			/* save nig interrupt mask */
			nig_mask = REG_RD(bp, nig_int_mask_addr);
			REG_WR(bp, nig_int_mask_addr, 0);

			bnx2x_link_attn(bp);

			/* handle unicore attn? */
		}
		if (asserted & ATTN_SW_TIMER_4_FUNC)
			DP(NETIF_MSG_HW, "ATTN_SW_TIMER_4_FUNC!\n");

		if (asserted & GPIO_2_FUNC)
			DP(NETIF_MSG_HW, "GPIO_2_FUNC!\n");

		if (asserted & GPIO_3_FUNC)
			DP(NETIF_MSG_HW, "GPIO_3_FUNC!\n");

		if (asserted & GPIO_4_FUNC)
			DP(NETIF_MSG_HW, "GPIO_4_FUNC!\n");

		/* general attentions 1-3 belong to port 0, 4-6 to port 1 */
		if (port == 0) {
			if (asserted & ATTN_GENERAL_ATTN_1) {
				DP(NETIF_MSG_HW, "ATTN_GENERAL_ATTN_1!\n");
				REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_1, 0x0);
			}
			if (asserted & ATTN_GENERAL_ATTN_2) {
				DP(NETIF_MSG_HW, "ATTN_GENERAL_ATTN_2!\n");
				REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_2, 0x0);
			}
			if (asserted & ATTN_GENERAL_ATTN_3) {
				DP(NETIF_MSG_HW, "ATTN_GENERAL_ATTN_3!\n");
				REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_3, 0x0);
			}
		} else {
			if (asserted & ATTN_GENERAL_ATTN_4) {
				DP(NETIF_MSG_HW, "ATTN_GENERAL_ATTN_4!\n");
				REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_4, 0x0);
			}
			if (asserted & ATTN_GENERAL_ATTN_5) {
				DP(NETIF_MSG_HW, "ATTN_GENERAL_ATTN_5!\n");
				REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_5, 0x0);
			}
			if (asserted & ATTN_GENERAL_ATTN_6) {
				DP(NETIF_MSG_HW, "ATTN_GENERAL_ATTN_6!\n");
				REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_6, 0x0);
			}
		}

	} /* if hardwired */

	DP(NETIF_MSG_HW, "about to mask 0x%08x at HC addr 0x%x\n",
	   asserted, hc_addr);
	REG_WR(bp, hc_addr, asserted);

	/* now set back the mask */
	if (asserted & ATTN_NIG_FOR_FUNC) {
		REG_WR(bp, nig_int_mask_addr, nig_mask);
		bnx2x_release_phy_lock(bp);
	}
}
2643
Eilon Greensteinfd4ef402009-07-21 05:47:27 +00002644static inline void bnx2x_fan_failure(struct bnx2x *bp)
2645{
2646 int port = BP_PORT(bp);
Yaniv Rosnerb7737c92010-09-07 11:40:54 +00002647 u32 ext_phy_config;
Eilon Greensteinfd4ef402009-07-21 05:47:27 +00002648 /* mark the failure */
Yaniv Rosnerb7737c92010-09-07 11:40:54 +00002649 ext_phy_config =
2650 SHMEM_RD(bp,
2651 dev_info.port_hw_config[port].external_phy_config);
2652
2653 ext_phy_config &= ~PORT_HW_CFG_XGXS_EXT_PHY_TYPE_MASK;
2654 ext_phy_config |= PORT_HW_CFG_XGXS_EXT_PHY_TYPE_FAILURE;
Eilon Greensteinfd4ef402009-07-21 05:47:27 +00002655 SHMEM_WR(bp, dev_info.port_hw_config[port].external_phy_config,
Yaniv Rosnerb7737c92010-09-07 11:40:54 +00002656 ext_phy_config);
Eilon Greensteinfd4ef402009-07-21 05:47:27 +00002657
2658 /* log the failure */
Vladislav Zolotarovcdaa7cb2010-04-19 01:13:57 +00002659 netdev_err(bp->dev, "Fan Failure on Network Controller has caused"
2660 " the driver to shutdown the card to prevent permanent"
2661 " damage. Please contact OEM Support for assistance\n");
Eilon Greensteinfd4ef402009-07-21 05:47:27 +00002662}
Eilon Greensteinab6ad5a2009-08-12 08:24:29 +00002663
/* Service HW attentions from AEU register set 0: SPIO5 (fan failure),
 * GPIO3 (module detect) and fatal block attentions.
 */
static inline void bnx2x_attn_int_deasserted0(struct bnx2x *bp, u32 attn)
{
	int port = BP_PORT(bp);
	int reg_offset;
	u32 val;

	reg_offset = (port ? MISC_REG_AEU_ENABLE1_FUNC_1_OUT_0 :
			     MISC_REG_AEU_ENABLE1_FUNC_0_OUT_0);

	if (attn & AEU_INPUTS_ATTN_BITS_SPIO5) {

		/* mask SPIO5 in the AEU enable register */
		val = REG_RD(bp, reg_offset);
		val &= ~AEU_INPUTS_ATTN_BITS_SPIO5;
		REG_WR(bp, reg_offset, val);

		BNX2X_ERR("SPIO5 hw attention\n");

		/* Fan failure attention */
		bnx2x_hw_reset_phy(&bp->link_params);
		bnx2x_fan_failure(bp);
	}

	if (attn & (AEU_INPUTS_ATTN_BITS_GPIO3_FUNCTION_0 |
		    AEU_INPUTS_ATTN_BITS_GPIO3_FUNCTION_1)) {
		bnx2x_acquire_phy_lock(bp);
		bnx2x_handle_module_detect_int(&bp->link_params);
		bnx2x_release_phy_lock(bp);
	}

	if (attn & HW_INTERRUT_ASSERT_SET_0) {

		/* mask the fatal bits before halting the driver */
		val = REG_RD(bp, reg_offset);
		val &= ~(attn & HW_INTERRUT_ASSERT_SET_0);
		REG_WR(bp, reg_offset, val);

		BNX2X_ERR("FATAL HW block attention set0 0x%x\n",
			  (u32)(attn & HW_INTERRUT_ASSERT_SET_0));
		bnx2x_panic();
	}
}
2704
/* Service HW attentions from AEU register set 1: doorbell queue errors
 * and fatal block attentions.
 */
static inline void bnx2x_attn_int_deasserted1(struct bnx2x *bp, u32 attn)
{
	u32 val;

	if (attn & AEU_INPUTS_ATTN_BITS_DOORBELLQ_HW_INTERRUPT) {

		val = REG_RD(bp, DORQ_REG_DORQ_INT_STS_CLR);
		BNX2X_ERR("DB hw attention 0x%x\n", val);
		/* DORQ discard attention */
		if (val & 0x2)
			BNX2X_ERR("FATAL error from DORQ\n");
	}

	if (attn & HW_INTERRUT_ASSERT_SET_1) {

		int port = BP_PORT(bp);
		int reg_offset;

		reg_offset = (port ? MISC_REG_AEU_ENABLE1_FUNC_1_OUT_1 :
				     MISC_REG_AEU_ENABLE1_FUNC_0_OUT_1);

		/* mask the fatal bits before halting the driver */
		val = REG_RD(bp, reg_offset);
		val &= ~(attn & HW_INTERRUT_ASSERT_SET_1);
		REG_WR(bp, reg_offset, val);

		BNX2X_ERR("FATAL HW block attention set1 0x%x\n",
			  (u32)(attn & HW_INTERRUT_ASSERT_SET_1));
		bnx2x_panic();
	}
}
2735
/* Service HW attentions from AEU register set 2: CFC and PXP errors
 * and fatal block attentions.
 */
static inline void bnx2x_attn_int_deasserted2(struct bnx2x *bp, u32 attn)
{
	u32 val;

	if (attn & AEU_INPUTS_ATTN_BITS_CFC_HW_INTERRUPT) {

		val = REG_RD(bp, CFC_REG_CFC_INT_STS_CLR);
		BNX2X_ERR("CFC hw attention 0x%x\n", val);
		/* CFC error attention */
		if (val & 0x2)
			BNX2X_ERR("FATAL error from CFC\n");
	}

	if (attn & AEU_INPUTS_ATTN_BITS_PXP_HW_INTERRUPT) {

		val = REG_RD(bp, PXP_REG_PXP_INT_STS_CLR_0);
		BNX2X_ERR("PXP hw attention 0x%x\n", val);
		/* RQ_USDMDP_FIFO_OVERFLOW */
		if (val & 0x18000)
			BNX2X_ERR("FATAL error from PXP\n");
	}

	if (attn & HW_INTERRUT_ASSERT_SET_2) {

		int port = BP_PORT(bp);
		int reg_offset;

		reg_offset = (port ? MISC_REG_AEU_ENABLE1_FUNC_1_OUT_2 :
				     MISC_REG_AEU_ENABLE1_FUNC_0_OUT_2);

		/* mask the fatal bits before halting the driver */
		val = REG_RD(bp, reg_offset);
		val &= ~(attn & HW_INTERRUT_ASSERT_SET_2);
		REG_WR(bp, reg_offset, val);

		BNX2X_ERR("FATAL HW block attention set2 0x%x\n",
			  (u32)(attn & HW_INTERRUT_ASSERT_SET_2));
		bnx2x_panic();
	}
}
2775
/* Service general/latched attentions: PMF link events (including DCC),
 * microcode/MCP asserts, and latched GRC timeout/reserved attentions.
 */
static inline void bnx2x_attn_int_deasserted3(struct bnx2x *bp, u32 attn)
{
	u32 val;

	if (attn & EVEREST_GEN_ATTN_IN_USE_MASK) {

		if (attn & BNX2X_PMF_LINK_ASSERT) {
			int func = BP_FUNC(bp);

			REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_12 + func*4, 0);
			/* refresh the MF configuration before acting on it */
			bp->mf_config =
				MF_CFG_RD(bp, func_mf_config[func].config);
			val = SHMEM_RD(bp, func_mb[func].drv_status);
			if (val & DRV_STATUS_DCC_EVENT_MASK)
				bnx2x_dcc_event(bp,
					    (val & DRV_STATUS_DCC_EVENT_MASK));
			bnx2x__link_status_update(bp);
			/* take over PMF duties if the MCP elected us */
			if ((bp->port.pmf == 0) && (val & DRV_STATUS_PMF))
				bnx2x_pmf_update(bp);

		} else if (attn & BNX2X_MC_ASSERT_BITS) {

			BNX2X_ERR("MC assert!\n");
			REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_10, 0);
			REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_9, 0);
			REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_8, 0);
			REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_7, 0);
			bnx2x_panic();

		} else if (attn & BNX2X_MCP_ASSERT) {

			BNX2X_ERR("MCP assert!\n");
			REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_11, 0);
			bnx2x_fw_dump(bp);

		} else
			BNX2X_ERR("Unknown HW assert! (attn 0x%x)\n", attn);
	}

	if (attn & EVEREST_LATCHED_ATTN_IN_USE_MASK) {
		BNX2X_ERR("LATCHED attention 0x%08x (masked)\n", attn);
		if (attn & BNX2X_GRC_TIMEOUT) {
			val = CHIP_IS_E1H(bp) ?
				REG_RD(bp, MISC_REG_GRC_TIMEOUT_ATTN) : 0;
			BNX2X_ERR("GRC time-out 0x%08x\n", val);
		}
		if (attn & BNX2X_GRC_RSV) {
			val = CHIP_IS_E1H(bp) ?
				REG_RD(bp, MISC_REG_GRC_RSV_ATTN) : 0;
			BNX2X_ERR("GRC reserved 0x%08x\n", val);
		}
		REG_WR(bp, MISC_REG_AEU_CLR_LATCH_SIGNAL, 0x7ff);
	}
}
2830
Vladislav Zolotarov72fd0712010-04-19 01:13:12 +00002831#define BNX2X_MISC_GEN_REG MISC_REG_GENERIC_POR_1
2832#define LOAD_COUNTER_BITS 16 /* Number of bits for load counter */
2833#define LOAD_COUNTER_MASK (((u32)0x1 << LOAD_COUNTER_BITS) - 1)
2834#define RESET_DONE_FLAG_MASK (~LOAD_COUNTER_MASK)
2835#define RESET_DONE_FLAG_SHIFT LOAD_COUNTER_BITS
2836#define CHIP_PARITY_SUPPORTED(bp) (CHIP_IS_E1(bp) || CHIP_IS_E1H(bp))
/*
 * Clear the reset-in-progress flag in the shared MISC generic register,
 * preserving the load counter in the low bits.
 * should be run under rtnl lock
 */
static inline void bnx2x_set_reset_done(struct bnx2x *bp)
{
	u32 val = REG_RD(bp, BNX2X_MISC_GEN_REG);
	val &= ~(1 << RESET_DONE_FLAG_SHIFT);
	REG_WR(bp, BNX2X_MISC_GEN_REG, val);
	barrier();
	mmiowb();
}
2848
2849/*
2850 * should be run under rtnl lock
2851 */
2852static inline void bnx2x_set_reset_in_progress(struct bnx2x *bp)
2853{
2854 u32 val = REG_RD(bp, BNX2X_MISC_GEN_REG);
2855 val |= (1 << 16);
2856 REG_WR(bp, BNX2X_MISC_GEN_REG, val);
2857 barrier();
2858 mmiowb();
2859}
2860
2861/*
2862 * should be run under rtnl lock
2863 */
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00002864bool bnx2x_reset_is_done(struct bnx2x *bp)
Vladislav Zolotarov72fd0712010-04-19 01:13:12 +00002865{
2866 u32 val = REG_RD(bp, BNX2X_MISC_GEN_REG);
2867 DP(NETIF_MSG_HW, "GEN_REG_VAL=0x%08x\n", val);
2868 return (val & RESET_DONE_FLAG_MASK) ? false : true;
2869}
2870
2871/*
2872 * should be run under rtnl lock
2873 */
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00002874inline void bnx2x_inc_load_cnt(struct bnx2x *bp)
Vladislav Zolotarov72fd0712010-04-19 01:13:12 +00002875{
2876 u32 val1, val = REG_RD(bp, BNX2X_MISC_GEN_REG);
2877
2878 DP(NETIF_MSG_HW, "Old GEN_REG_VAL=0x%08x\n", val);
2879
2880 val1 = ((val & LOAD_COUNTER_MASK) + 1) & LOAD_COUNTER_MASK;
2881 REG_WR(bp, BNX2X_MISC_GEN_REG, (val & RESET_DONE_FLAG_MASK) | val1);
2882 barrier();
2883 mmiowb();
2884}
2885
2886/*
2887 * should be run under rtnl lock
2888 */
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00002889u32 bnx2x_dec_load_cnt(struct bnx2x *bp)
Vladislav Zolotarov72fd0712010-04-19 01:13:12 +00002890{
2891 u32 val1, val = REG_RD(bp, BNX2X_MISC_GEN_REG);
2892
2893 DP(NETIF_MSG_HW, "Old GEN_REG_VAL=0x%08x\n", val);
2894
2895 val1 = ((val & LOAD_COUNTER_MASK) - 1) & LOAD_COUNTER_MASK;
2896 REG_WR(bp, BNX2X_MISC_GEN_REG, (val & RESET_DONE_FLAG_MASK) | val1);
2897 barrier();
2898 mmiowb();
2899
2900 return val1;
2901}
2902
/*
 * Read the shared load counter (low 16 bits of the MISC generic register).
 * should be run under rtnl lock
 */
static inline u32 bnx2x_get_load_cnt(struct bnx2x *bp)
{
	return REG_RD(bp, BNX2X_MISC_GEN_REG) & LOAD_COUNTER_MASK;
}
2910
/* Zero the shared load counter, preserving the reset flag bits */
static inline void bnx2x_clear_load_cnt(struct bnx2x *bp)
{
	u32 val = REG_RD(bp, BNX2X_MISC_GEN_REG);
	REG_WR(bp, BNX2X_MISC_GEN_REG, val & (~LOAD_COUNTER_MASK));
}
2916
/* Append a block name to a comma-separated list being built on the
 * current console line (continuation of a previous printk).
 */
static inline void _print_next_block(int idx, const char *blk)
{
	/* every entry but the first gets a leading separator */
	if (idx)
		pr_cont(", ");
	pr_cont("%s", blk);
}
2923
2924static inline int bnx2x_print_blocks_with_parity0(u32 sig, int par_num)
2925{
2926 int i = 0;
2927 u32 cur_bit = 0;
2928 for (i = 0; sig; i++) {
2929 cur_bit = ((u32)0x1 << i);
2930 if (sig & cur_bit) {
2931 switch (cur_bit) {
2932 case AEU_INPUTS_ATTN_BITS_BRB_PARITY_ERROR:
2933 _print_next_block(par_num++, "BRB");
2934 break;
2935 case AEU_INPUTS_ATTN_BITS_PARSER_PARITY_ERROR:
2936 _print_next_block(par_num++, "PARSER");
2937 break;
2938 case AEU_INPUTS_ATTN_BITS_TSDM_PARITY_ERROR:
2939 _print_next_block(par_num++, "TSDM");
2940 break;
2941 case AEU_INPUTS_ATTN_BITS_SEARCHER_PARITY_ERROR:
2942 _print_next_block(par_num++, "SEARCHER");
2943 break;
2944 case AEU_INPUTS_ATTN_BITS_TSEMI_PARITY_ERROR:
2945 _print_next_block(par_num++, "TSEMI");
2946 break;
2947 }
2948
2949 /* Clear the bit */
2950 sig &= ~cur_bit;
2951 }
2952 }
2953
2954 return par_num;
2955}
2956
2957static inline int bnx2x_print_blocks_with_parity1(u32 sig, int par_num)
2958{
2959 int i = 0;
2960 u32 cur_bit = 0;
2961 for (i = 0; sig; i++) {
2962 cur_bit = ((u32)0x1 << i);
2963 if (sig & cur_bit) {
2964 switch (cur_bit) {
2965 case AEU_INPUTS_ATTN_BITS_PBCLIENT_PARITY_ERROR:
2966 _print_next_block(par_num++, "PBCLIENT");
2967 break;
2968 case AEU_INPUTS_ATTN_BITS_QM_PARITY_ERROR:
2969 _print_next_block(par_num++, "QM");
2970 break;
2971 case AEU_INPUTS_ATTN_BITS_XSDM_PARITY_ERROR:
2972 _print_next_block(par_num++, "XSDM");
2973 break;
2974 case AEU_INPUTS_ATTN_BITS_XSEMI_PARITY_ERROR:
2975 _print_next_block(par_num++, "XSEMI");
2976 break;
2977 case AEU_INPUTS_ATTN_BITS_DOORBELLQ_PARITY_ERROR:
2978 _print_next_block(par_num++, "DOORBELLQ");
2979 break;
2980 case AEU_INPUTS_ATTN_BITS_VAUX_PCI_CORE_PARITY_ERROR:
2981 _print_next_block(par_num++, "VAUX PCI CORE");
2982 break;
2983 case AEU_INPUTS_ATTN_BITS_DEBUG_PARITY_ERROR:
2984 _print_next_block(par_num++, "DEBUG");
2985 break;
2986 case AEU_INPUTS_ATTN_BITS_USDM_PARITY_ERROR:
2987 _print_next_block(par_num++, "USDM");
2988 break;
2989 case AEU_INPUTS_ATTN_BITS_USEMI_PARITY_ERROR:
2990 _print_next_block(par_num++, "USEMI");
2991 break;
2992 case AEU_INPUTS_ATTN_BITS_UPB_PARITY_ERROR:
2993 _print_next_block(par_num++, "UPB");
2994 break;
2995 case AEU_INPUTS_ATTN_BITS_CSDM_PARITY_ERROR:
2996 _print_next_block(par_num++, "CSDM");
2997 break;
2998 }
2999
3000 /* Clear the bit */
3001 sig &= ~cur_bit;
3002 }
3003 }
3004
3005 return par_num;
3006}
3007
3008static inline int bnx2x_print_blocks_with_parity2(u32 sig, int par_num)
3009{
3010 int i = 0;
3011 u32 cur_bit = 0;
3012 for (i = 0; sig; i++) {
3013 cur_bit = ((u32)0x1 << i);
3014 if (sig & cur_bit) {
3015 switch (cur_bit) {
3016 case AEU_INPUTS_ATTN_BITS_CSEMI_PARITY_ERROR:
3017 _print_next_block(par_num++, "CSEMI");
3018 break;
3019 case AEU_INPUTS_ATTN_BITS_PXP_PARITY_ERROR:
3020 _print_next_block(par_num++, "PXP");
3021 break;
3022 case AEU_IN_ATTN_BITS_PXPPCICLOCKCLIENT_PARITY_ERROR:
3023 _print_next_block(par_num++,
3024 "PXPPCICLOCKCLIENT");
3025 break;
3026 case AEU_INPUTS_ATTN_BITS_CFC_PARITY_ERROR:
3027 _print_next_block(par_num++, "CFC");
3028 break;
3029 case AEU_INPUTS_ATTN_BITS_CDU_PARITY_ERROR:
3030 _print_next_block(par_num++, "CDU");
3031 break;
3032 case AEU_INPUTS_ATTN_BITS_IGU_PARITY_ERROR:
3033 _print_next_block(par_num++, "IGU");
3034 break;
3035 case AEU_INPUTS_ATTN_BITS_MISC_PARITY_ERROR:
3036 _print_next_block(par_num++, "MISC");
3037 break;
3038 }
3039
3040 /* Clear the bit */
3041 sig &= ~cur_bit;
3042 }
3043 }
3044
3045 return par_num;
3046}
3047
3048static inline int bnx2x_print_blocks_with_parity3(u32 sig, int par_num)
3049{
3050 int i = 0;
3051 u32 cur_bit = 0;
3052 for (i = 0; sig; i++) {
3053 cur_bit = ((u32)0x1 << i);
3054 if (sig & cur_bit) {
3055 switch (cur_bit) {
3056 case AEU_INPUTS_ATTN_BITS_MCP_LATCHED_ROM_PARITY:
3057 _print_next_block(par_num++, "MCP ROM");
3058 break;
3059 case AEU_INPUTS_ATTN_BITS_MCP_LATCHED_UMP_RX_PARITY:
3060 _print_next_block(par_num++, "MCP UMP RX");
3061 break;
3062 case AEU_INPUTS_ATTN_BITS_MCP_LATCHED_UMP_TX_PARITY:
3063 _print_next_block(par_num++, "MCP UMP TX");
3064 break;
3065 case AEU_INPUTS_ATTN_BITS_MCP_LATCHED_SCPAD_PARITY:
3066 _print_next_block(par_num++, "MCP SCPAD");
3067 break;
3068 }
3069
3070 /* Clear the bit */
3071 sig &= ~cur_bit;
3072 }
3073 }
3074
3075 return par_num;
3076}
3077
3078static inline bool bnx2x_parity_attn(struct bnx2x *bp, u32 sig0, u32 sig1,
3079 u32 sig2, u32 sig3)
3080{
3081 if ((sig0 & HW_PRTY_ASSERT_SET_0) || (sig1 & HW_PRTY_ASSERT_SET_1) ||
3082 (sig2 & HW_PRTY_ASSERT_SET_2) || (sig3 & HW_PRTY_ASSERT_SET_3)) {
3083 int par_num = 0;
3084 DP(NETIF_MSG_HW, "Was parity error: HW block parity attention: "
3085 "[0]:0x%08x [1]:0x%08x "
3086 "[2]:0x%08x [3]:0x%08x\n",
3087 sig0 & HW_PRTY_ASSERT_SET_0,
3088 sig1 & HW_PRTY_ASSERT_SET_1,
3089 sig2 & HW_PRTY_ASSERT_SET_2,
3090 sig3 & HW_PRTY_ASSERT_SET_3);
3091 printk(KERN_ERR"%s: Parity errors detected in blocks: ",
3092 bp->dev->name);
3093 par_num = bnx2x_print_blocks_with_parity0(
3094 sig0 & HW_PRTY_ASSERT_SET_0, par_num);
3095 par_num = bnx2x_print_blocks_with_parity1(
3096 sig1 & HW_PRTY_ASSERT_SET_1, par_num);
3097 par_num = bnx2x_print_blocks_with_parity2(
3098 sig2 & HW_PRTY_ASSERT_SET_2, par_num);
3099 par_num = bnx2x_print_blocks_with_parity3(
3100 sig3 & HW_PRTY_ASSERT_SET_3, par_num);
3101 printk("\n");
3102 return true;
3103 } else
3104 return false;
3105}
3106
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00003107bool bnx2x_chk_parity_attn(struct bnx2x *bp)
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02003108{
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02003109 struct attn_route attn;
Vladislav Zolotarov72fd0712010-04-19 01:13:12 +00003110 int port = BP_PORT(bp);
3111
3112 attn.sig[0] = REG_RD(bp,
3113 MISC_REG_AEU_AFTER_INVERT_1_FUNC_0 +
3114 port*4);
3115 attn.sig[1] = REG_RD(bp,
3116 MISC_REG_AEU_AFTER_INVERT_2_FUNC_0 +
3117 port*4);
3118 attn.sig[2] = REG_RD(bp,
3119 MISC_REG_AEU_AFTER_INVERT_3_FUNC_0 +
3120 port*4);
3121 attn.sig[3] = REG_RD(bp,
3122 MISC_REG_AEU_AFTER_INVERT_4_FUNC_0 +
3123 port*4);
3124
3125 return bnx2x_parity_attn(bp, attn.sig[0], attn.sig[1], attn.sig[2],
3126 attn.sig[3]);
3127}
3128
/* Handle deasserted attention bits: dispatch each deasserted dynamic group
 * to the per-signature handlers, then re-enable the corresponding bits in
 * the HC and AEU mask registers and update the soft attention state.
 * Runs under the ALR (attention lock register) to serialize against the
 * MCP and the other port.
 */
static void bnx2x_attn_int_deasserted(struct bnx2x *bp, u32 deasserted)
{
	struct attn_route attn, *group_mask;
	int port = BP_PORT(bp);
	int index;
	u32 reg_addr;
	u32 val;
	u32 aeu_mask;

	/* need to take HW lock because MCP or other port might also
	   try to handle this event */
	bnx2x_acquire_alr(bp);

	if (bnx2x_chk_parity_attn(bp)) {
		/* Parity detected: kick off the recovery flow instead of
		 * normal attention handling.
		 */
		bp->recovery_state = BNX2X_RECOVERY_INIT;
		bnx2x_set_reset_in_progress(bp);
		schedule_delayed_work(&bp->reset_task, 0);
		/* Disable HW interrupts */
		bnx2x_int_disable(bp);
		bnx2x_release_alr(bp);
		/* In case of parity errors don't handle attentions so that
		 * other function would "see" parity errors.
		 */
		return;
	}

	/* snapshot the four after-invert attention signature registers */
	attn.sig[0] = REG_RD(bp, MISC_REG_AEU_AFTER_INVERT_1_FUNC_0 + port*4);
	attn.sig[1] = REG_RD(bp, MISC_REG_AEU_AFTER_INVERT_2_FUNC_0 + port*4);
	attn.sig[2] = REG_RD(bp, MISC_REG_AEU_AFTER_INVERT_3_FUNC_0 + port*4);
	attn.sig[3] = REG_RD(bp, MISC_REG_AEU_AFTER_INVERT_4_FUNC_0 + port*4);
	DP(NETIF_MSG_HW, "attn: %08x %08x %08x %08x\n",
	   attn.sig[0], attn.sig[1], attn.sig[2], attn.sig[3]);

	/* for every deasserted group, handle the bits its mask covers */
	for (index = 0; index < MAX_DYNAMIC_ATTN_GRPS; index++) {
		if (deasserted & (1 << index)) {
			group_mask = &bp->attn_group[index];

			DP(NETIF_MSG_HW, "group[%d]: %08x %08x %08x %08x\n",
			   index, group_mask->sig[0], group_mask->sig[1],
			   group_mask->sig[2], group_mask->sig[3]);

			bnx2x_attn_int_deasserted3(bp,
					attn.sig[3] & group_mask->sig[3]);
			bnx2x_attn_int_deasserted1(bp,
					attn.sig[1] & group_mask->sig[1]);
			bnx2x_attn_int_deasserted2(bp,
					attn.sig[2] & group_mask->sig[2]);
			bnx2x_attn_int_deasserted0(bp,
					attn.sig[0] & group_mask->sig[0]);
		}
	}

	bnx2x_release_alr(bp);

	/* clear the handled attention bits in the HC */
	reg_addr = (HC_REG_COMMAND_REG + port*32 + COMMAND_REG_ATTN_BITS_CLR);

	val = ~deasserted;
	DP(NETIF_MSG_HW, "about to mask 0x%08x at HC addr 0x%x\n",
	   val, reg_addr);
	REG_WR(bp, reg_addr, val);

	if (~bp->attn_state & deasserted)
		BNX2X_ERR("IGU ERROR\n");

	reg_addr = port ? MISC_REG_AEU_MASK_ATTN_FUNC_1 :
			  MISC_REG_AEU_MASK_ATTN_FUNC_0;

	/* re-enable the deasserted lines in the AEU mask (RMW under HW lock
	 * since the register is shared with the MCP)
	 */
	bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_PORT0_ATT_MASK + port);
	aeu_mask = REG_RD(bp, reg_addr);

	DP(NETIF_MSG_HW, "aeu_mask %x newly deasserted %x\n",
	   aeu_mask, deasserted);
	aeu_mask |= (deasserted & 0x3ff);
	DP(NETIF_MSG_HW, "new mask %x\n", aeu_mask);

	REG_WR(bp, reg_addr, aeu_mask);
	bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_PORT0_ATT_MASK + port);

	/* finally update the driver's soft copy of the attention state */
	DP(NETIF_MSG_HW, "attn_state %x\n", bp->attn_state);
	bp->attn_state &= ~deasserted;
	DP(NETIF_MSG_HW, "new state %x\n", bp->attn_state);
}
3211
3212static void bnx2x_attn_int(struct bnx2x *bp)
3213{
3214 /* read local copy of bits */
Eilon Greenstein68d59482009-01-14 21:27:36 -08003215 u32 attn_bits = le32_to_cpu(bp->def_status_blk->atten_status_block.
3216 attn_bits);
3217 u32 attn_ack = le32_to_cpu(bp->def_status_blk->atten_status_block.
3218 attn_bits_ack);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02003219 u32 attn_state = bp->attn_state;
3220
3221 /* look for changed bits */
3222 u32 asserted = attn_bits & ~attn_ack & ~attn_state;
3223 u32 deasserted = ~attn_bits & attn_ack & attn_state;
3224
3225 DP(NETIF_MSG_HW,
3226 "attn_bits %x attn_ack %x asserted %x deasserted %x\n",
3227 attn_bits, attn_ack, asserted, deasserted);
3228
3229 if (~(attn_bits ^ attn_ack) & (attn_bits ^ attn_state))
Eilon Greenstein34f80b02008-06-23 20:33:01 -07003230 BNX2X_ERR("BAD attention state\n");
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02003231
3232 /* handle bits that were raised */
3233 if (asserted)
3234 bnx2x_attn_int_asserted(bp, asserted);
3235
3236 if (deasserted)
3237 bnx2x_attn_int_deasserted(bp, deasserted);
3238}
3239
/* Publish the new event-queue producer value to STORM memory so FW sees
 * how far the driver has consumed the EQ.
 */
static inline void bnx2x_update_eq_prod(struct bnx2x *bp, u16 prod)
{
	/* No memory barriers */
	storm_memset_eq_prod(bp, prod, BP_FUNC(bp));
	mmiowb(); /* keep prod updates ordered */
}
3246
#ifdef BCM_CNIC
/* Route a CFC-delete EQ completion to CNIC when the CID falls inside the
 * CNIC range.  Returns 0 when consumed here, 1 when the CID belongs to the
 * L2 driver and the caller must handle it.
 */
static int bnx2x_cnic_handle_cfc_del(struct bnx2x *bp, u32 cid,
				     union event_ring_elem *elem)
{
	/* CIDs below the CNIC starting CID (or with CNIC not set up at
	 * all) are L2 CIDs - not ours */
	if (!bp->cnic_eth_dev.starting_cid ||
	    cid < bp->cnic_eth_dev.starting_cid)
		return 1;

	DP(BNX2X_MSG_SP, "got delete ramrod for CNIC CID %d\n", cid);

	if (unlikely(elem->message.data.cfc_del_event.error)) {
		BNX2X_ERR("got delete ramrod for CNIC CID %d with error!\n",
			  cid);
		bnx2x_panic_dump(bp);
	}
	/* signal CNIC that its context deletion completed */
	bnx2x_cnic_cfc_comp(bp, cid);
	return 0;
}
#endif
3266
3267static void bnx2x_eq_int(struct bnx2x *bp)
3268{
3269 u16 hw_cons, sw_cons, sw_prod;
3270 union event_ring_elem *elem;
3271 u32 cid;
3272 u8 opcode;
3273 int spqe_cnt = 0;
3274
3275 hw_cons = le16_to_cpu(*bp->eq_cons_sb);
3276
3277 /* The hw_cos range is 1-255, 257 - the sw_cons range is 0-254, 256.
3278 * when we get the the next-page we nned to adjust so the loop
3279 * condition below will be met. The next element is the size of a
3280 * regular element and hence incrementing by 1
3281 */
3282 if ((hw_cons & EQ_DESC_MAX_PAGE) == EQ_DESC_MAX_PAGE)
3283 hw_cons++;
3284
3285 /* This function may never run in parralel with itself for a
3286 * specific bp, thus there is no need in "paired" read memory
3287 * barrier here.
3288 */
3289 sw_cons = bp->eq_cons;
3290 sw_prod = bp->eq_prod;
3291
3292 DP(BNX2X_MSG_SP, "EQ: hw_cons %u sw_cons %u bp->spq_left %u\n",
3293 hw_cons, sw_cons, bp->spq_left);
3294
3295 for (; sw_cons != hw_cons;
3296 sw_prod = NEXT_EQ_IDX(sw_prod), sw_cons = NEXT_EQ_IDX(sw_cons)) {
3297
3298
3299 elem = &bp->eq_ring[EQ_DESC(sw_cons)];
3300
3301 cid = SW_CID(elem->message.data.cfc_del_event.cid);
3302 opcode = elem->message.opcode;
3303
3304
3305 /* handle eq element */
3306 switch (opcode) {
3307 case EVENT_RING_OPCODE_STAT_QUERY:
3308 DP(NETIF_MSG_TIMER, "got statistics comp event\n");
3309 /* nothing to do with stats comp */
3310 continue;
3311
3312 case EVENT_RING_OPCODE_CFC_DEL:
3313 /* handle according to cid range */
3314 /*
3315 * we may want to verify here that the bp state is
3316 * HALTING
3317 */
3318 DP(NETIF_MSG_IFDOWN,
3319 "got delete ramrod for MULTI[%d]\n", cid);
3320#ifdef BCM_CNIC
3321 if (!bnx2x_cnic_handle_cfc_del(bp, cid, elem))
3322 goto next_spqe;
3323#endif
3324 bnx2x_fp(bp, cid, state) =
3325 BNX2X_FP_STATE_CLOSED;
3326
3327 goto next_spqe;
3328 }
3329
3330 switch (opcode | bp->state) {
3331 case (EVENT_RING_OPCODE_FUNCTION_START |
3332 BNX2X_STATE_OPENING_WAIT4_PORT):
3333 DP(NETIF_MSG_IFUP, "got setup ramrod\n");
3334 bp->state = BNX2X_STATE_FUNC_STARTED;
3335 break;
3336
3337 case (EVENT_RING_OPCODE_FUNCTION_STOP |
3338 BNX2X_STATE_CLOSING_WAIT4_HALT):
3339 DP(NETIF_MSG_IFDOWN, "got halt ramrod\n");
3340 bp->state = BNX2X_STATE_CLOSING_WAIT4_UNLOAD;
3341 break;
3342
3343 case (EVENT_RING_OPCODE_SET_MAC | BNX2X_STATE_OPEN):
3344 case (EVENT_RING_OPCODE_SET_MAC | BNX2X_STATE_DIAG):
3345 DP(NETIF_MSG_IFUP, "got set mac ramrod\n");
3346 bp->set_mac_pending = 0;
3347 break;
3348
3349 case (EVENT_RING_OPCODE_SET_MAC |
3350 BNX2X_STATE_CLOSING_WAIT4_HALT):
3351 DP(NETIF_MSG_IFDOWN, "got (un)set mac ramrod\n");
3352 bp->set_mac_pending = 0;
3353 break;
3354 default:
3355 /* unknown event log error and continue */
3356 BNX2X_ERR("Unknown EQ event %d\n",
3357 elem->message.opcode);
3358 }
3359next_spqe:
3360 spqe_cnt++;
3361 } /* for */
3362
3363 bp->spq_left++;
3364
3365 bp->eq_cons = sw_cons;
3366 bp->eq_prod = sw_prod;
3367 /* Make sure that above mem writes were issued towards the memory */
3368 smp_wmb();
3369
3370 /* update producer */
3371 bnx2x_update_eq_prod(bp, bp->eq_prod);
3372}
3373
/* Slow-path work handler: runs HW attention handling and EQ processing for
 * the default status block, then re-enables the default SB interrupt via
 * the IGU.  Scheduled from the slow-path ISR.
 */
static void bnx2x_sp_task(struct work_struct *work)
{
	struct bnx2x *bp = container_of(work, struct bnx2x, sp_task.work);
	u16 status;

	/* Return here if interrupt is disabled */
	if (unlikely(atomic_read(&bp->intr_sem) != 0)) {
		DP(NETIF_MSG_INTR, "called but intr_sem not 0, returning\n");
		return;
	}

	/* status holds a bit per default-SB index that moved */
	status = bnx2x_update_dsb_idx(bp);
/*	if (status == 0)				     */
/*		BNX2X_ERR("spurious slowpath interrupt!\n"); */

	DP(NETIF_MSG_INTR, "got a slowpath interrupt (status 0x%x)\n", status);

	/* HW attentions */
	if (status & BNX2X_DEF_SB_ATT_IDX) {
		bnx2x_attn_int(bp);
		status &= ~BNX2X_DEF_SB_ATT_IDX;
	}

	/* SP events: STAT_QUERY and others */
	if (status & BNX2X_DEF_SB_IDX) {

		/* Handle EQ completions */
		bnx2x_eq_int(bp);

		/* ack the SP index without re-enabling interrupts yet */
		bnx2x_ack_sb(bp, bp->igu_dsb_id, USTORM_ID,
			le16_to_cpu(bp->def_idx), IGU_INT_NOP, 1);

		status &= ~BNX2X_DEF_SB_IDX;
	}

	if (unlikely(status))
		DP(NETIF_MSG_INTR, "got an unknown interrupt! (status 0x%x)\n",
		   status);

	/* ack the attention index and re-enable the default SB interrupt */
	bnx2x_ack_sb(bp, bp->igu_dsb_id, ATTENTION_ID,
	     le16_to_cpu(bp->def_att_idx), IGU_INT_ENABLE, 1);
}
3416
/* MSI-X slow-path interrupt handler: disables the default SB interrupt,
 * notifies CNIC (if present) and defers the real work to bnx2x_sp_task().
 */
irqreturn_t bnx2x_msix_sp_int(int irq, void *dev_instance)
{
	struct net_device *dev = dev_instance;
	struct bnx2x *bp = netdev_priv(dev);

	/* Return here if interrupt is disabled */
	if (unlikely(atomic_read(&bp->intr_sem) != 0)) {
		DP(NETIF_MSG_INTR, "called but intr_sem not 0, returning\n");
		return IRQ_HANDLED;
	}

	/* mask further default-SB interrupts until the work handler runs */
	bnx2x_ack_sb(bp, bp->igu_dsb_id, USTORM_ID, 0,
		     IGU_INT_DISABLE, 0);

#ifdef BNX2X_STOP_ON_ERROR
	if (unlikely(bp->panic))
		return IRQ_HANDLED;
#endif

#ifdef BCM_CNIC
	{
		struct cnic_ops *c_ops;

		/* CNIC may be unregistering concurrently - use RCU */
		rcu_read_lock();
		c_ops = rcu_dereference(bp->cnic_ops);
		if (c_ops)
			c_ops->cnic_handler(bp->cnic_data, NULL);
		rcu_read_unlock();
	}
#endif
	queue_delayed_work(bnx2x_wq, &bp->sp_task, 0);

	return IRQ_HANDLED;
}
3451
3452/* end of slow path */
3453
/* Periodic driver timer: services the rings in poll mode, exchanges the
 * driver<->MCP heartbeat pulse, triggers a statistics update and re-arms
 * itself with bp->current_interval.
 */
static void bnx2x_timer(unsigned long data)
{
	struct bnx2x *bp = (struct bnx2x *) data;

	if (!netif_running(bp->dev))
		return;

	/* interrupts disabled - skip the work but keep the timer alive */
	if (atomic_read(&bp->intr_sem) != 0)
		goto timer_restart;

	if (poll) {
		/* poll mode: service queue 0 from the timer context */
		struct bnx2x_fastpath *fp = &bp->fp[0];
		int rc;

		bnx2x_tx_int(fp);
		rc = bnx2x_rx_int(fp, 1000);
	}

	if (!BP_NOMCP(bp)) {
		int func = BP_FUNC(bp);
		u32 drv_pulse;
		u32 mcp_pulse;

		/* advance and publish the driver heartbeat sequence */
		++bp->fw_drv_pulse_wr_seq;
		bp->fw_drv_pulse_wr_seq &= DRV_PULSE_SEQ_MASK;
		/* TBD - add SYSTEM_TIME */
		drv_pulse = bp->fw_drv_pulse_wr_seq;
		SHMEM_WR(bp, func_mb[func].drv_pulse_mb, drv_pulse);

		mcp_pulse = (SHMEM_RD(bp, func_mb[func].mcp_pulse_mb) &
			     MCP_PULSE_SEQ_MASK);
		/* The delta between driver pulse and mcp response
		 * should be 1 (before mcp response) or 0 (after mcp response)
		 */
		if ((drv_pulse != mcp_pulse) &&
		    (drv_pulse != ((mcp_pulse + 1) & MCP_PULSE_SEQ_MASK))) {
			/* someone lost a heartbeat... */
			BNX2X_ERR("drv_pulse (0x%x) != mcp_pulse (0x%x)\n",
				  drv_pulse, mcp_pulse);
		}
	}

	if (bp->state == BNX2X_STATE_OPEN)
		bnx2x_stats_handle(bp, STATS_EVENT_UPDATE);

timer_restart:
	mod_timer(&bp->timer, jiffies + bp->current_interval);
}
3502
3503/* end of Statistics */
3504
3505/* nic init */
3506
3507/*
3508 * nic init service functions
3509 */
3510
Dmitry Kravkov523224a2010-10-06 03:23:26 +00003511static inline void bnx2x_fill(struct bnx2x *bp, u32 addr, int fill, u32 len)
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02003512{
Dmitry Kravkov523224a2010-10-06 03:23:26 +00003513 u32 i;
3514 if (!(len%4) && !(addr%4))
3515 for (i = 0; i < len; i += 4)
3516 REG_WR(bp, addr + i, fill);
3517 else
3518 for (i = 0; i < len; i++)
3519 REG_WR8(bp, addr + i, fill);
Eilon Greenstein34f80b02008-06-23 20:33:01 -07003520
Eilon Greenstein34f80b02008-06-23 20:33:01 -07003521}
3522
Dmitry Kravkov523224a2010-10-06 03:23:26 +00003523/* helper: writes FP SP data to FW - data_size in dwords */
3524static inline void bnx2x_wr_fp_sb_data(struct bnx2x *bp,
3525 int fw_sb_id,
3526 u32 *sb_data_p,
3527 u32 data_size)
Eilon Greenstein34f80b02008-06-23 20:33:01 -07003528{
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02003529 int index;
Dmitry Kravkov523224a2010-10-06 03:23:26 +00003530 for (index = 0; index < data_size; index++)
3531 REG_WR(bp, BAR_CSTRORM_INTMEM +
3532 CSTORM_STATUS_BLOCK_DATA_OFFSET(fw_sb_id) +
3533 sizeof(u32)*index,
3534 *(sb_data_p + index));
Eilon Greenstein34f80b02008-06-23 20:33:01 -07003535}
3536
Dmitry Kravkov523224a2010-10-06 03:23:26 +00003537static inline void bnx2x_zero_fp_sb(struct bnx2x *bp, int fw_sb_id)
3538{
3539 u32 *sb_data_p;
3540 u32 data_size = 0;
3541 struct hc_status_block_data_e1x sb_data_e1x;
3542
3543 /* disable the function first */
3544 memset(&sb_data_e1x, 0,
3545 sizeof(struct hc_status_block_data_e1x));
3546 sb_data_e1x.common.p_func.pf_id = HC_FUNCTION_DISABLED;
3547 sb_data_e1x.common.p_func.vf_id = HC_FUNCTION_DISABLED;
3548 sb_data_e1x.common.p_func.vf_valid = false;
3549 sb_data_p = (u32 *)&sb_data_e1x;
3550 data_size = sizeof(struct hc_status_block_data_e1x)/sizeof(u32);
3551
3552 bnx2x_wr_fp_sb_data(bp, fw_sb_id, sb_data_p, data_size);
3553
3554 bnx2x_fill(bp, BAR_CSTRORM_INTMEM +
3555 CSTORM_STATUS_BLOCK_OFFSET(fw_sb_id), 0,
3556 CSTORM_STATUS_BLOCK_SIZE);
3557 bnx2x_fill(bp, BAR_CSTRORM_INTMEM +
3558 CSTORM_SYNC_BLOCK_OFFSET(fw_sb_id), 0,
3559 CSTORM_SYNC_BLOCK_SIZE);
3560}
3561
3562/* helper: writes SP SB data to FW */
3563static inline void bnx2x_wr_sp_sb_data(struct bnx2x *bp,
3564 struct hc_sp_status_block_data *sp_sb_data)
Eilon Greenstein34f80b02008-06-23 20:33:01 -07003565{
3566 int func = BP_FUNC(bp);
Dmitry Kravkov523224a2010-10-06 03:23:26 +00003567 int i;
3568 for (i = 0; i < sizeof(struct hc_sp_status_block_data)/sizeof(u32); i++)
3569 REG_WR(bp, BAR_CSTRORM_INTMEM +
3570 CSTORM_SP_STATUS_BLOCK_DATA_OFFSET(func) +
3571 i*sizeof(u32),
3572 *((u32 *)sp_sb_data + i));
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02003573}
3574
Dmitry Kravkov523224a2010-10-06 03:23:26 +00003575static inline void bnx2x_zero_sp_sb(struct bnx2x *bp)
3576{
3577 int func = BP_FUNC(bp);
3578 struct hc_sp_status_block_data sp_sb_data;
3579 memset(&sp_sb_data, 0, sizeof(struct hc_sp_status_block_data));
3580
3581 sp_sb_data.p_func.pf_id = HC_FUNCTION_DISABLED;
3582 sp_sb_data.p_func.vf_id = HC_FUNCTION_DISABLED;
3583 sp_sb_data.p_func.vf_valid = false;
3584
3585 bnx2x_wr_sp_sb_data(bp, &sp_sb_data);
3586
3587 bnx2x_fill(bp, BAR_CSTRORM_INTMEM +
3588 CSTORM_SP_STATUS_BLOCK_OFFSET(func), 0,
3589 CSTORM_SP_STATUS_BLOCK_SIZE);
3590 bnx2x_fill(bp, BAR_CSTRORM_INTMEM +
3591 CSTORM_SP_SYNC_BLOCK_OFFSET(func), 0,
3592 CSTORM_SP_SYNC_BLOCK_SIZE);
3593
3594}
3595
3596
/* Initialize one state machine of a non-default SB: bind it to its IGU SB
 * and segment, and effectively disable its timeout (max timer values).
 */
static inline
void bnx2x_setup_ndsb_state_machine(struct hc_status_block_sm *hc_sm,
				    int igu_sb_id, int igu_seg_id)
{
	hc_sm->igu_sb_id = igu_sb_id;
	hc_sm->igu_seg_id = igu_seg_id;
	hc_sm->timer_value = 0xFF;
	hc_sm->time_to_expire = 0xFFFFFFFF;
}
3606
/* Initialize a fast-path status block in FW: clear any previous state,
 * fill the SB data (owning PF, host DMA address, RX/TX state machines
 * bound to 'igu_sb_id') and write it to CSTORM memory.
 * NOTE(review): 'vfid' and 'vf_valid' parameters are currently unused -
 * vf_id/vf_valid are hard-coded below; presumably reserved for SR-IOV.
 */
void bnx2x_init_sb(struct bnx2x *bp, dma_addr_t mapping, int vfid,
			  u8 vf_valid, int fw_sb_id, int igu_sb_id)
{
	int igu_seg_id;

	struct hc_status_block_data_e1x sb_data_e1x;
	struct hc_status_block_sm  *hc_sm_p;
	struct hc_index_data *hc_index_p;
	int data_size;
	u32 *sb_data_p;

	igu_seg_id = HC_SEG_ACCESS_NORM;

	/* wipe any stale SB state before programming the new one */
	bnx2x_zero_fp_sb(bp, fw_sb_id);

	memset(&sb_data_e1x, 0,
	       sizeof(struct hc_status_block_data_e1x));
	sb_data_e1x.common.p_func.pf_id = BP_FUNC(bp);
	sb_data_e1x.common.p_func.vf_id = 0xff;
	sb_data_e1x.common.p_func.vf_valid = false;
	sb_data_e1x.common.p_func.vnic_id = BP_E1HVN(bp);
	sb_data_e1x.common.same_igu_sb_1b = true;
	sb_data_e1x.common.host_sb_addr.hi = U64_HI(mapping);
	sb_data_e1x.common.host_sb_addr.lo = U64_LO(mapping);
	hc_sm_p = sb_data_e1x.common.state_machine;
	hc_index_p = sb_data_e1x.index_data;
	data_size = sizeof(struct hc_status_block_data_e1x)/sizeof(u32);


	bnx2x_setup_ndsb_state_machine(&hc_sm_p[SM_RX_ID],
				       igu_sb_id, igu_seg_id);
	bnx2x_setup_ndsb_state_machine(&hc_sm_p[SM_TX_ID],
				       igu_sb_id, igu_seg_id);

	DP(NETIF_MSG_HW, "Init FW SB %d\n", fw_sb_id);

	/* write indices to HW */
	bnx2x_wr_fp_sb_data(bp, fw_sb_id, sb_data_p, data_size);
}
3647
3648static void bnx2x_update_coalesce_sb_index(struct bnx2x *bp, u16 fw_sb_id,
3649 u8 sb_index, u8 disable, u16 usec)
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02003650{
Eilon Greenstein34f80b02008-06-23 20:33:01 -07003651 int port = BP_PORT(bp);
Dmitry Kravkov523224a2010-10-06 03:23:26 +00003652 u8 ticks = usec / BNX2X_BTR;
3653
3654 storm_memset_hc_timeout(bp, port, fw_sb_id, sb_index, ticks);
3655
3656 disable = disable ? 1 : (usec ? 0 : 1);
3657 storm_memset_hc_disable(bp, port, fw_sb_id, sb_index, disable);
3658}
3659
/* Program both coalescing indices (RX CQ and TX CQ) of one SB with the
 * given timeouts, leaving both indices enabled.
 */
static void bnx2x_update_coalesce_sb(struct bnx2x *bp, u16 fw_sb_id,
				     u16 tx_usec, u16 rx_usec)
{
	bnx2x_update_coalesce_sb_index(bp, fw_sb_id, U_SB_ETH_RX_CQ_INDEX,
				       false, rx_usec);
	bnx2x_update_coalesce_sb_index(bp, fw_sb_id, C_SB_ETH_TX_CQ_INDEX,
				       false, tx_usec);
}
/* Initialize the default (slow-path) status block: load the dynamic
 * attention group masks from the AEU, point HW at the attention section
 * of the SB, program the SP SB data in CSTORM and enable the default SB
 * interrupt in the IGU.
 */
static void bnx2x_init_def_sb(struct bnx2x *bp)
{
	struct host_sp_status_block *def_sb = bp->def_status_blk;
	dma_addr_t mapping = bp->def_status_blk_mapping;
	int igu_sp_sb_index;
	int igu_seg_id;
	int port = BP_PORT(bp);
	int func = BP_FUNC(bp);
	int reg_offset;
	u64 section;
	int index;
	struct hc_sp_status_block_data sp_sb_data;
	memset(&sp_sb_data, 0, sizeof(struct hc_sp_status_block_data));

	igu_sp_sb_index = DEF_SB_IGU_ID;
	igu_seg_id = HC_SEG_ACCESS_DEF;

	/* ATTN */
	section = ((u64)mapping) + offsetof(struct host_sp_status_block,
					    atten_status_block);
	def_sb->atten_status_block.status_block_id = igu_sp_sb_index;

	bp->attn_state = 0;

	/* read the per-group attention masks from the AEU enable regs */
	reg_offset = (port ? MISC_REG_AEU_ENABLE1_FUNC_1_OUT_0 :
			     MISC_REG_AEU_ENABLE1_FUNC_0_OUT_0);
	for (index = 0; index < MAX_DYNAMIC_ATTN_GRPS; index++) {
		int sindex;
		/* take care of sig[0]..sig[3] */
		for (sindex = 0; sindex < 4; sindex++)
			bp->attn_group[index].sig[sindex] =
			   REG_RD(bp, reg_offset + sindex*0x4 + 0x10*index);
	}

	/* tell the HC where the attention section lives in host memory */
	reg_offset = (port ? HC_REG_ATTN_MSG1_ADDR_L :
			     HC_REG_ATTN_MSG0_ADDR_L);
	REG_WR(bp, reg_offset, U64_LO(section));
	REG_WR(bp, reg_offset + 4, U64_HI(section));

	section = ((u64)mapping) + offsetof(struct host_sp_status_block,
					    sp_sb);

	/* clear any stale SP SB state before programming the new one */
	bnx2x_zero_sp_sb(bp);

	sp_sb_data.host_sb_addr.lo = U64_LO(section);
	sp_sb_data.host_sb_addr.hi = U64_HI(section);
	sp_sb_data.igu_sb_id = igu_sp_sb_index;
	sp_sb_data.igu_seg_id = igu_seg_id;
	sp_sb_data.p_func.pf_id = func;
	sp_sb_data.p_func.vnic_id = BP_E1HVN(bp);
	sp_sb_data.p_func.vf_id = 0xff;

	bnx2x_wr_sp_sb_data(bp, &sp_sb_data);

	bp->stats_pending = 0;
	bp->set_mac_pending = 0;

	/* enable the default SB interrupt */
	bnx2x_ack_sb(bp, bp->igu_dsb_id, USTORM_ID, 0, IGU_INT_ENABLE, 0);
}
3727
/* Propagate the current rx/tx coalescing tick settings (bp->rx_ticks,
 * bp->tx_ticks) to the status block of every queue.
 */
void bnx2x_update_coalesce(struct bnx2x *bp)
{
	int i;

	for_each_queue(bp, i)
		bnx2x_update_coalesce_sb(bp, bp->fp[i].fw_sb_id,
					 bp->rx_ticks, bp->tx_ticks);
}
3736
/* Initialize the slow-path queue (SPQ) ring state: all credits available,
 * producer at the first BD, last-BD pointer at the end of the ring.
 */
static void bnx2x_init_sp_ring(struct bnx2x *bp)
{
	spin_lock_init(&bp->spq_lock);

	bp->spq_left = MAX_SPQ_PENDING;
	bp->spq_prod_idx = 0;
	bp->dsb_sp_prod = BNX2X_SP_DSB_INDEX;
	bp->spq_prod_bd = bp->spq;
	bp->spq_last_bd = bp->spq_prod_bd + MAX_SP_DESC_CNT;
}
3747
Dmitry Kravkov523224a2010-10-06 03:23:26 +00003748static void bnx2x_init_eq_ring(struct bnx2x *bp)
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02003749{
3750 int i;
Dmitry Kravkov523224a2010-10-06 03:23:26 +00003751 for (i = 1; i <= NUM_EQ_PAGES; i++) {
3752 union event_ring_elem *elem =
3753 &bp->eq_ring[EQ_DESC_CNT_PAGE * i - 1];
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02003754
Dmitry Kravkov523224a2010-10-06 03:23:26 +00003755 elem->next_page.addr.hi =
3756 cpu_to_le32(U64_HI(bp->eq_mapping +
3757 BCM_PAGE_SIZE * (i % NUM_EQ_PAGES)));
3758 elem->next_page.addr.lo =
3759 cpu_to_le32(U64_LO(bp->eq_mapping +
3760 BCM_PAGE_SIZE*(i % NUM_EQ_PAGES)));
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02003761 }
Dmitry Kravkov523224a2010-10-06 03:23:26 +00003762 bp->eq_cons = 0;
3763 bp->eq_prod = NUM_EQ_DESC;
3764 bp->eq_cons_sb = BNX2X_EQ_INDEX;
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02003765}
3766
3767static void bnx2x_init_ind_table(struct bnx2x *bp)
3768{
Eilon Greenstein26c8fa42009-01-14 21:29:55 -08003769 int func = BP_FUNC(bp);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02003770 int i;
3771
Eilon Greenstein555f6c72009-02-12 08:36:11 +00003772 if (bp->multi_mode == ETH_RSS_MODE_DISABLED)
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02003773 return;
3774
Eilon Greenstein555f6c72009-02-12 08:36:11 +00003775 DP(NETIF_MSG_IFUP,
3776 "Initializing indirection table multi_mode %d\n", bp->multi_mode);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02003777 for (i = 0; i < TSTORM_INDIRECTION_TABLE_SIZE; i++)
Eilon Greenstein34f80b02008-06-23 20:33:01 -07003778 REG_WR8(bp, BAR_TSTRORM_INTMEM +
Eilon Greenstein26c8fa42009-01-14 21:29:55 -08003779 TSTORM_INDIRECTION_TABLE_OFFSET(func) + i,
Vladislav Zolotarov54b9dda2009-11-16 06:05:58 +00003780 bp->fp->cl_id + (i % bp->num_queues));
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02003781}
3782
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00003783void bnx2x_set_storm_rx_mode(struct bnx2x *bp)
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02003784{
Eilon Greenstein34f80b02008-06-23 20:33:01 -07003785 int mode = bp->rx_mode;
Dmitry Kravkov523224a2010-10-06 03:23:26 +00003786 u16 cl_id;
3787
Eilon Greenstein581ce432009-07-29 00:20:04 +00003788 /* All but management unicast packets should pass to the host as well */
3789 u32 llh_mask =
3790 NIG_LLH0_BRB1_DRV_MASK_REG_LLH0_BRB1_DRV_MASK_BRCST |
3791 NIG_LLH0_BRB1_DRV_MASK_REG_LLH0_BRB1_DRV_MASK_MLCST |
3792 NIG_LLH0_BRB1_DRV_MASK_REG_LLH0_BRB1_DRV_MASK_VLAN |
3793 NIG_LLH0_BRB1_DRV_MASK_REG_LLH0_BRB1_DRV_MASK_NO_VLAN;
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02003794
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02003795 switch (mode) {
3796 case BNX2X_RX_MODE_NONE: /* no Rx */
Dmitry Kravkov523224a2010-10-06 03:23:26 +00003797 cl_id = BP_L_ID(bp);
3798 bnx2x_rxq_set_mac_filters(bp, cl_id, BNX2X_ACCEPT_NONE);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02003799 break;
Eilon Greenstein356e2382009-02-12 08:38:32 +00003800
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02003801 case BNX2X_RX_MODE_NORMAL:
Dmitry Kravkov523224a2010-10-06 03:23:26 +00003802 cl_id = BP_L_ID(bp);
3803 bnx2x_rxq_set_mac_filters(bp, cl_id,
3804 BNX2X_ACCEPT_UNICAST |
3805 BNX2X_ACCEPT_BROADCAST |
3806 BNX2X_ACCEPT_MULTICAST);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02003807 break;
Eilon Greenstein356e2382009-02-12 08:38:32 +00003808
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02003809 case BNX2X_RX_MODE_ALLMULTI:
Dmitry Kravkov523224a2010-10-06 03:23:26 +00003810 cl_id = BP_L_ID(bp);
3811 bnx2x_rxq_set_mac_filters(bp, cl_id,
3812 BNX2X_ACCEPT_UNICAST |
3813 BNX2X_ACCEPT_BROADCAST |
3814 BNX2X_ACCEPT_ALL_MULTICAST);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02003815 break;
Eilon Greenstein356e2382009-02-12 08:38:32 +00003816
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02003817 case BNX2X_RX_MODE_PROMISC:
Dmitry Kravkov523224a2010-10-06 03:23:26 +00003818 cl_id = BP_L_ID(bp);
3819 bnx2x_rxq_set_mac_filters(bp, cl_id, BNX2X_PROMISCUOUS_MODE);
3820
Eilon Greenstein581ce432009-07-29 00:20:04 +00003821 /* pass management unicast packets as well */
3822 llh_mask |= NIG_LLH0_BRB1_DRV_MASK_REG_LLH0_BRB1_DRV_MASK_UNCST;
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02003823 break;
Eilon Greenstein356e2382009-02-12 08:38:32 +00003824
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02003825 default:
Eilon Greenstein34f80b02008-06-23 20:33:01 -07003826 BNX2X_ERR("BAD rx mode (%d)\n", mode);
3827 break;
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02003828 }
3829
Eilon Greenstein581ce432009-07-29 00:20:04 +00003830 REG_WR(bp,
Dmitry Kravkov523224a2010-10-06 03:23:26 +00003831 BP_PORT(bp) ? NIG_REG_LLH1_BRB1_DRV_MASK :
3832 NIG_REG_LLH0_BRB1_DRV_MASK,
Eilon Greenstein581ce432009-07-29 00:20:04 +00003833 llh_mask);
3834
Dmitry Kravkov523224a2010-10-06 03:23:26 +00003835 DP(NETIF_MSG_IFUP, "rx mode %d\n"
3836 "drop_ucast 0x%x\ndrop_mcast 0x%x\ndrop_bcast 0x%x\n"
3837 "accp_ucast 0x%x\naccp_mcast 0x%x\naccp_bcast 0x%x\n", mode,
3838 bp->mac_filters.ucast_drop_all,
3839 bp->mac_filters.mcast_drop_all,
3840 bp->mac_filters.bcast_drop_all,
3841 bp->mac_filters.ucast_accept_all,
3842 bp->mac_filters.mcast_accept_all,
3843 bp->mac_filters.bcast_accept_all
3844 );
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02003845
Dmitry Kravkov523224a2010-10-06 03:23:26 +00003846 storm_memset_mac_filters(bp, &bp->mac_filters, BP_FUNC(bp));
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02003847}
3848
Eilon Greenstein471de712008-08-13 15:49:35 -07003849static void bnx2x_init_internal_common(struct bnx2x *bp)
3850{
3851 int i;
3852
Dmitry Kravkov523224a2010-10-06 03:23:26 +00003853 if (!CHIP_IS_E1(bp)) {
3854
3855 /* xstorm needs to know whether to add ovlan to packets or not,
3856 * in switch-independent we'll write 0 to here... */
3857 REG_WR8(bp, BAR_XSTRORM_INTMEM + XSTORM_FUNCTION_MODE_OFFSET,
3858 bp->e1hmf);
3859 REG_WR8(bp, BAR_TSTRORM_INTMEM + TSTORM_FUNCTION_MODE_OFFSET,
3860 bp->e1hmf);
3861 REG_WR8(bp, BAR_CSTRORM_INTMEM + CSTORM_FUNCTION_MODE_OFFSET,
3862 bp->e1hmf);
3863 REG_WR8(bp, BAR_USTRORM_INTMEM + USTORM_FUNCTION_MODE_OFFSET,
3864 bp->e1hmf);
3865 }
3866
Eilon Greenstein471de712008-08-13 15:49:35 -07003867 /* Zero this manually as its initialization is
3868 currently missing in the initTool */
3869 for (i = 0; i < (USTORM_AGG_DATA_SIZE >> 2); i++)
3870 REG_WR(bp, BAR_USTRORM_INTMEM +
3871 USTORM_AGG_DATA_OFFSET + i * 4, 0);
3872}
3873
/* Per-port internal memory initialization: intentionally empty — there is
 * currently nothing to program at port scope.  Kept as a placeholder so
 * the COMMON/PORT/FUNCTION stages of bnx2x_init_internal() stay symmetric. */
static void bnx2x_init_internal_port(struct bnx2x *bp)
{
	/* port */
}
3878
Eilon Greenstein471de712008-08-13 15:49:35 -07003879static void bnx2x_init_internal(struct bnx2x *bp, u32 load_code)
3880{
3881 switch (load_code) {
3882 case FW_MSG_CODE_DRV_LOAD_COMMON:
3883 bnx2x_init_internal_common(bp);
3884 /* no break */
3885
3886 case FW_MSG_CODE_DRV_LOAD_PORT:
3887 bnx2x_init_internal_port(bp);
3888 /* no break */
3889
3890 case FW_MSG_CODE_DRV_LOAD_FUNCTION:
Dmitry Kravkov523224a2010-10-06 03:23:26 +00003891 /* internal memory per function is
3892 initialized inside bnx2x_pf_init */
Eilon Greenstein471de712008-08-13 15:49:35 -07003893 break;
3894
3895 default:
3896 BNX2X_ERR("Unknown load_code (0x%x) from MCP\n", load_code);
3897 break;
3898 }
3899}
3900
/* Initialize fastpath queue fp_idx: derive its client/CID/status-block
 * identifiers from the function-local base ids, set up its status block
 * in the chip, and sync the driver's copy of the SB indices. */
static void bnx2x_init_fp_sb(struct bnx2x *bp, int fp_idx)
{
	struct bnx2x_fastpath *fp = &bp->fp[fp_idx];

	fp->state = BNX2X_FP_STATE_CLOSED;

	/* queue index doubles as the connection id */
	fp->index = fp->cid = fp_idx;
	fp->cl_id = BP_L_ID(bp) + fp_idx;
	/* FW/IGU status block ids are offset past the CNIC-reserved ones */
	fp->fw_sb_id = bp->base_fw_ndsb + fp->cl_id + CNIC_CONTEXT_USE;
	fp->igu_sb_id = bp->igu_base_sb + fp_idx + CNIC_CONTEXT_USE;
	/* qZone id equals to FW (per path) client id */
	fp->cl_qzone_id = fp->cl_id +
			   BP_PORT(bp)*(ETH_MAX_RX_CLIENTS_E1H);
	/* init shortcut to the ustorm rx-producers location for this client */
	fp->ustorm_rx_prods_offset =
		USTORM_RX_PRODS_E1X_OFFSET(BP_PORT(bp), fp->cl_id);
	/* Setup SB indices */
	fp->rx_cons_sb = BNX2X_RX_SB_INDEX;
	fp->tx_cons_sb = BNX2X_TX_SB_INDEX;

	DP(NETIF_MSG_IFUP, "queue[%d]: bnx2x_init_sb(%p,%p) "
			   "cl_id %d fw_sb %d igu_sb %d\n",
		   fp_idx, bp, fp->status_blk.e1x_sb, fp->cl_id, fp->fw_sb_id,
		   fp->igu_sb_id);
	bnx2x_init_sb(bp, fp->status_blk_mapping, BNX2X_VF_ID_INVALID, false,
		      fp->fw_sb_id, fp->igu_sb_id);

	bnx2x_update_fpsb_idx(fp);
}
3930
/**
 * bnx2x_nic_init - per-load data-path initialization of the NIC
 * @bp:		driver instance
 * @load_code:	MCP load response (COMMON/PORT/FUNCTION), forwarded to
 *		bnx2x_init_internal()
 *
 * The ordering below matters: fastpath (and CNIC) status blocks first,
 * then the default SB, rings and internal memories; interrupts are only
 * enabled after everything is in place and flushed with a full barrier.
 */
void bnx2x_nic_init(struct bnx2x *bp, u32 load_code)
{
	int i;

	for_each_queue(bp, i)
		bnx2x_init_fp_sb(bp, i);
#ifdef BCM_CNIC

	bnx2x_init_sb(bp, bp->cnic_sb_mapping,
		      BNX2X_VF_ID_INVALID, false,
		      CNIC_SB_ID(bp), CNIC_IGU_SB_ID(bp));

#endif

	/* ensure status block indices were read */
	rmb();

	bnx2x_init_def_sb(bp);
	bnx2x_update_dsb_idx(bp);
	bnx2x_init_rx_rings(bp);
	bnx2x_init_tx_rings(bp);
	bnx2x_init_sp_ring(bp);
	bnx2x_init_eq_ring(bp);
	bnx2x_init_internal(bp, load_code);
	bnx2x_pf_init(bp);
	bnx2x_init_ind_table(bp);
	bnx2x_stats_init(bp);

	/* At this point, we are ready for interrupts */
	atomic_set(&bp->intr_sem, 0);

	/* flush all before enabling interrupts */
	mb();
	mmiowb();

	bnx2x_int_enable(bp);

	/* Check for SPIO5 (fan failure attention) already asserted */
	bnx2x_attn_int_deasserted0(bp,
		REG_RD(bp, MISC_REG_AEU_AFTER_INVERT_1_FUNC_0 + BP_PORT(bp)*4) &
				   AEU_INPUTS_ATTN_BITS_SPIO5);
}
3973
3974/* end of nic init */
3975
3976/*
3977 * gzip service functions
3978 */
3979
3980static int bnx2x_gunzip_init(struct bnx2x *bp)
3981{
FUJITA Tomonori1a983142010-04-04 01:51:03 +00003982 bp->gunzip_buf = dma_alloc_coherent(&bp->pdev->dev, FW_BUF_SIZE,
3983 &bp->gunzip_mapping, GFP_KERNEL);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02003984 if (bp->gunzip_buf == NULL)
3985 goto gunzip_nomem1;
3986
3987 bp->strm = kmalloc(sizeof(*bp->strm), GFP_KERNEL);
3988 if (bp->strm == NULL)
3989 goto gunzip_nomem2;
3990
3991 bp->strm->workspace = kmalloc(zlib_inflate_workspacesize(),
3992 GFP_KERNEL);
3993 if (bp->strm->workspace == NULL)
3994 goto gunzip_nomem3;
3995
3996 return 0;
3997
3998gunzip_nomem3:
3999 kfree(bp->strm);
4000 bp->strm = NULL;
4001
4002gunzip_nomem2:
FUJITA Tomonori1a983142010-04-04 01:51:03 +00004003 dma_free_coherent(&bp->pdev->dev, FW_BUF_SIZE, bp->gunzip_buf,
4004 bp->gunzip_mapping);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02004005 bp->gunzip_buf = NULL;
4006
4007gunzip_nomem1:
Vladislav Zolotarovcdaa7cb2010-04-19 01:13:57 +00004008 netdev_err(bp->dev, "Cannot allocate firmware buffer for"
4009 " un-compression\n");
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02004010 return -ENOMEM;
4011}
4012
4013static void bnx2x_gunzip_end(struct bnx2x *bp)
4014{
4015 kfree(bp->strm->workspace);
4016
4017 kfree(bp->strm);
4018 bp->strm = NULL;
4019
4020 if (bp->gunzip_buf) {
FUJITA Tomonori1a983142010-04-04 01:51:03 +00004021 dma_free_coherent(&bp->pdev->dev, FW_BUF_SIZE, bp->gunzip_buf,
4022 bp->gunzip_mapping);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02004023 bp->gunzip_buf = NULL;
4024 }
4025}
4026
Vladislav Zolotarov94a78b72009-04-27 03:27:43 -07004027static int bnx2x_gunzip(struct bnx2x *bp, const u8 *zbuf, int len)
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02004028{
4029 int n, rc;
4030
4031 /* check gzip header */
Vladislav Zolotarov94a78b72009-04-27 03:27:43 -07004032 if ((zbuf[0] != 0x1f) || (zbuf[1] != 0x8b) || (zbuf[2] != Z_DEFLATED)) {
4033 BNX2X_ERR("Bad gzip header\n");
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02004034 return -EINVAL;
Vladislav Zolotarov94a78b72009-04-27 03:27:43 -07004035 }
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02004036
4037 n = 10;
4038
Eilon Greenstein34f80b02008-06-23 20:33:01 -07004039#define FNAME 0x8
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02004040
4041 if (zbuf[3] & FNAME)
4042 while ((zbuf[n++] != 0) && (n < len));
4043
Vladislav Zolotarov94a78b72009-04-27 03:27:43 -07004044 bp->strm->next_in = (typeof(bp->strm->next_in))zbuf + n;
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02004045 bp->strm->avail_in = len - n;
4046 bp->strm->next_out = bp->gunzip_buf;
4047 bp->strm->avail_out = FW_BUF_SIZE;
4048
4049 rc = zlib_inflateInit2(bp->strm, -MAX_WBITS);
4050 if (rc != Z_OK)
4051 return rc;
4052
4053 rc = zlib_inflate(bp->strm, Z_FINISH);
4054 if ((rc != Z_OK) && (rc != Z_STREAM_END))
Joe Perches7995c642010-02-17 15:01:52 +00004055 netdev_err(bp->dev, "Firmware decompression error: %s\n",
4056 bp->strm->msg);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02004057
4058 bp->gunzip_outlen = (FW_BUF_SIZE - bp->strm->avail_out);
4059 if (bp->gunzip_outlen & 0x3)
Vladislav Zolotarovcdaa7cb2010-04-19 01:13:57 +00004060 netdev_err(bp->dev, "Firmware decompression error:"
4061 " gunzip_outlen (%d) not aligned\n",
4062 bp->gunzip_outlen);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02004063 bp->gunzip_outlen >>= 2;
4064
4065 zlib_inflateEnd(bp->strm);
4066
4067 if (rc == Z_STREAM_END)
4068 return 0;
4069
4070 return rc;
4071}
4072
4073/* nic load/unload */
4074
4075/*
Eilon Greenstein34f80b02008-06-23 20:33:01 -07004076 * General service functions
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02004077 */
4078
4079/* send a NIG loopback debug packet */
4080static void bnx2x_lb_pckt(struct bnx2x *bp)
4081{
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02004082 u32 wb_write[3];
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02004083
4084 /* Ethernet source and destination addresses */
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02004085 wb_write[0] = 0x55555555;
4086 wb_write[1] = 0x55555555;
Eilon Greenstein34f80b02008-06-23 20:33:01 -07004087 wb_write[2] = 0x20; /* SOP */
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02004088 REG_WR_DMAE(bp, NIG_REG_DEBUG_PACKET_LB, wb_write, 3);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02004089
4090 /* NON-IP protocol */
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02004091 wb_write[0] = 0x09000000;
4092 wb_write[1] = 0x55555555;
Eilon Greenstein34f80b02008-06-23 20:33:01 -07004093 wb_write[2] = 0x10; /* EOP, eop_bvalid = 0 */
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02004094 REG_WR_DMAE(bp, NIG_REG_DEBUG_PACKET_LB, wb_write, 3);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02004095}
4096
/* Some of the internal memories are not directly readable from the
 * driver; to test them we send debug packets through the NIG loopback
 * path and check the NIG/PRS packet counters after each step.
 * Returns 0 on success or a negative step number (-1..-4) identifying
 * which check failed. */
static int bnx2x_int_mem_test(struct bnx2x *bp)
{
	int factor;
	int count, i;
	u32 val = 0;

	/* stretch the poll timeouts on slow FPGA/emulation platforms */
	if (CHIP_REV_IS_FPGA(bp))
		factor = 120;
	else if (CHIP_REV_IS_EMUL(bp))
		factor = 200;
	else
		factor = 1;

	/* Disable inputs of parser neighbor blocks */
	REG_WR(bp, TSDM_REG_ENABLE_IN1, 0x0);
	REG_WR(bp, TCM_REG_PRS_IFEN, 0x0);
	REG_WR(bp, CFC_REG_DEBUG0, 0x1);
	REG_WR(bp, NIG_REG_PRS_REQ_IN_EN, 0x0);

	/* Write 0 to parser credits for CFC search request */
	REG_WR(bp, PRS_REG_CFC_SEARCH_INITIAL_CREDIT, 0x0);

	/* send Ethernet packet */
	bnx2x_lb_pckt(bp);

	/* TODO do i reset NIG statistic? */
	/* Wait until NIG register shows 1 packet of size 0x10 */
	count = 1000 * factor;
	while (count) {

		bnx2x_read_dmae(bp, NIG_REG_STAT2_BRB_OCTET, 2);
		val = *bnx2x_sp(bp, wb_data[0]);
		if (val == 0x10)
			break;

		msleep(10);
		count--;
	}
	if (val != 0x10) {
		BNX2X_ERR("NIG timeout val = 0x%x\n", val);
		return -1;
	}

	/* Wait until PRS register shows 1 packet */
	count = 1000 * factor;
	while (count) {
		val = REG_RD(bp, PRS_REG_NUM_OF_PACKETS);
		if (val == 1)
			break;

		msleep(10);
		count--;
	}
	if (val != 0x1) {
		BNX2X_ERR("PRS timeout val = 0x%x\n", val);
		return -2;
	}

	/* Reset and init BRB, PRS */
	REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_CLEAR, 0x03);
	msleep(50);
	REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_SET, 0x03);
	msleep(50);
	bnx2x_init_block(bp, BRB1_BLOCK, COMMON_STAGE);
	bnx2x_init_block(bp, PRS_BLOCK, COMMON_STAGE);

	DP(NETIF_MSG_HW, "part2\n");

	/* Disable inputs of parser neighbor blocks */
	REG_WR(bp, TSDM_REG_ENABLE_IN1, 0x0);
	REG_WR(bp, TCM_REG_PRS_IFEN, 0x0);
	REG_WR(bp, CFC_REG_DEBUG0, 0x1);
	REG_WR(bp, NIG_REG_PRS_REQ_IN_EN, 0x0);

	/* Write 0 to parser credits for CFC search request */
	REG_WR(bp, PRS_REG_CFC_SEARCH_INITIAL_CREDIT, 0x0);

	/* send 10 Ethernet packets */
	for (i = 0; i < 10; i++)
		bnx2x_lb_pckt(bp);

	/* Wait until NIG register shows 10 + 1
	   packets of size 11*0x10 = 0xb0 */
	count = 1000 * factor;
	while (count) {

		bnx2x_read_dmae(bp, NIG_REG_STAT2_BRB_OCTET, 2);
		val = *bnx2x_sp(bp, wb_data[0]);
		if (val == 0xb0)
			break;

		msleep(10);
		count--;
	}
	if (val != 0xb0) {
		BNX2X_ERR("NIG timeout val = 0x%x\n", val);
		return -3;
	}

	/* Wait until PRS register shows 2 packets */
	val = REG_RD(bp, PRS_REG_NUM_OF_PACKETS);
	if (val != 2)
		BNX2X_ERR("PRS timeout val = 0x%x\n", val);

	/* Write 1 to parser credits for CFC search request */
	REG_WR(bp, PRS_REG_CFC_SEARCH_INITIAL_CREDIT, 0x1);

	/* Wait until PRS register shows 3 packets */
	msleep(10 * factor);
	/* Wait until NIG register shows 1 packet of size 0x10 */
	val = REG_RD(bp, PRS_REG_NUM_OF_PACKETS);
	if (val != 3)
		BNX2X_ERR("PRS timeout val = 0x%x\n", val);

	/* clear NIG EOP FIFO by draining all 11 entries */
	for (i = 0; i < 11; i++)
		REG_RD(bp, NIG_REG_INGRESS_EOP_LB_FIFO);
	val = REG_RD(bp, NIG_REG_INGRESS_EOP_LB_EMPTY);
	if (val != 1) {
		BNX2X_ERR("clear of NIG failed\n");
		return -4;
	}

	/* Reset and init BRB, PRS, NIG */
	REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_CLEAR, 0x03);
	msleep(50);
	REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_SET, 0x03);
	msleep(50);
	bnx2x_init_block(bp, BRB1_BLOCK, COMMON_STAGE);
	bnx2x_init_block(bp, PRS_BLOCK, COMMON_STAGE);
#ifndef BCM_CNIC
	/* set NIC mode */
	REG_WR(bp, PRS_REG_NIC_MODE, 1);
#endif

	/* Enable inputs of parser neighbor blocks */
	REG_WR(bp, TSDM_REG_ENABLE_IN1, 0x7fffffff);
	REG_WR(bp, TCM_REG_PRS_IFEN, 0x1);
	REG_WR(bp, CFC_REG_DEBUG0, 0x0);
	REG_WR(bp, NIG_REG_PRS_REQ_IN_EN, 0x1);

	DP(NETIF_MSG_HW, "done\n");

	return 0; /* OK */
}
4246
/* Enable HW attention reporting by programming the per-block interrupt
 * mask registers (0 = nothing masked, non-zero values keep selected
 * bits masked).  The commented-out SEM/MISC writes are intentionally
 * left at their reset values. */
static void enable_blocks_attention(struct bnx2x *bp)
{
	REG_WR(bp, PXP_REG_PXP_INT_MASK_0, 0);
	REG_WR(bp, PXP_REG_PXP_INT_MASK_1, 0);
	REG_WR(bp, DORQ_REG_DORQ_INT_MASK, 0);
	REG_WR(bp, CFC_REG_CFC_INT_MASK, 0);
	REG_WR(bp, QM_REG_QM_INT_MASK, 0);
	REG_WR(bp, TM_REG_TM_INT_MASK, 0);
	REG_WR(bp, XSDM_REG_XSDM_INT_MASK_0, 0);
	REG_WR(bp, XSDM_REG_XSDM_INT_MASK_1, 0);
	REG_WR(bp, XCM_REG_XCM_INT_MASK, 0);
/*	REG_WR(bp, XSEM_REG_XSEM_INT_MASK_0, 0); */
/*	REG_WR(bp, XSEM_REG_XSEM_INT_MASK_1, 0); */
	REG_WR(bp, USDM_REG_USDM_INT_MASK_0, 0);
	REG_WR(bp, USDM_REG_USDM_INT_MASK_1, 0);
	REG_WR(bp, UCM_REG_UCM_INT_MASK, 0);
/*	REG_WR(bp, USEM_REG_USEM_INT_MASK_0, 0); */
/*	REG_WR(bp, USEM_REG_USEM_INT_MASK_1, 0); */
	REG_WR(bp, GRCBASE_UPB + PB_REG_PB_INT_MASK, 0);
	REG_WR(bp, CSDM_REG_CSDM_INT_MASK_0, 0);
	REG_WR(bp, CSDM_REG_CSDM_INT_MASK_1, 0);
	REG_WR(bp, CCM_REG_CCM_INT_MASK, 0);
/*	REG_WR(bp, CSEM_REG_CSEM_INT_MASK_0, 0); */
/*	REG_WR(bp, CSEM_REG_CSEM_INT_MASK_1, 0); */
	/* NOTE(review): the PXP2 mask differs on FPGA — presumably to
	 * suppress attentions the FPGA model raises spuriously; confirm
	 * against the register spec. */
	if (CHIP_REV_IS_FPGA(bp))
		REG_WR(bp, PXP2_REG_PXP2_INT_MASK_0, 0x580000);
	else
		REG_WR(bp, PXP2_REG_PXP2_INT_MASK_0, 0x480000);
	REG_WR(bp, TSDM_REG_TSDM_INT_MASK_0, 0);
	REG_WR(bp, TSDM_REG_TSDM_INT_MASK_1, 0);
	REG_WR(bp, TCM_REG_TCM_INT_MASK, 0);
/*	REG_WR(bp, TSEM_REG_TSEM_INT_MASK_0, 0); */
/*	REG_WR(bp, TSEM_REG_TSEM_INT_MASK_1, 0); */
	REG_WR(bp, CDU_REG_CDU_INT_MASK, 0);
	REG_WR(bp, DMAE_REG_DMAE_INT_MASK, 0);
/*	REG_WR(bp, MISC_REG_MISC_INT_MASK, 0); */
	REG_WR(bp, PBF_REG_PBF_INT_MASK, 0X18);		/* bit 3,4 masked */
}
4285
/* Table of per-block parity mask registers and the values programmed
 * into them by enable_blocks_parity().  NOTE(review): the register
 * names suggest a set bit masks (silences) the corresponding parity
 * source, so 0xffffffff keeps a block fully masked and 0x0 enables all
 * of its parity reporting — confirm against the register spec. */
static const struct {
	u32 addr;	/* GRC address of the block's PRTY_MASK register */
	u32 mask;	/* mask value to program */
} bnx2x_parity_mask[] = {
	{PXP_REG_PXP_PRTY_MASK, 0xffffffff},
	{PXP2_REG_PXP2_PRTY_MASK_0, 0xffffffff},
	{PXP2_REG_PXP2_PRTY_MASK_1, 0xffffffff},
	{HC_REG_HC_PRTY_MASK, 0xffffffff},
	{MISC_REG_MISC_PRTY_MASK, 0xffffffff},
	{QM_REG_QM_PRTY_MASK, 0x0},
	{DORQ_REG_DORQ_PRTY_MASK, 0x0},
	{GRCBASE_UPB + PB_REG_PB_PRTY_MASK, 0x0},
	{GRCBASE_XPB + PB_REG_PB_PRTY_MASK, 0x0},
	{SRC_REG_SRC_PRTY_MASK, 0x4},	/* bit 2 */
	{CDU_REG_CDU_PRTY_MASK, 0x0},
	{CFC_REG_CFC_PRTY_MASK, 0x0},
	{DBG_REG_DBG_PRTY_MASK, 0x0},
	{DMAE_REG_DMAE_PRTY_MASK, 0x0},
	{BRB1_REG_BRB1_PRTY_MASK, 0x0},
	{PRS_REG_PRS_PRTY_MASK, (1<<6)},/* bit 6 */
	{TSDM_REG_TSDM_PRTY_MASK, 0x18},/* bit 3,4 */
	{CSDM_REG_CSDM_PRTY_MASK, 0x8},	/* bit 3 */
	{USDM_REG_USDM_PRTY_MASK, 0x38},/* bit 3,4,5 */
	{XSDM_REG_XSDM_PRTY_MASK, 0x8},	/* bit 3 */
	{TSEM_REG_TSEM_PRTY_MASK_0, 0x0},
	{TSEM_REG_TSEM_PRTY_MASK_1, 0x0},
	{USEM_REG_USEM_PRTY_MASK_0, 0x0},
	{USEM_REG_USEM_PRTY_MASK_1, 0x0},
	{CSEM_REG_CSEM_PRTY_MASK_0, 0x0},
	{CSEM_REG_CSEM_PRTY_MASK_1, 0x0},
	{XSEM_REG_XSEM_PRTY_MASK_0, 0x0},
	{XSEM_REG_XSEM_PRTY_MASK_1, 0x0}
};
4319
4320static void enable_blocks_parity(struct bnx2x *bp)
4321{
Nikitas Angelinascbd9da72010-09-08 11:20:37 +00004322 int i;
Vladislav Zolotarov72fd0712010-04-19 01:13:12 +00004323
Nikitas Angelinascbd9da72010-09-08 11:20:37 +00004324 for (i = 0; i < ARRAY_SIZE(bnx2x_parity_mask); i++)
Vladislav Zolotarov72fd0712010-04-19 01:13:12 +00004325 REG_WR(bp, bnx2x_parity_mask[i].addr,
4326 bnx2x_parity_mask[i].mask);
4327}
4328
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02004329
/* Assert the chip-common reset by clearing bits in the two MISC reset
 * registers.  NOTE(review): the 0xd3ffff7f / 0x1403 masks select which
 * blocks are put into reset — taken as given here; confirm against the
 * MISC_REGISTERS_RESET_REG_* bit definitions. */
static void bnx2x_reset_common(struct bnx2x *bp)
{
	/* reset_common */
	REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_CLEAR,
	       0xd3ffff7f);
	REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_2_CLEAR, 0x1403);
}
4337
Eilon Greenstein573f2032009-08-12 08:24:14 +00004338static void bnx2x_init_pxp(struct bnx2x *bp)
4339{
4340 u16 devctl;
4341 int r_order, w_order;
4342
4343 pci_read_config_word(bp->pdev,
4344 bp->pcie_cap + PCI_EXP_DEVCTL, &devctl);
4345 DP(NETIF_MSG_HW, "read 0x%x from devctl\n", devctl);
4346 w_order = ((devctl & PCI_EXP_DEVCTL_PAYLOAD) >> 5);
4347 if (bp->mrrs == -1)
4348 r_order = ((devctl & PCI_EXP_DEVCTL_READRQ) >> 12);
4349 else {
4350 DP(NETIF_MSG_HW, "force read order to %d\n", bp->mrrs);
4351 r_order = bp->mrrs;
4352 }
4353
4354 bnx2x_init_pxp_arb(bp, r_order, w_order);
4355}
Eilon Greensteinfd4ef402009-07-21 05:47:27 +00004356
/* Configure fan-failure detection: consult the shared-memory HW config
 * (and, for PHY-dependent boards, each port's PHY requirement) and, when
 * detection is required, set up SPIO5 as an active-low input whose
 * events are signalled to the IGU. */
static void bnx2x_setup_fan_failure_detection(struct bnx2x *bp)
{
	int is_required;
	u32 val;
	int port;

	/* the decision relies on shared memory written by the MCP */
	if (BP_NOMCP(bp))
		return;

	is_required = 0;
	val = SHMEM_RD(bp, dev_info.shared_hw_config.config2) &
	      SHARED_HW_CFG_FAN_FAILURE_MASK;

	if (val == SHARED_HW_CFG_FAN_FAILURE_ENABLED)
		is_required = 1;

	/*
	 * The fan failure mechanism is usually related to the PHY type since
	 * the power consumption of the board is affected by the PHY. Currently,
	 * fan is required for most designs with SFX7101, BCM8727 and BCM8481.
	 */
	else if (val == SHARED_HW_CFG_FAN_FAILURE_PHY_TYPE)
		for (port = PORT_0; port < PORT_MAX; port++) {
			is_required |=
				bnx2x_fan_failure_det_req(
					bp,
					bp->common.shmem_base,
					bp->common.shmem2_base,
					port);
		}

	DP(NETIF_MSG_HW, "fan detection setting: %d\n", is_required);

	if (is_required == 0)
		return;

	/* Fan failure is indicated by SPIO 5 */
	bnx2x_set_spio(bp, MISC_REGISTERS_SPIO_5,
		       MISC_REGISTERS_SPIO_INPUT_HI_Z);

	/* set to active low mode */
	val = REG_RD(bp, MISC_REG_SPIO_INT);
	val |= ((1 << MISC_REGISTERS_SPIO_5) <<
					MISC_REGISTERS_SPIO_INT_OLD_SET_POS);
	REG_WR(bp, MISC_REG_SPIO_INT, val);

	/* enable interrupt to signal the IGU */
	val = REG_RD(bp, MISC_REG_SPIO_EVENT_EN);
	val |= (1 << MISC_REGISTERS_SPIO_5);
	REG_WR(bp, MISC_REG_SPIO_EVENT_EN, val);
}
4408
Dmitry Kravkov523224a2010-10-06 03:23:26 +00004409static int bnx2x_init_hw_common(struct bnx2x *bp, u32 load_code)
Eilon Greenstein34f80b02008-06-23 20:33:01 -07004410{
4411 u32 val, i;
4412
4413 DP(BNX2X_MSG_MCP, "starting common init func %d\n", BP_FUNC(bp));
4414
Eilon Greenstein81f75bb2009-01-22 03:37:31 +00004415 bnx2x_reset_common(bp);
Eilon Greenstein34f80b02008-06-23 20:33:01 -07004416 REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_SET, 0xffffffff);
4417 REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_2_SET, 0xfffc);
4418
Vladislav Zolotarov94a78b72009-04-27 03:27:43 -07004419 bnx2x_init_block(bp, MISC_BLOCK, COMMON_STAGE);
Eilon Greenstein34f80b02008-06-23 20:33:01 -07004420 if (CHIP_IS_E1H(bp))
4421 REG_WR(bp, MISC_REG_E1HMF_MODE, IS_E1HMF(bp));
4422
4423 REG_WR(bp, MISC_REG_LCPLL_CTRL_REG_2, 0x100);
4424 msleep(30);
4425 REG_WR(bp, MISC_REG_LCPLL_CTRL_REG_2, 0x0);
4426
Vladislav Zolotarov94a78b72009-04-27 03:27:43 -07004427 bnx2x_init_block(bp, PXP_BLOCK, COMMON_STAGE);
Eilon Greenstein34f80b02008-06-23 20:33:01 -07004428 if (CHIP_IS_E1(bp)) {
4429 /* enable HW interrupt from PXP on USDM overflow
4430 bit 16 on INT_MASK_0 */
4431 REG_WR(bp, PXP_REG_PXP_INT_MASK_0, 0);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02004432 }
4433
Vladislav Zolotarov94a78b72009-04-27 03:27:43 -07004434 bnx2x_init_block(bp, PXP2_BLOCK, COMMON_STAGE);
Eilon Greenstein34f80b02008-06-23 20:33:01 -07004435 bnx2x_init_pxp(bp);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02004436
4437#ifdef __BIG_ENDIAN
Eilon Greenstein34f80b02008-06-23 20:33:01 -07004438 REG_WR(bp, PXP2_REG_RQ_QM_ENDIAN_M, 1);
4439 REG_WR(bp, PXP2_REG_RQ_TM_ENDIAN_M, 1);
4440 REG_WR(bp, PXP2_REG_RQ_SRC_ENDIAN_M, 1);
4441 REG_WR(bp, PXP2_REG_RQ_CDU_ENDIAN_M, 1);
4442 REG_WR(bp, PXP2_REG_RQ_DBG_ENDIAN_M, 1);
Eilon Greenstein8badd272009-02-12 08:36:15 +00004443 /* make sure this value is 0 */
4444 REG_WR(bp, PXP2_REG_RQ_HC_ENDIAN_M, 0);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02004445
Eilon Greenstein34f80b02008-06-23 20:33:01 -07004446/* REG_WR(bp, PXP2_REG_RD_PBF_SWAP_MODE, 1); */
4447 REG_WR(bp, PXP2_REG_RD_QM_SWAP_MODE, 1);
4448 REG_WR(bp, PXP2_REG_RD_TM_SWAP_MODE, 1);
4449 REG_WR(bp, PXP2_REG_RD_SRC_SWAP_MODE, 1);
4450 REG_WR(bp, PXP2_REG_RD_CDURD_SWAP_MODE, 1);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02004451#endif
4452
Dmitry Kravkov523224a2010-10-06 03:23:26 +00004453 bnx2x_ilt_init_page_size(bp, INITOP_SET);
4454
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02004455
Eilon Greenstein34f80b02008-06-23 20:33:01 -07004456 if (CHIP_REV_IS_FPGA(bp) && CHIP_IS_E1H(bp))
4457 REG_WR(bp, PXP2_REG_PGL_TAGS_LIMIT, 0x1);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02004458
Eilon Greenstein34f80b02008-06-23 20:33:01 -07004459 /* let the HW do it's magic ... */
4460 msleep(100);
4461 /* finish PXP init */
4462 val = REG_RD(bp, PXP2_REG_RQ_CFG_DONE);
4463 if (val != 1) {
4464 BNX2X_ERR("PXP2 CFG failed\n");
4465 return -EBUSY;
4466 }
4467 val = REG_RD(bp, PXP2_REG_RD_INIT_DONE);
4468 if (val != 1) {
4469 BNX2X_ERR("PXP2 RD_INIT failed\n");
4470 return -EBUSY;
4471 }
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02004472
Eilon Greenstein34f80b02008-06-23 20:33:01 -07004473 REG_WR(bp, PXP2_REG_RQ_DISABLE_INPUTS, 0);
4474 REG_WR(bp, PXP2_REG_RD_DISABLE_INPUTS, 0);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02004475
Vladislav Zolotarov94a78b72009-04-27 03:27:43 -07004476 bnx2x_init_block(bp, DMAE_BLOCK, COMMON_STAGE);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02004477
Eilon Greenstein34f80b02008-06-23 20:33:01 -07004478 /* clean the DMAE memory */
4479 bp->dmae_ready = 1;
4480 bnx2x_init_fill(bp, TSEM_REG_PRAM, 0, 8);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02004481
Vladislav Zolotarov94a78b72009-04-27 03:27:43 -07004482 bnx2x_init_block(bp, TCM_BLOCK, COMMON_STAGE);
4483 bnx2x_init_block(bp, UCM_BLOCK, COMMON_STAGE);
4484 bnx2x_init_block(bp, CCM_BLOCK, COMMON_STAGE);
4485 bnx2x_init_block(bp, XCM_BLOCK, COMMON_STAGE);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02004486
Eilon Greenstein34f80b02008-06-23 20:33:01 -07004487 bnx2x_read_dmae(bp, XSEM_REG_PASSIVE_BUFFER, 3);
4488 bnx2x_read_dmae(bp, CSEM_REG_PASSIVE_BUFFER, 3);
4489 bnx2x_read_dmae(bp, TSEM_REG_PASSIVE_BUFFER, 3);
4490 bnx2x_read_dmae(bp, USEM_REG_PASSIVE_BUFFER, 3);
4491
Vladislav Zolotarov94a78b72009-04-27 03:27:43 -07004492 bnx2x_init_block(bp, QM_BLOCK, COMMON_STAGE);
Michael Chan37b091b2009-10-10 13:46:55 +00004493
Dmitry Kravkov523224a2010-10-06 03:23:26 +00004494 /* QM queues pointers table */
4495 bnx2x_qm_init_ptr_table(bp, bp->qm_cid_count, INITOP_SET);
Michael Chan37b091b2009-10-10 13:46:55 +00004496
Eilon Greenstein34f80b02008-06-23 20:33:01 -07004497 /* soft reset pulse */
4498 REG_WR(bp, QM_REG_SOFT_RESET, 1);
4499 REG_WR(bp, QM_REG_SOFT_RESET, 0);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02004500
Michael Chan37b091b2009-10-10 13:46:55 +00004501#ifdef BCM_CNIC
Vladislav Zolotarov94a78b72009-04-27 03:27:43 -07004502 bnx2x_init_block(bp, TIMERS_BLOCK, COMMON_STAGE);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02004503#endif
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02004504
Vladislav Zolotarov94a78b72009-04-27 03:27:43 -07004505 bnx2x_init_block(bp, DQ_BLOCK, COMMON_STAGE);
Dmitry Kravkov523224a2010-10-06 03:23:26 +00004506 REG_WR(bp, DORQ_REG_DPM_CID_OFST, BNX2X_DB_SHIFT);
4507
Eilon Greenstein34f80b02008-06-23 20:33:01 -07004508 if (!CHIP_REV_IS_SLOW(bp)) {
4509 /* enable hw interrupt from doorbell Q */
4510 REG_WR(bp, DORQ_REG_DORQ_INT_MASK, 0);
4511 }
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02004512
Vladislav Zolotarov94a78b72009-04-27 03:27:43 -07004513 bnx2x_init_block(bp, BRB1_BLOCK, COMMON_STAGE);
4514 bnx2x_init_block(bp, PRS_BLOCK, COMMON_STAGE);
Eilon Greenstein26c8fa42009-01-14 21:29:55 -08004515 REG_WR(bp, PRS_REG_A_PRSU_20, 0xf);
Michael Chan37b091b2009-10-10 13:46:55 +00004516#ifndef BCM_CNIC
Eilon Greenstein3196a882008-08-13 15:58:49 -07004517 /* set NIC mode */
4518 REG_WR(bp, PRS_REG_NIC_MODE, 1);
Michael Chan37b091b2009-10-10 13:46:55 +00004519#endif
Eilon Greenstein34f80b02008-06-23 20:33:01 -07004520 if (CHIP_IS_E1H(bp))
4521 REG_WR(bp, PRS_REG_E1HOV_MODE, IS_E1HMF(bp));
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02004522
Vladislav Zolotarov94a78b72009-04-27 03:27:43 -07004523 bnx2x_init_block(bp, TSDM_BLOCK, COMMON_STAGE);
4524 bnx2x_init_block(bp, CSDM_BLOCK, COMMON_STAGE);
4525 bnx2x_init_block(bp, USDM_BLOCK, COMMON_STAGE);
4526 bnx2x_init_block(bp, XSDM_BLOCK, COMMON_STAGE);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02004527
Eilon Greensteinca003922009-08-12 22:53:28 -07004528 bnx2x_init_fill(bp, TSEM_REG_FAST_MEMORY, 0, STORM_INTMEM_SIZE(bp));
4529 bnx2x_init_fill(bp, USEM_REG_FAST_MEMORY, 0, STORM_INTMEM_SIZE(bp));
4530 bnx2x_init_fill(bp, CSEM_REG_FAST_MEMORY, 0, STORM_INTMEM_SIZE(bp));
4531 bnx2x_init_fill(bp, XSEM_REG_FAST_MEMORY, 0, STORM_INTMEM_SIZE(bp));
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02004532
Vladislav Zolotarov94a78b72009-04-27 03:27:43 -07004533 bnx2x_init_block(bp, TSEM_BLOCK, COMMON_STAGE);
4534 bnx2x_init_block(bp, USEM_BLOCK, COMMON_STAGE);
4535 bnx2x_init_block(bp, CSEM_BLOCK, COMMON_STAGE);
4536 bnx2x_init_block(bp, XSEM_BLOCK, COMMON_STAGE);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02004537
Eilon Greenstein34f80b02008-06-23 20:33:01 -07004538 /* sync semi rtc */
4539 REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_CLEAR,
4540 0x80000000);
4541 REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_SET,
4542 0x80000000);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02004543
Vladislav Zolotarov94a78b72009-04-27 03:27:43 -07004544 bnx2x_init_block(bp, UPB_BLOCK, COMMON_STAGE);
4545 bnx2x_init_block(bp, XPB_BLOCK, COMMON_STAGE);
4546 bnx2x_init_block(bp, PBF_BLOCK, COMMON_STAGE);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02004547
Eilon Greenstein34f80b02008-06-23 20:33:01 -07004548 REG_WR(bp, SRC_REG_SOFT_RST, 1);
Tom Herbertc68ed252010-04-23 00:10:52 -07004549 for (i = SRC_REG_KEYRSS0_0; i <= SRC_REG_KEYRSS1_9; i += 4)
4550 REG_WR(bp, i, random32());
Vladislav Zolotarov94a78b72009-04-27 03:27:43 -07004551 bnx2x_init_block(bp, SRCH_BLOCK, COMMON_STAGE);
Michael Chan37b091b2009-10-10 13:46:55 +00004552#ifdef BCM_CNIC
4553 REG_WR(bp, SRC_REG_KEYSEARCH_0, 0x63285672);
4554 REG_WR(bp, SRC_REG_KEYSEARCH_1, 0x24b8f2cc);
4555 REG_WR(bp, SRC_REG_KEYSEARCH_2, 0x223aef9b);
4556 REG_WR(bp, SRC_REG_KEYSEARCH_3, 0x26001e3a);
4557 REG_WR(bp, SRC_REG_KEYSEARCH_4, 0x7ae91116);
4558 REG_WR(bp, SRC_REG_KEYSEARCH_5, 0x5ce5230b);
4559 REG_WR(bp, SRC_REG_KEYSEARCH_6, 0x298d8adf);
4560 REG_WR(bp, SRC_REG_KEYSEARCH_7, 0x6eb0ff09);
4561 REG_WR(bp, SRC_REG_KEYSEARCH_8, 0x1830f82f);
4562 REG_WR(bp, SRC_REG_KEYSEARCH_9, 0x01e46be7);
4563#endif
Eilon Greenstein34f80b02008-06-23 20:33:01 -07004564 REG_WR(bp, SRC_REG_SOFT_RST, 0);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02004565
Eilon Greenstein34f80b02008-06-23 20:33:01 -07004566 if (sizeof(union cdu_context) != 1024)
4567 /* we currently assume that a context is 1024 bytes */
Vladislav Zolotarovcdaa7cb2010-04-19 01:13:57 +00004568 dev_alert(&bp->pdev->dev, "please adjust the size "
4569 "of cdu_context(%ld)\n",
Joe Perches7995c642010-02-17 15:01:52 +00004570 (long)sizeof(union cdu_context));
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02004571
Vladislav Zolotarov94a78b72009-04-27 03:27:43 -07004572 bnx2x_init_block(bp, CDU_BLOCK, COMMON_STAGE);
Eilon Greenstein34f80b02008-06-23 20:33:01 -07004573 val = (4 << 24) + (0 << 12) + 1024;
4574 REG_WR(bp, CDU_REG_CDU_GLOBAL_PARAMS, val);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02004575
Vladislav Zolotarov94a78b72009-04-27 03:27:43 -07004576 bnx2x_init_block(bp, CFC_BLOCK, COMMON_STAGE);
Eilon Greenstein34f80b02008-06-23 20:33:01 -07004577 REG_WR(bp, CFC_REG_INIT_REG, 0x7FF);
Eilon Greenstein8d9c5f32009-02-15 23:24:08 -08004578 /* enable context validation interrupt from CFC */
4579 REG_WR(bp, CFC_REG_CFC_INT_MASK, 0);
4580
4581 /* set the thresholds to prevent CFC/CDU race */
4582 REG_WR(bp, CFC_REG_DEBUG0, 0x20020000);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02004583
Vladislav Zolotarov94a78b72009-04-27 03:27:43 -07004584 bnx2x_init_block(bp, HC_BLOCK, COMMON_STAGE);
4585 bnx2x_init_block(bp, MISC_AEU_BLOCK, COMMON_STAGE);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02004586
Vladislav Zolotarov94a78b72009-04-27 03:27:43 -07004587 bnx2x_init_block(bp, PXPCS_BLOCK, COMMON_STAGE);
Eilon Greenstein34f80b02008-06-23 20:33:01 -07004588 /* Reset PCIE errors for debug */
4589 REG_WR(bp, 0x2814, 0xffffffff);
4590 REG_WR(bp, 0x3820, 0xffffffff);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02004591
Vladislav Zolotarov94a78b72009-04-27 03:27:43 -07004592 bnx2x_init_block(bp, EMAC0_BLOCK, COMMON_STAGE);
Vladislav Zolotarov94a78b72009-04-27 03:27:43 -07004593 bnx2x_init_block(bp, EMAC1_BLOCK, COMMON_STAGE);
Vladislav Zolotarov94a78b72009-04-27 03:27:43 -07004594 bnx2x_init_block(bp, DBU_BLOCK, COMMON_STAGE);
Vladislav Zolotarov94a78b72009-04-27 03:27:43 -07004595 bnx2x_init_block(bp, DBG_BLOCK, COMMON_STAGE);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02004596
Vladislav Zolotarov94a78b72009-04-27 03:27:43 -07004597 bnx2x_init_block(bp, NIG_BLOCK, COMMON_STAGE);
Eilon Greenstein34f80b02008-06-23 20:33:01 -07004598 if (CHIP_IS_E1H(bp)) {
4599 REG_WR(bp, NIG_REG_LLH_MF_MODE, IS_E1HMF(bp));
4600 REG_WR(bp, NIG_REG_LLH_E1HOV_MODE, IS_E1HMF(bp));
4601 }
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02004602
Eilon Greenstein34f80b02008-06-23 20:33:01 -07004603 if (CHIP_REV_IS_SLOW(bp))
4604 msleep(200);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02004605
Eilon Greenstein34f80b02008-06-23 20:33:01 -07004606 /* finish CFC init */
4607 val = reg_poll(bp, CFC_REG_LL_INIT_DONE, 1, 100, 10);
4608 if (val != 1) {
4609 BNX2X_ERR("CFC LL_INIT failed\n");
4610 return -EBUSY;
4611 }
4612 val = reg_poll(bp, CFC_REG_AC_INIT_DONE, 1, 100, 10);
4613 if (val != 1) {
4614 BNX2X_ERR("CFC AC_INIT failed\n");
4615 return -EBUSY;
4616 }
4617 val = reg_poll(bp, CFC_REG_CAM_INIT_DONE, 1, 100, 10);
4618 if (val != 1) {
4619 BNX2X_ERR("CFC CAM_INIT failed\n");
4620 return -EBUSY;
4621 }
4622 REG_WR(bp, CFC_REG_DEBUG0, 0);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02004623
Eilon Greenstein34f80b02008-06-23 20:33:01 -07004624 /* read NIG statistic
4625 to see if this is our first up since powerup */
4626 bnx2x_read_dmae(bp, NIG_REG_STAT2_BRB_OCTET, 2);
4627 val = *bnx2x_sp(bp, wb_data[0]);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02004628
Eilon Greenstein34f80b02008-06-23 20:33:01 -07004629 /* do internal memory self test */
4630 if ((CHIP_IS_E1(bp)) && (val == 0) && bnx2x_int_mem_test(bp)) {
4631 BNX2X_ERR("internal mem self test failed\n");
4632 return -EBUSY;
4633 }
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02004634
Yaniv Rosnerd90d96b2010-09-07 11:41:04 +00004635 bp->port.need_hw_lock = bnx2x_hw_lock_required(bp,
Yaniv Rosnera22f0782010-09-07 11:41:20 +00004636 bp->common.shmem_base,
4637 bp->common.shmem2_base);
Eliezer Tamirf1410642008-02-28 11:51:50 -08004638
Eilon Greensteinfd4ef402009-07-21 05:47:27 +00004639 bnx2x_setup_fan_failure_detection(bp);
4640
Eilon Greenstein34f80b02008-06-23 20:33:01 -07004641 /* clear PXP2 attentions */
4642 REG_RD(bp, PXP2_REG_PXP2_INT_STS_CLR_0);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02004643
Eilon Greenstein34f80b02008-06-23 20:33:01 -07004644 enable_blocks_attention(bp);
Vladislav Zolotarov72fd0712010-04-19 01:13:12 +00004645 if (CHIP_PARITY_SUPPORTED(bp))
4646 enable_blocks_parity(bp);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02004647
Yaniv Rosner6bbca912008-08-13 15:57:28 -07004648 if (!BP_NOMCP(bp)) {
4649 bnx2x_acquire_phy_lock(bp);
Yaniv Rosnera22f0782010-09-07 11:41:20 +00004650 bnx2x_common_init_phy(bp, bp->common.shmem_base,
4651 bp->common.shmem2_base);
Yaniv Rosner6bbca912008-08-13 15:57:28 -07004652 bnx2x_release_phy_lock(bp);
4653 } else
4654 BNX2X_ERR("Bootcode is missing - can not initialize link\n");
4655
Eilon Greenstein34f80b02008-06-23 20:33:01 -07004656 return 0;
4657}
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02004658
/*
 * bnx2x_init_hw_port - per-port stage of the HW init sequence.
 *
 * Runs after the COMMON stage, once for each physical port, using the
 * port-specific init stage (PORT0_STAGE or PORT1_STAGE).  It initializes
 * the per-port HW blocks, programs the BRB pause thresholds according to
 * the operating mode and MTU, brings up PBF without PAUSE, unmasks the
 * fan-failure attention when required, and finally resets the link.
 *
 * Returns 0 (this stage has no failure paths).
 */
static int bnx2x_init_hw_port(struct bnx2x *bp)
{
	int port = BP_PORT(bp);
	int init_stage = port ? PORT1_STAGE : PORT0_STAGE;
	u32 low, high;
	u32 val;

	DP(BNX2X_MSG_MCP, "starting port init port %d\n", port);

	/* mask this port's NIG interrupts while its blocks are initialized */
	REG_WR(bp, NIG_REG_MASK_INTERRUPT_PORT0 + port*4, 0);

	bnx2x_init_block(bp, PXP_BLOCK, init_stage);
	bnx2x_init_block(bp, PXP2_BLOCK, init_stage);

	bnx2x_init_block(bp, TCM_BLOCK, init_stage);
	bnx2x_init_block(bp, UCM_BLOCK, init_stage);
	bnx2x_init_block(bp, CCM_BLOCK, init_stage);
	bnx2x_init_block(bp, XCM_BLOCK, init_stage);

	/* QM cid (connection) count */
	bnx2x_qm_init_cid_count(bp, bp->qm_cid_count, INITOP_SET);

#ifdef BCM_CNIC
	/* TM (timers) block is only needed for iSCSI/FCoE offload (CNIC) */
	bnx2x_init_block(bp, TIMERS_BLOCK, init_stage);
	REG_WR(bp, TM_REG_LIN0_SCAN_TIME + port*4, 20);
	REG_WR(bp, TM_REG_LIN0_MAX_ACTIVE_CID + port*4, 31);
#endif

	bnx2x_init_block(bp, DQ_BLOCK, init_stage);

	bnx2x_init_block(bp, BRB1_BLOCK, init_stage);
	if (CHIP_REV_IS_SLOW(bp) && !CHIP_IS_E1H(bp)) {
		/* no pause for emulation and FPGA */
		low = 0;
		high = 513;
	} else {
		/* BRB pause low threshold depends on mode and MTU:
		 * multi-function, jumbo-MTU and one-port variants each get
		 * their own watermark (in units of 256 bytes) */
		if (IS_E1HMF(bp))
			low = ((bp->flags & ONE_PORT_FLAG) ? 160 : 246);
		else if (bp->dev->mtu > 4096) {
			if (bp->flags & ONE_PORT_FLAG)
				low = 160;
			else {
				val = bp->dev->mtu;
				/* (24*1024 + val*4)/256 */
				low = 96 + (val/64) + ((val % 64) ? 1 : 0);
			}
		} else
			low = ((bp->flags & ONE_PORT_FLAG) ? 80 : 160);
		high = low + 56;	/* 14*1024/256 */
	}
	REG_WR(bp, BRB1_REG_PAUSE_LOW_THRESHOLD_0 + port*4, low);
	REG_WR(bp, BRB1_REG_PAUSE_HIGH_THRESHOLD_0 + port*4, high);


	bnx2x_init_block(bp, PRS_BLOCK, init_stage);

	bnx2x_init_block(bp, TSDM_BLOCK, init_stage);
	bnx2x_init_block(bp, CSDM_BLOCK, init_stage);
	bnx2x_init_block(bp, USDM_BLOCK, init_stage);
	bnx2x_init_block(bp, XSDM_BLOCK, init_stage);

	bnx2x_init_block(bp, TSEM_BLOCK, init_stage);
	bnx2x_init_block(bp, USEM_BLOCK, init_stage);
	bnx2x_init_block(bp, CSEM_BLOCK, init_stage);
	bnx2x_init_block(bp, XSEM_BLOCK, init_stage);

	bnx2x_init_block(bp, UPB_BLOCK, init_stage);
	bnx2x_init_block(bp, XPB_BLOCK, init_stage);

	bnx2x_init_block(bp, PBF_BLOCK, init_stage);

	/* configure PBF to work without PAUSE mtu 9000 */
	REG_WR(bp, PBF_REG_P0_PAUSE_ENABLE + port*4, 0);

	/* update threshold */
	REG_WR(bp, PBF_REG_P0_ARB_THRSH + port*4, (9040/16));
	/* update init credit */
	REG_WR(bp, PBF_REG_P0_INIT_CRD + port*4, (9040/16) + 553 - 22);

	/* probe changes */
	REG_WR(bp, PBF_REG_INIT_P0 + port*4, 1);
	msleep(5);
	REG_WR(bp, PBF_REG_INIT_P0 + port*4, 0);

#ifdef BCM_CNIC
	/* searcher block is only used with CNIC offload */
	bnx2x_init_block(bp, SRCH_BLOCK, init_stage);
#endif
	bnx2x_init_block(bp, CDU_BLOCK, init_stage);
	bnx2x_init_block(bp, CFC_BLOCK, init_stage);

	if (CHIP_IS_E1(bp)) {
		/* on E1 the HC edge registers are per-port, not per-func */
		REG_WR(bp, HC_REG_LEADING_EDGE_0 + port*8, 0);
		REG_WR(bp, HC_REG_TRAILING_EDGE_0 + port*8, 0);
	}
	bnx2x_init_block(bp, HC_BLOCK, init_stage);

	bnx2x_init_block(bp, MISC_AEU_BLOCK, init_stage);
	/* init aeu_mask_attn_func_0/1:
	 *  - SF mode: bits 3-7 are masked. only bits 0-2 are in use
	 *  - MF mode: bit 3 is masked. bits 0-2 are in use as in SF
	 *             bits 4-7 are used for "per vn group attention" */
	REG_WR(bp, MISC_REG_AEU_MASK_ATTN_FUNC_0 + port*4,
	       (IS_E1HMF(bp) ? 0xF7 : 0x7));

	bnx2x_init_block(bp, PXPCS_BLOCK, init_stage);
	bnx2x_init_block(bp, EMAC0_BLOCK, init_stage);
	bnx2x_init_block(bp, EMAC1_BLOCK, init_stage);
	bnx2x_init_block(bp, DBU_BLOCK, init_stage);
	bnx2x_init_block(bp, DBG_BLOCK, init_stage);

	bnx2x_init_block(bp, NIG_BLOCK, init_stage);

	REG_WR(bp, NIG_REG_XGXS_SERDES0_MODE_SEL + port*4, 1);

	if (CHIP_IS_E1H(bp)) {
		/* 0x2 disable e1hov, 0x1 enable */
		REG_WR(bp, NIG_REG_LLH0_BRB1_DRV_MASK_MF + port*4,
		       (IS_E1HMF(bp) ? 0x1 : 0x2));

		/* disable link-level flow control, enable PAUSE */
		{
			REG_WR(bp, NIG_REG_LLFC_ENABLE_0 + port*4, 0);
			REG_WR(bp, NIG_REG_LLFC_OUT_EN_0 + port*4, 0);
			REG_WR(bp, NIG_REG_PAUSE_ENABLE_0 + port*4, 1);
		}
	}

	bnx2x_init_block(bp, MCP_BLOCK, init_stage);
	bnx2x_init_block(bp, DMAE_BLOCK, init_stage);
	bp->port.need_hw_lock = bnx2x_hw_lock_required(bp,
						       bp->common.shmem_base,
						       bp->common.shmem2_base);
	/* if the board reports a fan-failure sensor, unmask the SPIO5
	 * attention bit for this port so the failure raises an interrupt */
	if (bnx2x_fan_failure_det_req(bp, bp->common.shmem_base,
				      bp->common.shmem2_base, port)) {
		u32 reg_addr = (port ? MISC_REG_AEU_ENABLE1_FUNC_1_OUT_0 :
				       MISC_REG_AEU_ENABLE1_FUNC_0_OUT_0);
		val = REG_RD(bp, reg_addr);
		val |= AEU_INPUTS_ATTN_BITS_SPIO5;
		REG_WR(bp, reg_addr, val);
	}
	bnx2x__link_reset(bp);

	return 0;
}
4802
Eilon Greenstein34f80b02008-06-23 20:33:01 -07004803static void bnx2x_ilt_wr(struct bnx2x *bp, u32 index, dma_addr_t addr)
4804{
4805 int reg;
4806
4807 if (CHIP_IS_E1H(bp))
4808 reg = PXP2_REG_RQ_ONCHIP_AT_B0 + index*8;
4809 else /* E1 */
4810 reg = PXP2_REG_RQ_ONCHIP_AT + index*8;
4811
4812 bnx2x_wb_wr(bp, reg, ONCHIP_ADDR1(addr), ONCHIP_ADDR2(addr));
4813}
4814
Dmitry Kravkov523224a2010-10-06 03:23:26 +00004815static int bnx2x_init_hw_func(struct bnx2x *bp)
Eilon Greenstein34f80b02008-06-23 20:33:01 -07004816{
4817 int port = BP_PORT(bp);
4818 int func = BP_FUNC(bp);
Dmitry Kravkov523224a2010-10-06 03:23:26 +00004819 struct bnx2x_ilt *ilt = BP_ILT(bp);
4820 u16 cdu_ilt_start;
Eilon Greenstein8badd272009-02-12 08:36:15 +00004821 u32 addr, val;
Eilon Greenstein34f80b02008-06-23 20:33:01 -07004822 int i;
4823
Vladislav Zolotarovcdaa7cb2010-04-19 01:13:57 +00004824 DP(BNX2X_MSG_MCP, "starting func init func %d\n", func);
Eilon Greenstein34f80b02008-06-23 20:33:01 -07004825
Eilon Greenstein8badd272009-02-12 08:36:15 +00004826 /* set MSI reconfigure capability */
4827 addr = (port ? HC_REG_CONFIG_1 : HC_REG_CONFIG_0);
4828 val = REG_RD(bp, addr);
4829 val |= HC_CONFIG_0_REG_MSI_ATTN_EN_0;
4830 REG_WR(bp, addr, val);
4831
Dmitry Kravkov523224a2010-10-06 03:23:26 +00004832 ilt = BP_ILT(bp);
4833 cdu_ilt_start = ilt->clients[ILT_CLIENT_CDU].start;
Eilon Greenstein34f80b02008-06-23 20:33:01 -07004834
Dmitry Kravkov523224a2010-10-06 03:23:26 +00004835 for (i = 0; i < L2_ILT_LINES(bp); i++) {
4836 ilt->lines[cdu_ilt_start + i].page =
4837 bp->context.vcxt + (ILT_PAGE_CIDS * i);
4838 ilt->lines[cdu_ilt_start + i].page_mapping =
4839 bp->context.cxt_mapping + (CDU_ILT_PAGE_SZ * i);
4840 /* cdu ilt pages are allocated manually so there's no need to
4841 set the size */
4842 }
4843 bnx2x_ilt_init_op(bp, INITOP_SET);
Michael Chan37b091b2009-10-10 13:46:55 +00004844#ifdef BCM_CNIC
Dmitry Kravkov523224a2010-10-06 03:23:26 +00004845 bnx2x_src_init_t2(bp, bp->t2, bp->t2_mapping, SRC_CONN_NUM);
Michael Chan37b091b2009-10-10 13:46:55 +00004846
Dmitry Kravkov523224a2010-10-06 03:23:26 +00004847 /* T1 hash bits value determines the T1 number of entries */
4848 REG_WR(bp, SRC_REG_NUMBER_HASH_BITS0 + port*4, SRC_HASH_BITS);
Michael Chan37b091b2009-10-10 13:46:55 +00004849#endif
Eilon Greenstein34f80b02008-06-23 20:33:01 -07004850
Dmitry Kravkov523224a2010-10-06 03:23:26 +00004851#ifndef BCM_CNIC
4852 /* set NIC mode */
4853 REG_WR(bp, PRS_REG_NIC_MODE, 1);
4854#endif /* BCM_CNIC */
Eilon Greenstein34f80b02008-06-23 20:33:01 -07004855
Dmitry Kravkov523224a2010-10-06 03:23:26 +00004856 bp->dmae_ready = 1;
4857
4858 bnx2x_init_block(bp, PGLUE_B_BLOCK, FUNC0_STAGE + func);
4859
4860 bnx2x_init_block(bp, MISC_BLOCK, FUNC0_STAGE + func);
4861 bnx2x_init_block(bp, TCM_BLOCK, FUNC0_STAGE + func);
4862 bnx2x_init_block(bp, UCM_BLOCK, FUNC0_STAGE + func);
4863 bnx2x_init_block(bp, CCM_BLOCK, FUNC0_STAGE + func);
4864 bnx2x_init_block(bp, XCM_BLOCK, FUNC0_STAGE + func);
4865 bnx2x_init_block(bp, TSEM_BLOCK, FUNC0_STAGE + func);
4866 bnx2x_init_block(bp, USEM_BLOCK, FUNC0_STAGE + func);
4867 bnx2x_init_block(bp, CSEM_BLOCK, FUNC0_STAGE + func);
4868 bnx2x_init_block(bp, XSEM_BLOCK, FUNC0_STAGE + func);
4869
4870 bnx2x_init_block(bp, QM_BLOCK, FUNC0_STAGE + func);
4871 bnx2x_init_block(bp, TIMERS_BLOCK, FUNC0_STAGE + func);
4872 bnx2x_init_block(bp, DQ_BLOCK, FUNC0_STAGE + func);
4873 bnx2x_init_block(bp, BRB1_BLOCK, FUNC0_STAGE + func);
4874 bnx2x_init_block(bp, PRS_BLOCK, FUNC0_STAGE + func);
4875 bnx2x_init_block(bp, TSDM_BLOCK, FUNC0_STAGE + func);
4876 bnx2x_init_block(bp, CSDM_BLOCK, FUNC0_STAGE + func);
4877 bnx2x_init_block(bp, USDM_BLOCK, FUNC0_STAGE + func);
4878 bnx2x_init_block(bp, XSDM_BLOCK, FUNC0_STAGE + func);
4879 bnx2x_init_block(bp, UPB_BLOCK, FUNC0_STAGE + func);
4880 bnx2x_init_block(bp, XPB_BLOCK, FUNC0_STAGE + func);
4881 bnx2x_init_block(bp, PBF_BLOCK, FUNC0_STAGE + func);
4882 bnx2x_init_block(bp, CDU_BLOCK, FUNC0_STAGE + func);
4883
4884 bnx2x_init_block(bp, CFC_BLOCK, FUNC0_STAGE + func);
4885
4886 if (IS_E1HMF(bp)) {
Eilon Greenstein34f80b02008-06-23 20:33:01 -07004887 REG_WR(bp, NIG_REG_LLH0_FUNC_EN + port*8, 1);
4888 REG_WR(bp, NIG_REG_LLH0_FUNC_VLAN_ID + port*8, bp->e1hov);
4889 }
4890
Dmitry Kravkov523224a2010-10-06 03:23:26 +00004891 bnx2x_init_block(bp, MISC_AEU_BLOCK, FUNC0_STAGE + func);
4892
Eilon Greenstein34f80b02008-06-23 20:33:01 -07004893 /* HC init per function */
4894 if (CHIP_IS_E1H(bp)) {
4895 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_12 + func*4, 0);
4896
4897 REG_WR(bp, HC_REG_LEADING_EDGE_0 + port*8, 0);
4898 REG_WR(bp, HC_REG_TRAILING_EDGE_0 + port*8, 0);
4899 }
Vladislav Zolotarov94a78b72009-04-27 03:27:43 -07004900 bnx2x_init_block(bp, HC_BLOCK, FUNC0_STAGE + func);
Eilon Greenstein34f80b02008-06-23 20:33:01 -07004901
Eliezer Tamirc14423f2008-02-28 11:49:42 -08004902 /* Reset PCIE errors for debug */
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02004903 REG_WR(bp, 0x2114, 0xffffffff);
4904 REG_WR(bp, 0x2120, 0xffffffff);
Dmitry Kravkov523224a2010-10-06 03:23:26 +00004905
4906 bnx2x_init_block(bp, EMAC0_BLOCK, FUNC0_STAGE + func);
4907 bnx2x_init_block(bp, EMAC1_BLOCK, FUNC0_STAGE + func);
4908 bnx2x_init_block(bp, DBU_BLOCK, FUNC0_STAGE + func);
4909 bnx2x_init_block(bp, DBG_BLOCK, FUNC0_STAGE + func);
4910 bnx2x_init_block(bp, MCP_BLOCK, FUNC0_STAGE + func);
4911 bnx2x_init_block(bp, DMAE_BLOCK, FUNC0_STAGE + func);
4912
Yaniv Rosnerb7737c92010-09-07 11:40:54 +00004913 bnx2x_phy_probe(&bp->link_params);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02004914 return 0;
4915}
4916
/*
 * bnx2x_init_hw - top-level HW init dispatcher.
 *
 * @load_code selects (per the MCP's answer to our load request) how much
 * of the init chain this driver instance must run:
 *   DRV_LOAD_COMMON   - common + port + function init (first driver up)
 *   DRV_LOAD_PORT     - port + function init
 *   DRV_LOAD_FUNCTION - function init only
 * The switch cases deliberately fall through so each case runs its own
 * stage plus every later one.
 *
 * Returns 0 on success or the first stage's negative error code.
 */
int bnx2x_init_hw(struct bnx2x *bp, u32 load_code)
{
	int rc = 0;

	DP(BNX2X_MSG_MCP, "function %d load_code %x\n",
	   BP_FUNC(bp), load_code);

	/* DMAE is unusable until the function stage flips this on */
	bp->dmae_ready = 0;
	mutex_init(&bp->dmae_mutex);
	rc = bnx2x_gunzip_init(bp);
	if (rc)
		return rc;

	switch (load_code) {
	case FW_MSG_CODE_DRV_LOAD_COMMON:
		rc = bnx2x_init_hw_common(bp, load_code);
		if (rc)
			goto init_hw_err;
		/* no break */

	case FW_MSG_CODE_DRV_LOAD_PORT:
		rc = bnx2x_init_hw_port(bp);
		if (rc)
			goto init_hw_err;
		/* no break */

	case FW_MSG_CODE_DRV_LOAD_FUNCTION:
		rc = bnx2x_init_hw_func(bp);
		if (rc)
			goto init_hw_err;
		break;

	default:
		BNX2X_ERR("Unknown load_code (0x%x) from MCP\n", load_code);
		break;
	}

	/* seed the driver-pulse sequence from the MCP's shared memory so
	 * the keep-alive handshake starts in sync (skipped w/o bootcode) */
	if (!BP_NOMCP(bp)) {
		int func = BP_FUNC(bp);

		bp->fw_drv_pulse_wr_seq =
		       (SHMEM_RD(bp, func_mb[func].drv_pulse_mb) &
			DRV_PULSE_SEQ_MASK);
		DP(BNX2X_MSG_MCP, "drv_pulse 0x%x\n", bp->fw_drv_pulse_wr_seq);
	}

	/* reached on both success and failure: release the gunzip buffer */
init_hw_err:
	bnx2x_gunzip_end(bp);

	return rc;
}
4968
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00004969void bnx2x_free_mem(struct bnx2x *bp)
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02004970{
4971
4972#define BNX2X_PCI_FREE(x, y, size) \
4973 do { \
4974 if (x) { \
Dmitry Kravkov523224a2010-10-06 03:23:26 +00004975 dma_free_coherent(&bp->pdev->dev, size, (void *)x, y); \
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02004976 x = NULL; \
4977 y = 0; \
4978 } \
4979 } while (0)
4980
4981#define BNX2X_FREE(x) \
4982 do { \
4983 if (x) { \
Dmitry Kravkov523224a2010-10-06 03:23:26 +00004984 kfree((void *)x); \
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02004985 x = NULL; \
4986 } \
4987 } while (0)
4988
4989 int i;
4990
4991 /* fastpath */
Eilon Greenstein555f6c72009-02-12 08:36:11 +00004992 /* Common */
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02004993 for_each_queue(bp, i) {
Eilon Greenstein555f6c72009-02-12 08:36:11 +00004994 /* status blocks */
Dmitry Kravkov523224a2010-10-06 03:23:26 +00004995 BNX2X_PCI_FREE(bnx2x_fp(bp, i, status_blk.e1x_sb),
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02004996 bnx2x_fp(bp, i, status_blk_mapping),
Dmitry Kravkov523224a2010-10-06 03:23:26 +00004997 sizeof(struct host_hc_status_block_e1x));
Eilon Greenstein555f6c72009-02-12 08:36:11 +00004998 }
4999 /* Rx */
Vladislav Zolotarov54b9dda2009-11-16 06:05:58 +00005000 for_each_queue(bp, i) {
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02005001
Eilon Greenstein555f6c72009-02-12 08:36:11 +00005002 /* fastpath rx rings: rx_buf rx_desc rx_comp */
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02005003 BNX2X_FREE(bnx2x_fp(bp, i, rx_buf_ring));
5004 BNX2X_PCI_FREE(bnx2x_fp(bp, i, rx_desc_ring),
5005 bnx2x_fp(bp, i, rx_desc_mapping),
5006 sizeof(struct eth_rx_bd) * NUM_RX_BD);
5007
5008 BNX2X_PCI_FREE(bnx2x_fp(bp, i, rx_comp_ring),
5009 bnx2x_fp(bp, i, rx_comp_mapping),
5010 sizeof(struct eth_fast_path_rx_cqe) *
5011 NUM_RCQ_BD);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02005012
Vladislav Zolotarov7a9b2552008-06-23 20:34:36 -07005013 /* SGE ring */
Eilon Greenstein32626232008-08-13 15:51:07 -07005014 BNX2X_FREE(bnx2x_fp(bp, i, rx_page_ring));
Vladislav Zolotarov7a9b2552008-06-23 20:34:36 -07005015 BNX2X_PCI_FREE(bnx2x_fp(bp, i, rx_sge_ring),
5016 bnx2x_fp(bp, i, rx_sge_mapping),
5017 BCM_PAGE_SIZE * NUM_RX_SGE_PAGES);
5018 }
Eilon Greenstein555f6c72009-02-12 08:36:11 +00005019 /* Tx */
Vladislav Zolotarov54b9dda2009-11-16 06:05:58 +00005020 for_each_queue(bp, i) {
Eilon Greenstein555f6c72009-02-12 08:36:11 +00005021
5022 /* fastpath tx rings: tx_buf tx_desc */
5023 BNX2X_FREE(bnx2x_fp(bp, i, tx_buf_ring));
5024 BNX2X_PCI_FREE(bnx2x_fp(bp, i, tx_desc_ring),
5025 bnx2x_fp(bp, i, tx_desc_mapping),
Eilon Greensteinca003922009-08-12 22:53:28 -07005026 sizeof(union eth_tx_bd_types) * NUM_TX_BD);
Eilon Greenstein555f6c72009-02-12 08:36:11 +00005027 }
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02005028 /* end of fastpath */
5029
5030 BNX2X_PCI_FREE(bp->def_status_blk, bp->def_status_blk_mapping,
Dmitry Kravkov523224a2010-10-06 03:23:26 +00005031 sizeof(struct host_sp_status_block));
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02005032
5033 BNX2X_PCI_FREE(bp->slowpath, bp->slowpath_mapping,
Eilon Greenstein34f80b02008-06-23 20:33:01 -07005034 sizeof(struct bnx2x_slowpath));
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02005035
Dmitry Kravkov523224a2010-10-06 03:23:26 +00005036 BNX2X_PCI_FREE(bp->context.vcxt, bp->context.cxt_mapping,
5037 bp->context.size);
5038
5039 bnx2x_ilt_mem_op(bp, ILT_MEMOP_FREE);
5040
5041 BNX2X_FREE(bp->ilt->lines);
Michael Chan37b091b2009-10-10 13:46:55 +00005042#ifdef BCM_CNIC
Dmitry Kravkov523224a2010-10-06 03:23:26 +00005043
5044 BNX2X_PCI_FREE(bp->cnic_sb.e1x_sb, bp->cnic_sb_mapping,
5045 sizeof(struct host_hc_status_block_e1x));
5046 BNX2X_PCI_FREE(bp->t2, bp->t2_mapping, SRC_T2_SZ);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02005047#endif
Vladislav Zolotarov7a9b2552008-06-23 20:34:36 -07005048 BNX2X_PCI_FREE(bp->spq, bp->spq_mapping, BCM_PAGE_SIZE);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02005049
Dmitry Kravkov523224a2010-10-06 03:23:26 +00005050 BNX2X_PCI_FREE(bp->eq_ring, bp->eq_mapping,
5051 BCM_PAGE_SIZE * NUM_EQ_PAGES);
5052
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02005053#undef BNX2X_PCI_FREE
5054#undef BNX2X_KFREE
5055}
5056
/*
 * bnx2x_alloc_mem - allocate all driver memory for one adapter instance.
 *
 * Allocates, in order: per-queue fastpath status blocks, Rx rings
 * (buffer/descriptor/completion/SGE), Tx rings, the CNIC status block
 * and searcher T2 table (when BCM_CNIC), the slowpath status block and
 * slowpath area, the CDU context memory, the ILT lines, the SPQ and the
 * EQ.  Any allocation failure jumps to alloc_mem_err, which unwinds
 * everything via bnx2x_free_mem (whose macros tolerate NULL pointers).
 *
 * Returns 0 on success, -ENOMEM on any allocation failure.
 */
int bnx2x_alloc_mem(struct bnx2x *bp)
{

/* DMA-coherent allocation, zeroed explicitly; bails out on failure */
#define BNX2X_PCI_ALLOC(x, y, size) \
	do { \
		x = dma_alloc_coherent(&bp->pdev->dev, size, y, GFP_KERNEL); \
		if (x == NULL) \
			goto alloc_mem_err; \
		memset(x, 0, size); \
	} while (0)

/* zeroed kernel allocation; bails out on failure */
#define BNX2X_ALLOC(x, size) \
	do { \
		x = kzalloc(size, GFP_KERNEL); \
		if (x == NULL) \
			goto alloc_mem_err; \
	} while (0)

	int i;
	void *p;

	/* fastpath */
	/* Common */
	for_each_queue(bp, i) {
		bnx2x_fp(bp, i, bp) = bp;

		/* status blocks: allocate raw, then expose the E1x layout
		 * plus cached pointers into its index/running-index fields */
		BNX2X_PCI_ALLOC(p,
				&bnx2x_fp(bp, i, status_blk_mapping),
				sizeof(struct host_hc_status_block_e1x));

		bnx2x_fp(bp, i, status_blk.e1x_sb) =
			(struct host_hc_status_block_e1x *)p;

		bnx2x_fp(bp, i, sb_index_values) = (__le16 *)
			(bnx2x_fp(bp, i, status_blk.e1x_sb)->sb.index_values);
		bnx2x_fp(bp, i, sb_running_index) = (__le16 *)
			(bnx2x_fp(bp, i, status_blk.e1x_sb)->sb.running_index);
	}
	/* Rx */
	for_each_queue(bp, i) {

		/* fastpath rx rings: rx_buf rx_desc rx_comp */
		BNX2X_ALLOC(bnx2x_fp(bp, i, rx_buf_ring),
				sizeof(struct sw_rx_bd) * NUM_RX_BD);
		BNX2X_PCI_ALLOC(bnx2x_fp(bp, i, rx_desc_ring),
				&bnx2x_fp(bp, i, rx_desc_mapping),
				sizeof(struct eth_rx_bd) * NUM_RX_BD);

		BNX2X_PCI_ALLOC(bnx2x_fp(bp, i, rx_comp_ring),
				&bnx2x_fp(bp, i, rx_comp_mapping),
				sizeof(struct eth_fast_path_rx_cqe) *
				NUM_RCQ_BD);

		/* SGE ring */
		BNX2X_ALLOC(bnx2x_fp(bp, i, rx_page_ring),
				sizeof(struct sw_rx_page) * NUM_RX_SGE);
		BNX2X_PCI_ALLOC(bnx2x_fp(bp, i, rx_sge_ring),
				&bnx2x_fp(bp, i, rx_sge_mapping),
				BCM_PAGE_SIZE * NUM_RX_SGE_PAGES);
	}
	/* Tx */
	for_each_queue(bp, i) {

		/* fastpath tx rings: tx_buf tx_desc */
		BNX2X_ALLOC(bnx2x_fp(bp, i, tx_buf_ring),
				sizeof(struct sw_tx_bd) * NUM_TX_BD);
		BNX2X_PCI_ALLOC(bnx2x_fp(bp, i, tx_desc_ring),
				&bnx2x_fp(bp, i, tx_desc_mapping),
				sizeof(union eth_tx_bd_types) * NUM_TX_BD);
	}
	/* end of fastpath */

#ifdef BCM_CNIC
	BNX2X_PCI_ALLOC(bp->cnic_sb.e1x_sb, &bp->cnic_sb_mapping,
			sizeof(struct host_hc_status_block_e1x));

	/* allocate searcher T2 table */
	BNX2X_PCI_ALLOC(bp->t2, &bp->t2_mapping, SRC_T2_SZ);
#endif


	BNX2X_PCI_ALLOC(bp->def_status_blk, &bp->def_status_blk_mapping,
			sizeof(struct host_sp_status_block));

	BNX2X_PCI_ALLOC(bp->slowpath, &bp->slowpath_mapping,
			sizeof(struct bnx2x_slowpath));

	/* one CDU context per L2 connection id */
	bp->context.size = sizeof(union cdu_context) * bp->l2_cid_count;
	BNX2X_PCI_ALLOC(bp->context.vcxt, &bp->context.cxt_mapping,
			bp->context.size);

	BNX2X_ALLOC(bp->ilt->lines, sizeof(struct ilt_line) * ILT_MAX_LINES);

	if (bnx2x_ilt_mem_op(bp, ILT_MEMOP_ALLOC))
		goto alloc_mem_err;

	/* Slow path ring */
	BNX2X_PCI_ALLOC(bp->spq, &bp->spq_mapping, BCM_PAGE_SIZE);

	/* EQ */
	BNX2X_PCI_ALLOC(bp->eq_ring, &bp->eq_mapping,
			BCM_PAGE_SIZE * NUM_EQ_PAGES);
	return 0;

alloc_mem_err:
	/* unwind every allocation made so far */
	bnx2x_free_mem(bp);
	return -ENOMEM;

#undef BNX2X_PCI_ALLOC
#undef BNX2X_ALLOC
}
5169
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02005170/*
5171 * Init service functions
5172 */
/*
 * bnx2x_func_start - post a FUNCTION_START ramrod and wait for completion.
 *
 * Queues the common FUNCTION_START command on the slowpath queue, then
 * polls until bp->state reaches BNX2X_STATE_FUNC_STARTED.
 * Returns 0 on success or bnx2x_wait_ramrod()'s error on timeout.
 */
int bnx2x_func_start(struct bnx2x *bp)
{
	bnx2x_sp_post(bp, RAMROD_CMD_ID_COMMON_FUNCTION_START, 0, 0, 0, 1);

	/* Wait for completion */
	return bnx2x_wait_ramrod(bp, BNX2X_STATE_FUNC_STARTED, 0, &(bp->state),
				 WAIT_RAMROD_COMMON);
}
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02005181
/*
 * bnx2x_func_stop - post a FUNCTION_STOP ramrod and wait for completion.
 *
 * Queues the common FUNCTION_STOP command on the slowpath queue, then
 * polls until bp->state reaches BNX2X_STATE_CLOSING_WAIT4_UNLOAD.
 * Returns 0 on success or bnx2x_wait_ramrod()'s error on timeout.
 */
int bnx2x_func_stop(struct bnx2x *bp)
{
	bnx2x_sp_post(bp, RAMROD_CMD_ID_COMMON_FUNCTION_STOP, 0, 0, 0, 1);

	/* Wait for completion */
	return bnx2x_wait_ramrod(bp, BNX2X_STATE_CLOSING_WAIT4_UNLOAD,
				 0, &(bp->state), WAIT_RAMROD_COMMON);
}
5190
Michael Chane665bfd2009-10-10 13:46:54 +00005191/**
Dmitry Kravkov523224a2010-10-06 03:23:26 +00005192 * Sets a MAC in a CAM for a few L2 Clients for E1x chip
Michael Chane665bfd2009-10-10 13:46:54 +00005193 *
5194 * @param bp driver descriptor
5195 * @param set set or clear an entry (1 or 0)
5196 * @param mac pointer to a buffer containing a MAC
5197 * @param cl_bit_vec bit vector of clients to register a MAC for
5198 * @param cam_offset offset in a CAM to use
Dmitry Kravkov523224a2010-10-06 03:23:26 +00005199 * @param is_bcast is the set MAC a broadcast address (for E1 only)
Michael Chane665bfd2009-10-10 13:46:54 +00005200 */
Dmitry Kravkov523224a2010-10-06 03:23:26 +00005201static void bnx2x_set_mac_addr_gen(struct bnx2x *bp, int set, u8 *mac,
5202 u32 cl_bit_vec, u8 cam_offset,
5203 u8 is_bcast)
Eilon Greenstein34f80b02008-06-23 20:33:01 -07005204{
Dmitry Kravkov523224a2010-10-06 03:23:26 +00005205 struct mac_configuration_cmd *config =
5206 (struct mac_configuration_cmd *)bnx2x_sp(bp, mac_config);
5207 int ramrod_flags = WAIT_RAMROD_COMMON;
5208
5209 bp->set_mac_pending = 1;
5210 smp_wmb();
5211
5212 config->hdr.length = 1 + (is_bcast ? 1 : 0);
5213 config->hdr.offset = cam_offset;
5214 config->hdr.client_id = 0xff;
5215 config->hdr.reserved1 = 0;
Eilon Greenstein34f80b02008-06-23 20:33:01 -07005216
Eilon Greenstein8d9c5f32009-02-15 23:24:08 -08005217 config->hdr.length = 1;
Michael Chane665bfd2009-10-10 13:46:54 +00005218 config->hdr.offset = cam_offset;
5219 config->hdr.client_id = 0xff;
Eilon Greenstein34f80b02008-06-23 20:33:01 -07005220 config->hdr.reserved1 = 0;
5221
5222 /* primary MAC */
5223 config->config_table[0].msb_mac_addr =
Michael Chane665bfd2009-10-10 13:46:54 +00005224 swab16(*(u16 *)&mac[0]);
Eilon Greenstein34f80b02008-06-23 20:33:01 -07005225 config->config_table[0].middle_mac_addr =
Michael Chane665bfd2009-10-10 13:46:54 +00005226 swab16(*(u16 *)&mac[2]);
Eilon Greenstein34f80b02008-06-23 20:33:01 -07005227 config->config_table[0].lsb_mac_addr =
Michael Chane665bfd2009-10-10 13:46:54 +00005228 swab16(*(u16 *)&mac[4]);
Eilon Greensteinca003922009-08-12 22:53:28 -07005229 config->config_table[0].clients_bit_vector =
Michael Chane665bfd2009-10-10 13:46:54 +00005230 cpu_to_le32(cl_bit_vec);
Eilon Greenstein34f80b02008-06-23 20:33:01 -07005231 config->config_table[0].vlan_id = 0;
Dmitry Kravkov523224a2010-10-06 03:23:26 +00005232 config->config_table[0].pf_id = BP_FUNC(bp);
Yitchak Gertner3101c2b2008-08-13 15:52:28 -07005233 if (set)
Dmitry Kravkov523224a2010-10-06 03:23:26 +00005234 SET_FLAG(config->config_table[0].flags,
5235 MAC_CONFIGURATION_ENTRY_ACTION_TYPE,
5236 T_ETH_MAC_COMMAND_SET);
Yitchak Gertner3101c2b2008-08-13 15:52:28 -07005237 else
Dmitry Kravkov523224a2010-10-06 03:23:26 +00005238 SET_FLAG(config->config_table[0].flags,
5239 MAC_CONFIGURATION_ENTRY_ACTION_TYPE,
5240 T_ETH_MAC_COMMAND_INVALIDATE);
Eilon Greenstein34f80b02008-06-23 20:33:01 -07005241
Dmitry Kravkov523224a2010-10-06 03:23:26 +00005242 if (is_bcast)
5243 SET_FLAG(config->config_table[0].flags,
5244 MAC_CONFIGURATION_ENTRY_BROADCAST, 1);
5245
5246 DP(NETIF_MSG_IFUP, "%s MAC (%04x:%04x:%04x) PF_ID %d CLID mask %d\n",
Yitchak Gertner3101c2b2008-08-13 15:52:28 -07005247 (set ? "setting" : "clearing"),
Eilon Greenstein34f80b02008-06-23 20:33:01 -07005248 config->config_table[0].msb_mac_addr,
5249 config->config_table[0].middle_mac_addr,
Dmitry Kravkov523224a2010-10-06 03:23:26 +00005250 config->config_table[0].lsb_mac_addr, BP_FUNC(bp), cl_bit_vec);
Eilon Greenstein34f80b02008-06-23 20:33:01 -07005251
Dmitry Kravkov523224a2010-10-06 03:23:26 +00005252 bnx2x_sp_post(bp, RAMROD_CMD_ID_COMMON_SET_MAC, 0,
Eilon Greenstein34f80b02008-06-23 20:33:01 -07005253 U64_HI(bnx2x_sp_mapping(bp, mac_config)),
Dmitry Kravkov523224a2010-10-06 03:23:26 +00005254 U64_LO(bnx2x_sp_mapping(bp, mac_config)), 1);
5255
5256 /* Wait for a completion */
5257 bnx2x_wait_ramrod(bp, 0, 0, &bp->set_mac_pending, ramrod_flags);
Eilon Greenstein34f80b02008-06-23 20:33:01 -07005258}
5259
Dmitry Kravkov523224a2010-10-06 03:23:26 +00005260
5261int bnx2x_wait_ramrod(struct bnx2x *bp, int state, int idx,
5262 int *state_p, int flags)
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02005263{
5264 /* can take a while if any port is running */
Eilon Greenstein8b3a0f02009-02-12 08:37:23 +00005265 int cnt = 5000;
Dmitry Kravkov523224a2010-10-06 03:23:26 +00005266 u8 poll = flags & WAIT_RAMROD_POLL;
5267 u8 common = flags & WAIT_RAMROD_COMMON;
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02005268
Eliezer Tamirc14423f2008-02-28 11:49:42 -08005269 DP(NETIF_MSG_IFUP, "%s for state to become %x on IDX [%d]\n",
5270 poll ? "polling" : "waiting", state, idx);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02005271
5272 might_sleep();
Eilon Greenstein34f80b02008-06-23 20:33:01 -07005273 while (cnt--) {
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02005274 if (poll) {
Dmitry Kravkov523224a2010-10-06 03:23:26 +00005275 if (common)
5276 bnx2x_eq_int(bp);
5277 else {
5278 bnx2x_rx_int(bp->fp, 10);
5279 /* if index is different from 0
5280 * the reply for some commands will
5281 * be on the non default queue
5282 */
5283 if (idx)
5284 bnx2x_rx_int(&bp->fp[idx], 10);
5285 }
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02005286 }
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02005287
Yitchak Gertner3101c2b2008-08-13 15:52:28 -07005288 mb(); /* state is changed by bnx2x_sp_event() */
Eilon Greenstein8b3a0f02009-02-12 08:37:23 +00005289 if (*state_p == state) {
5290#ifdef BNX2X_STOP_ON_ERROR
5291 DP(NETIF_MSG_IFUP, "exit (cnt %d)\n", 5000 - cnt);
5292#endif
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02005293 return 0;
Eilon Greenstein8b3a0f02009-02-12 08:37:23 +00005294 }
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02005295
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02005296 msleep(1);
Eilon Greensteine3553b22009-08-12 08:23:31 +00005297
5298 if (bp->panic)
5299 return -EIO;
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02005300 }
5301
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02005302 /* timeout! */
Eliezer Tamir49d66772008-02-28 11:53:13 -08005303 BNX2X_ERR("timeout %s for state %x on IDX [%d]\n",
5304 poll ? "polling" : "waiting", state, idx);
Eilon Greenstein34f80b02008-06-23 20:33:01 -07005305#ifdef BNX2X_STOP_ON_ERROR
5306 bnx2x_panic();
5307#endif
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02005308
Eliezer Tamir49d66772008-02-28 11:53:13 -08005309 return -EBUSY;
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02005310}
5311
Dmitry Kravkov523224a2010-10-06 03:23:26 +00005312u8 bnx2x_e1h_cam_offset(struct bnx2x *bp, u8 rel_offset)
Michael Chane665bfd2009-10-10 13:46:54 +00005313{
Dmitry Kravkov523224a2010-10-06 03:23:26 +00005314 return E1H_FUNC_MAX * rel_offset + BP_FUNC(bp);
Michael Chane665bfd2009-10-10 13:46:54 +00005315}
5316
Dmitry Kravkov523224a2010-10-06 03:23:26 +00005317void bnx2x_set_eth_mac(struct bnx2x *bp, int set)
Michael Chane665bfd2009-10-10 13:46:54 +00005318{
Dmitry Kravkov523224a2010-10-06 03:23:26 +00005319 u8 cam_offset = (CHIP_IS_E1(bp) ? (BP_PORT(bp) ? 32 : 0) :
5320 bnx2x_e1h_cam_offset(bp, CAM_ETH_LINE));
5321
5322 /* networking MAC */
5323 bnx2x_set_mac_addr_gen(bp, set, bp->dev->dev_addr,
5324 (1 << bp->fp->cl_id), cam_offset , 0);
5325
5326 if (CHIP_IS_E1(bp)) {
5327 /* broadcast MAC */
5328 u8 bcast[ETH_ALEN] = { 0xff, 0xff, 0xff, 0xff, 0xff, 0xff };
5329 bnx2x_set_mac_addr_gen(bp, set, bcast, 0, cam_offset + 1, 1);
5330 }
5331}
/* Program the E1 multicast MAC filters.
 *
 * Walks the net_device multicast list, writes each address into the
 * shared mac_configuration_cmd at CAM @offset, invalidates any stale
 * trailing entries left over from a previously longer list, and posts a
 * SET_MAC ramrod.  The caller is responsible for waiting on
 * bp->set_mac_pending for completion.
 */
static void bnx2x_set_e1_mc_list(struct bnx2x *bp, u8 offset)
{
	int i = 0, old;
	struct net_device *dev = bp->dev;
	struct netdev_hw_addr *ha;
	struct mac_configuration_cmd *config_cmd = bnx2x_sp(bp, mcast_config);
	dma_addr_t config_cmd_map = bnx2x_sp_mapping(bp, mcast_config);

	netdev_for_each_mc_addr(ha, dev) {
		/* copy mac */
		config_cmd->config_table[i].msb_mac_addr =
			swab16(*(u16 *)&bnx2x_mc_addr(ha)[0]);
		config_cmd->config_table[i].middle_mac_addr =
			swab16(*(u16 *)&bnx2x_mc_addr(ha)[2]);
		config_cmd->config_table[i].lsb_mac_addr =
			swab16(*(u16 *)&bnx2x_mc_addr(ha)[4]);

		config_cmd->config_table[i].vlan_id = 0;
		config_cmd->config_table[i].pf_id = BP_FUNC(bp);
		config_cmd->config_table[i].clients_bit_vector =
			cpu_to_le32(1 << BP_L_ID(bp));

		SET_FLAG(config_cmd->config_table[i].flags,
			 MAC_CONFIGURATION_ENTRY_ACTION_TYPE,
			 T_ETH_MAC_COMMAND_SET);

		DP(NETIF_MSG_IFUP,
		   "setting MCAST[%d] (%04x:%04x:%04x)\n", i,
		   config_cmd->config_table[i].msb_mac_addr,
		   config_cmd->config_table[i].middle_mac_addr,
		   config_cmd->config_table[i].lsb_mac_addr);
		i++;
	}
	/* hdr.length still holds the entry count of the PREVIOUS command;
	 * read it before overwriting so entries the new (shorter) list no
	 * longer covers can be explicitly invalidated.
	 */
	old = config_cmd->hdr.length;
	if (old > i) {
		for (; i < old; i++) {
			if (CAM_IS_INVALID(config_cmd->
					   config_table[i])) {
				/* already invalidated */
				break;
			}
			/* invalidate */
			SET_FLAG(config_cmd->config_table[i].flags,
				 MAC_CONFIGURATION_ENTRY_ACTION_TYPE,
				 T_ETH_MAC_COMMAND_INVALIDATE);
		}
	}

	config_cmd->hdr.length = i;
	config_cmd->hdr.offset = offset;
	config_cmd->hdr.client_id = 0xff;
	config_cmd->hdr.reserved1 = 0;

	/* make the command visible before the ramrod is posted */
	bp->set_mac_pending = 1;
	smp_wmb();

	bnx2x_sp_post(bp, RAMROD_CMD_ID_COMMON_SET_MAC, 0,
		      U64_HI(config_cmd_map), U64_LO(config_cmd_map), 1);
}
5391static void bnx2x_invlidate_e1_mc_list(struct bnx2x *bp)
5392{
5393 int i;
5394 struct mac_configuration_cmd *config_cmd = bnx2x_sp(bp, mcast_config);
5395 dma_addr_t config_cmd_map = bnx2x_sp_mapping(bp, mcast_config);
5396 int ramrod_flags = WAIT_RAMROD_COMMON;
5397
5398 bp->set_mac_pending = 1;
5399 smp_wmb();
5400
5401 for (i = 0; i < config_cmd->hdr.length; i++)
5402 SET_FLAG(config_cmd->config_table[i].flags,
5403 MAC_CONFIGURATION_ENTRY_ACTION_TYPE,
5404 T_ETH_MAC_COMMAND_INVALIDATE);
5405
5406 bnx2x_sp_post(bp, RAMROD_CMD_ID_COMMON_SET_MAC, 0,
5407 U64_HI(config_cmd_map), U64_LO(config_cmd_map), 1);
Michael Chane665bfd2009-10-10 13:46:54 +00005408
5409 /* Wait for a completion */
Dmitry Kravkov523224a2010-10-06 03:23:26 +00005410 bnx2x_wait_ramrod(bp, 0, 0, &bp->set_mac_pending,
5411 ramrod_flags);
5412
Michael Chane665bfd2009-10-10 13:46:54 +00005413}
5414
Dmitry Kravkov523224a2010-10-06 03:23:26 +00005415
Michael Chan993ac7b2009-10-10 13:46:56 +00005416#ifdef BCM_CNIC
/**
 * Set iSCSI MAC(s) at the next entries in the CAM after the ETH
 * MAC(s). This function will wait until the ramrod completion
 * returns.
 *
 * @param bp driver handle
 * @param set set or clear the CAM entry
 *
 * @return 0 if success, -ENODEV if ramrod doesn't return.
 */
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00005427int bnx2x_set_iscsi_eth_mac_addr(struct bnx2x *bp, int set)
Michael Chan993ac7b2009-10-10 13:46:56 +00005428{
Dmitry Kravkov523224a2010-10-06 03:23:26 +00005429 u8 cam_offset = (CHIP_IS_E1(bp) ? ((BP_PORT(bp) ? 32 : 0) + 2) :
5430 bnx2x_e1h_cam_offset(bp, CAM_ISCSI_ETH_LINE));
5431 u32 iscsi_l2_cl_id = BNX2X_ISCSI_ETH_CL_ID;
5432 u32 cl_bit_vec = (1 << iscsi_l2_cl_id);
Michael Chan993ac7b2009-10-10 13:46:56 +00005433
5434 /* Send a SET_MAC ramrod */
Dmitry Kravkov523224a2010-10-06 03:23:26 +00005435 bnx2x_set_mac_addr_gen(bp, set, bp->iscsi_mac, cl_bit_vec,
5436 cam_offset, 0);
Michael Chan993ac7b2009-10-10 13:46:56 +00005437 return 0;
5438}
5439#endif
5440
/* Marshal the driver's client init parameters into the FW's
 * client_init_ramrod_data layout (general / Rx / Tx / flow-control
 * sections).  The buffer is fully cleared first, so any field not
 * assigned below is zero.
 */
static void bnx2x_fill_cl_init_data(struct bnx2x *bp,
				    struct bnx2x_client_init_params *params,
				    u8 activate,
				    struct client_init_ramrod_data *data)
{
	/* Clear the buffer */
	memset(data, 0, sizeof(*data));

	/* general */
	data->general.client_id = params->rxq_params.cl_id;
	data->general.statistics_counter_id = params->rxq_params.stat_id;
	data->general.statistics_en_flg =
		(params->rxq_params.flags & QUEUE_FLG_STATS) ? 1 : 0;
	data->general.activate_flg = activate;
	data->general.sp_client_id = params->rxq_params.spcl_id;

	/* Rx data */
	data->rx.tpa_en_flg =
		(params->rxq_params.flags & QUEUE_FLG_TPA) ? 1 : 0;
	data->rx.vmqueue_mode_en_flg = 0;
	data->rx.cache_line_alignment_log_size =
		params->rxq_params.cache_line_log;
	data->rx.enable_dynamic_hc =
		(params->rxq_params.flags & QUEUE_FLG_DHC) ? 1 : 0;
	data->rx.max_sges_for_packet = params->rxq_params.max_sges_pkt;
	data->rx.client_qzone_id = params->rxq_params.cl_qzone_id;
	data->rx.max_agg_size = params->rxq_params.tpa_agg_sz;

	/* We don't set drop flags */
	data->rx.drop_ip_cs_err_flg = 0;
	data->rx.drop_tcp_cs_err_flg = 0;
	data->rx.drop_ttl0_flg = 0;
	data->rx.drop_udp_cs_err_flg = 0;

	data->rx.inner_vlan_removal_enable_flg =
		(params->rxq_params.flags & QUEUE_FLG_VLAN) ? 1 : 0;
	data->rx.outer_vlan_removal_enable_flg =
		(params->rxq_params.flags & QUEUE_FLG_OV) ? 1 : 0;
	data->rx.status_block_id = params->rxq_params.fw_sb_id;
	data->rx.rx_sb_index_number = params->rxq_params.sb_cq_index;
	/* sizes/MTU and DMA addresses go to the FW in little endian */
	data->rx.bd_buff_size = cpu_to_le16(params->rxq_params.buf_sz);
	data->rx.sge_buff_size = cpu_to_le16(params->rxq_params.sge_buf_sz);
	data->rx.mtu = cpu_to_le16(params->rxq_params.mtu);
	data->rx.bd_page_base.lo =
		cpu_to_le32(U64_LO(params->rxq_params.dscr_map));
	data->rx.bd_page_base.hi =
		cpu_to_le32(U64_HI(params->rxq_params.dscr_map));
	data->rx.sge_page_base.lo =
		cpu_to_le32(U64_LO(params->rxq_params.sge_map));
	data->rx.sge_page_base.hi =
		cpu_to_le32(U64_HI(params->rxq_params.sge_map));
	data->rx.cqe_page_base.lo =
		cpu_to_le32(U64_LO(params->rxq_params.rcq_map));
	data->rx.cqe_page_base.hi =
		cpu_to_le32(U64_HI(params->rxq_params.rcq_map));
	data->rx.is_leading_rss =
		(params->ramrod_params.flags & CLIENT_IS_LEADING_RSS) ? 1 : 0;
	/* the leading RSS client also receives (approximate) multicasts */
	data->rx.is_approx_mcast = data->rx.is_leading_rss;

	/* Tx data */
	data->tx.enforce_security_flg = 0; /* VF specific */
	data->tx.tx_status_block_id = params->txq_params.fw_sb_id;
	data->tx.tx_sb_index_number = params->txq_params.sb_cq_index;
	data->tx.mtu = 0; /* VF specific */
	data->tx.tx_bd_page_base.lo =
		cpu_to_le32(U64_LO(params->txq_params.dscr_map));
	data->tx.tx_bd_page_base.hi =
		cpu_to_le32(U64_HI(params->txq_params.dscr_map));

	/* flow control data */
	data->fc.cqe_pause_thr_low = cpu_to_le16(params->pause.rcq_th_lo);
	data->fc.cqe_pause_thr_high = cpu_to_le16(params->pause.rcq_th_hi);
	data->fc.bd_pause_thr_low = cpu_to_le16(params->pause.bd_th_lo);
	data->fc.bd_pause_thr_high = cpu_to_le16(params->pause.bd_th_hi);
	data->fc.sge_pause_thr_low = cpu_to_le16(params->pause.sge_th_lo);
	data->fc.sge_pause_thr_high = cpu_to_le16(params->pause.sge_th_hi);
	data->fc.rx_cos_mask = cpu_to_le16(params->pause.pri_map);

	data->fc.safc_group_num = params->txq_params.cos;
	data->fc.safc_group_en_flg =
		(params->txq_params.flags & QUEUE_FLG_COS) ? 1 : 0;
	data->fc.traffic_type = LLFC_TRAFFIC_TYPE_NW;
}
5524
/* Stamp the CDU validation bytes into an ETH connection context so the
 * HW Context Distribution Unit accepts accesses for this @cid.
 */
static inline void bnx2x_set_ctx_validation(struct eth_context *cxt, u32 cid)
{
	/* ustorm cxt validation */
	cxt->ustorm_ag_context.cdu_usage =
		CDU_RSRVD_VALUE_TYPE_A(cid, CDU_REGION_NUMBER_UCM_AG,
				       ETH_CONNECTION_TYPE);
	/* xcontext validation */
	cxt->xstorm_ag_context.cdu_reserved =
		CDU_RSRVD_VALUE_TYPE_A(cid, CDU_REGION_NUMBER_XCM_AG,
				       ETH_CONNECTION_TYPE);
}
5536
/* Bring up one L2 client in the FW: program HC coalescing for its Tx/Rx
 * status block indices, stamp the context validation bytes, zero the
 * client's storm statistics, fill the CLIENT_SETUP ramrod data and post
 * the ramrod, then wait for the requested completion state.
 * Returns 0 on success or the bnx2x_wait_ramrod() error code.
 */
int bnx2x_setup_fw_client(struct bnx2x *bp,
			  struct bnx2x_client_init_params *params,
			  u8 activate,
			  struct client_init_ramrod_data *data,
			  dma_addr_t data_mapping)
{
	u16 hc_usec;
	int ramrod = RAMROD_CMD_ID_ETH_CLIENT_SETUP;
	int ramrod_flags = 0, rc;

	/* HC and context validation values */
	/* hc_rate is in updates/sec; convert to an interval in usec */
	hc_usec = params->txq_params.hc_rate ?
		1000000 / params->txq_params.hc_rate : 0;
	bnx2x_update_coalesce_sb_index(bp,
			params->txq_params.fw_sb_id,
			params->txq_params.sb_cq_index,
			!(params->txq_params.flags & QUEUE_FLG_HC),
			hc_usec);

	/* client is now transitioning; completion flips this to OPEN */
	*(params->ramrod_params.pstate) = BNX2X_FP_STATE_OPENING;

	hc_usec = params->rxq_params.hc_rate ?
		1000000 / params->rxq_params.hc_rate : 0;
	bnx2x_update_coalesce_sb_index(bp,
			params->rxq_params.fw_sb_id,
			params->rxq_params.sb_cq_index,
			!(params->rxq_params.flags & QUEUE_FLG_HC),
			hc_usec);

	bnx2x_set_ctx_validation(params->rxq_params.cxt,
				 params->rxq_params.cid);

	/* zero stats */
	if (params->txq_params.flags & QUEUE_FLG_STATS)
		storm_memset_xstats_zero(bp, BP_PORT(bp),
					 params->txq_params.stat_id);

	if (params->rxq_params.flags & QUEUE_FLG_STATS) {
		storm_memset_ustats_zero(bp, BP_PORT(bp),
					 params->rxq_params.stat_id);
		storm_memset_tstats_zero(bp, BP_PORT(bp),
					 params->rxq_params.stat_id);
	}

	/* Fill the ramrod data */
	bnx2x_fill_cl_init_data(bp, params, activate, data);

	/* SETUP ramrod.
	 *
	 * bnx2x_sp_post() takes a spin_lock thus no other explicit memory
	 * barrier except from mmiowb() is needed to impose a
	 * proper ordering of memory operations.
	 */
	mmiowb();


	bnx2x_sp_post(bp, ramrod, params->ramrod_params.cid,
		      U64_HI(data_mapping), U64_LO(data_mapping), 0);

	/* Wait for completion */
	rc = bnx2x_wait_ramrod(bp, params->ramrod_params.state,
			       params->ramrod_params.index,
			       params->ramrod_params.pstate,
			       ramrod_flags);
	return rc;
}
5603
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00005604void bnx2x_set_num_queues_msix(struct bnx2x *bp)
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02005605{
Eilon Greensteinca003922009-08-12 22:53:28 -07005606
5607 switch (bp->multi_mode) {
5608 case ETH_RSS_MODE_DISABLED:
Vladislav Zolotarov54b9dda2009-11-16 06:05:58 +00005609 bp->num_queues = 1;
Eilon Greensteinca003922009-08-12 22:53:28 -07005610 break;
5611
5612 case ETH_RSS_MODE_REGULAR:
Vladislav Zolotarov54b9dda2009-11-16 06:05:58 +00005613 if (num_queues)
5614 bp->num_queues = min_t(u32, num_queues,
5615 BNX2X_MAX_QUEUES(bp));
Eilon Greensteinca003922009-08-12 22:53:28 -07005616 else
Vladislav Zolotarov54b9dda2009-11-16 06:05:58 +00005617 bp->num_queues = min_t(u32, num_online_cpus(),
5618 BNX2X_MAX_QUEUES(bp));
Eilon Greensteinca003922009-08-12 22:53:28 -07005619 break;
5620
5621
5622 default:
Vladislav Zolotarov54b9dda2009-11-16 06:05:58 +00005623 bp->num_queues = 1;
Eilon Greensteinca003922009-08-12 22:53:28 -07005624 break;
5625 }
Eilon Greensteinca003922009-08-12 22:53:28 -07005626}
5627
Dmitry Kravkov523224a2010-10-06 03:23:26 +00005628void bnx2x_ilt_set_info(struct bnx2x *bp)
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02005629{
Dmitry Kravkov523224a2010-10-06 03:23:26 +00005630 struct ilt_client_info *ilt_client;
5631 struct bnx2x_ilt *ilt = BP_ILT(bp);
5632 u16 line = 0;
5633
5634 ilt->start_line = FUNC_ILT_BASE(BP_FUNC(bp));
5635 DP(BNX2X_MSG_SP, "ilt starts at line %d\n", ilt->start_line);
5636
5637 /* CDU */
5638 ilt_client = &ilt->clients[ILT_CLIENT_CDU];
5639 ilt_client->client_num = ILT_CLIENT_CDU;
5640 ilt_client->page_size = CDU_ILT_PAGE_SZ;
5641 ilt_client->flags = ILT_CLIENT_SKIP_MEM;
5642 ilt_client->start = line;
5643 line += L2_ILT_LINES(bp);
5644#ifdef BCM_CNIC
5645 line += CNIC_ILT_LINES;
5646#endif
5647 ilt_client->end = line - 1;
5648
5649 DP(BNX2X_MSG_SP, "ilt client[CDU]: start %d, end %d, psz 0x%x, "
5650 "flags 0x%x, hw psz %d\n",
5651 ilt_client->start,
5652 ilt_client->end,
5653 ilt_client->page_size,
5654 ilt_client->flags,
5655 ilog2(ilt_client->page_size >> 12));
5656
5657 /* QM */
5658 if (QM_INIT(bp->qm_cid_count)) {
5659 ilt_client = &ilt->clients[ILT_CLIENT_QM];
5660 ilt_client->client_num = ILT_CLIENT_QM;
5661 ilt_client->page_size = QM_ILT_PAGE_SZ;
5662 ilt_client->flags = 0;
5663 ilt_client->start = line;
5664
5665 /* 4 bytes for each cid */
5666 line += DIV_ROUND_UP(bp->qm_cid_count * QM_QUEUES_PER_FUNC * 4,
5667 QM_ILT_PAGE_SZ);
5668
5669 ilt_client->end = line - 1;
5670
5671 DP(BNX2X_MSG_SP, "ilt client[QM]: start %d, end %d, psz 0x%x, "
5672 "flags 0x%x, hw psz %d\n",
5673 ilt_client->start,
5674 ilt_client->end,
5675 ilt_client->page_size,
5676 ilt_client->flags,
5677 ilog2(ilt_client->page_size >> 12));
5678
5679 }
5680 /* SRC */
5681 ilt_client = &ilt->clients[ILT_CLIENT_SRC];
5682#ifdef BCM_CNIC
5683 ilt_client->client_num = ILT_CLIENT_SRC;
5684 ilt_client->page_size = SRC_ILT_PAGE_SZ;
5685 ilt_client->flags = 0;
5686 ilt_client->start = line;
5687 line += SRC_ILT_LINES;
5688 ilt_client->end = line - 1;
5689
5690 DP(BNX2X_MSG_SP, "ilt client[SRC]: start %d, end %d, psz 0x%x, "
5691 "flags 0x%x, hw psz %d\n",
5692 ilt_client->start,
5693 ilt_client->end,
5694 ilt_client->page_size,
5695 ilt_client->flags,
5696 ilog2(ilt_client->page_size >> 12));
5697
5698#else
5699 ilt_client->flags = (ILT_CLIENT_SKIP_INIT | ILT_CLIENT_SKIP_MEM);
5700#endif
5701
5702 /* TM */
5703 ilt_client = &ilt->clients[ILT_CLIENT_TM];
5704#ifdef BCM_CNIC
5705 ilt_client->client_num = ILT_CLIENT_TM;
5706 ilt_client->page_size = TM_ILT_PAGE_SZ;
5707 ilt_client->flags = 0;
5708 ilt_client->start = line;
5709 line += TM_ILT_LINES;
5710 ilt_client->end = line - 1;
5711
5712 DP(BNX2X_MSG_SP, "ilt client[TM]: start %d, end %d, psz 0x%x, "
5713 "flags 0x%x, hw psz %d\n",
5714 ilt_client->start,
5715 ilt_client->end,
5716 ilt_client->page_size,
5717 ilt_client->flags,
5718 ilog2(ilt_client->page_size >> 12));
5719
5720#else
5721 ilt_client->flags = (ILT_CLIENT_SKIP_INIT | ILT_CLIENT_SKIP_MEM);
5722#endif
5723}
5724int bnx2x_setup_client(struct bnx2x *bp, struct bnx2x_fastpath *fp,
5725 int is_leading)
5726{
5727 struct bnx2x_client_init_params params = { {0} };
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02005728 int rc;
5729
Dmitry Kravkov523224a2010-10-06 03:23:26 +00005730 bnx2x_ack_sb(bp, fp->igu_sb_id, USTORM_ID, 0,
5731 IGU_INT_ENABLE, 0);
5732
5733 params.ramrod_params.pstate = &fp->state;
5734 params.ramrod_params.state = BNX2X_FP_STATE_OPEN;
5735 params.ramrod_params.index = fp->index;
5736 params.ramrod_params.cid = fp->cid;
5737
5738 if (is_leading)
5739 params.ramrod_params.flags |= CLIENT_IS_LEADING_RSS;
5740
5741 bnx2x_pf_rx_cl_prep(bp, fp, &params.pause, &params.rxq_params);
5742
5743 bnx2x_pf_tx_cl_prep(bp, fp, &params.txq_params);
5744
5745 rc = bnx2x_setup_fw_client(bp, &params, 1,
5746 bnx2x_sp(bp, client_init_data),
5747 bnx2x_sp_mapping(bp, client_init_data));
5748 return rc;
5749}
5750
/* Tear down an L2 client in the FW: HALT -> TERMINATE -> CFC_DEL,
 * waiting for each ramrod to complete before issuing the next.
 * Returns 0 on success or the bnx2x_wait_ramrod() error on timeout.
 */
int bnx2x_stop_fw_client(struct bnx2x *bp, struct bnx2x_client_ramrod_params *p)
{
	int rc;

	int poll_flag = p->poll ? WAIT_RAMROD_POLL : 0;

	/* halt the connection */
	*p->pstate = BNX2X_FP_STATE_HALTING;
	bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_HALT, p->cid, 0,
		      p->cl_id, 0);

	/* Wait for completion */
	rc = bnx2x_wait_ramrod(bp, BNX2X_FP_STATE_HALTED, p->index,
			       p->pstate, poll_flag);
	if (rc) /* timeout */
		return rc;

	/* terminate the connection */
	*p->pstate = BNX2X_FP_STATE_TERMINATING;
	bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_TERMINATE, p->cid, 0,
		      p->cl_id, 0);
	/* Wait for completion */
	rc = bnx2x_wait_ramrod(bp, BNX2X_FP_STATE_TERMINATED, p->index,
			       p->pstate, poll_flag);
	if (rc) /* timeout */
		return rc;


	/* delete cfc entry */
	bnx2x_sp_post(bp, RAMROD_CMD_ID_COMMON_CFC_DEL, p->cid, 0, 0, 1);

	/* Wait for completion */
	rc = bnx2x_wait_ramrod(bp, BNX2X_FP_STATE_CLOSED, p->index,
			       p->pstate, WAIT_RAMROD_COMMON);
	return rc;
}
5786
Dmitry Kravkov523224a2010-10-06 03:23:26 +00005787static int bnx2x_stop_client(struct bnx2x *bp, int index)
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02005788{
Dmitry Kravkov523224a2010-10-06 03:23:26 +00005789 struct bnx2x_client_ramrod_params client_stop = {0};
5790 struct bnx2x_fastpath *fp = &bp->fp[index];
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02005791
Dmitry Kravkov523224a2010-10-06 03:23:26 +00005792 client_stop.index = index;
5793 client_stop.cid = fp->cid;
5794 client_stop.cl_id = fp->cl_id;
5795 client_stop.pstate = &(fp->state);
5796 client_stop.poll = 0;
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02005797
Dmitry Kravkov523224a2010-10-06 03:23:26 +00005798 return bnx2x_stop_fw_client(bp, &client_stop);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02005799}
5800
Dmitry Kravkov523224a2010-10-06 03:23:26 +00005801
/* Quiesce and reset this PCI function: disable it in the FW storms,
 * mark its status blocks disabled, clear SPQ data, mask the IGU edge
 * registers, (with CNIC) stop the timer scan, and clear the function's
 * ILT range.  Leaves DMAE unusable until the next init.
 */
static void bnx2x_reset_func(struct bnx2x *bp)
{
	int port = BP_PORT(bp);
	int func = BP_FUNC(bp);
	int base, i;
	/* byte offsets of the pf_id field inside FP/SP status block data */
	int pfunc_offset_fp = offsetof(struct hc_sb_data, p_func) +
			offsetof(struct hc_status_block_data_e1x, common);
	int pfunc_offset_sp = offsetof(struct hc_sp_status_block_data, p_func);
	int pfid_offset = offsetof(struct pci_entity, pf_id);

	/* Disable the function in the FW */
	REG_WR8(bp, BAR_XSTRORM_INTMEM + XSTORM_FUNC_EN_OFFSET(func), 0);
	REG_WR8(bp, BAR_CSTRORM_INTMEM + CSTORM_FUNC_EN_OFFSET(func), 0);
	REG_WR8(bp, BAR_TSTRORM_INTMEM + TSTORM_FUNC_EN_OFFSET(func), 0);
	REG_WR8(bp, BAR_USTRORM_INTMEM + USTORM_FUNC_EN_OFFSET(func), 0);

	/* FP SBs */
	for_each_queue(bp, i) {
		struct bnx2x_fastpath *fp = &bp->fp[i];
		REG_WR8(bp,
			BAR_CSTRORM_INTMEM +
			CSTORM_STATUS_BLOCK_DATA_OFFSET(fp->fw_sb_id)
			+ pfunc_offset_fp + pfid_offset,
			HC_FUNCTION_DISABLED);
	}

	/* SP SB */
	REG_WR8(bp,
		BAR_CSTRORM_INTMEM +
		CSTORM_SP_STATUS_BLOCK_DATA_OFFSET(func) +
		pfunc_offset_sp + pfid_offset,
		HC_FUNCTION_DISABLED);


	/* NOTE(review): every iteration writes the same dword at
	 * XSTORM_SPQ_DATA_OFFSET(func).  If the intent is to clear the
	 * whole XSTORM_SPQ_DATA_SIZE region the offset should advance by
	 * i*4 — confirm against the FW interface before changing.
	 */
	for (i = 0; i < XSTORM_SPQ_DATA_SIZE / 4; i++)
		REG_WR(bp, BAR_XSTRORM_INTMEM + XSTORM_SPQ_DATA_OFFSET(func),
		       0);

	/* Configure IGU */
	REG_WR(bp, HC_REG_LEADING_EDGE_0 + port*8, 0);
	REG_WR(bp, HC_REG_TRAILING_EDGE_0 + port*8, 0);

#ifdef BCM_CNIC
	/* Disable Timer scan */
	REG_WR(bp, TM_REG_EN_LINEAR0_TIMER + port*4, 0);
	/*
	 * Wait for at least 10ms and up to 2 second for the timers scan to
	 * complete
	 */
	for (i = 0; i < 200; i++) {
		msleep(10);
		if (!REG_RD(bp, TM_REG_LIN0_SCAN_ON + port*4))
			break;
	}
#endif
	/* Clear ILT */
	base = FUNC_ILT_BASE(func);
	for (i = base; i < base + ILT_PER_FUNC; i++)
		bnx2x_ilt_wr(bp, i, 0);

	/* DMAE is unusable from now until the next function init */
	bp->dmae_ready = 0;
}
5864
/* Reset port-scope HW: mask the port's NIG interrupts, stop forwarding
 * Rx packets to the BRB (except MCP traffic), clear the port AEU mask,
 * and warn if the BRB still holds packets afterwards.
 */
static void bnx2x_reset_port(struct bnx2x *bp)
{
	int port = BP_PORT(bp);
	u32 val;

	/* mask this port's NIG interrupts */
	REG_WR(bp, NIG_REG_MASK_INTERRUPT_PORT0 + port*4, 0);

	/* Do not rcv packets to BRB */
	REG_WR(bp, NIG_REG_LLH0_BRB1_DRV_MASK + port*4, 0x0);
	/* Do not direct rcv packets that are not for MCP to the BRB */
	REG_WR(bp, (port ? NIG_REG_LLH1_BRB1_NOT_MCP :
			   NIG_REG_LLH0_BRB1_NOT_MCP), 0x0);

	/* Configure AEU */
	REG_WR(bp, MISC_REG_AEU_MASK_ATTN_FUNC_0 + port*4, 0);

	/* give in-flight packets time to drain before checking the BRB */
	msleep(100);
	/* Check for BRB port occupancy */
	val = REG_RD(bp, BRB1_REG_PORT_NUM_OCC_BLOCKS_0 + port*4);
	if (val)
		DP(NETIF_MSG_IFDOWN,
		   "BRB1 is not empty %d blocks are occupied\n", val);

	/* TODO: Close Doorbell port? */
}
5890
Eilon Greenstein34f80b02008-06-23 20:33:01 -07005891static void bnx2x_reset_chip(struct bnx2x *bp, u32 reset_code)
5892{
5893 DP(BNX2X_MSG_MCP, "function %d reset_code %x\n",
5894 BP_FUNC(bp), reset_code);
5895
5896 switch (reset_code) {
5897 case FW_MSG_CODE_DRV_UNLOAD_COMMON:
5898 bnx2x_reset_port(bp);
5899 bnx2x_reset_func(bp);
5900 bnx2x_reset_common(bp);
5901 break;
5902
5903 case FW_MSG_CODE_DRV_UNLOAD_PORT:
5904 bnx2x_reset_port(bp);
5905 bnx2x_reset_func(bp);
5906 break;
5907
5908 case FW_MSG_CODE_DRV_UNLOAD_FUNCTION:
5909 bnx2x_reset_func(bp);
5910 break;
5911
5912 default:
5913 BNX2X_ERR("Unknown reset_code (0x%x) from MCP\n", reset_code);
5914 break;
5915 }
5916}
5917
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00005918void bnx2x_chip_cleanup(struct bnx2x *bp, int unload_mode)
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02005919{
Vladislav Zolotarovda5a6622008-08-13 15:50:00 -07005920 int port = BP_PORT(bp);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02005921 u32 reset_code = 0;
Vladislav Zolotarovda5a6622008-08-13 15:50:00 -07005922 int i, cnt, rc;
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02005923
Eilon Greenstein555f6c72009-02-12 08:36:11 +00005924 /* Wait until tx fastpath tasks complete */
Vladislav Zolotarov54b9dda2009-11-16 06:05:58 +00005925 for_each_queue(bp, i) {
Eliezer Tamir228241e2008-02-28 11:56:57 -08005926 struct bnx2x_fastpath *fp = &bp->fp[i];
5927
Eilon Greenstein34f80b02008-06-23 20:33:01 -07005928 cnt = 1000;
Vladislav Zolotarove8b5fc52009-01-26 12:36:42 -08005929 while (bnx2x_has_tx_work_unload(fp)) {
Vladislav Zolotarovda5a6622008-08-13 15:50:00 -07005930
Eilon Greenstein34f80b02008-06-23 20:33:01 -07005931 if (!cnt) {
5932 BNX2X_ERR("timeout waiting for queue[%d]\n",
5933 i);
5934#ifdef BNX2X_STOP_ON_ERROR
5935 bnx2x_panic();
5936 return -EBUSY;
5937#else
5938 break;
5939#endif
5940 }
5941 cnt--;
Vladislav Zolotarovda5a6622008-08-13 15:50:00 -07005942 msleep(1);
Eilon Greenstein34f80b02008-06-23 20:33:01 -07005943 }
Eliezer Tamir228241e2008-02-28 11:56:57 -08005944 }
Vladislav Zolotarovda5a6622008-08-13 15:50:00 -07005945 /* Give HW time to discard old tx messages */
5946 msleep(1);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02005947
Yitchak Gertner65abd742008-08-25 15:26:24 -07005948 if (CHIP_IS_E1(bp)) {
Dmitry Kravkov523224a2010-10-06 03:23:26 +00005949 /* invalidate mc list,
5950 * wait and poll (interrupts are off)
5951 */
5952 bnx2x_invlidate_e1_mc_list(bp);
5953 bnx2x_set_eth_mac(bp, 0);
Yitchak Gertner65abd742008-08-25 15:26:24 -07005954
Dmitry Kravkov523224a2010-10-06 03:23:26 +00005955 } else {
Yitchak Gertner65abd742008-08-25 15:26:24 -07005956 REG_WR(bp, NIG_REG_LLH0_FUNC_EN + port*8, 0);
5957
Dmitry Kravkov523224a2010-10-06 03:23:26 +00005958 bnx2x_set_eth_mac(bp, 0);
Yitchak Gertner65abd742008-08-25 15:26:24 -07005959
5960 for (i = 0; i < MC_HASH_SIZE; i++)
5961 REG_WR(bp, MC_HASH_OFFSET(bp, i), 0);
5962 }
Dmitry Kravkov523224a2010-10-06 03:23:26 +00005963
Michael Chan993ac7b2009-10-10 13:46:56 +00005964#ifdef BCM_CNIC
5965 /* Clear iSCSI L2 MAC */
5966 mutex_lock(&bp->cnic_mutex);
5967 if (bp->cnic_flags & BNX2X_CNIC_FLAG_MAC_SET) {
5968 bnx2x_set_iscsi_eth_mac_addr(bp, 0);
5969 bp->cnic_flags &= ~BNX2X_CNIC_FLAG_MAC_SET;
5970 }
5971 mutex_unlock(&bp->cnic_mutex);
5972#endif
Yitchak Gertner65abd742008-08-25 15:26:24 -07005973
Vladislav Zolotarovda5a6622008-08-13 15:50:00 -07005974 if (unload_mode == UNLOAD_NORMAL)
5975 reset_code = DRV_MSG_CODE_UNLOAD_REQ_WOL_DIS;
Eliezer Tamir228241e2008-02-28 11:56:57 -08005976
Eilon Greenstein7d0446c2009-07-29 00:20:10 +00005977 else if (bp->flags & NO_WOL_FLAG)
Vladislav Zolotarovda5a6622008-08-13 15:50:00 -07005978 reset_code = DRV_MSG_CODE_UNLOAD_REQ_WOL_MCP;
Vladislav Zolotarovda5a6622008-08-13 15:50:00 -07005979
Eilon Greenstein7d0446c2009-07-29 00:20:10 +00005980 else if (bp->wol) {
Vladislav Zolotarovda5a6622008-08-13 15:50:00 -07005981 u32 emac_base = port ? GRCBASE_EMAC1 : GRCBASE_EMAC0;
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02005982 u8 *mac_addr = bp->dev->dev_addr;
Eilon Greenstein34f80b02008-06-23 20:33:01 -07005983 u32 val;
Eilon Greenstein34f80b02008-06-23 20:33:01 -07005984 /* The mac address is written to entries 1-4 to
5985 preserve entry 0 which is used by the PMF */
Vladislav Zolotarovda5a6622008-08-13 15:50:00 -07005986 u8 entry = (BP_E1HVN(bp) + 1)*8;
5987
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02005988 val = (mac_addr[0] << 8) | mac_addr[1];
Eilon Greenstein3196a882008-08-13 15:58:49 -07005989 EMAC_WR(bp, EMAC_REG_EMAC_MAC_MATCH + entry, val);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02005990
5991 val = (mac_addr[2] << 24) | (mac_addr[3] << 16) |
5992 (mac_addr[4] << 8) | mac_addr[5];
Eilon Greenstein3196a882008-08-13 15:58:49 -07005993 EMAC_WR(bp, EMAC_REG_EMAC_MAC_MATCH + entry + 4, val);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02005994
5995 reset_code = DRV_MSG_CODE_UNLOAD_REQ_WOL_EN;
Eliezer Tamir228241e2008-02-28 11:56:57 -08005996
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02005997 } else
5998 reset_code = DRV_MSG_CODE_UNLOAD_REQ_WOL_DIS;
5999
Eilon Greenstein34f80b02008-06-23 20:33:01 -07006000 /* Close multi and leading connections
6001 Completions for ramrods are collected in a synchronous way */
Dmitry Kravkov523224a2010-10-06 03:23:26 +00006002 for_each_queue(bp, i)
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02006003
Dmitry Kravkov523224a2010-10-06 03:23:26 +00006004 if (bnx2x_stop_client(bp, i))
Vladislav Zolotarovda5a6622008-08-13 15:50:00 -07006005#ifdef BNX2X_STOP_ON_ERROR
Dmitry Kravkov523224a2010-10-06 03:23:26 +00006006 return;
6007#else
6008 goto unload_error;
6009#endif
6010
6011 rc = bnx2x_func_stop(bp);
6012 if (rc) {
6013 BNX2X_ERR("Function stop failed!\n");
6014#ifdef BNX2X_STOP_ON_ERROR
6015 return;
Vladislav Zolotarovda5a6622008-08-13 15:50:00 -07006016#else
6017 goto unload_error;
Eilon Greenstein34f80b02008-06-23 20:33:01 -07006018#endif
Eliezer Tamir228241e2008-02-28 11:56:57 -08006019 }
Dmitry Kravkov523224a2010-10-06 03:23:26 +00006020#ifndef BNX2X_STOP_ON_ERROR
Eliezer Tamir228241e2008-02-28 11:56:57 -08006021unload_error:
Dmitry Kravkov523224a2010-10-06 03:23:26 +00006022#endif
Eilon Greenstein34f80b02008-06-23 20:33:01 -07006023 if (!BP_NOMCP(bp))
Yaniv Rosnera22f0782010-09-07 11:41:20 +00006024 reset_code = bnx2x_fw_command(bp, reset_code, 0);
Eilon Greenstein34f80b02008-06-23 20:33:01 -07006025 else {
Eilon Greensteinf5372252009-02-12 08:38:30 +00006026 DP(NETIF_MSG_IFDOWN, "NO MCP - load counts %d, %d, %d\n",
Eilon Greenstein34f80b02008-06-23 20:33:01 -07006027 load_count[0], load_count[1], load_count[2]);
6028 load_count[0]--;
Vladislav Zolotarovda5a6622008-08-13 15:50:00 -07006029 load_count[1 + port]--;
Eilon Greensteinf5372252009-02-12 08:38:30 +00006030 DP(NETIF_MSG_IFDOWN, "NO MCP - new load counts %d, %d, %d\n",
Eilon Greenstein34f80b02008-06-23 20:33:01 -07006031 load_count[0], load_count[1], load_count[2]);
6032 if (load_count[0] == 0)
6033 reset_code = FW_MSG_CODE_DRV_UNLOAD_COMMON;
Vladislav Zolotarovda5a6622008-08-13 15:50:00 -07006034 else if (load_count[1 + port] == 0)
Eilon Greenstein34f80b02008-06-23 20:33:01 -07006035 reset_code = FW_MSG_CODE_DRV_UNLOAD_PORT;
6036 else
6037 reset_code = FW_MSG_CODE_DRV_UNLOAD_FUNCTION;
6038 }
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02006039
Eilon Greenstein34f80b02008-06-23 20:33:01 -07006040 if ((reset_code == FW_MSG_CODE_DRV_UNLOAD_COMMON) ||
6041 (reset_code == FW_MSG_CODE_DRV_UNLOAD_PORT))
6042 bnx2x__link_reset(bp);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02006043
Dmitry Kravkov523224a2010-10-06 03:23:26 +00006044 /* Disable HW interrupts, NAPI */
6045 bnx2x_netif_stop(bp, 1);
6046
6047 /* Release IRQs */
6048 bnx2x_free_irq(bp, false);
6049
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02006050 /* Reset the chip */
Eliezer Tamir228241e2008-02-28 11:56:57 -08006051 bnx2x_reset_chip(bp, reset_code);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02006052
6053 /* Report UNLOAD_DONE to MCP */
Eilon Greenstein34f80b02008-06-23 20:33:01 -07006054 if (!BP_NOMCP(bp))
Yaniv Rosnera22f0782010-09-07 11:41:20 +00006055 bnx2x_fw_command(bp, DRV_MSG_CODE_UNLOAD_DONE, 0);
Eilon Greenstein356e2382009-02-12 08:38:32 +00006056
Vladislav Zolotarov72fd0712010-04-19 01:13:12 +00006057}
6058
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00006059void bnx2x_disable_close_the_gate(struct bnx2x *bp)
Vladislav Zolotarov72fd0712010-04-19 01:13:12 +00006060{
6061 u32 val;
6062
6063 DP(NETIF_MSG_HW, "Disabling \"close the gates\"\n");
6064
6065 if (CHIP_IS_E1(bp)) {
6066 int port = BP_PORT(bp);
6067 u32 addr = port ? MISC_REG_AEU_MASK_ATTN_FUNC_1 :
6068 MISC_REG_AEU_MASK_ATTN_FUNC_0;
6069
6070 val = REG_RD(bp, addr);
6071 val &= ~(0x300);
6072 REG_WR(bp, addr, val);
6073 } else if (CHIP_IS_E1H(bp)) {
6074 val = REG_RD(bp, MISC_REG_AEU_GENERAL_MASK);
6075 val &= ~(MISC_AEU_GENERAL_MASK_REG_AEU_PXP_CLOSE_MASK |
6076 MISC_AEU_GENERAL_MASK_REG_AEU_NIG_CLOSE_MASK);
6077 REG_WR(bp, MISC_REG_AEU_GENERAL_MASK, val);
6078 }
6079}
6080
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02006081
/* Close/open gates #2, #3 and #4:
 * #2 - PXP discard of internal host writes, #4 - PXP discard of doorbells
 * (both "not E1" only); #3 - the HC config enable bit, whose polarity is
 * inverted relative to the PXP discard bits (closing gate #3 means
 * clearing the config bit).
 */
static void bnx2x_set_234_gates(struct bnx2x *bp, bool close)
{
	u32 val, addr;

	/* Gates #2 and #4a are closed/opened for "not E1" only */
	if (!CHIP_IS_E1(bp)) {
		/* #4 */
		val = REG_RD(bp, PXP_REG_HST_DISCARD_DOORBELLS);
		REG_WR(bp, PXP_REG_HST_DISCARD_DOORBELLS,
		       close ? (val | 0x1) : (val & (~(u32)1)));
		/* #2 */
		val = REG_RD(bp, PXP_REG_HST_DISCARD_INTERNAL_WRITES);
		REG_WR(bp, PXP_REG_HST_DISCARD_INTERNAL_WRITES,
		       close ? (val | 0x1) : (val & (~(u32)1)));
	}

	/* #3 - note the inverted sense: "close" clears the enable bit */
	addr = BP_PORT(bp) ? HC_REG_CONFIG_1 : HC_REG_CONFIG_0;
	val = REG_RD(bp, addr);
	REG_WR(bp, addr, (!close) ? (val | 0x1) : (val & (~(u32)1)));

	DP(NETIF_MSG_HW, "%s gates #2, #3 and #4\n",
	   close ? "closing" : "opening");
	/* flush the posted register writes before returning */
	mmiowb();
}
6108
6109#define SHARED_MF_CLP_MAGIC 0x80000000 /* `magic' bit */
6110
6111static void bnx2x_clp_reset_prep(struct bnx2x *bp, u32 *magic_val)
6112{
6113 /* Do some magic... */
6114 u32 val = MF_CFG_RD(bp, shared_mf_config.clp_mb);
6115 *magic_val = val & SHARED_MF_CLP_MAGIC;
6116 MF_CFG_WR(bp, shared_mf_config.clp_mb, val | SHARED_MF_CLP_MAGIC);
6117}
6118
6119/* Restore the value of the `magic' bit.
6120 *
6121 * @param pdev Device handle.
6122 * @param magic_val Old value of the `magic' bit.
6123 */
6124static void bnx2x_clp_reset_done(struct bnx2x *bp, u32 magic_val)
6125{
6126 /* Restore the `magic' bit value... */
6127 /* u32 val = SHMEM_RD(bp, mf_cfg.shared_mf_config.clp_mb);
6128 SHMEM_WR(bp, mf_cfg.shared_mf_config.clp_mb,
6129 (val & (~SHARED_MF_CLP_MAGIC)) | magic_val); */
6130 u32 val = MF_CFG_RD(bp, shared_mf_config.clp_mb);
6131 MF_CFG_WR(bp, shared_mf_config.clp_mb,
6132 (val & (~SHARED_MF_CLP_MAGIC)) | magic_val);
6133}
6134
/* Prepares for MCP reset: takes care of CLP configurations.
 *
 * @param bp
 * @param magic_val Old value of 'magic' bit.
 *
 * NOTE: *magic_val is only written on non-E1 chips;
 * bnx2x_reset_mcp_comp() likewise only consumes it there.
 */
static void bnx2x_reset_mcp_prep(struct bnx2x *bp, u32 *magic_val)
{
	u32 shmem;
	u32 validity_offset;

	DP(NETIF_MSG_HW, "Starting\n");

	/* Set `magic' bit in order to save MF config */
	if (!CHIP_IS_E1(bp))
		bnx2x_clp_reset_prep(bp, magic_val);

	/* Get shmem offset */
	shmem = REG_RD(bp, MISC_REG_SHARED_MEM_ADDR);
	validity_offset = offsetof(struct shmem_region, validity_map[0]);

	/* Clear validity map flags */
	if (shmem > 0)
		REG_WR(bp, shmem + validity_offset, 0);
}
6159
6160#define MCP_TIMEOUT 5000 /* 5 seconds (in ms) */
6161#define MCP_ONE_TIMEOUT 100 /* 100 ms */
6162
6163/* Waits for MCP_ONE_TIMEOUT or MCP_ONE_TIMEOUT*10,
6164 * depending on the HW type.
6165 *
6166 * @param bp
6167 */
6168static inline void bnx2x_mcp_wait_one(struct bnx2x *bp)
6169{
6170 /* special handling for emulation and FPGA,
6171 wait 10 times longer */
6172 if (CHIP_REV_IS_SLOW(bp))
6173 msleep(MCP_ONE_TIMEOUT*10);
6174 else
6175 msleep(MCP_ONE_TIMEOUT);
6176}
6177
/* Wait for the MCP to come back up after a reset by polling the shmem
 * validity map (up to MCP_TIMEOUT total), then restore the CLP `magic'
 * bit saved by bnx2x_reset_mcp_prep().
 *
 * Returns 0 when the shmem validity signature shows the MCP is up,
 * -ENOTTY otherwise.
 */
static int bnx2x_reset_mcp_comp(struct bnx2x *bp, u32 magic_val)
{
	u32 shmem, cnt, validity_offset, val;
	int rc = 0;

	/* give the MCP a head start before the first poll */
	msleep(100);

	/* Get shmem offset */
	shmem = REG_RD(bp, MISC_REG_SHARED_MEM_ADDR);
	if (shmem == 0) {
		BNX2X_ERR("Shmem 0 return failure\n");
		rc = -ENOTTY;
		goto exit_lbl;
	}

	validity_offset = offsetof(struct shmem_region, validity_map[0]);

	/* Wait for MCP to come up */
	for (cnt = 0; cnt < (MCP_TIMEOUT / MCP_ONE_TIMEOUT); cnt++) {
		/* TBD: its best to check validity map of last port.
		 * currently checks on port 0.
		 */
		val = REG_RD(bp, shmem + validity_offset);
		DP(NETIF_MSG_HW, "shmem 0x%x validity map(0x%x)=0x%x\n", shmem,
		   shmem + validity_offset, val);

		/* check that shared memory is valid. */
		if ((val & (SHR_MEM_VALIDITY_DEV_INFO | SHR_MEM_VALIDITY_MB))
		    == (SHR_MEM_VALIDITY_DEV_INFO | SHR_MEM_VALIDITY_MB))
			break;

		bnx2x_mcp_wait_one(bp);
	}

	DP(NETIF_MSG_HW, "Cnt=%d Shmem validity map 0x%x\n", cnt, val);

	/* Check that shared memory is valid. This indicates that MCP is up. */
	if ((val & (SHR_MEM_VALIDITY_DEV_INFO | SHR_MEM_VALIDITY_MB)) !=
	    (SHR_MEM_VALIDITY_DEV_INFO | SHR_MEM_VALIDITY_MB)) {
		BNX2X_ERR("Shmem signature not present. MCP is not up !!\n");
		rc = -ENOTTY;
		goto exit_lbl;
	}

exit_lbl:
	/* Restore the `magic' bit value */
	if (!CHIP_IS_E1(bp))
		bnx2x_clp_reset_done(bp, magic_val);

	return rc;
}
6229
/* Clear the PXP2 read-start-init and RQ done registers as part of the
 * process-kill flow (called both before and after the chip reset).
 * No-op on E1.
 */
static void bnx2x_pxp_prep(struct bnx2x *bp)
{
	if (!CHIP_IS_E1(bp)) {
		REG_WR(bp, PXP2_REG_RD_START_INIT, 0);
		REG_WR(bp, PXP2_REG_RQ_RBC_DONE, 0);
		REG_WR(bp, PXP2_REG_RQ_CFG_DONE, 0);
		/* flush the posted writes before proceeding */
		mmiowb();
	}
}
6239
6240/*
6241 * Reset the whole chip except for:
6242 * - PCIE core
6243 * - PCI Glue, PSWHST, PXP/PXP2 RF (all controlled by
6244 * one reset bit)
6245 * - IGU
6246 * - MISC (including AEU)
6247 * - GRC
6248 * - RBCN, RBCP
6249 */
6250static void bnx2x_process_kill_chip_reset(struct bnx2x *bp)
6251{
6252 u32 not_reset_mask1, reset_mask1, not_reset_mask2, reset_mask2;
6253
6254 not_reset_mask1 =
6255 MISC_REGISTERS_RESET_REG_1_RST_HC |
6256 MISC_REGISTERS_RESET_REG_1_RST_PXPV |
6257 MISC_REGISTERS_RESET_REG_1_RST_PXP;
6258
6259 not_reset_mask2 =
6260 MISC_REGISTERS_RESET_REG_2_RST_MDIO |
6261 MISC_REGISTERS_RESET_REG_2_RST_EMAC0_HARD_CORE |
6262 MISC_REGISTERS_RESET_REG_2_RST_EMAC1_HARD_CORE |
6263 MISC_REGISTERS_RESET_REG_2_RST_MISC_CORE |
6264 MISC_REGISTERS_RESET_REG_2_RST_RBCN |
6265 MISC_REGISTERS_RESET_REG_2_RST_GRC |
6266 MISC_REGISTERS_RESET_REG_2_RST_MCP_N_RESET_REG_HARD_CORE |
6267 MISC_REGISTERS_RESET_REG_2_RST_MCP_N_HARD_CORE_RST_B;
6268
6269 reset_mask1 = 0xffffffff;
6270
6271 if (CHIP_IS_E1(bp))
6272 reset_mask2 = 0xffff;
6273 else
6274 reset_mask2 = 0x1ffff;
6275
6276 REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_CLEAR,
6277 reset_mask1 & (~not_reset_mask1));
6278 REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_2_CLEAR,
6279 reset_mask2 & (~not_reset_mask2));
6280
6281 barrier();
6282 mmiowb();
6283
6284 REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_SET, reset_mask1);
6285 REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_2_SET, reset_mask2);
6286 mmiowb();
6287}
6288
6289static int bnx2x_process_kill(struct bnx2x *bp)
6290{
6291 int cnt = 1000;
6292 u32 val = 0;
6293 u32 sr_cnt, blk_cnt, port_is_idle_0, port_is_idle_1, pgl_exp_rom2;
6294
6295
6296 /* Empty the Tetris buffer, wait for 1s */
6297 do {
6298 sr_cnt = REG_RD(bp, PXP2_REG_RD_SR_CNT);
6299 blk_cnt = REG_RD(bp, PXP2_REG_RD_BLK_CNT);
6300 port_is_idle_0 = REG_RD(bp, PXP2_REG_RD_PORT_IS_IDLE_0);
6301 port_is_idle_1 = REG_RD(bp, PXP2_REG_RD_PORT_IS_IDLE_1);
6302 pgl_exp_rom2 = REG_RD(bp, PXP2_REG_PGL_EXP_ROM2);
6303 if ((sr_cnt == 0x7e) && (blk_cnt == 0xa0) &&
6304 ((port_is_idle_0 & 0x1) == 0x1) &&
6305 ((port_is_idle_1 & 0x1) == 0x1) &&
6306 (pgl_exp_rom2 == 0xffffffff))
6307 break;
6308 msleep(1);
6309 } while (cnt-- > 0);
6310
6311 if (cnt <= 0) {
6312 DP(NETIF_MSG_HW, "Tetris buffer didn't get empty or there"
6313 " are still"
6314 " outstanding read requests after 1s!\n");
6315 DP(NETIF_MSG_HW, "sr_cnt=0x%08x, blk_cnt=0x%08x,"
6316 " port_is_idle_0=0x%08x,"
6317 " port_is_idle_1=0x%08x, pgl_exp_rom2=0x%08x\n",
6318 sr_cnt, blk_cnt, port_is_idle_0, port_is_idle_1,
6319 pgl_exp_rom2);
6320 return -EAGAIN;
6321 }
6322
6323 barrier();
6324
6325 /* Close gates #2, #3 and #4 */
6326 bnx2x_set_234_gates(bp, true);
6327
6328 /* TBD: Indicate that "process kill" is in progress to MCP */
6329
6330 /* Clear "unprepared" bit */
6331 REG_WR(bp, MISC_REG_UNPREPARED, 0);
6332 barrier();
6333
6334 /* Make sure all is written to the chip before the reset */
6335 mmiowb();
6336
6337 /* Wait for 1ms to empty GLUE and PCI-E core queues,
6338 * PSWHST, GRC and PSWRD Tetris buffer.
6339 */
6340 msleep(1);
6341
6342 /* Prepare to chip reset: */
6343 /* MCP */
6344 bnx2x_reset_mcp_prep(bp, &val);
6345
6346 /* PXP */
6347 bnx2x_pxp_prep(bp);
6348 barrier();
6349
6350 /* reset the chip */
6351 bnx2x_process_kill_chip_reset(bp);
6352 barrier();
6353
6354 /* Recover after reset: */
6355 /* MCP */
6356 if (bnx2x_reset_mcp_comp(bp, val))
6357 return -EAGAIN;
6358
6359 /* PXP */
6360 bnx2x_pxp_prep(bp);
6361
6362 /* Open the gates #2, #3 and #4 */
6363 bnx2x_set_234_gates(bp, false);
6364
6365 /* TBD: IGU/AEU preparation bring back the AEU/IGU to a
6366 * reset state, re-enable attentions. */
6367
6368 return 0;
6369}
6370
6371static int bnx2x_leader_reset(struct bnx2x *bp)
6372{
6373 int rc = 0;
6374 /* Try to recover after the failure */
6375 if (bnx2x_process_kill(bp)) {
6376 printk(KERN_ERR "%s: Something bad had happen! Aii!\n",
6377 bp->dev->name);
6378 rc = -EAGAIN;
6379 goto exit_leader_reset;
6380 }
6381
6382 /* Clear "reset is in progress" bit and update the driver state */
6383 bnx2x_set_reset_done(bp);
6384 bp->recovery_state = BNX2X_RECOVERY_DONE;
6385
6386exit_leader_reset:
6387 bp->is_leader = 0;
6388 bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_RESERVED_08);
6389 smp_wmb();
6390 return rc;
6391}
6392
/* Assumption: runs under rtnl lock. This together with the fact
 * that it's called only from bnx2x_reset_task() ensure that it
 * will never be called when netif_running(bp->dev) is false.
 *
 * Recovery state machine:
 *  INIT - try to become the leader (take the leader HW lock), unload
 *         the NIC and move to WAIT.
 *  WAIT - leader: wait (by rescheduling the reset task) until all other
 *         functions have unloaded, then run bnx2x_leader_reset() and
 *         reload; non-leader: wait for the leader to finish (taking over
 *         leadership if the lock becomes free) and then reload.
 */
static void bnx2x_parity_recover(struct bnx2x *bp)
{
	DP(NETIF_MSG_HW, "Handling parity\n");
	while (1) {
		switch (bp->recovery_state) {
		case BNX2X_RECOVERY_INIT:
			DP(NETIF_MSG_HW, "State is BNX2X_RECOVERY_INIT\n");
			/* Try to get a LEADER_LOCK HW lock */
			if (bnx2x_trylock_hw_lock(bp,
					       HW_LOCK_RESOURCE_RESERVED_08))
				bp->is_leader = 1;

			/* Stop the driver */
			/* If interface has been removed - break */
			if (bnx2x_nic_unload(bp, UNLOAD_RECOVERY))
				return;

			bp->recovery_state = BNX2X_RECOVERY_WAIT;
			/* Ensure "is_leader" and "recovery_state"
			 * update values are seen on other CPUs
			 */
			smp_wmb();
			break;

		case BNX2X_RECOVERY_WAIT:
			DP(NETIF_MSG_HW, "State is BNX2X_RECOVERY_WAIT\n");
			if (bp->is_leader) {
				u32 load_counter = bnx2x_get_load_cnt(bp);
				if (load_counter) {
					/* Wait until all other functions get
					 * down.
					 */
					schedule_delayed_work(&bp->reset_task,
								HZ/10);
					return;
				} else {
					/* If all other functions got down -
					 * try to bring the chip back to
					 * normal. In any case it's an exit
					 * point for a leader.
					 */
					if (bnx2x_leader_reset(bp) ||
					    bnx2x_nic_load(bp, LOAD_NORMAL)) {
						printk(KERN_ERR"%s: Recovery "
						"has failed. Power cycle is "
						"needed.\n", bp->dev->name);
						/* Disconnect this device */
						netif_device_detach(bp->dev);
						/* Block ifup for all function
						 * of this ASIC until
						 * "process kill" or power
						 * cycle.
						 */
						bnx2x_set_reset_in_progress(bp);
						/* Shut down the power */
						bnx2x_set_power_state(bp,
								PCI_D3hot);
						return;
					}

					return;
				}
			} else { /* non-leader */
				if (!bnx2x_reset_is_done(bp)) {
					/* Try to get a LEADER_LOCK HW lock as
					 * long as a former leader may have
					 * been unloaded by the user or
					 * released a leadership by another
					 * reason.
					 */
					if (bnx2x_trylock_hw_lock(bp,
					    HW_LOCK_RESOURCE_RESERVED_08)) {
						/* I'm a leader now! Restart a
						 * switch case.
						 */
						bp->is_leader = 1;
						break;
					}

					schedule_delayed_work(&bp->reset_task,
								HZ/10);
					return;

				} else { /* A leader has completed
					  * the "process kill". It's an exit
					  * point for a non-leader.
					  */
					bnx2x_nic_load(bp, LOAD_NORMAL);
					bp->recovery_state =
						BNX2X_RECOVERY_DONE;
					smp_wmb();
					return;
				}
			}
		default:
			return;
		}
	}
}
6496
6497/* bnx2x_nic_unload() flushes the bnx2x_wq, thus reset task is
6498 * scheduled on a general queue in order to prevent a dead lock.
6499 */
Eilon Greenstein34f80b02008-06-23 20:33:01 -07006500static void bnx2x_reset_task(struct work_struct *work)
6501{
Vladislav Zolotarov72fd0712010-04-19 01:13:12 +00006502 struct bnx2x *bp = container_of(work, struct bnx2x, reset_task.work);
Eilon Greenstein34f80b02008-06-23 20:33:01 -07006503
6504#ifdef BNX2X_STOP_ON_ERROR
6505 BNX2X_ERR("reset task called but STOP_ON_ERROR defined"
6506 " so reset not done to allow debug dump,\n"
Vladislav Zolotarov72fd0712010-04-19 01:13:12 +00006507 KERN_ERR " you will need to reboot when done\n");
Eilon Greenstein34f80b02008-06-23 20:33:01 -07006508 return;
6509#endif
6510
6511 rtnl_lock();
6512
6513 if (!netif_running(bp->dev))
6514 goto reset_task_exit;
6515
Vladislav Zolotarov72fd0712010-04-19 01:13:12 +00006516 if (unlikely(bp->recovery_state != BNX2X_RECOVERY_DONE))
6517 bnx2x_parity_recover(bp);
6518 else {
6519 bnx2x_nic_unload(bp, UNLOAD_NORMAL);
6520 bnx2x_nic_load(bp, LOAD_NORMAL);
6521 }
Eilon Greenstein34f80b02008-06-23 20:33:01 -07006522
6523reset_task_exit:
6524 rtnl_unlock();
6525}
6526
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02006527/* end of nic load/unload */
6528
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02006529/*
6530 * Init service functions
6531 */
6532
Eilon Greensteinf1ef27e2009-02-12 08:36:23 +00006533static inline u32 bnx2x_get_pretend_reg(struct bnx2x *bp, int func)
6534{
6535 switch (func) {
6536 case 0: return PXP2_REG_PGL_PRETEND_FUNC_F0;
6537 case 1: return PXP2_REG_PGL_PRETEND_FUNC_F1;
6538 case 2: return PXP2_REG_PGL_PRETEND_FUNC_F2;
6539 case 3: return PXP2_REG_PGL_PRETEND_FUNC_F3;
6540 case 4: return PXP2_REG_PGL_PRETEND_FUNC_F4;
6541 case 5: return PXP2_REG_PGL_PRETEND_FUNC_F5;
6542 case 6: return PXP2_REG_PGL_PRETEND_FUNC_F6;
6543 case 7: return PXP2_REG_PGL_PRETEND_FUNC_F7;
6544 default:
6545 BNX2X_ERR("Unsupported function index: %d\n", func);
6546 return (u32)(-1);
6547 }
6548}
6549
/* Disable interrupts while pretending (over GRC) to be function 0, then
 * restore the original pretend setting. Used by the UNDI unload flow on
 * E1H, where the chip is driven in "like-E1" mode. Each pretend write is
 * read back and verified; a mismatch is fatal (BUG()).
 */
static void bnx2x_undi_int_disable_e1h(struct bnx2x *bp, int orig_func)
{
	u32 reg = bnx2x_get_pretend_reg(bp, orig_func), new_val;

	/* Flush all outstanding writes */
	mmiowb();

	/* Pretend to be function 0 */
	REG_WR(bp, reg, 0);
	/* Flush the GRC transaction (in the chip) */
	new_val = REG_RD(bp, reg);
	if (new_val != 0) {
		BNX2X_ERR("Hmmm... Pretend register wasn't updated: (0,%d)!\n",
			  new_val);
		BUG();
	}

	/* From now we are in the "like-E1" mode */
	bnx2x_int_disable(bp);

	/* Flush all outstanding writes */
	mmiowb();

	/* Restore the original function settings */
	REG_WR(bp, reg, orig_func);
	new_val = REG_RD(bp, reg);
	if (new_val != orig_func) {
		BNX2X_ERR("Hmmm... Pretend register wasn't updated: (%d,%d)!\n",
			  orig_func, new_val);
		BUG();
	}
}
6582
/* Disable interrupts for the UNDI unload flow: on E1H this has to go
 * through the function-0 pretend machinery, otherwise a plain disable
 * suffices.
 */
static inline void bnx2x_undi_int_disable(struct bnx2x *bp, int func)
{
	if (!CHIP_IS_E1H(bp))
		bnx2x_int_disable(bp);
	else
		bnx2x_undi_int_disable_e1h(bp, func);
}
6590
Eilon Greenstein34f80b02008-06-23 20:33:01 -07006591static void __devinit bnx2x_undi_unload(struct bnx2x *bp)
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02006592{
Eilon Greenstein34f80b02008-06-23 20:33:01 -07006593 u32 val;
6594
6595 /* Check if there is any driver already loaded */
6596 val = REG_RD(bp, MISC_REG_UNPREPARED);
6597 if (val == 0x1) {
6598 /* Check if it is the UNDI driver
6599 * UNDI driver initializes CID offset for normal bell to 0x7
6600 */
Yitchak Gertner4a37fb62008-08-13 15:50:23 -07006601 bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_UNDI);
Eilon Greenstein34f80b02008-06-23 20:33:01 -07006602 val = REG_RD(bp, DORQ_REG_NORM_CID_OFST);
6603 if (val == 0x7) {
6604 u32 reset_code = DRV_MSG_CODE_UNLOAD_REQ_WOL_DIS;
Vladislav Zolotarovda5a6622008-08-13 15:50:00 -07006605 /* save our func */
Eilon Greenstein34f80b02008-06-23 20:33:01 -07006606 int func = BP_FUNC(bp);
Vladislav Zolotarovda5a6622008-08-13 15:50:00 -07006607 u32 swap_en;
6608 u32 swap_val;
Eilon Greenstein34f80b02008-06-23 20:33:01 -07006609
Eilon Greensteinb4661732009-01-14 06:43:56 +00006610 /* clear the UNDI indication */
6611 REG_WR(bp, DORQ_REG_NORM_CID_OFST, 0);
6612
Eilon Greenstein34f80b02008-06-23 20:33:01 -07006613 BNX2X_DEV_INFO("UNDI is active! reset device\n");
6614
6615 /* try unload UNDI on port 0 */
6616 bp->func = 0;
Vladislav Zolotarovda5a6622008-08-13 15:50:00 -07006617 bp->fw_seq =
6618 (SHMEM_RD(bp, func_mb[bp->func].drv_mb_header) &
6619 DRV_MSG_SEQ_NUMBER_MASK);
Yaniv Rosnera22f0782010-09-07 11:41:20 +00006620 reset_code = bnx2x_fw_command(bp, reset_code, 0);
Eilon Greenstein34f80b02008-06-23 20:33:01 -07006621
6622 /* if UNDI is loaded on the other port */
6623 if (reset_code != FW_MSG_CODE_DRV_UNLOAD_COMMON) {
6624
Vladislav Zolotarovda5a6622008-08-13 15:50:00 -07006625 /* send "DONE" for previous unload */
Yaniv Rosnera22f0782010-09-07 11:41:20 +00006626 bnx2x_fw_command(bp,
6627 DRV_MSG_CODE_UNLOAD_DONE, 0);
Vladislav Zolotarovda5a6622008-08-13 15:50:00 -07006628
6629 /* unload UNDI on port 1 */
Eilon Greenstein34f80b02008-06-23 20:33:01 -07006630 bp->func = 1;
Vladislav Zolotarovda5a6622008-08-13 15:50:00 -07006631 bp->fw_seq =
6632 (SHMEM_RD(bp, func_mb[bp->func].drv_mb_header) &
6633 DRV_MSG_SEQ_NUMBER_MASK);
6634 reset_code = DRV_MSG_CODE_UNLOAD_REQ_WOL_DIS;
Eilon Greenstein34f80b02008-06-23 20:33:01 -07006635
Yaniv Rosnera22f0782010-09-07 11:41:20 +00006636 bnx2x_fw_command(bp, reset_code, 0);
Eilon Greenstein34f80b02008-06-23 20:33:01 -07006637 }
6638
Eilon Greensteinb4661732009-01-14 06:43:56 +00006639 /* now it's safe to release the lock */
6640 bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_UNDI);
6641
Eilon Greensteinf1ef27e2009-02-12 08:36:23 +00006642 bnx2x_undi_int_disable(bp, func);
Vladislav Zolotarovda5a6622008-08-13 15:50:00 -07006643
6644 /* close input traffic and wait for it */
6645 /* Do not rcv packets to BRB */
6646 REG_WR(bp,
6647 (BP_PORT(bp) ? NIG_REG_LLH1_BRB1_DRV_MASK :
6648 NIG_REG_LLH0_BRB1_DRV_MASK), 0x0);
6649 /* Do not direct rcv packets that are not for MCP to
6650 * the BRB */
6651 REG_WR(bp,
6652 (BP_PORT(bp) ? NIG_REG_LLH1_BRB1_NOT_MCP :
6653 NIG_REG_LLH0_BRB1_NOT_MCP), 0x0);
6654 /* clear AEU */
6655 REG_WR(bp,
6656 (BP_PORT(bp) ? MISC_REG_AEU_MASK_ATTN_FUNC_1 :
6657 MISC_REG_AEU_MASK_ATTN_FUNC_0), 0);
6658 msleep(10);
6659
6660 /* save NIG port swap info */
6661 swap_val = REG_RD(bp, NIG_REG_PORT_SWAP);
6662 swap_en = REG_RD(bp, NIG_REG_STRAP_OVERRIDE);
Eilon Greenstein34f80b02008-06-23 20:33:01 -07006663 /* reset device */
6664 REG_WR(bp,
6665 GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_CLEAR,
Vladislav Zolotarovda5a6622008-08-13 15:50:00 -07006666 0xd3ffffff);
Eilon Greenstein34f80b02008-06-23 20:33:01 -07006667 REG_WR(bp,
6668 GRCBASE_MISC + MISC_REGISTERS_RESET_REG_2_CLEAR,
6669 0x1403);
Vladislav Zolotarovda5a6622008-08-13 15:50:00 -07006670 /* take the NIG out of reset and restore swap values */
6671 REG_WR(bp,
6672 GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_SET,
6673 MISC_REGISTERS_RESET_REG_1_RST_NIG);
6674 REG_WR(bp, NIG_REG_PORT_SWAP, swap_val);
6675 REG_WR(bp, NIG_REG_STRAP_OVERRIDE, swap_en);
6676
6677 /* send unload done to the MCP */
Yaniv Rosnera22f0782010-09-07 11:41:20 +00006678 bnx2x_fw_command(bp, DRV_MSG_CODE_UNLOAD_DONE, 0);
Vladislav Zolotarovda5a6622008-08-13 15:50:00 -07006679
6680 /* restore our func and fw_seq */
6681 bp->func = func;
6682 bp->fw_seq =
6683 (SHMEM_RD(bp, func_mb[bp->func].drv_mb_header) &
6684 DRV_MSG_SEQ_NUMBER_MASK);
Eilon Greensteinb4661732009-01-14 06:43:56 +00006685
6686 } else
6687 bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_UNDI);
Eilon Greenstein34f80b02008-06-23 20:33:01 -07006688 }
6689}
6690
/* Read the port-independent HW information (chip id, shmem bases, flash
 * size, bootcode version, WoL capability, part number) into bp->common
 * and bp->link_params. Sets NO_MCP_FLAG and returns early when the shmem
 * base looks invalid (MCP not active).
 */
static void __devinit bnx2x_get_common_hwinfo(struct bnx2x *bp)
{
	u32 val, val2, val3, val4, id;
	u16 pmc;

	/* Get the chip revision id and number. */
	/* chip num:16-31, rev:12-15, metal:4-11, bond_id:0-3 */
	val = REG_RD(bp, MISC_REG_CHIP_NUM);
	id = ((val & 0xffff) << 16);
	val = REG_RD(bp, MISC_REG_CHIP_REV);
	id |= ((val & 0xf) << 12);
	val = REG_RD(bp, MISC_REG_CHIP_METAL);
	id |= ((val & 0xff) << 4);
	val = REG_RD(bp, MISC_REG_BOND_ID);
	id |= (val & 0xf);
	bp->common.chip_id = id;
	bp->link_params.chip_id = bp->common.chip_id;
	BNX2X_DEV_INFO("chip ID is 0x%x\n", id);

	bp->common.chip_port_mode = CHIP_PORT_MODE_NONE; /* N/A */

	/* Set doorbell size */
	bp->db_size = (1 << BNX2X_DB_SHIFT);

	/*
	 * set base FW non-default (fast path) status block id, this value is
	 * used to initialize the fw_sb_id saved on the fp/queue structure to
	 * determine the id used by the FW.
	 */
	bp->base_fw_ndsb = BP_PORT(bp) * FP_SB_MAX_E1x;

	/* single-port detection — presumably a strap register at 0x2874;
	 * TODO confirm the register name */
	val = (REG_RD(bp, 0x2874) & 0x55);
	if ((bp->common.chip_id & 0x1) ||
	    (CHIP_IS_E1(bp) && val) || (CHIP_IS_E1H(bp) && (val == 0x55))) {
		bp->flags |= ONE_PORT_FLAG;
		BNX2X_DEV_INFO("single port device\n");
	}

	val = REG_RD(bp, MCP_REG_MCPR_NVM_CFG4);
	bp->common.flash_size = (NVRAM_1MB_SIZE <<
				 (val & MCPR_NVM_CFG4_FLASH_SIZE));
	BNX2X_DEV_INFO("flash_size 0x%x (%d)\n",
		       bp->common.flash_size, bp->common.flash_size);

	bp->common.shmem_base = REG_RD(bp, MISC_REG_SHARED_MEM_ADDR);
	bp->common.shmem2_base = REG_RD(bp, MISC_REG_GENERIC_CR_0);
	bp->link_params.shmem_base = bp->common.shmem_base;
	bp->link_params.shmem2_base = bp->common.shmem2_base;
	BNX2X_DEV_INFO("shmem offset 0x%x shmem2 offset 0x%x\n",
		       bp->common.shmem_base, bp->common.shmem2_base);

	/* shmem base out of the expected window => no MCP to talk to */
	if (!bp->common.shmem_base ||
	    (bp->common.shmem_base < 0xA0000) ||
	    (bp->common.shmem_base >= 0xC0000)) {
		BNX2X_DEV_INFO("MCP not active\n");
		bp->flags |= NO_MCP_FLAG;
		return;
	}

	val = SHMEM_RD(bp, validity_map[BP_PORT(bp)]);
	if ((val & (SHR_MEM_VALIDITY_DEV_INFO | SHR_MEM_VALIDITY_MB))
		!= (SHR_MEM_VALIDITY_DEV_INFO | SHR_MEM_VALIDITY_MB))
		BNX2X_ERROR("BAD MCP validity signature\n");

	bp->common.hw_config = SHMEM_RD(bp, dev_info.shared_hw_config.config);
	BNX2X_DEV_INFO("hw_config 0x%08x\n", bp->common.hw_config);

	bp->link_params.hw_led_mode = ((bp->common.hw_config &
					SHARED_HW_CFG_LED_MODE_MASK) >>
				       SHARED_HW_CFG_LED_MODE_SHIFT);

	bp->link_params.feature_config_flags = 0;
	val = SHMEM_RD(bp, dev_info.shared_feature_config.config);
	if (val & SHARED_FEAT_CFG_OVERRIDE_PREEMPHASIS_CFG_ENABLED)
		bp->link_params.feature_config_flags |=
				FEATURE_CONFIG_OVERRIDE_PREEMPHASIS_ENABLED;
	else
		bp->link_params.feature_config_flags &=
				~FEATURE_CONFIG_OVERRIDE_PREEMPHASIS_ENABLED;

	val = SHMEM_RD(bp, dev_info.bc_rev) >> 8;
	bp->common.bc_ver = val;
	BNX2X_DEV_INFO("bc_ver %X\n", val);
	if (val < BNX2X_BC_VER) {
		/* for now only warn
		 * later we might need to enforce this */
		BNX2X_ERROR("This driver needs bc_ver %X but found %X, "
			    "please upgrade BC\n", BNX2X_BC_VER, val);
	}
	/* feature flags gated on the bootcode version read above */
	bp->link_params.feature_config_flags |=
		(val >= REQ_BC_VER_4_VRFY_FIRST_PHY_OPT_MDL) ?
		FEATURE_CONFIG_BC_SUPPORTS_OPT_MDL_VRFY : 0;
	bp->link_params.feature_config_flags |=
		(val >= REQ_BC_VER_4_VRFY_SPECIFIC_PHY_OPT_MDL) ?
		FEATURE_CONFIG_BC_SUPPORTS_DUAL_PHY_OPT_MDL_VRFY : 0;

	if (BP_E1HVN(bp) == 0) {
		pci_read_config_word(bp->pdev, bp->pm_cap + PCI_PM_PMC, &pmc);
		bp->flags |= (pmc & PCI_PM_CAP_PME_D3cold) ? 0 : NO_WOL_FLAG;
	} else {
		/* no WOL capability for E1HVN != 0 */
		bp->flags |= NO_WOL_FLAG;
	}
	BNX2X_DEV_INFO("%sWoL capable\n",
		       (bp->flags & NO_WOL_FLAG) ? "not " : "");

	val = SHMEM_RD(bp, dev_info.shared_hw_config.part_num);
	val2 = SHMEM_RD(bp, dev_info.shared_hw_config.part_num[4]);
	val3 = SHMEM_RD(bp, dev_info.shared_hw_config.part_num[8]);
	val4 = SHMEM_RD(bp, dev_info.shared_hw_config.part_num[12]);

	dev_info(&bp->pdev->dev, "part number %X-%X-%X-%X\n",
		 val, val2, val3, val4);
}
6805
6806static void __devinit bnx2x_link_settings_supported(struct bnx2x *bp,
6807 u32 switch_cfg)
6808{
Yaniv Rosnera22f0782010-09-07 11:41:20 +00006809 int cfg_size = 0, idx, port = BP_PORT(bp);
6810
6811 /* Aggregation of supported attributes of all external phys */
6812 bp->port.supported[0] = 0;
6813 bp->port.supported[1] = 0;
Yaniv Rosnerb7737c92010-09-07 11:40:54 +00006814 switch (bp->link_params.num_phys) {
6815 case 1:
Yaniv Rosnera22f0782010-09-07 11:41:20 +00006816 bp->port.supported[0] = bp->link_params.phy[INT_PHY].supported;
6817 cfg_size = 1;
6818 break;
Yaniv Rosnerb7737c92010-09-07 11:40:54 +00006819 case 2:
Yaniv Rosnera22f0782010-09-07 11:41:20 +00006820 bp->port.supported[0] = bp->link_params.phy[EXT_PHY1].supported;
6821 cfg_size = 1;
6822 break;
6823 case 3:
6824 if (bp->link_params.multi_phy_config &
6825 PORT_HW_CFG_PHY_SWAPPED_ENABLED) {
6826 bp->port.supported[1] =
6827 bp->link_params.phy[EXT_PHY1].supported;
6828 bp->port.supported[0] =
6829 bp->link_params.phy[EXT_PHY2].supported;
6830 } else {
6831 bp->port.supported[0] =
6832 bp->link_params.phy[EXT_PHY1].supported;
6833 bp->port.supported[1] =
6834 bp->link_params.phy[EXT_PHY2].supported;
6835 }
6836 cfg_size = 2;
6837 break;
Yaniv Rosnerb7737c92010-09-07 11:40:54 +00006838 }
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02006839
Yaniv Rosnera22f0782010-09-07 11:41:20 +00006840 if (!(bp->port.supported[0] || bp->port.supported[1])) {
Yaniv Rosnerb7737c92010-09-07 11:40:54 +00006841 BNX2X_ERR("NVRAM config error. BAD phy config."
Yaniv Rosnera22f0782010-09-07 11:41:20 +00006842 "PHY1 config 0x%x, PHY2 config 0x%x\n",
Yaniv Rosnerb7737c92010-09-07 11:40:54 +00006843 SHMEM_RD(bp,
Yaniv Rosnera22f0782010-09-07 11:41:20 +00006844 dev_info.port_hw_config[port].external_phy_config),
6845 SHMEM_RD(bp,
6846 dev_info.port_hw_config[port].external_phy_config2));
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02006847 return;
6848 }
6849
Yaniv Rosnerb7737c92010-09-07 11:40:54 +00006850 switch (switch_cfg) {
6851 case SWITCH_CFG_1G:
Eilon Greenstein34f80b02008-06-23 20:33:01 -07006852 bp->port.phy_addr = REG_RD(bp, NIG_REG_SERDES0_CTRL_PHY_ADDR +
6853 port*0x10);
6854 BNX2X_DEV_INFO("phy_addr 0x%x\n", bp->port.phy_addr);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02006855 break;
6856
6857 case SWITCH_CFG_10G:
Eilon Greenstein34f80b02008-06-23 20:33:01 -07006858 bp->port.phy_addr = REG_RD(bp, NIG_REG_XGXS0_CTRL_PHY_ADDR +
6859 port*0x18);
6860 BNX2X_DEV_INFO("phy_addr 0x%x\n", bp->port.phy_addr);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02006861
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02006862 break;
6863
6864 default:
6865 BNX2X_ERR("BAD switch_cfg link_config 0x%x\n",
Yaniv Rosnera22f0782010-09-07 11:41:20 +00006866 bp->port.link_config[0]);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02006867 return;
6868 }
Yaniv Rosnera22f0782010-09-07 11:41:20 +00006869 /* mask what we support according to speed_cap_mask per configuration */
6870 for (idx = 0; idx < cfg_size; idx++) {
6871 if (!(bp->link_params.speed_cap_mask[idx] &
Yaniv Rosnerc18487e2008-06-23 20:27:52 -07006872 PORT_HW_CFG_SPEED_CAPABILITY_D0_10M_HALF))
Yaniv Rosnera22f0782010-09-07 11:41:20 +00006873 bp->port.supported[idx] &= ~SUPPORTED_10baseT_Half;
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02006874
Yaniv Rosnera22f0782010-09-07 11:41:20 +00006875 if (!(bp->link_params.speed_cap_mask[idx] &
Yaniv Rosnerc18487e2008-06-23 20:27:52 -07006876 PORT_HW_CFG_SPEED_CAPABILITY_D0_10M_FULL))
Yaniv Rosnera22f0782010-09-07 11:41:20 +00006877 bp->port.supported[idx] &= ~SUPPORTED_10baseT_Full;
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02006878
Yaniv Rosnera22f0782010-09-07 11:41:20 +00006879 if (!(bp->link_params.speed_cap_mask[idx] &
Yaniv Rosnerc18487e2008-06-23 20:27:52 -07006880 PORT_HW_CFG_SPEED_CAPABILITY_D0_100M_HALF))
Yaniv Rosnera22f0782010-09-07 11:41:20 +00006881 bp->port.supported[idx] &= ~SUPPORTED_100baseT_Half;
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02006882
Yaniv Rosnera22f0782010-09-07 11:41:20 +00006883 if (!(bp->link_params.speed_cap_mask[idx] &
Yaniv Rosnerc18487e2008-06-23 20:27:52 -07006884 PORT_HW_CFG_SPEED_CAPABILITY_D0_100M_FULL))
Yaniv Rosnera22f0782010-09-07 11:41:20 +00006885 bp->port.supported[idx] &= ~SUPPORTED_100baseT_Full;
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02006886
Yaniv Rosnera22f0782010-09-07 11:41:20 +00006887 if (!(bp->link_params.speed_cap_mask[idx] &
Yaniv Rosnerc18487e2008-06-23 20:27:52 -07006888 PORT_HW_CFG_SPEED_CAPABILITY_D0_1G))
Yaniv Rosnera22f0782010-09-07 11:41:20 +00006889 bp->port.supported[idx] &= ~(SUPPORTED_1000baseT_Half |
Eilon Greenstein34f80b02008-06-23 20:33:01 -07006890 SUPPORTED_1000baseT_Full);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02006891
Yaniv Rosnera22f0782010-09-07 11:41:20 +00006892 if (!(bp->link_params.speed_cap_mask[idx] &
Yaniv Rosnerc18487e2008-06-23 20:27:52 -07006893 PORT_HW_CFG_SPEED_CAPABILITY_D0_2_5G))
Yaniv Rosnera22f0782010-09-07 11:41:20 +00006894 bp->port.supported[idx] &= ~SUPPORTED_2500baseX_Full;
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02006895
Yaniv Rosnera22f0782010-09-07 11:41:20 +00006896 if (!(bp->link_params.speed_cap_mask[idx] &
Yaniv Rosnerc18487e2008-06-23 20:27:52 -07006897 PORT_HW_CFG_SPEED_CAPABILITY_D0_10G))
Yaniv Rosnera22f0782010-09-07 11:41:20 +00006898 bp->port.supported[idx] &= ~SUPPORTED_10000baseT_Full;
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02006899
Yaniv Rosnera22f0782010-09-07 11:41:20 +00006900 }
6901
6902 BNX2X_DEV_INFO("supported 0x%x 0x%x\n", bp->port.supported[0],
6903 bp->port.supported[1]);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02006904}
6905
/* Translate the NVRAM-requested link settings into driver link parameters.
 *
 * For each PHY configuration of the port (one configuration for 1-2 PHYs,
 * two for dual external PHYs), decode the PORT_FEATURE_LINK_SPEED_* field
 * of bp->port.link_config[idx] into req_line_speed/req_duplex and the
 * ethtool ADVERTISED_* mask, validating each request against the
 * bp->port.supported[idx] mask built by bnx2x_link_settings_supported().
 * On an unsupported forced speed the function logs an NVRAM config error
 * and returns early, leaving the remaining configurations untouched.
 */
static void __devinit bnx2x_link_settings_requested(struct bnx2x *bp)
{
	u32 link_config, idx, cfg_size = 0;
	bp->port.advertising[0] = 0;
	bp->port.advertising[1] = 0;
	/* Number of link configurations depends on the PHY topology */
	switch (bp->link_params.num_phys) {
	case 1:
	case 2:
		cfg_size = 1;
		break;
	case 3:
		cfg_size = 2;
		break;
	}
	for (idx = 0; idx < cfg_size; idx++) {
		/* Full duplex unless a half-duplex speed is requested below */
		bp->link_params.req_duplex[idx] = DUPLEX_FULL;
		link_config = bp->port.link_config[idx];
		switch (link_config & PORT_FEATURE_LINK_SPEED_MASK) {
		case PORT_FEATURE_LINK_SPEED_AUTO:
			if (bp->port.supported[idx] & SUPPORTED_Autoneg) {
				bp->link_params.req_line_speed[idx] =
					SPEED_AUTO_NEG;
				bp->port.advertising[idx] |=
					bp->port.supported[idx];
			} else {
				/* force 10G, no AN */
				bp->link_params.req_line_speed[idx] =
					SPEED_10000;
				bp->port.advertising[idx] |=
					(ADVERTISED_10000baseT_Full |
						ADVERTISED_FIBRE);
				/* skip the common flow-control/log tail */
				continue;
			}
			break;

		case PORT_FEATURE_LINK_SPEED_10M_FULL:
			if (bp->port.supported[idx] & SUPPORTED_10baseT_Full) {
				bp->link_params.req_line_speed[idx] =
					SPEED_10;
				bp->port.advertising[idx] |=
					(ADVERTISED_10baseT_Full |
						ADVERTISED_TP);
			} else {
				BNX2X_ERROR("NVRAM config error. "
					    "Invalid link_config 0x%x"
					    " speed_cap_mask 0x%x\n",
					    link_config,
				    bp->link_params.speed_cap_mask[idx]);
				return;
			}
			break;

		case PORT_FEATURE_LINK_SPEED_10M_HALF:
			if (bp->port.supported[idx] & SUPPORTED_10baseT_Half) {
				bp->link_params.req_line_speed[idx] =
					SPEED_10;
				bp->link_params.req_duplex[idx] =
					DUPLEX_HALF;
				bp->port.advertising[idx] |=
					(ADVERTISED_10baseT_Half |
						ADVERTISED_TP);
			} else {
				BNX2X_ERROR("NVRAM config error. "
					    "Invalid link_config 0x%x"
					    " speed_cap_mask 0x%x\n",
					    link_config,
				    bp->link_params.speed_cap_mask[idx]);
				return;
			}
			break;

		case PORT_FEATURE_LINK_SPEED_100M_FULL:
			if (bp->port.supported[idx] & SUPPORTED_100baseT_Full) {
				bp->link_params.req_line_speed[idx] =
					SPEED_100;
				bp->port.advertising[idx] |=
					(ADVERTISED_100baseT_Full |
						ADVERTISED_TP);
			} else {
				BNX2X_ERROR("NVRAM config error. "
					    "Invalid link_config 0x%x"
					    " speed_cap_mask 0x%x\n",
					    link_config,
				    bp->link_params.speed_cap_mask[idx]);
				return;
			}
			break;

		case PORT_FEATURE_LINK_SPEED_100M_HALF:
			if (bp->port.supported[idx] & SUPPORTED_100baseT_Half) {
				bp->link_params.req_line_speed[idx] = SPEED_100;
				bp->link_params.req_duplex[idx] = DUPLEX_HALF;
				bp->port.advertising[idx] |=
					(ADVERTISED_100baseT_Half |
						ADVERTISED_TP);
			} else {
				BNX2X_ERROR("NVRAM config error. "
					    "Invalid link_config 0x%x"
					    " speed_cap_mask 0x%x\n",
					    link_config,
				    bp->link_params.speed_cap_mask[idx]);
				return;
			}
			break;

		case PORT_FEATURE_LINK_SPEED_1G:
			if (bp->port.supported[idx] &
			    SUPPORTED_1000baseT_Full) {
				bp->link_params.req_line_speed[idx] =
					SPEED_1000;
				bp->port.advertising[idx] |=
					(ADVERTISED_1000baseT_Full |
						ADVERTISED_TP);
			} else {
				BNX2X_ERROR("NVRAM config error. "
					    "Invalid link_config 0x%x"
					    " speed_cap_mask 0x%x\n",
					    link_config,
				    bp->link_params.speed_cap_mask[idx]);
				return;
			}
			break;

		case PORT_FEATURE_LINK_SPEED_2_5G:
			if (bp->port.supported[idx] &
			    SUPPORTED_2500baseX_Full) {
				bp->link_params.req_line_speed[idx] =
					SPEED_2500;
				bp->port.advertising[idx] |=
					(ADVERTISED_2500baseX_Full |
						ADVERTISED_TP);
			} else {
				BNX2X_ERROR("NVRAM config error. "
					    "Invalid link_config 0x%x"
					    " speed_cap_mask 0x%x\n",
					    link_config,
				    bp->link_params.speed_cap_mask[idx]);
				return;
			}
			break;

		case PORT_FEATURE_LINK_SPEED_10G_CX4:
		case PORT_FEATURE_LINK_SPEED_10G_KX4:
		case PORT_FEATURE_LINK_SPEED_10G_KR:
			if (bp->port.supported[idx] &
			    SUPPORTED_10000baseT_Full) {
				bp->link_params.req_line_speed[idx] =
					SPEED_10000;
				bp->port.advertising[idx] |=
					(ADVERTISED_10000baseT_Full |
						ADVERTISED_FIBRE);
			} else {
				BNX2X_ERROR("NVRAM config error. "
					    "Invalid link_config 0x%x"
					    " speed_cap_mask 0x%x\n",
					    link_config,
				    bp->link_params.speed_cap_mask[idx]);
				return;
			}
			break;

		default:
			/* Unknown speed selector: fall back to autoneg and
			 * advertise everything the hardware supports
			 */
			BNX2X_ERROR("NVRAM config error. "
				    "BAD link speed link_config 0x%x\n",
					  link_config);
			bp->link_params.req_line_speed[idx] = SPEED_AUTO_NEG;
			bp->port.advertising[idx] = bp->port.supported[idx];
			break;
		}

		/* Requested flow control comes straight from NVRAM, but AUTO
		 * is meaningless without autoneg support - degrade to NONE
		 */
		bp->link_params.req_flow_ctrl[idx] = (link_config &
					 PORT_FEATURE_FLOW_CONTROL_MASK);
		if ((bp->link_params.req_flow_ctrl[idx] ==
		     BNX2X_FLOW_CTRL_AUTO) &&
		    !(bp->port.supported[idx] & SUPPORTED_Autoneg)) {
			bp->link_params.req_flow_ctrl[idx] =
				BNX2X_FLOW_CTRL_NONE;
		}

		BNX2X_DEV_INFO("req_line_speed %d req_duplex %d req_flow_ctrl"
			       " 0x%x advertising 0x%x\n",
			       bp->link_params.req_line_speed[idx],
			       bp->link_params.req_duplex[idx],
			       bp->link_params.req_flow_ctrl[idx],
			       bp->port.advertising[idx]);
	}
}
7093
Michael Chane665bfd2009-10-10 13:46:54 +00007094static void __devinit bnx2x_set_mac_buf(u8 *mac_buf, u32 mac_lo, u16 mac_hi)
7095{
7096 mac_hi = cpu_to_be16(mac_hi);
7097 mac_lo = cpu_to_be32(mac_lo);
7098 memcpy(mac_buf, &mac_hi, sizeof(mac_hi));
7099 memcpy(mac_buf + sizeof(mac_hi), &mac_lo, sizeof(mac_lo));
7100}
7101
Eilon Greenstein34f80b02008-06-23 20:33:01 -07007102static void __devinit bnx2x_get_port_hwinfo(struct bnx2x *bp)
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02007103{
Eilon Greenstein34f80b02008-06-23 20:33:01 -07007104 int port = BP_PORT(bp);
7105 u32 val, val2;
Eilon Greenstein589abe32009-02-12 08:36:55 +00007106 u32 config;
Yaniv Rosnerb7737c92010-09-07 11:40:54 +00007107 u32 ext_phy_type, ext_phy_config;;
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02007108
Yaniv Rosnerc18487e2008-06-23 20:27:52 -07007109 bp->link_params.bp = bp;
Eilon Greenstein34f80b02008-06-23 20:33:01 -07007110 bp->link_params.port = port;
Yaniv Rosnerc18487e2008-06-23 20:27:52 -07007111
Yaniv Rosnerc18487e2008-06-23 20:27:52 -07007112 bp->link_params.lane_config =
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02007113 SHMEM_RD(bp, dev_info.port_hw_config[port].lane_config);
Eilon Greenstein4d295db2009-07-21 05:47:47 +00007114
Yaniv Rosnera22f0782010-09-07 11:41:20 +00007115 bp->link_params.speed_cap_mask[0] =
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02007116 SHMEM_RD(bp,
7117 dev_info.port_hw_config[port].speed_capability_mask);
Yaniv Rosnera22f0782010-09-07 11:41:20 +00007118 bp->link_params.speed_cap_mask[1] =
7119 SHMEM_RD(bp,
7120 dev_info.port_hw_config[port].speed_capability_mask2);
7121 bp->port.link_config[0] =
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02007122 SHMEM_RD(bp, dev_info.port_feature_config[port].link_config);
7123
Yaniv Rosnera22f0782010-09-07 11:41:20 +00007124 bp->port.link_config[1] =
7125 SHMEM_RD(bp, dev_info.port_feature_config[port].link_config2);
Eilon Greensteinc2c8b032009-02-12 08:37:14 +00007126
Yaniv Rosnera22f0782010-09-07 11:41:20 +00007127 bp->link_params.multi_phy_config =
7128 SHMEM_RD(bp, dev_info.port_hw_config[port].multi_phy_config);
Eilon Greenstein3ce2c3f2009-02-12 08:37:52 +00007129 /* If the device is capable of WoL, set the default state according
7130 * to the HW
7131 */
Eilon Greenstein4d295db2009-07-21 05:47:47 +00007132 config = SHMEM_RD(bp, dev_info.port_feature_config[port].config);
Eilon Greenstein3ce2c3f2009-02-12 08:37:52 +00007133 bp->wol = (!(bp->flags & NO_WOL_FLAG) &&
7134 (config & PORT_FEATURE_WOL_ENABLED));
7135
Yaniv Rosnerb7737c92010-09-07 11:40:54 +00007136 BNX2X_DEV_INFO("lane_config 0x%08x"
Yaniv Rosnera22f0782010-09-07 11:41:20 +00007137 "speed_cap_mask0 0x%08x link_config0 0x%08x\n",
Yaniv Rosnerc18487e2008-06-23 20:27:52 -07007138 bp->link_params.lane_config,
Yaniv Rosnera22f0782010-09-07 11:41:20 +00007139 bp->link_params.speed_cap_mask[0],
7140 bp->port.link_config[0]);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02007141
Yaniv Rosnera22f0782010-09-07 11:41:20 +00007142 bp->link_params.switch_cfg = (bp->port.link_config[0] &
Eilon Greenstein4d295db2009-07-21 05:47:47 +00007143 PORT_FEATURE_CONNECTED_SWITCH_MASK);
Yaniv Rosnerb7737c92010-09-07 11:40:54 +00007144 bnx2x_phy_probe(&bp->link_params);
Yaniv Rosnerc18487e2008-06-23 20:27:52 -07007145 bnx2x_link_settings_supported(bp, bp->link_params.switch_cfg);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02007146
7147 bnx2x_link_settings_requested(bp);
7148
Eilon Greenstein01cd4522009-08-12 08:23:08 +00007149 /*
7150 * If connected directly, work with the internal PHY, otherwise, work
7151 * with the external PHY
7152 */
Yaniv Rosnerb7737c92010-09-07 11:40:54 +00007153 ext_phy_config =
7154 SHMEM_RD(bp,
7155 dev_info.port_hw_config[port].external_phy_config);
7156 ext_phy_type = XGXS_EXT_PHY_TYPE(ext_phy_config);
Eilon Greenstein01cd4522009-08-12 08:23:08 +00007157 if (ext_phy_type == PORT_HW_CFG_XGXS_EXT_PHY_TYPE_DIRECT)
Yaniv Rosnerb7737c92010-09-07 11:40:54 +00007158 bp->mdio.prtad = bp->port.phy_addr;
Eilon Greenstein01cd4522009-08-12 08:23:08 +00007159
7160 else if ((ext_phy_type != PORT_HW_CFG_XGXS_EXT_PHY_TYPE_FAILURE) &&
7161 (ext_phy_type != PORT_HW_CFG_XGXS_EXT_PHY_TYPE_NOT_CONN))
7162 bp->mdio.prtad =
Yaniv Rosnerb7737c92010-09-07 11:40:54 +00007163 XGXS_EXT_PHY_ADDR(ext_phy_config);
Eilon Greenstein01cd4522009-08-12 08:23:08 +00007164
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02007165 val2 = SHMEM_RD(bp, dev_info.port_hw_config[port].mac_upper);
7166 val = SHMEM_RD(bp, dev_info.port_hw_config[port].mac_lower);
Michael Chane665bfd2009-10-10 13:46:54 +00007167 bnx2x_set_mac_buf(bp->dev->dev_addr, val, val2);
Yaniv Rosnerc18487e2008-06-23 20:27:52 -07007168 memcpy(bp->link_params.mac_addr, bp->dev->dev_addr, ETH_ALEN);
7169 memcpy(bp->dev->perm_addr, bp->dev->dev_addr, ETH_ALEN);
Michael Chan37b091b2009-10-10 13:46:55 +00007170
7171#ifdef BCM_CNIC
7172 val2 = SHMEM_RD(bp, dev_info.port_hw_config[port].iscsi_mac_upper);
7173 val = SHMEM_RD(bp, dev_info.port_hw_config[port].iscsi_mac_lower);
7174 bnx2x_set_mac_buf(bp->iscsi_mac, val, val2);
7175#endif
Eilon Greenstein34f80b02008-06-23 20:33:01 -07007176}
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02007177
/* Gather chip- and function-level hardware configuration.
 *
 * Reads the common (chip-wide) info, sets up IGU status-block defaults,
 * detects E1H multi-function (MF) mode and the function's outer VLAN tag
 * (E1HOV), then reads the per-port info and the function MAC address.
 *
 * Returns 0 on success, -EPERM on an inconsistent MF/E1HOV NVRAM
 * configuration (no valid E1HOV in MF mode, or a non-zero VN in
 * single-function mode).
 */
static int __devinit bnx2x_get_hwinfo(struct bnx2x *bp)
{
	int func = BP_FUNC(bp);
	u32 val, val2;
	int rc = 0;

	bnx2x_get_common_hwinfo(bp);

	/* E1x uses the HC interrupt block */
	bp->common.int_block = INT_BLOCK_HC;

	bp->igu_dsb_id = DEF_SB_IGU_ID;
	bp->igu_base_sb = 0;
	bp->igu_sb_cnt = min_t(u8, FP_SB_MAX_E1x, bp->l2_cid_count);

	/* Assume single-function until the MF config proves otherwise */
	bp->e1hov = 0;
	bp->e1hmf = 0;
	if (CHIP_IS_E1H(bp) && !BP_NOMCP(bp)) {

		/* MF config area lives right after the function mailboxes */
		bp->common.mf_cfg_base = bp->common.shmem_base +
		    offsetof(struct shmem_region, func_mb) +
		    E1H_FUNC_MAX * sizeof(struct drv_func_mb);
		bp->mf_config =
			MF_CFG_RD(bp, func_mf_config[func].config);

		/* A non-default E1HOV tag on function 0 means the board is
		 * provisioned for multi-function mode
		 */
		val = (MF_CFG_RD(bp, func_mf_config[FUNC_0].e1hov_tag) &
		       FUNC_MF_CFG_E1HOV_TAG_MASK);
		if (val != FUNC_MF_CFG_E1HOV_TAG_DEFAULT)
			bp->e1hmf = 1;
		BNX2X_DEV_INFO("%s function mode\n",
			       IS_E1HMF(bp) ? "multi" : "single");

		if (IS_E1HMF(bp)) {
			/* In MF mode this function must have its own tag */
			val = (MF_CFG_RD(bp, func_mf_config[func].
								e1hov_tag) &
			       FUNC_MF_CFG_E1HOV_TAG_MASK);
			if (val != FUNC_MF_CFG_E1HOV_TAG_DEFAULT) {
				bp->e1hov = val;
				BNX2X_DEV_INFO("E1HOV for func %d is %d "
					       "(0x%04x)\n",
					       func, bp->e1hov, bp->e1hov);
			} else {
				BNX2X_ERROR("No valid E1HOV for func %d,"
					    " aborting\n", func);
				rc = -EPERM;
			}
		} else {
			if (BP_E1HVN(bp)) {
				BNX2X_ERROR("VN %d in single function mode,"
					    " aborting\n", BP_E1HVN(bp));
				rc = -EPERM;
			}
		}
	}

	/* adjust igu_sb_cnt to MF: the status blocks are shared by all VNs */
	if (IS_E1HMF(bp))
		bp->igu_sb_cnt /= E1HVN_MAX;

	if (!BP_NOMCP(bp)) {
		bnx2x_get_port_hwinfo(bp);

		/* resume the driver/firmware mailbox sequence */
		bp->fw_seq = (SHMEM_RD(bp, func_mb[func].drv_mb_header) &
			      DRV_MSG_SEQ_NUMBER_MASK);
		BNX2X_DEV_INFO("fw_seq 0x%08x\n", bp->fw_seq);
	}

	if (IS_E1HMF(bp)) {
		/* In MF mode the per-function MAC overrides the port MAC
		 * read by bnx2x_get_port_hwinfo()
		 */
		val2 = MF_CFG_RD(bp, func_mf_config[func].mac_upper);
		val = MF_CFG_RD(bp, func_mf_config[func].mac_lower);
		if ((val2 != FUNC_MF_CFG_UPPERMAC_DEFAULT) &&
		    (val != FUNC_MF_CFG_LOWERMAC_DEFAULT)) {
			bp->dev->dev_addr[0] = (u8)(val2 >> 8 & 0xff);
			bp->dev->dev_addr[1] = (u8)(val2 & 0xff);
			bp->dev->dev_addr[2] = (u8)(val >> 24 & 0xff);
			bp->dev->dev_addr[3] = (u8)(val >> 16 & 0xff);
			bp->dev->dev_addr[4] = (u8)(val >> 8 & 0xff);
			bp->dev->dev_addr[5] = (u8)(val & 0xff);
			memcpy(bp->link_params.mac_addr, bp->dev->dev_addr,
			       ETH_ALEN);
			memcpy(bp->dev->perm_addr, bp->dev->dev_addr,
			       ETH_ALEN);
		}

		return rc;
	}

	if (BP_NOMCP(bp)) {
		/* only supposed to happen on emulation/FPGA */
		BNX2X_ERROR("warning: random MAC workaround active\n");
		random_ether_addr(bp->dev->dev_addr);
		memcpy(bp->dev->perm_addr, bp->dev->dev_addr, ETH_ALEN);
	}

	return rc;
}
7273
/* Extract the vendor firmware version string from the PCI VPD.
 *
 * Reads the device's Vital Product Data, locates the read-only (RO) large
 * resource tag, and - only if the manufacturer ID keyword matches the Dell
 * vendor ID (hex, either case) - copies the vendor-specific V0 keyword
 * payload into bp->fw_ver, appending a trailing space.
 *
 * bp->fw_ver is zeroed first, so on any parse failure it is simply left
 * empty; the function never fails.
 */
static void __devinit bnx2x_read_fwinfo(struct bnx2x *bp)
{
	int cnt, i, block_end, rodi;
	char vpd_data[BNX2X_VPD_LEN+1];
	char str_id_reg[VENDOR_ID_LEN+1];
	char str_id_cap[VENDOR_ID_LEN+1];
	u8 len;

	cnt = pci_read_vpd(bp->pdev, 0, BNX2X_VPD_LEN, vpd_data);
	memset(bp->fw_ver, 0, sizeof(bp->fw_ver));

	/* a short read means we cannot trust the buffer contents */
	if (cnt < BNX2X_VPD_LEN)
		goto out_not_found;

	i = pci_vpd_find_tag(vpd_data, 0, BNX2X_VPD_LEN,
			     PCI_VPD_LRDT_RO_DATA);
	if (i < 0)
		goto out_not_found;


	/* end of the RO section = tag start + header + payload length */
	block_end = i + PCI_VPD_LRDT_TAG_SIZE +
		    pci_vpd_lrdt_size(&vpd_data[i]);

	i += PCI_VPD_LRDT_TAG_SIZE;

	if (block_end > BNX2X_VPD_LEN)
		goto out_not_found;

	rodi = pci_vpd_find_info_keyword(vpd_data, i, block_end,
				   PCI_VPD_RO_KEYWORD_MFR_ID);
	if (rodi < 0)
		goto out_not_found;

	len = pci_vpd_info_field_size(&vpd_data[rodi]);

	/* manufacturer ID must be exactly a 4-char vendor ID */
	if (len != VENDOR_ID_LEN)
		goto out_not_found;

	rodi += PCI_VPD_INFO_FLD_HDR_SIZE;

	/* vendor specific info */
	snprintf(str_id_reg, VENDOR_ID_LEN + 1, "%04x", PCI_VENDOR_ID_DELL);
	snprintf(str_id_cap, VENDOR_ID_LEN + 1, "%04X", PCI_VENDOR_ID_DELL);
	if (!strncmp(str_id_reg, &vpd_data[rodi], VENDOR_ID_LEN) ||
	    !strncmp(str_id_cap, &vpd_data[rodi], VENDOR_ID_LEN)) {

		rodi = pci_vpd_find_info_keyword(vpd_data, i, block_end,
						PCI_VPD_RO_KEYWORD_VENDOR0);
		if (rodi >= 0) {
			len = pci_vpd_info_field_size(&vpd_data[rodi]);

			rodi += PCI_VPD_INFO_FLD_HDR_SIZE;

			/* bound to fw_ver capacity and the VPD buffer;
			 * NOTE(review): assumes bp->fw_ver holds >= 32
			 * bytes plus the trailing space - confirm against
			 * the struct definition
			 */
			if (len < 32 && (len + rodi) <= BNX2X_VPD_LEN) {
				memcpy(bp->fw_ver, &vpd_data[rodi], len);
				bp->fw_ver[len] = ' ';
			}
		}
		return;
	}
out_not_found:
	return;
}
7337
/* One-time software initialization of the bnx2x device structure.
 *
 * Called at probe time: masks interrupts until HW init, sets up locks and
 * deferred work, reads HW/NVRAM info, resets a leftover UNDI state, and
 * applies the module parameters (multi_mode, int_mode, disable_tpa,
 * dropless_fc, mrrs, poll) plus coalescing and timer defaults.
 *
 * Returns 0 on success or a negative errno propagated from
 * bnx2x_get_hwinfo()/bnx2x_alloc_mem_bp().
 */
static int __devinit bnx2x_init_bp(struct bnx2x *bp)
{
	int func = BP_FUNC(bp);
	int timer_interval;
	int rc;

	/* Disable interrupt handling until HW is initialized */
	atomic_set(&bp->intr_sem, 1);
	smp_wmb(); /* Ensure that bp->intr_sem update is SMP-safe */

	mutex_init(&bp->port.phy_mutex);
	mutex_init(&bp->fw_mb_mutex);
	spin_lock_init(&bp->stats_lock);
#ifdef BCM_CNIC
	mutex_init(&bp->cnic_mutex);
#endif

	INIT_DELAYED_WORK(&bp->sp_task, bnx2x_sp_task);
	INIT_DELAYED_WORK(&bp->reset_task, bnx2x_reset_task);

	rc = bnx2x_get_hwinfo(bp);

	if (!rc)
		rc = bnx2x_alloc_mem_bp(bp);

	bnx2x_read_fwinfo(bp);
	/* need to reset chip if undi was active */
	if (!BP_NOMCP(bp))
		bnx2x_undi_unload(bp);

	if (CHIP_REV_IS_FPGA(bp))
		dev_err(&bp->pdev->dev, "FPGA detected\n");

	if (BP_NOMCP(bp) && (func == 0))
		dev_err(&bp->pdev->dev, "MCP disabled, "
					"must load devices in order!\n");

	/* Set multi queue mode: RSS requires MSI-X interrupts */
	if ((multi_mode != ETH_RSS_MODE_DISABLED) &&
	    ((int_mode == INT_MODE_INTx) || (int_mode == INT_MODE_MSI))) {
		dev_err(&bp->pdev->dev, "Multi disabled since int_mode "
					"requested is not MSI-X\n");
		multi_mode = ETH_RSS_MODE_DISABLED;
	}
	bp->multi_mode = multi_mode;
	bp->int_mode = int_mode;

	bp->dev->features |= NETIF_F_GRO;

	/* Set TPA flags (LRO follows the TPA module parameter) */
	if (disable_tpa) {
		bp->flags &= ~TPA_ENABLE_FLAG;
		bp->dev->features &= ~NETIF_F_LRO;
	} else {
		bp->flags |= TPA_ENABLE_FLAG;
		bp->dev->features |= NETIF_F_LRO;
	}
	bp->disable_tpa = disable_tpa;

	/* dropless flow control is not supported on E1 */
	if (CHIP_IS_E1(bp))
		bp->dropless_fc = 0;
	else
		bp->dropless_fc = dropless_fc;

	bp->mrrs = mrrs;

	bp->tx_ring_size = MAX_TX_AVAIL;

	bp->rx_csum = 1;

	/* make sure that the numbers are in the right granularity */
	bp->tx_ticks = (50 / BNX2X_BTR) * BNX2X_BTR;
	bp->rx_ticks = (25 / BNX2X_BTR) * BNX2X_BTR;

	/* periodic timer; the poll module parameter overrides the default */
	timer_interval = (CHIP_REV_IS_SLOW(bp) ? 5*HZ : HZ);
	bp->current_interval = (poll ? poll : timer_interval);

	init_timer(&bp->timer);
	bp->timer.expires = jiffies + bp->current_interval;
	bp->timer.data = (unsigned long) bp;
	bp->timer.function = bnx2x_timer;

	return rc;
}
7422
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02007423
Dmitry Kravkovde0c62d2010-07-27 12:35:24 +00007424/****************************************************************************
7425* General service functions
7426****************************************************************************/
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02007427
Yitchak Gertnerbb2a0f72008-06-23 20:33:36 -07007428/* called with rtnl_lock */
static int bnx2x_open(struct net_device *dev)
{
	struct bnx2x *bp = netdev_priv(dev);

	netif_carrier_off(dev);

	bnx2x_set_power_state(bp, PCI_D0);

	/* If a parity-error recovery is still pending, try to complete it
	 * here before loading the NIC; bail out with -EAGAIN if we cannot.
	 */
	if (!bnx2x_reset_is_done(bp)) {
		do {
			/* Reset MCP mail box sequence if there is on going
			 * recovery
			 */
			bp->fw_seq = 0;

			/* If this is the first function to load (load count
			 * is zero) and reset is still not cleared, become
			 * the recovery leader and run "process kill". The
			 * attention state is not checked here because it
			 * may have already been cleared by a "common"
			 * reset, but we proceed with "process kill" anyway.
			 */
			if ((bnx2x_get_load_cnt(bp) == 0) &&
				bnx2x_trylock_hw_lock(bp,
				HW_LOCK_RESOURCE_RESERVED_08) &&
				(!bnx2x_leader_reset(bp))) {
				DP(NETIF_MSG_HW, "Recovered in open\n");
				break;
			}

			/* Recovery could not be completed - power back down
			 * and ask the user to retry later
			 */
			bnx2x_set_power_state(bp, PCI_D3hot);

			printk(KERN_ERR"%s: Recovery flow hasn't been properly"
			" completed yet. Try again later. If u still see this"
			" message after a few retries then power cycle is"
			" required.\n", bp->dev->name);

			return -EAGAIN;
		} while (0);
	}

	bp->recovery_state = BNX2X_RECOVERY_DONE;

	return bnx2x_nic_load(bp, LOAD_OPEN);
}
7473
Yitchak Gertnerbb2a0f72008-06-23 20:33:36 -07007474/* called with rtnl_lock */
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02007475static int bnx2x_close(struct net_device *dev)
7476{
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02007477 struct bnx2x *bp = netdev_priv(dev);
7478
7479 /* Unload the driver, release IRQs */
Yitchak Gertnerbb2a0f72008-06-23 20:33:36 -07007480 bnx2x_nic_unload(bp, UNLOAD_CLOSE);
Vladislav Zolotarovd3dbfee2010-04-19 01:14:49 +00007481 bnx2x_set_power_state(bp, PCI_D3hot);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02007482
7483 return 0;
7484}
7485
/* called with netif_tx_lock from dev_mcast.c */
/* Program the RX filtering mode (normal/allmulti/promisc) and, for the
 * "some multicasts" case, the per-chip multicast acceptance:
 *  - E1: exact-match MC list written via ramrod (no wait - may be called
 *    from non-sleepable context)
 *  - E1H: 256-bit CRC32c hash filter written to MC_HASH registers
 */
void bnx2x_set_rx_mode(struct net_device *dev)
{
	struct bnx2x *bp = netdev_priv(dev);
	u32 rx_mode = BNX2X_RX_MODE_NORMAL;
	int port = BP_PORT(bp);

	/* HW filtering can only be (re)programmed while the NIC is up */
	if (bp->state != BNX2X_STATE_OPEN) {
		DP(NETIF_MSG_IFUP, "state is %x, returning\n", bp->state);
		return;
	}

	DP(NETIF_MSG_IFUP, "dev->flags = %x\n", dev->flags);

	if (dev->flags & IFF_PROMISC)
		rx_mode = BNX2X_RX_MODE_PROMISC;

	/* E1 has a bounded exact-match MC table; overflow falls back to
	 * accepting all multicasts */
	else if ((dev->flags & IFF_ALLMULTI) ||
		 ((netdev_mc_count(dev) > BNX2X_MAX_MULTICAST) &&
		  CHIP_IS_E1(bp)))
		rx_mode = BNX2X_RX_MODE_ALLMULTI;

	else { /* some multicasts */
		if (CHIP_IS_E1(bp)) {
			/*
			 * set mc list, do not wait as wait implies sleep
			 * and set_rx_mode can be invoked from non-sleepable
			 * context
			 */
			/* per-port table base; emulation chips use a
			 * smaller table */
			u8 offset = (CHIP_REV_IS_SLOW(bp) ?
				     BNX2X_MAX_EMUL_MULTI*(1 + port) :
				     BNX2X_MAX_MULTICAST*(1 + port));

			bnx2x_set_e1_mc_list(bp, offset);
		} else { /* E1H */
			/* Accept one or more multicasts */
			struct netdev_hw_addr *ha;
			u32 mc_filter[MC_HASH_SIZE];
			u32 crc, bit, regidx;
			int i;

			memset(mc_filter, 0, 4 * MC_HASH_SIZE);

			netdev_for_each_mc_addr(ha, dev) {
				DP(NETIF_MSG_IFUP, "Adding mcast MAC: %pM\n",
				   bnx2x_mc_addr(ha));

				/* top CRC byte selects one of 256 hash
				 * bits: regidx = word, bit = bit-in-word */
				crc = crc32c_le(0, bnx2x_mc_addr(ha),
						ETH_ALEN);
				bit = (crc >> 24) & 0xff;
				regidx = bit >> 5;
				bit &= 0x1f;
				mc_filter[regidx] |= (1 << bit);
			}

			for (i = 0; i < MC_HASH_SIZE; i++)
				REG_WR(bp, MC_HASH_OFFSET(bp, i),
				       mc_filter[i]);
		}
	}


	/* cache the mode and push it to the storm processors */
	bp->rx_mode = rx_mode;
	bnx2x_set_storm_rx_mode(bp);
}
7551
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02007552
Yaniv Rosnerc18487e2008-06-23 20:27:52 -07007553/* called with rtnl_lock */
Eilon Greenstein01cd4522009-08-12 08:23:08 +00007554static int bnx2x_mdio_read(struct net_device *netdev, int prtad,
7555 int devad, u16 addr)
7556{
7557 struct bnx2x *bp = netdev_priv(netdev);
7558 u16 value;
7559 int rc;
Eilon Greenstein01cd4522009-08-12 08:23:08 +00007560
7561 DP(NETIF_MSG_LINK, "mdio_read: prtad 0x%x, devad 0x%x, addr 0x%x\n",
7562 prtad, devad, addr);
7563
Eilon Greenstein01cd4522009-08-12 08:23:08 +00007564 /* The HW expects different devad if CL22 is used */
7565 devad = (devad == MDIO_DEVAD_NONE) ? DEFAULT_PHY_DEV_ADDR : devad;
7566
7567 bnx2x_acquire_phy_lock(bp);
Yaniv Rosnere10bc842010-09-07 11:40:50 +00007568 rc = bnx2x_phy_read(&bp->link_params, prtad, devad, addr, &value);
Eilon Greenstein01cd4522009-08-12 08:23:08 +00007569 bnx2x_release_phy_lock(bp);
7570 DP(NETIF_MSG_LINK, "mdio_read_val 0x%x rc = 0x%x\n", value, rc);
7571
7572 if (!rc)
7573 rc = value;
7574 return rc;
7575}
7576
7577/* called with rtnl_lock */
7578static int bnx2x_mdio_write(struct net_device *netdev, int prtad, int devad,
7579 u16 addr, u16 value)
7580{
7581 struct bnx2x *bp = netdev_priv(netdev);
Eilon Greenstein01cd4522009-08-12 08:23:08 +00007582 int rc;
7583
7584 DP(NETIF_MSG_LINK, "mdio_write: prtad 0x%x, devad 0x%x, addr 0x%x,"
7585 " value 0x%x\n", prtad, devad, addr, value);
7586
Eilon Greenstein01cd4522009-08-12 08:23:08 +00007587 /* The HW expects different devad if CL22 is used */
7588 devad = (devad == MDIO_DEVAD_NONE) ? DEFAULT_PHY_DEV_ADDR : devad;
7589
7590 bnx2x_acquire_phy_lock(bp);
Yaniv Rosnere10bc842010-09-07 11:40:50 +00007591 rc = bnx2x_phy_write(&bp->link_params, prtad, devad, addr, value);
Eilon Greenstein01cd4522009-08-12 08:23:08 +00007592 bnx2x_release_phy_lock(bp);
7593 return rc;
7594}
7595
/* called with rtnl_lock */
/* .ndo_do_ioctl handler: dispatches MII ioctls (SIOCGMIIREG etc.) to the
 * generic MDIO layer, which calls back into bnx2x_mdio_read/write.
 * Returns -EAGAIN while the interface is down.
 */
static int bnx2x_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
{
	struct bnx2x *bp = netdev_priv(dev);
	struct mii_ioctl_data *mdio = if_mii(ifr);

	DP(NETIF_MSG_LINK, "ioctl: phy id 0x%x, reg 0x%x, val_in 0x%x\n",
	   mdio->phy_id, mdio->reg_num, mdio->val_in);

	/* PHY access requires a running (powered) device */
	if (!netif_running(dev))
		return -EAGAIN;

	return mdio_mii_ioctl(&bp->mdio, mdio, cmd);
}
7610
Alexey Dobriyan257ddbd2010-01-27 10:17:41 +00007611#ifdef CONFIG_NET_POLL_CONTROLLER
/* netpoll controller: with the IRQ line masked, invoke the interrupt
 * handler synchronously so netconsole/kgdboe can make progress without
 * interrupts enabled.
 */
static void poll_bnx2x(struct net_device *dev)
{
	struct bnx2x *bp = netdev_priv(dev);

	disable_irq(bp->pdev->irq);
	bnx2x_interrupt(bp->pdev->irq, dev);
	enable_irq(bp->pdev->irq);
}
7620#endif
7621
/* net_device_ops table wired up in bnx2x_init_dev(); VLAN acceleration
 * and netpoll entries are compiled in only when the corresponding
 * config options are set.
 */
static const struct net_device_ops bnx2x_netdev_ops = {
	.ndo_open		= bnx2x_open,
	.ndo_stop		= bnx2x_close,
	.ndo_start_xmit		= bnx2x_start_xmit,
	.ndo_set_multicast_list	= bnx2x_set_rx_mode,
	.ndo_set_mac_address	= bnx2x_change_mac_addr,
	.ndo_validate_addr	= eth_validate_addr,
	.ndo_do_ioctl		= bnx2x_ioctl,
	.ndo_change_mtu		= bnx2x_change_mtu,
	.ndo_tx_timeout		= bnx2x_tx_timeout,
#ifdef BCM_VLAN
	.ndo_vlan_rx_register	= bnx2x_vlan_rx_register,
#endif
#ifdef CONFIG_NET_POLL_CONTROLLER
	.ndo_poll_controller	= poll_bnx2x,
#endif
};
7639
/* One-time PCI/netdev bring-up for a probed device: enable the PCI
 * function, validate and map BAR0 (registers) and BAR2 (doorbells),
 * configure DMA masks, clear HW indirect-address state and populate the
 * net_device ops/feature flags and MDIO bookkeeping.
 *
 * Returns 0 on success or a negative errno; on failure everything
 * acquired so far is unwound via the err_out_* goto chain.
 * Note: region request/release and device disable are skipped when
 * another function of the same device already holds them
 * (enable_cnt > 1).
 */
static int __devinit bnx2x_init_dev(struct pci_dev *pdev,
				    struct net_device *dev)
{
	struct bnx2x *bp;
	int rc;

	SET_NETDEV_DEV(dev, &pdev->dev);
	bp = netdev_priv(dev);

	bp->dev = dev;
	bp->pdev = pdev;
	bp->flags = 0;
	bp->func = PCI_FUNC(pdev->devfn);

	rc = pci_enable_device(pdev);
	if (rc) {
		dev_err(&bp->pdev->dev,
			"Cannot enable PCI device, aborting\n");
		goto err_out;
	}

	/* BAR0 (registers) and BAR2 (doorbells) must both be MMIO */
	if (!(pci_resource_flags(pdev, 0) & IORESOURCE_MEM)) {
		dev_err(&bp->pdev->dev,
			"Cannot find PCI device base address, aborting\n");
		rc = -ENODEV;
		goto err_out_disable;
	}

	if (!(pci_resource_flags(pdev, 2) & IORESOURCE_MEM)) {
		dev_err(&bp->pdev->dev, "Cannot find second PCI device"
			" base address, aborting\n");
		rc = -ENODEV;
		goto err_out_disable;
	}

	/* first function to enable the device claims the regions */
	if (atomic_read(&pdev->enable_cnt) == 1) {
		rc = pci_request_regions(pdev, DRV_MODULE_NAME);
		if (rc) {
			dev_err(&bp->pdev->dev,
				"Cannot obtain PCI resources, aborting\n");
			goto err_out_disable;
		}

		pci_set_master(pdev);
		pci_save_state(pdev);
	}

	bp->pm_cap = pci_find_capability(pdev, PCI_CAP_ID_PM);
	if (bp->pm_cap == 0) {
		dev_err(&bp->pdev->dev,
			"Cannot find power management capability, aborting\n");
		rc = -EIO;
		goto err_out_release;
	}

	bp->pcie_cap = pci_find_capability(pdev, PCI_CAP_ID_EXP);
	if (bp->pcie_cap == 0) {
		dev_err(&bp->pdev->dev,
			"Cannot find PCI Express capability, aborting\n");
		rc = -EIO;
		goto err_out_release;
	}

	/* prefer 64-bit DMA (sets USING_DAC_FLAG for NETIF_F_HIGHDMA),
	 * fall back to 32-bit */
	if (dma_set_mask(&pdev->dev, DMA_BIT_MASK(64)) == 0) {
		bp->flags |= USING_DAC_FLAG;
		if (dma_set_coherent_mask(&pdev->dev, DMA_BIT_MASK(64)) != 0) {
			dev_err(&bp->pdev->dev, "dma_set_coherent_mask"
				" failed, aborting\n");
			rc = -EIO;
			goto err_out_release;
		}

	} else if (dma_set_mask(&pdev->dev, DMA_BIT_MASK(32)) != 0) {
		dev_err(&bp->pdev->dev,
			"System does not support DMA, aborting\n");
		rc = -EIO;
		goto err_out_release;
	}

	dev->mem_start = pci_resource_start(pdev, 0);
	dev->base_addr = dev->mem_start;
	dev->mem_end = pci_resource_end(pdev, 0);

	dev->irq = pdev->irq;

	bp->regview = pci_ioremap_bar(pdev, 0);
	if (!bp->regview) {
		dev_err(&bp->pdev->dev,
			"Cannot map register space, aborting\n");
		rc = -ENOMEM;
		goto err_out_release;
	}

	/* map no more doorbell space than the BAR actually provides */
	bp->doorbells = ioremap_nocache(pci_resource_start(pdev, 2),
					min_t(u64, BNX2X_DB_SIZE(bp),
					      pci_resource_len(pdev, 2)));
	if (!bp->doorbells) {
		dev_err(&bp->pdev->dev,
			"Cannot map doorbell space, aborting\n");
		rc = -ENOMEM;
		goto err_out_unmap;
	}

	bnx2x_set_power_state(bp, PCI_D0);

	/* clean indirect addresses */
	pci_write_config_dword(bp->pdev, PCICFG_GRC_ADDRESS,
			       PCICFG_VENDOR_ID_OFFSET);
	REG_WR(bp, PXP2_REG_PGL_ADDR_88_F0 + BP_PORT(bp)*16, 0);
	REG_WR(bp, PXP2_REG_PGL_ADDR_8C_F0 + BP_PORT(bp)*16, 0);
	REG_WR(bp, PXP2_REG_PGL_ADDR_90_F0 + BP_PORT(bp)*16, 0);
	REG_WR(bp, PXP2_REG_PGL_ADDR_94_F0 + BP_PORT(bp)*16, 0);

	/* Reset the load counter */
	bnx2x_clear_load_cnt(bp);

	dev->watchdog_timeo = TX_TIMEOUT;

	dev->netdev_ops = &bnx2x_netdev_ops;
	bnx2x_set_ethtool_ops(dev);
	dev->features |= NETIF_F_SG;
	dev->features |= NETIF_F_HW_CSUM;
	if (bp->flags & USING_DAC_FLAG)
		dev->features |= NETIF_F_HIGHDMA;
	dev->features |= (NETIF_F_TSO | NETIF_F_TSO_ECN);
	dev->features |= NETIF_F_TSO6;
#ifdef BCM_VLAN
	dev->features |= (NETIF_F_HW_VLAN_TX | NETIF_F_HW_VLAN_RX);
	bp->flags |= (HW_VLAN_RX_FLAG | HW_VLAN_TX_FLAG);

	/* mirror the offload capabilities for VLAN-tagged traffic */
	dev->vlan_features |= NETIF_F_SG;
	dev->vlan_features |= NETIF_F_HW_CSUM;
	if (bp->flags & USING_DAC_FLAG)
		dev->vlan_features |= NETIF_F_HIGHDMA;
	dev->vlan_features |= (NETIF_F_TSO | NETIF_F_TSO_ECN);
	dev->vlan_features |= NETIF_F_TSO6;
#endif

	/* get_port_hwinfo() will set prtad and mmds properly */
	bp->mdio.prtad = MDIO_PRTAD_NONE;
	bp->mdio.mmds = 0;
	bp->mdio.mode_support = MDIO_SUPPORTS_C45 | MDIO_EMULATE_C22;
	bp->mdio.dev = dev;
	bp->mdio.mdio_read = bnx2x_mdio_read;
	bp->mdio.mdio_write = bnx2x_mdio_write;

	return 0;

err_out_unmap:
	if (bp->regview) {
		iounmap(bp->regview);
		bp->regview = NULL;
	}
	if (bp->doorbells) {
		iounmap(bp->doorbells);
		bp->doorbells = NULL;
	}

err_out_release:
	if (atomic_read(&pdev->enable_cnt) == 1)
		pci_release_regions(pdev);

err_out_disable:
	pci_disable_device(pdev);
	pci_set_drvdata(pdev, NULL);

err_out:
	return rc;
}
7809
Eilon Greenstein37f9ce62009-08-12 08:23:34 +00007810static void __devinit bnx2x_get_pcie_width_speed(struct bnx2x *bp,
7811 int *width, int *speed)
Eliezer Tamir25047952008-02-28 11:50:16 -08007812{
7813 u32 val = REG_RD(bp, PCICFG_OFFSET + PCICFG_LINK_CONTROL);
7814
Eilon Greenstein37f9ce62009-08-12 08:23:34 +00007815 *width = (val & PCICFG_LINK_WIDTH) >> PCICFG_LINK_WIDTH_SHIFT;
7816
7817 /* return value of 1=2.5GHz 2=5GHz */
7818 *speed = (val & PCICFG_LINK_SPEED) >> PCICFG_LINK_SPEED_SHIFT;
Eliezer Tamir25047952008-02-28 11:50:16 -08007819}
7820
Dmitry Kravkov6891dd22010-08-03 21:49:40 +00007821static int bnx2x_check_firmware(struct bnx2x *bp)
Vladislav Zolotarov94a78b72009-04-27 03:27:43 -07007822{
Eilon Greenstein37f9ce62009-08-12 08:23:34 +00007823 const struct firmware *firmware = bp->firmware;
Vladislav Zolotarov94a78b72009-04-27 03:27:43 -07007824 struct bnx2x_fw_file_hdr *fw_hdr;
7825 struct bnx2x_fw_file_section *sections;
Vladislav Zolotarov94a78b72009-04-27 03:27:43 -07007826 u32 offset, len, num_ops;
Eilon Greenstein37f9ce62009-08-12 08:23:34 +00007827 u16 *ops_offsets;
Vladislav Zolotarov94a78b72009-04-27 03:27:43 -07007828 int i;
Eilon Greenstein37f9ce62009-08-12 08:23:34 +00007829 const u8 *fw_ver;
Vladislav Zolotarov94a78b72009-04-27 03:27:43 -07007830
7831 if (firmware->size < sizeof(struct bnx2x_fw_file_hdr))
7832 return -EINVAL;
7833
7834 fw_hdr = (struct bnx2x_fw_file_hdr *)firmware->data;
7835 sections = (struct bnx2x_fw_file_section *)fw_hdr;
7836
7837 /* Make sure none of the offsets and sizes make us read beyond
7838 * the end of the firmware data */
7839 for (i = 0; i < sizeof(*fw_hdr) / sizeof(*sections); i++) {
7840 offset = be32_to_cpu(sections[i].offset);
7841 len = be32_to_cpu(sections[i].len);
7842 if (offset + len > firmware->size) {
Vladislav Zolotarovcdaa7cb2010-04-19 01:13:57 +00007843 dev_err(&bp->pdev->dev,
7844 "Section %d length is out of bounds\n", i);
Vladislav Zolotarov94a78b72009-04-27 03:27:43 -07007845 return -EINVAL;
7846 }
7847 }
7848
7849 /* Likewise for the init_ops offsets */
7850 offset = be32_to_cpu(fw_hdr->init_ops_offsets.offset);
7851 ops_offsets = (u16 *)(firmware->data + offset);
7852 num_ops = be32_to_cpu(fw_hdr->init_ops.len) / sizeof(struct raw_op);
7853
7854 for (i = 0; i < be32_to_cpu(fw_hdr->init_ops_offsets.len) / 2; i++) {
7855 if (be16_to_cpu(ops_offsets[i]) > num_ops) {
Vladislav Zolotarovcdaa7cb2010-04-19 01:13:57 +00007856 dev_err(&bp->pdev->dev,
7857 "Section offset %d is out of bounds\n", i);
Vladislav Zolotarov94a78b72009-04-27 03:27:43 -07007858 return -EINVAL;
7859 }
7860 }
7861
7862 /* Check FW version */
7863 offset = be32_to_cpu(fw_hdr->fw_version.offset);
7864 fw_ver = firmware->data + offset;
7865 if ((fw_ver[0] != BCM_5710_FW_MAJOR_VERSION) ||
7866 (fw_ver[1] != BCM_5710_FW_MINOR_VERSION) ||
7867 (fw_ver[2] != BCM_5710_FW_REVISION_VERSION) ||
7868 (fw_ver[3] != BCM_5710_FW_ENGINEERING_VERSION)) {
Vladislav Zolotarovcdaa7cb2010-04-19 01:13:57 +00007869 dev_err(&bp->pdev->dev,
7870 "Bad FW version:%d.%d.%d.%d. Should be %d.%d.%d.%d\n",
Vladislav Zolotarov94a78b72009-04-27 03:27:43 -07007871 fw_ver[0], fw_ver[1], fw_ver[2],
7872 fw_ver[3], BCM_5710_FW_MAJOR_VERSION,
7873 BCM_5710_FW_MINOR_VERSION,
7874 BCM_5710_FW_REVISION_VERSION,
7875 BCM_5710_FW_ENGINEERING_VERSION);
Eilon Greensteinab6ad5a2009-08-12 08:24:29 +00007876 return -EINVAL;
Vladislav Zolotarov94a78b72009-04-27 03:27:43 -07007877 }
7878
7879 return 0;
7880}
7881
Eilon Greensteinab6ad5a2009-08-12 08:24:29 +00007882static inline void be32_to_cpu_n(const u8 *_source, u8 *_target, u32 n)
Vladislav Zolotarov94a78b72009-04-27 03:27:43 -07007883{
Eilon Greensteinab6ad5a2009-08-12 08:24:29 +00007884 const __be32 *source = (const __be32 *)_source;
7885 u32 *target = (u32 *)_target;
Vladislav Zolotarov94a78b72009-04-27 03:27:43 -07007886 u32 i;
Vladislav Zolotarov94a78b72009-04-27 03:27:43 -07007887
7888 for (i = 0; i < n/4; i++)
7889 target[i] = be32_to_cpu(source[i]);
7890}
7891
/*
 Ops array is stored in the following format:
 {op(8bit), offset(24bit, big endian), data(32bit, big endian)}
 */
/* Unpack the firmware init_ops section (_source, n bytes of big-endian
 * 32-bit pairs) into an array of struct raw_op at _target: each 8-byte
 * record yields one op/offset/raw_data entry.
 */
static inline void bnx2x_prep_ops(const u8 *_source, u8 *_target, u32 n)
{
	const __be32 *source = (const __be32 *)_source;
	struct raw_op *target = (struct raw_op *)_target;
	u32 i, j, tmp;

	/* i walks output records, j walks the two input words per record */
	for (i = 0, j = 0; i < n/8; i++, j += 2) {
		tmp = be32_to_cpu(source[j]);
		/* first word: opcode in the top byte, offset in the low 24 */
		target[i].op = (tmp >> 24) & 0xff;
		target[i].offset = tmp & 0xffffff;
		target[i].raw_data = be32_to_cpu(source[j + 1]);
	}
}
Eilon Greensteinab6ad5a2009-08-12 08:24:29 +00007909
/**
 * IRO array is stored in the following format:
 * {base(24bit), m1(16bit), m2(16bit), m3(16bit), size(16bit) }
 */
/* Unpack the firmware IRO section: each struct iro is encoded as three
 * big-endian 32-bit words (base; m1|m2; m3|size).
 */
static inline void bnx2x_prep_iro(const u8 *_source, u8 *_target, u32 n)
{
	const __be32 *source = (const __be32 *)_source;
	struct iro *target = (struct iro *)_target;
	u32 i, j, tmp;

	/* i counts output entries, j the input words consumed (3 each) */
	for (i = 0, j = 0; i < n/sizeof(struct iro); i++) {
		target[i].base = be32_to_cpu(source[j]);
		j++;
		/* second word holds m1 (high 16 bits) and m2 (low 16) */
		tmp = be32_to_cpu(source[j]);
		target[i].m1 = (tmp >> 16) & 0xffff;
		target[i].m2 = tmp & 0xffff;
		j++;
		/* third word holds m3 (high 16 bits) and size (low 16) */
		tmp = be32_to_cpu(source[j]);
		target[i].m3 = (tmp >> 16) & 0xffff;
		target[i].size = tmp & 0xffff;
		j++;
	}
}
7933
Eilon Greensteinab6ad5a2009-08-12 08:24:29 +00007934static inline void be16_to_cpu_n(const u8 *_source, u8 *_target, u32 n)
Vladislav Zolotarov94a78b72009-04-27 03:27:43 -07007935{
Eilon Greensteinab6ad5a2009-08-12 08:24:29 +00007936 const __be16 *source = (const __be16 *)_source;
7937 u16 *target = (u16 *)_target;
Vladislav Zolotarov94a78b72009-04-27 03:27:43 -07007938 u32 i;
Vladislav Zolotarov94a78b72009-04-27 03:27:43 -07007939
7940 for (i = 0; i < n/2; i++)
7941 target[i] = be16_to_cpu(source[i]);
7942}
7943
/* Allocate bp->arr sized per the firmware header, convert the raw
 * firmware section into it with @func, and jump to @lbl on allocation
 * failure. Relies on the locals `bp` and `fw_hdr` of the caller; only
 * meant for use inside bnx2x_init_firmware(). */
#define BNX2X_ALLOC_AND_SET(arr, lbl, func) \
do { \
	u32 len = be32_to_cpu(fw_hdr->arr.len); \
	bp->arr = kmalloc(len, GFP_KERNEL); \
	if (!bp->arr) { \
		pr_err("Failed to allocate %d bytes for "#arr"\n", len); \
		goto lbl; \
	} \
	func(bp->firmware->data + be32_to_cpu(fw_hdr->arr.offset), \
	     (u8 *)bp->arr, len); \
} while (0)
Vladislav Zolotarov94a78b72009-04-27 03:27:43 -07007955
/* Load and parse the chip-specific firmware image: request the blob,
 * validate it, byte-swap/unpack the init data, opcode and offset arrays
 * into freshly allocated bp->init_* buffers, and point the per-STORM
 * INIT_*_DATA accessors directly into the blob.
 *
 * Returns 0 on success or a negative errno. On any failure the goto
 * chain frees whatever was allocated so far (in reverse order) and
 * releases the firmware; note the BNX2X_ALLOC_AND_SET macro jumps to
 * these labels on allocation failure.
 */
int bnx2x_init_firmware(struct bnx2x *bp)
{
	const char *fw_file_name;
	struct bnx2x_fw_file_hdr *fw_hdr;
	int rc;

	/* pick the image matching the chip revision */
	if (CHIP_IS_E1(bp))
		fw_file_name = FW_FILE_NAME_E1;
	else if (CHIP_IS_E1H(bp))
		fw_file_name = FW_FILE_NAME_E1H;
	else {
		BNX2X_ERR("Unsupported chip revision\n");
		return -EINVAL;
	}

	BNX2X_DEV_INFO("Loading %s\n", fw_file_name);

	rc = request_firmware(&bp->firmware, fw_file_name, &bp->pdev->dev);
	if (rc) {
		BNX2X_ERR("Can't load firmware file %s\n", fw_file_name);
		goto request_firmware_exit;
	}

	rc = bnx2x_check_firmware(bp);
	if (rc) {
		BNX2X_ERR("Corrupt firmware file %s\n", fw_file_name);
		goto request_firmware_exit;
	}

	fw_hdr = (struct bnx2x_fw_file_hdr *)bp->firmware->data;

	/* Initialize the pointers to the init arrays */
	/* Blob */
	BNX2X_ALLOC_AND_SET(init_data, request_firmware_exit, be32_to_cpu_n);

	/* Opcodes */
	BNX2X_ALLOC_AND_SET(init_ops, init_ops_alloc_err, bnx2x_prep_ops);

	/* Offsets */
	BNX2X_ALLOC_AND_SET(init_ops_offsets, init_offsets_alloc_err,
			    be16_to_cpu_n);

	/* STORMs firmware: these point into the blob itself, no copy */
	INIT_TSEM_INT_TABLE_DATA(bp) = bp->firmware->data +
			be32_to_cpu(fw_hdr->tsem_int_table_data.offset);
	INIT_TSEM_PRAM_DATA(bp) = bp->firmware->data +
			be32_to_cpu(fw_hdr->tsem_pram_data.offset);
	INIT_USEM_INT_TABLE_DATA(bp) = bp->firmware->data +
			be32_to_cpu(fw_hdr->usem_int_table_data.offset);
	INIT_USEM_PRAM_DATA(bp) = bp->firmware->data +
			be32_to_cpu(fw_hdr->usem_pram_data.offset);
	INIT_XSEM_INT_TABLE_DATA(bp) = bp->firmware->data +
			be32_to_cpu(fw_hdr->xsem_int_table_data.offset);
	INIT_XSEM_PRAM_DATA(bp) = bp->firmware->data +
			be32_to_cpu(fw_hdr->xsem_pram_data.offset);
	INIT_CSEM_INT_TABLE_DATA(bp) = bp->firmware->data +
			be32_to_cpu(fw_hdr->csem_int_table_data.offset);
	INIT_CSEM_PRAM_DATA(bp) = bp->firmware->data +
			be32_to_cpu(fw_hdr->csem_pram_data.offset);
	/* IRO */
	BNX2X_ALLOC_AND_SET(iro_arr, iro_alloc_err, bnx2x_prep_iro);

	return 0;

iro_alloc_err:
	kfree(bp->init_ops_offsets);
init_offsets_alloc_err:
	kfree(bp->init_ops);
init_ops_alloc_err:
	kfree(bp->init_data);
request_firmware_exit:
	release_firmware(bp->firmware);

	return rc;
}
8031
/* Compute the QM connection-ID count for l2_cid_count L2 CIDs:
 * reserve the fastpath CIDs (plus the CNIC range when compiled in) and
 * round up to the QM allocation granularity.
 */
static inline int bnx2x_set_qm_cid_count(struct bnx2x *bp, int l2_cid_count)
{
	int cid_count = L2_FP_COUNT(l2_cid_count);

#ifdef BCM_CNIC
	cid_count += CNIC_CID_MAX;
#endif
	return roundup(cid_count, QM_CID_ROUND);
}
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02008041static int __devinit bnx2x_init_one(struct pci_dev *pdev,
8042 const struct pci_device_id *ent)
8043{
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02008044 struct net_device *dev = NULL;
8045 struct bnx2x *bp;
Eilon Greenstein37f9ce62009-08-12 08:23:34 +00008046 int pcie_width, pcie_speed;
Dmitry Kravkov523224a2010-10-06 03:23:26 +00008047 int rc, cid_count;
8048
8049 cid_count = FP_SB_MAX_E1x + CNIC_CONTEXT_USE;
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02008050
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02008051 /* dev zeroed in init_etherdev */
Dmitry Kravkov523224a2010-10-06 03:23:26 +00008052 dev = alloc_etherdev_mq(sizeof(*bp), cid_count);
Eilon Greenstein34f80b02008-06-23 20:33:01 -07008053 if (!dev) {
Vladislav Zolotarovcdaa7cb2010-04-19 01:13:57 +00008054 dev_err(&pdev->dev, "Cannot allocate net device\n");
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02008055 return -ENOMEM;
Eilon Greenstein34f80b02008-06-23 20:33:01 -07008056 }
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02008057
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02008058 bp = netdev_priv(dev);
Joe Perches7995c642010-02-17 15:01:52 +00008059 bp->msg_enable = debug;
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02008060
Eilon Greensteindf4770de2009-08-12 08:23:28 +00008061 pci_set_drvdata(pdev, dev);
8062
Dmitry Kravkov523224a2010-10-06 03:23:26 +00008063 bp->l2_cid_count = cid_count;
8064
Eilon Greenstein34f80b02008-06-23 20:33:01 -07008065 rc = bnx2x_init_dev(pdev, dev);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02008066 if (rc < 0) {
8067 free_netdev(dev);
8068 return rc;
8069 }
8070
Eilon Greenstein34f80b02008-06-23 20:33:01 -07008071 rc = bnx2x_init_bp(bp);
Eilon Greenstein693fc0d2009-01-14 06:43:52 +00008072 if (rc)
8073 goto init_one_exit;
8074
Dmitry Kravkov523224a2010-10-06 03:23:26 +00008075 /* calc qm_cid_count */
8076 bp->qm_cid_count = bnx2x_set_qm_cid_count(bp, cid_count);
8077
Eilon Greenstein693fc0d2009-01-14 06:43:52 +00008078 rc = register_netdev(dev);
Eilon Greenstein34f80b02008-06-23 20:33:01 -07008079 if (rc) {
Eilon Greenstein693fc0d2009-01-14 06:43:52 +00008080 dev_err(&pdev->dev, "Cannot register net device\n");
Eilon Greenstein34f80b02008-06-23 20:33:01 -07008081 goto init_one_exit;
8082 }
8083
Eilon Greenstein37f9ce62009-08-12 08:23:34 +00008084 bnx2x_get_pcie_width_speed(bp, &pcie_width, &pcie_speed);
Vladislav Zolotarovcdaa7cb2010-04-19 01:13:57 +00008085 netdev_info(dev, "%s (%c%d) PCI-E x%d %s found at mem %lx,"
8086 " IRQ %d, ", board_info[ent->driver_data].name,
8087 (CHIP_REV(bp) >> 12) + 'A', (CHIP_METAL(bp) >> 4),
8088 pcie_width, (pcie_speed == 2) ? "5GHz (Gen2)" : "2.5GHz",
8089 dev->base_addr, bp->pdev->irq);
8090 pr_cont("node addr %pM\n", dev->dev_addr);
Eilon Greensteinc0162012009-03-02 08:01:05 +00008091
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02008092 return 0;
Eilon Greenstein34f80b02008-06-23 20:33:01 -07008093
8094init_one_exit:
8095 if (bp->regview)
8096 iounmap(bp->regview);
8097
8098 if (bp->doorbells)
8099 iounmap(bp->doorbells);
8100
8101 free_netdev(dev);
8102
8103 if (atomic_read(&pdev->enable_cnt) == 1)
8104 pci_release_regions(pdev);
8105
8106 pci_disable_device(pdev);
8107 pci_set_drvdata(pdev, NULL);
8108
8109 return rc;
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02008110}
8111
/* PCI remove entry point: unregister the netdev, cancel pending
 * recovery work, unmap BARs and release all PCI resources acquired at
 * probe time (mirrors bnx2x_init_dev/bnx2x_init_one).
 */
static void __devexit bnx2x_remove_one(struct pci_dev *pdev)
{
	struct net_device *dev = pci_get_drvdata(pdev);
	struct bnx2x *bp;

	if (!dev) {
		dev_err(&pdev->dev, "BAD net device from bnx2x_init_one\n");
		return;
	}
	bp = netdev_priv(dev);

	unregister_netdev(dev);

	/* Make sure RESET task is not scheduled before continuing */
	cancel_delayed_work_sync(&bp->reset_task);

	if (bp->regview)
		iounmap(bp->regview);

	if (bp->doorbells)
		iounmap(bp->doorbells);

	bnx2x_free_mem_bp(bp);

	free_netdev(dev);

	/* region/device teardown only for the last enabled function */
	if (atomic_read(&pdev->enable_cnt) == 1)
		pci_release_regions(pdev);

	pci_disable_device(pdev);
	pci_set_drvdata(pdev, NULL);
}
8144
/* Minimal NIC unload used on an EEH (PCI error) event: stop all traffic
 * and timers, release IRQs and free RX/TX resources WITHOUT touching the
 * (possibly dead) hardware. Leaves bp in BNX2X_STATE_CLOSED.
 * Always returns 0.
 */
static int bnx2x_eeh_nic_unload(struct bnx2x *bp)
{
	int i;

	bp->state = BNX2X_STATE_ERROR;

	bp->rx_mode = BNX2X_RX_MODE_NONE;

	bnx2x_netif_stop(bp, 0);
	netif_carrier_off(bp->dev);

	del_timer_sync(&bp->timer);
	bp->stats_state = STATS_STATE_DISABLED;
	DP(BNX2X_MSG_STATS, "stats_state - DISABLED\n");

	/* Release IRQs */
	bnx2x_free_irq(bp, false);

	/* Free SKBs, SGEs, TPA pool and driver internals */
	bnx2x_free_skbs(bp);

	for_each_queue(bp, i)
		bnx2x_free_rx_sge_range(bp, bp->fp + i, NUM_RX_SGE);
	for_each_queue(bp, i)
		netif_napi_del(&bnx2x_fp(bp, i, napi));
	bnx2x_free_mem(bp);

	bp->state = BNX2X_STATE_CLOSED;

	return 0;
}
8176
/* Re-establish the driver's view of the management CPU (MCP) after an
 * EEH reset: relocate the shared memory base, validate its signature
 * and resynchronize the firmware mailbox sequence number. If shmem is
 * outside the expected window the MCP is treated as inactive
 * (NO_MCP_FLAG).
 */
static void bnx2x_eeh_recover(struct bnx2x *bp)
{
	u32 val;

	mutex_init(&bp->port.phy_mutex);

	bp->common.shmem_base = REG_RD(bp, MISC_REG_SHARED_MEM_ADDR);
	bp->link_params.shmem_base = bp->common.shmem_base;
	BNX2X_DEV_INFO("shmem offset is 0x%x\n", bp->common.shmem_base);

	/* shmem must fall inside the 0xA0000-0xBFFFF window */
	if (!bp->common.shmem_base ||
	    (bp->common.shmem_base < 0xA0000) ||
	    (bp->common.shmem_base >= 0xC0000)) {
		BNX2X_DEV_INFO("MCP not active\n");
		bp->flags |= NO_MCP_FLAG;
		return;
	}

	val = SHMEM_RD(bp, validity_map[BP_PORT(bp)]);
	if ((val & (SHR_MEM_VALIDITY_DEV_INFO | SHR_MEM_VALIDITY_MB))
		!= (SHR_MEM_VALIDITY_DEV_INFO | SHR_MEM_VALIDITY_MB))
		BNX2X_ERR("BAD MCP validity signature\n");

	if (!BP_NOMCP(bp)) {
		/* resume the driver<->MCP mailbox handshake where FW left it */
		bp->fw_seq = (SHMEM_RD(bp, func_mb[BP_FUNC(bp)].drv_mb_header)
			      & DRV_MSG_SEQ_NUMBER_MASK);
		BNX2X_DEV_INFO("fw_seq 0x%08x\n", bp->fw_seq);
	}
}
8206
/**
 * bnx2x_io_error_detected - called when PCI error is detected
 * @pdev: Pointer to PCI device
 * @state: The current pci connection state
 *
 * This function is called after a PCI bus error affecting
 * this device has been detected.
 *
 * Returns PCI_ERS_RESULT_DISCONNECT on a permanent failure, otherwise
 * unloads the NIC (if running), disables the device and asks the PCI
 * core for a slot reset (PCI_ERS_RESULT_NEED_RESET).
 */
static pci_ers_result_t bnx2x_io_error_detected(struct pci_dev *pdev,
						pci_channel_state_t state)
{
	struct net_device *dev = pci_get_drvdata(pdev);
	struct bnx2x *bp = netdev_priv(dev);

	rtnl_lock();

	netif_device_detach(dev);

	/* permanent failure: nothing to recover, tell the core to give up */
	if (state == pci_channel_io_perm_failure) {
		rtnl_unlock();
		return PCI_ERS_RESULT_DISCONNECT;
	}

	if (netif_running(dev))
		bnx2x_eeh_nic_unload(bp);

	pci_disable_device(pdev);

	rtnl_unlock();

	/* Request a slot reset */
	return PCI_ERS_RESULT_NEED_RESET;
}
8240
/**
 * bnx2x_io_slot_reset - called after the PCI bus has been reset
 * @pdev: Pointer to PCI device
 *
 * Restart the card from scratch, as if from a cold-boot.
 *
 * Returns PCI_ERS_RESULT_DISCONNECT if the device cannot be re-enabled,
 * otherwise PCI_ERS_RESULT_RECOVERED.
 */
static pci_ers_result_t bnx2x_io_slot_reset(struct pci_dev *pdev)
{
	struct net_device *dev = pci_get_drvdata(pdev);
	struct bnx2x *bp = netdev_priv(dev);

	rtnl_lock();

	if (pci_enable_device(pdev)) {
		dev_err(&pdev->dev,
			"Cannot re-enable PCI device after reset\n");
		rtnl_unlock();
		return PCI_ERS_RESULT_DISCONNECT;
	}

	pci_set_master(pdev);
	/* Restore the config space saved earlier by the driver */
	pci_restore_state(pdev);

	if (netif_running(dev))
		bnx2x_set_power_state(bp, PCI_D0);

	rtnl_unlock();

	return PCI_ERS_RESULT_RECOVERED;
}
8271
/**
 * bnx2x_io_resume - called when traffic can start flowing again
 * @pdev: Pointer to PCI device
 *
 * This callback is called when the error recovery driver tells us that
 * its OK to resume normal operation.
 */
static void bnx2x_io_resume(struct pci_dev *pdev)
{
	struct net_device *dev = pci_get_drvdata(pdev);
	struct bnx2x *bp = netdev_priv(dev);

	/* Don't re-load the device while a parity-error recovery flow
	 * is still running; the user can retry once it completes */
	if (bp->recovery_state != BNX2X_RECOVERY_DONE) {
		printk(KERN_ERR "Handling parity error recovery. Try again later\n");
		return;
	}

	rtnl_lock();

	/* Re-read MCP/shmem state that was lost across the bus reset */
	bnx2x_eeh_recover(bp);

	if (netif_running(dev))
		bnx2x_nic_load(bp, LOAD_NORMAL);

	/* Let the stack use the device again */
	netif_device_attach(dev);

	rtnl_unlock();
}
8300
/* PCI error-recovery (AER/EEH) callbacks for this driver */
static struct pci_error_handlers bnx2x_err_handler = {
	.error_detected = bnx2x_io_error_detected,
	.slot_reset = bnx2x_io_slot_reset,
	.resume = bnx2x_io_resume,
};
8306
/* PCI driver descriptor binding bnx2x devices to probe/remove,
 * power management and error-recovery entry points */
static struct pci_driver bnx2x_pci_driver = {
	.name = DRV_MODULE_NAME,
	.id_table = bnx2x_pci_tbl,
	.probe = bnx2x_init_one,
	.remove = __devexit_p(bnx2x_remove_one),
	.suspend = bnx2x_suspend,
	.resume = bnx2x_resume,
	.err_handler = &bnx2x_err_handler,
};
8316
8317static int __init bnx2x_init(void)
8318{
Stanislaw Gruszkadd21ca62009-05-05 23:22:01 +00008319 int ret;
8320
Joe Perches7995c642010-02-17 15:01:52 +00008321 pr_info("%s", version);
Eilon Greenstein938cf542009-08-12 08:23:37 +00008322
Eilon Greenstein1cf167f2009-01-14 21:22:18 -08008323 bnx2x_wq = create_singlethread_workqueue("bnx2x");
8324 if (bnx2x_wq == NULL) {
Joe Perches7995c642010-02-17 15:01:52 +00008325 pr_err("Cannot create workqueue\n");
Eilon Greenstein1cf167f2009-01-14 21:22:18 -08008326 return -ENOMEM;
8327 }
8328
Stanislaw Gruszkadd21ca62009-05-05 23:22:01 +00008329 ret = pci_register_driver(&bnx2x_pci_driver);
8330 if (ret) {
Joe Perches7995c642010-02-17 15:01:52 +00008331 pr_err("Cannot register driver\n");
Stanislaw Gruszkadd21ca62009-05-05 23:22:01 +00008332 destroy_workqueue(bnx2x_wq);
8333 }
8334 return ret;
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02008335}
8336
/* Module exit point: unregister from the PCI core (removing all devices)
 * and only then destroy the shared workqueue they were using. */
static void __exit bnx2x_cleanup(void)
{
	pci_unregister_driver(&bnx2x_pci_driver);

	destroy_workqueue(bnx2x_wq);
}
8343
/* Hook the driver's init/exit routines into module load/unload */
module_init(bnx2x_init);
module_exit(bnx2x_cleanup);
8346
Michael Chan993ac7b2009-10-10 13:46:56 +00008347#ifdef BCM_CNIC
8348
/* count denotes the number of new completions we have seen */
static void bnx2x_cnic_sp_post(struct bnx2x *bp, int count)
{
	struct eth_spe *spe;

#ifdef BNX2X_STOP_ON_ERROR
	if (unlikely(bp->panic))
		return;
#endif

	spin_lock_bh(&bp->spq_lock);
	/* Each completion frees one CNIC slot on the slow-path queue */
	bp->cnic_spq_pending -= count;

	/* Drain queued CNIC kwqes onto the SPQ while there is room
	 * (bounded by max_kwqe_pending) and kwqes are waiting */
	for (; bp->cnic_spq_pending < bp->cnic_eth_dev.max_kwqe_pending;
	       bp->cnic_spq_pending++) {

		if (!bp->cnic_kwq_pending)
			break;

		/* Copy the next kwqe into the next free SPQ element */
		spe = bnx2x_sp_get_next(bp);
		*spe = *bp->cnic_kwq_cons;

		bp->cnic_kwq_pending--;

		DP(NETIF_MSG_TIMER, "pending on SPQ %d, on KWQ %d count %d\n",
		   bp->cnic_spq_pending, bp->cnic_kwq_pending, count);

		/* Advance the kwq consumer, wrapping at the ring end */
		if (bp->cnic_kwq_cons == bp->cnic_kwq_last)
			bp->cnic_kwq_cons = bp->cnic_kwq;
		else
			bp->cnic_kwq_cons++;
	}
	/* Publish the new SPQ producer value to the chip */
	bnx2x_sp_prod_update(bp);
	spin_unlock_bh(&bp->spq_lock);
}
8384
/* Accept up to @count 16-byte kwqes from the CNIC driver into the local
 * kwq shadow ring, then kick the SPQ if it has room.  Returns the number
 * of kwqes actually accepted (may be less than @count if the ring fills),
 * or -EIO when the device has paniced (debug builds only). */
static int bnx2x_cnic_sp_queue(struct net_device *dev,
			       struct kwqe_16 *kwqes[], u32 count)
{
	struct bnx2x *bp = netdev_priv(dev);
	int i;

#ifdef BNX2X_STOP_ON_ERROR
	if (unlikely(bp->panic))
		return -EIO;
#endif

	spin_lock_bh(&bp->spq_lock);

	for (i = 0; i < count; i++) {
		struct eth_spe *spe = (struct eth_spe *)kwqes[i];

		/* Ring full - accept only what fits */
		if (bp->cnic_kwq_pending == MAX_SP_DESC_CNT)
			break;

		*bp->cnic_kwq_prod = *spe;

		bp->cnic_kwq_pending++;

		DP(NETIF_MSG_TIMER, "L5 SPQE %x %x %x:%x pos %d\n",
		   spe->hdr.conn_and_cmd_data, spe->hdr.type,
		   spe->data.update_data_addr.hi,
		   spe->data.update_data_addr.lo,
		   bp->cnic_kwq_pending);

		/* Advance the kwq producer, wrapping at the ring end */
		if (bp->cnic_kwq_prod == bp->cnic_kwq_last)
			bp->cnic_kwq_prod = bp->cnic_kwq;
		else
			bp->cnic_kwq_prod++;
	}

	spin_unlock_bh(&bp->spq_lock);

	/* If the SPQ has free CNIC slots, push queued kwqes right away
	 * (count of 0: no new completions, just drain the kwq) */
	if (bp->cnic_spq_pending < bp->cnic_eth_dev.max_kwqe_pending)
		bnx2x_cnic_sp_post(bp, 0);

	return i;
}
8427
8428static int bnx2x_cnic_ctl_send(struct bnx2x *bp, struct cnic_ctl_info *ctl)
8429{
8430 struct cnic_ops *c_ops;
8431 int rc = 0;
8432
8433 mutex_lock(&bp->cnic_mutex);
8434 c_ops = bp->cnic_ops;
8435 if (c_ops)
8436 rc = c_ops->cnic_ctl(bp->cnic_data, ctl);
8437 mutex_unlock(&bp->cnic_mutex);
8438
8439 return rc;
8440}
8441
8442static int bnx2x_cnic_ctl_send_bh(struct bnx2x *bp, struct cnic_ctl_info *ctl)
8443{
8444 struct cnic_ops *c_ops;
8445 int rc = 0;
8446
8447 rcu_read_lock();
8448 c_ops = rcu_dereference(bp->cnic_ops);
8449 if (c_ops)
8450 rc = c_ops->cnic_ctl(bp->cnic_data, ctl);
8451 rcu_read_unlock();
8452
8453 return rc;
8454}
8455
8456/*
8457 * for commands that have no data
8458 */
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00008459int bnx2x_cnic_notify(struct bnx2x *bp, int cmd)
Michael Chan993ac7b2009-10-10 13:46:56 +00008460{
8461 struct cnic_ctl_info ctl = {0};
8462
8463 ctl.cmd = cmd;
8464
8465 return bnx2x_cnic_ctl_send(bp, &ctl);
8466}
8467
8468static void bnx2x_cnic_cfc_comp(struct bnx2x *bp, int cid)
8469{
8470 struct cnic_ctl_info ctl;
8471
8472 /* first we tell CNIC and only then we count this as a completion */
8473 ctl.cmd = CNIC_CTL_COMPLETION_CMD;
8474 ctl.data.comp.cid = cid;
8475
8476 bnx2x_cnic_ctl_send_bh(bp, &ctl);
8477 bnx2x_cnic_sp_post(bp, 1);
8478}
8479
/* Handle control commands issued by the CNIC driver (context-table
 * writes, SPQ completion accounting, iSCSI L2 ring start/stop).
 * Returns 0 on success or -EINVAL for an unknown command. */
static int bnx2x_drv_ctl(struct net_device *dev, struct drv_ctl_info *ctl)
{
	struct bnx2x *bp = netdev_priv(dev);
	int rc = 0;

	switch (ctl->cmd) {
	case DRV_CTL_CTXTBL_WR_CMD: {
		/* Write one CNIC context-table (ILT) entry */
		u32 index = ctl->data.io.offset;
		dma_addr_t addr = ctl->data.io.dma_addr;

		bnx2x_ilt_wr(bp, index, addr);
		break;
	}

	case DRV_CTL_COMPLETION_CMD: {
		/* CNIC reports completions; free SPQ slots and drain kwq */
		int count = ctl->data.comp.comp_count;

		bnx2x_cnic_sp_post(bp, count);
		break;
	}

	/* rtnl_lock is held. */
	case DRV_CTL_START_L2_CMD: {
		u32 cli = ctl->data.ring.client_id;

		/* Set iSCSI MAC address */
		bnx2x_set_iscsi_eth_mac_addr(bp, 1);

		/* Order the MAC setup before opening the RX filters */
		mmiowb();
		barrier();

		/* Start accepting on iSCSI L2 ring. Accept all multicasts
		 * because it's the only way for UIO Client to accept
		 * multicasts (in non-promiscuous mode only one Client per
		 * function will receive multicast packets (leading in our
		 * case).
		 */
		bnx2x_rxq_set_mac_filters(bp, cli,
			BNX2X_ACCEPT_UNICAST |
			BNX2X_ACCEPT_BROADCAST |
			BNX2X_ACCEPT_ALL_MULTICAST);
		storm_memset_mac_filters(bp, &bp->mac_filters, BP_FUNC(bp));

		break;
	}

	/* rtnl_lock is held. */
	case DRV_CTL_STOP_L2_CMD: {
		u32 cli = ctl->data.ring.client_id;

		/* Stop accepting on iSCSI L2 ring */
		bnx2x_rxq_set_mac_filters(bp, cli, BNX2X_ACCEPT_NONE);
		storm_memset_mac_filters(bp, &bp->mac_filters, BP_FUNC(bp));

		/* Order the filter close before removing the MAC */
		mmiowb();
		barrier();

		/* Unset iSCSI L2 MAC */
		bnx2x_set_iscsi_eth_mac_addr(bp, 0);
		break;
	}

	default:
		BNX2X_ERR("unknown command %x\n", ctl->cmd);
		rc = -EINVAL;
	}

	return rc;
}
8549
/* Describe to the CNIC driver which interrupt vectors and status blocks
 * it should use: irq_arr[0] is the dedicated CNIC status block (MSI-X
 * vector 1 when MSI-X is in use), irq_arr[1] is the default status block. */
void bnx2x_setup_cnic_irq_info(struct bnx2x *bp)
{
	struct cnic_eth_dev *cp = &bp->cnic_eth_dev;

	if (bp->flags & USING_MSIX_FLAG) {
		cp->drv_state |= CNIC_DRV_STATE_USING_MSIX;
		cp->irq_arr[0].irq_flags |= CNIC_IRQ_FL_MSIX;
		/* msix_table[1] is the vector reserved for CNIC */
		cp->irq_arr[0].vector = bp->msix_table[1].vector;
	} else {
		cp->drv_state &= ~CNIC_DRV_STATE_USING_MSIX;
		cp->irq_arr[0].irq_flags &= ~CNIC_IRQ_FL_MSIX;
	}
	cp->irq_arr[0].status_blk = (void *)bp->cnic_sb.e1x_sb;
	cp->irq_arr[0].status_blk_num = CNIC_SB_ID(bp);
	cp->irq_arr[0].status_blk_num2 = CNIC_IGU_SB_ID(bp);
	cp->irq_arr[1].status_blk = bp->def_status_blk;
	cp->irq_arr[1].status_blk_num = DEF_SB_ID;
	cp->irq_arr[1].status_blk_num2 = DEF_SB_IGU_ID;

	cp->num_irq = 2;
}
8571
/* Called by the CNIC driver to attach to this device.  Allocates the kwq
 * shadow ring, initializes the CNIC status block and irq info, installs
 * the iSCSI MAC, and finally publishes @ops/@data via RCU.
 * Returns 0 on success, -EINVAL for NULL ops, -EBUSY while interrupts
 * are still disabled, or -ENOMEM on allocation failure. */
static int bnx2x_register_cnic(struct net_device *dev, struct cnic_ops *ops,
			       void *data)
{
	struct bnx2x *bp = netdev_priv(dev);
	struct cnic_eth_dev *cp = &bp->cnic_eth_dev;

	if (ops == NULL)
		return -EINVAL;

	/* Device interrupts not yet enabled - refuse registration */
	if (atomic_read(&bp->intr_sem) != 0)
		return -EBUSY;

	bp->cnic_kwq = kzalloc(PAGE_SIZE, GFP_KERNEL);
	if (!bp->cnic_kwq)
		return -ENOMEM;

	/* Empty ring: cons == prod; last marks the wrap point */
	bp->cnic_kwq_cons = bp->cnic_kwq;
	bp->cnic_kwq_prod = bp->cnic_kwq;
	bp->cnic_kwq_last = bp->cnic_kwq + MAX_SP_DESC_CNT;

	bp->cnic_spq_pending = 0;
	bp->cnic_kwq_pending = 0;

	bp->cnic_data = data;

	cp->num_irq = 0;
	cp->drv_state = CNIC_DRV_STATE_REGD;
	cp->iro_arr = bp->iro_arr;

	bnx2x_init_sb(bp, bp->cnic_sb_mapping,
		      BNX2X_VF_ID_INVALID, false,
		      CNIC_SB_ID(bp), CNIC_IGU_SB_ID(bp));

	bnx2x_setup_cnic_irq_info(bp);
	bnx2x_set_iscsi_eth_mac_addr(bp, 1);
	bp->cnic_flags |= BNX2X_CNIC_FLAG_MAC_SET;
	/* Publish ops last so RCU readers only see fully set-up state */
	rcu_assign_pointer(bp->cnic_ops, ops);

	return 0;
}
8612
/* Called by the CNIC driver to detach from this device.  Removes the
 * iSCSI MAC, unpublishes cnic_ops, then waits for in-flight RCU readers
 * (bnx2x_cnic_ctl_send_bh) before freeing the kwq ring.  Returns 0. */
static int bnx2x_unregister_cnic(struct net_device *dev)
{
	struct bnx2x *bp = netdev_priv(dev);
	struct cnic_eth_dev *cp = &bp->cnic_eth_dev;

	mutex_lock(&bp->cnic_mutex);
	if (bp->cnic_flags & BNX2X_CNIC_FLAG_MAC_SET) {
		bp->cnic_flags &= ~BNX2X_CNIC_FLAG_MAC_SET;
		bnx2x_set_iscsi_eth_mac_addr(bp, 0);
	}
	cp->drv_state = 0;
	rcu_assign_pointer(bp->cnic_ops, NULL);
	mutex_unlock(&bp->cnic_mutex);
	/* Make sure no reader still holds the old cnic_ops pointer */
	synchronize_rcu();
	kfree(bp->cnic_kwq);
	bp->cnic_kwq = NULL;

	return 0;
}
8632
/* Export this device's parameters and callbacks to the CNIC driver;
 * called during CNIC probe.  Returns the per-device cnic_eth_dev
 * embedded in struct bnx2x (never NULL). */
struct cnic_eth_dev *bnx2x_cnic_probe(struct net_device *dev)
{
	struct bnx2x *bp = netdev_priv(dev);
	struct cnic_eth_dev *cp = &bp->cnic_eth_dev;

	cp->drv_owner = THIS_MODULE;
	cp->chip_id = CHIP_ID(bp);
	cp->pdev = bp->pdev;
	/* BAR mappings CNIC uses for register and doorbell access */
	cp->io_base = bp->regview;
	cp->io_base2 = bp->doorbells;
	cp->max_kwqe_pending = 8;
	/* Context table geometry: CNIC's ILT lines follow the L2 ones */
	cp->ctx_blk_size = CDU_ILT_PAGE_SZ;
	cp->ctx_tbl_offset = FUNC_ILT_BASE(BP_FUNC(bp)) + 1;
	cp->ctx_tbl_len = CNIC_ILT_LINES;
	cp->starting_cid = BCM_CNIC_CID_START;
	/* Callbacks CNIC invokes on this driver */
	cp->drv_submit_kwqes_16 = bnx2x_cnic_sp_queue;
	cp->drv_ctl = bnx2x_drv_ctl;
	cp->drv_register_cnic = bnx2x_register_cnic;
	cp->drv_unregister_cnic = bnx2x_unregister_cnic;

	return cp;
}
EXPORT_SYMBOL(bnx2x_cnic_probe);
8656
8657#endif /* BCM_CNIC */
Vladislav Zolotarov94a78b72009-04-27 03:27:43 -07008658