/* bnx2x_main.c: Broadcom Everest network driver.
 *
 * Copyright (c) 2007-2010 Broadcom Corporation
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation.
 *
 * Maintained by: Eilon Greenstein <eilong@broadcom.com>
 * Written by: Eliezer Tamir
 * Based on code from Michael Chan's bnx2 driver
 * UDP CSUM errata workaround by Arik Gendelman
 * Slowpath and fastpath rework by Vladislav Zolotarov
 * Statistics and Link management by Yitchak Gertner
 *
 */

#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/kernel.h>
#include <linux/device.h>  /* for dev_info() */
#include <linux/timer.h>
#include <linux/errno.h>
#include <linux/ioport.h>
#include <linux/slab.h>
#include <linux/interrupt.h>
#include <linux/pci.h>
#include <linux/init.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/skbuff.h>
#include <linux/dma-mapping.h>
#include <linux/bitops.h>
#include <linux/irq.h>
#include <linux/delay.h>
#include <asm/byteorder.h>
#include <linux/time.h>
#include <linux/ethtool.h>
#include <linux/mii.h>
#include <linux/if_vlan.h>
#include <net/ip.h>
#include <net/tcp.h>
#include <net/checksum.h>
#include <net/ip6_checksum.h>
#include <linux/workqueue.h>
#include <linux/crc32.h>
#include <linux/crc32c.h>
#include <linux/prefetch.h>
#include <linux/zlib.h>
#include <linux/io.h>
#include <linux/stringify.h>

#define BNX2X_MAIN
#include "bnx2x.h"
#include "bnx2x_init.h"
#include "bnx2x_init_ops.h"
#include "bnx2x_cmn.h"


#include <linux/firmware.h>
#include "bnx2x_fw_file_hdr.h"
/* FW files */
#define FW_FILE_VERSION					\
	__stringify(BCM_5710_FW_MAJOR_VERSION) "."	\
	__stringify(BCM_5710_FW_MINOR_VERSION) "."	\
	__stringify(BCM_5710_FW_REVISION_VERSION) "."	\
	__stringify(BCM_5710_FW_ENGINEERING_VERSION)
#define FW_FILE_NAME_E1		"bnx2x/bnx2x-e1-" FW_FILE_VERSION ".fw"
#define FW_FILE_NAME_E1H	"bnx2x/bnx2x-e1h-" FW_FILE_VERSION ".fw"
#define FW_FILE_NAME_E2		"bnx2x/bnx2x-e2-" FW_FILE_VERSION ".fw"

/* Time in jiffies before concluding the transmitter is hung */
#define TX_TIMEOUT		(5*HZ)

static char version[] __devinitdata =
	"Broadcom NetXtreme II 5771x 10Gigabit Ethernet Driver "
	DRV_MODULE_NAME " " DRV_MODULE_VERSION " (" DRV_MODULE_RELDATE ")\n";

MODULE_AUTHOR("Eliezer Tamir");
MODULE_DESCRIPTION("Broadcom NetXtreme II "
		   "BCM57710/57711/57711E/57712/57712E Driver");
MODULE_LICENSE("GPL");
MODULE_VERSION(DRV_MODULE_VERSION);
MODULE_FIRMWARE(FW_FILE_NAME_E1);
MODULE_FIRMWARE(FW_FILE_NAME_E1H);
MODULE_FIRMWARE(FW_FILE_NAME_E2);

static int multi_mode = 1;
module_param(multi_mode, int, 0);
MODULE_PARM_DESC(multi_mode, " Multi queue mode "
			     "(0 Disable; 1 Enable (default))");

int num_queues;
module_param(num_queues, int, 0);
MODULE_PARM_DESC(num_queues, " Number of queues for multi_mode=1"
				" (default is the number of CPUs)");

static int disable_tpa;
module_param(disable_tpa, int, 0);
MODULE_PARM_DESC(disable_tpa, " Disable the TPA (LRO) feature");

static int int_mode;
module_param(int_mode, int, 0);
MODULE_PARM_DESC(int_mode, " Force interrupt mode other than MSI-X "
				"(1 INT#x; 2 MSI)");

static int dropless_fc;
module_param(dropless_fc, int, 0);
MODULE_PARM_DESC(dropless_fc, " Pause on exhausted host ring");

static int poll;
module_param(poll, int, 0);
MODULE_PARM_DESC(poll, " Use polling (for debug)");

static int mrrs = -1;
module_param(mrrs, int, 0);
MODULE_PARM_DESC(mrrs, " Force Max Read Req Size (0..3) (for debug)");

static int debug;
module_param(debug, int, 0);
MODULE_PARM_DESC(debug, " Default debug msglevel");

static struct workqueue_struct *bnx2x_wq;

enum bnx2x_board_type {
	BCM57710 = 0,
	BCM57711 = 1,
	BCM57711E = 2,
	BCM57712 = 3,
	BCM57712E = 4
};

/* indexed by board_type, above */
static struct {
	char *name;
} board_info[] __devinitdata = {
	{ "Broadcom NetXtreme II BCM57710 XGb" },
	{ "Broadcom NetXtreme II BCM57711 XGb" },
	{ "Broadcom NetXtreme II BCM57711E XGb" },
	{ "Broadcom NetXtreme II BCM57712 XGb" },
	{ "Broadcom NetXtreme II BCM57712E XGb" }
};

#ifndef PCI_DEVICE_ID_NX2_57712
#define PCI_DEVICE_ID_NX2_57712		0x1662
#endif
#ifndef PCI_DEVICE_ID_NX2_57712E
#define PCI_DEVICE_ID_NX2_57712E	0x1663
#endif

static DEFINE_PCI_DEVICE_TABLE(bnx2x_pci_tbl) = {
	{ PCI_VDEVICE(BROADCOM, PCI_DEVICE_ID_NX2_57710), BCM57710 },
	{ PCI_VDEVICE(BROADCOM, PCI_DEVICE_ID_NX2_57711), BCM57711 },
	{ PCI_VDEVICE(BROADCOM, PCI_DEVICE_ID_NX2_57711E), BCM57711E },
	{ PCI_VDEVICE(BROADCOM, PCI_DEVICE_ID_NX2_57712), BCM57712 },
	{ PCI_VDEVICE(BROADCOM, PCI_DEVICE_ID_NX2_57712E), BCM57712E },
	{ 0 }
};

MODULE_DEVICE_TABLE(pci, bnx2x_pci_tbl);

/****************************************************************************
* General service functions
****************************************************************************/

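/*
 * The storm_memset_*() helpers below write driver-owned configuration and
 * statistics structures into the internal memories of the chip's STORM
 * firmware processors (X/T/C/U), reached through the BAR_*STRORM_INTMEM
 * offsets.  64-bit DMA addresses are split into two 32-bit register writes.
 */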
static inline void __storm_memset_dma_mapping(struct bnx2x *bp,
				       u32 addr, dma_addr_t mapping)
{
	REG_WR(bp, addr, U64_LO(mapping));
	REG_WR(bp, addr + 4, U64_HI(mapping));
}

static inline void __storm_memset_fill(struct bnx2x *bp,
				       u32 addr, size_t size, u32 val)
{
	int i;
	for (i = 0; i < size/4; i++)
		REG_WR(bp, addr + (i * 4), val);
}

static inline void storm_memset_ustats_zero(struct bnx2x *bp,
					    u8 port, u16 stat_id)
{
	size_t size = sizeof(struct ustorm_per_client_stats);

	u32 addr = BAR_USTRORM_INTMEM +
			USTORM_PER_COUNTER_ID_STATS_OFFSET(port, stat_id);

	__storm_memset_fill(bp, addr, size, 0);
}

static inline void storm_memset_tstats_zero(struct bnx2x *bp,
					    u8 port, u16 stat_id)
{
	size_t size = sizeof(struct tstorm_per_client_stats);

	u32 addr = BAR_TSTRORM_INTMEM +
			TSTORM_PER_COUNTER_ID_STATS_OFFSET(port, stat_id);

	__storm_memset_fill(bp, addr, size, 0);
}

static inline void storm_memset_xstats_zero(struct bnx2x *bp,
					    u8 port, u16 stat_id)
{
	size_t size = sizeof(struct xstorm_per_client_stats);

	u32 addr = BAR_XSTRORM_INTMEM +
			XSTORM_PER_COUNTER_ID_STATS_OFFSET(port, stat_id);

	__storm_memset_fill(bp, addr, size, 0);
}


static inline void storm_memset_spq_addr(struct bnx2x *bp,
					 dma_addr_t mapping, u16 abs_fid)
{
	u32 addr = XSEM_REG_FAST_MEMORY +
			XSTORM_SPQ_PAGE_BASE_OFFSET(abs_fid);

	__storm_memset_dma_mapping(bp, addr, mapping);
}

static inline void storm_memset_ov(struct bnx2x *bp, u16 ov, u16 abs_fid)
{
	REG_WR16(bp, BAR_XSTRORM_INTMEM + XSTORM_E1HOV_OFFSET(abs_fid), ov);
}

static inline void storm_memset_func_cfg(struct bnx2x *bp,
				struct tstorm_eth_function_common_config *tcfg,
				u16 abs_fid)
{
	size_t size = sizeof(struct tstorm_eth_function_common_config);

	u32 addr = BAR_TSTRORM_INTMEM +
			TSTORM_FUNCTION_COMMON_CONFIG_OFFSET(abs_fid);

	__storm_memset_struct(bp, addr, size, (u32 *)tcfg);
}

static inline void storm_memset_xstats_flags(struct bnx2x *bp,
				struct stats_indication_flags *flags,
				u16 abs_fid)
{
	size_t size = sizeof(struct stats_indication_flags);

	u32 addr = BAR_XSTRORM_INTMEM + XSTORM_STATS_FLAGS_OFFSET(abs_fid);

	__storm_memset_struct(bp, addr, size, (u32 *)flags);
}

static inline void storm_memset_tstats_flags(struct bnx2x *bp,
				struct stats_indication_flags *flags,
				u16 abs_fid)
{
	size_t size = sizeof(struct stats_indication_flags);

	u32 addr = BAR_TSTRORM_INTMEM + TSTORM_STATS_FLAGS_OFFSET(abs_fid);

	__storm_memset_struct(bp, addr, size, (u32 *)flags);
}

static inline void storm_memset_ustats_flags(struct bnx2x *bp,
				struct stats_indication_flags *flags,
				u16 abs_fid)
{
	size_t size = sizeof(struct stats_indication_flags);

	u32 addr = BAR_USTRORM_INTMEM + USTORM_STATS_FLAGS_OFFSET(abs_fid);

	__storm_memset_struct(bp, addr, size, (u32 *)flags);
}

static inline void storm_memset_cstats_flags(struct bnx2x *bp,
				struct stats_indication_flags *flags,
				u16 abs_fid)
{
	size_t size = sizeof(struct stats_indication_flags);

	u32 addr = BAR_CSTRORM_INTMEM + CSTORM_STATS_FLAGS_OFFSET(abs_fid);

	__storm_memset_struct(bp, addr, size, (u32 *)flags);
}

static inline void storm_memset_xstats_addr(struct bnx2x *bp,
					    dma_addr_t mapping, u16 abs_fid)
{
	u32 addr = BAR_XSTRORM_INTMEM +
			XSTORM_ETH_STATS_QUERY_ADDR_OFFSET(abs_fid);

	__storm_memset_dma_mapping(bp, addr, mapping);
}

static inline void storm_memset_tstats_addr(struct bnx2x *bp,
					    dma_addr_t mapping, u16 abs_fid)
{
	u32 addr = BAR_TSTRORM_INTMEM +
			TSTORM_ETH_STATS_QUERY_ADDR_OFFSET(abs_fid);

	__storm_memset_dma_mapping(bp, addr, mapping);
}

static inline void storm_memset_ustats_addr(struct bnx2x *bp,
					    dma_addr_t mapping, u16 abs_fid)
{
	u32 addr = BAR_USTRORM_INTMEM +
			USTORM_ETH_STATS_QUERY_ADDR_OFFSET(abs_fid);

	__storm_memset_dma_mapping(bp, addr, mapping);
}

static inline void storm_memset_cstats_addr(struct bnx2x *bp,
					    dma_addr_t mapping, u16 abs_fid)
{
	u32 addr = BAR_CSTRORM_INTMEM +
			CSTORM_ETH_STATS_QUERY_ADDR_OFFSET(abs_fid);

	__storm_memset_dma_mapping(bp, addr, mapping);
}

static inline void storm_memset_vf_to_pf(struct bnx2x *bp, u16 abs_fid,
					 u16 pf_id)
{
	REG_WR8(bp, BAR_XSTRORM_INTMEM + XSTORM_VF_TO_PF_OFFSET(abs_fid),
		pf_id);
	REG_WR8(bp, BAR_CSTRORM_INTMEM + CSTORM_VF_TO_PF_OFFSET(abs_fid),
		pf_id);
	REG_WR8(bp, BAR_TSTRORM_INTMEM + TSTORM_VF_TO_PF_OFFSET(abs_fid),
		pf_id);
	REG_WR8(bp, BAR_USTRORM_INTMEM + USTORM_VF_TO_PF_OFFSET(abs_fid),
		pf_id);
}

static inline void storm_memset_func_en(struct bnx2x *bp, u16 abs_fid,
					u8 enable)
{
	REG_WR8(bp, BAR_XSTRORM_INTMEM + XSTORM_FUNC_EN_OFFSET(abs_fid),
		enable);
	REG_WR8(bp, BAR_CSTRORM_INTMEM + CSTORM_FUNC_EN_OFFSET(abs_fid),
		enable);
	REG_WR8(bp, BAR_TSTRORM_INTMEM + TSTORM_FUNC_EN_OFFSET(abs_fid),
		enable);
	REG_WR8(bp, BAR_USTRORM_INTMEM + USTORM_FUNC_EN_OFFSET(abs_fid),
		enable);
}

static inline void storm_memset_eq_data(struct bnx2x *bp,
				struct event_ring_data *eq_data,
				u16 pfid)
{
	size_t size = sizeof(struct event_ring_data);

	u32 addr = BAR_CSTRORM_INTMEM + CSTORM_EVENT_RING_DATA_OFFSET(pfid);

	__storm_memset_struct(bp, addr, size, (u32 *)eq_data);
}

static inline void storm_memset_eq_prod(struct bnx2x *bp, u16 eq_prod,
					u16 pfid)
{
	u32 addr = BAR_CSTRORM_INTMEM + CSTORM_EVENT_RING_PROD_OFFSET(pfid);
	REG_WR16(bp, addr, eq_prod);
}

static inline void storm_memset_hc_timeout(struct bnx2x *bp, u8 port,
					   u16 fw_sb_id, u8 sb_index,
					   u8 ticks)
{

	int index_offset = CHIP_IS_E2(bp) ?
		offsetof(struct hc_status_block_data_e2, index_data) :
		offsetof(struct hc_status_block_data_e1x, index_data);
	u32 addr = BAR_CSTRORM_INTMEM +
			CSTORM_STATUS_BLOCK_DATA_OFFSET(fw_sb_id) +
			index_offset +
			sizeof(struct hc_index_data)*sb_index +
			offsetof(struct hc_index_data, timeout);
	REG_WR8(bp, addr, ticks);
	DP(NETIF_MSG_HW, "port %x fw_sb_id %d sb_index %d ticks %d\n",
			  port, fw_sb_id, sb_index, ticks);
}

static inline void storm_memset_hc_disable(struct bnx2x *bp, u8 port,
					   u16 fw_sb_id, u8 sb_index,
					   u8 disable)
{
	u32 enable_flag = disable ? 0 : (1 << HC_INDEX_DATA_HC_ENABLED_SHIFT);
	int index_offset = CHIP_IS_E2(bp) ?
		offsetof(struct hc_status_block_data_e2, index_data) :
		offsetof(struct hc_status_block_data_e1x, index_data);
	u32 addr = BAR_CSTRORM_INTMEM +
			CSTORM_STATUS_BLOCK_DATA_OFFSET(fw_sb_id) +
			index_offset +
			sizeof(struct hc_index_data)*sb_index +
			offsetof(struct hc_index_data, flags);
	u16 flags = REG_RD16(bp, addr);
	/* clear and set */
	flags &= ~HC_INDEX_DATA_HC_ENABLED;
	flags |= enable_flag;
	REG_WR16(bp, addr, flags);
	DP(NETIF_MSG_HW, "port %x fw_sb_id %d sb_index %d disable %d\n",
			  port, fw_sb_id, sb_index, disable);
}

/* used only at init
 * locking is done by mcp
 */
void bnx2x_reg_wr_ind(struct bnx2x *bp, u32 addr, u32 val)
{
	pci_write_config_dword(bp->pdev, PCICFG_GRC_ADDRESS, addr);
	pci_write_config_dword(bp->pdev, PCICFG_GRC_DATA, val);
	pci_write_config_dword(bp->pdev, PCICFG_GRC_ADDRESS,
			       PCICFG_VENDOR_ID_OFFSET);
}

static u32 bnx2x_reg_rd_ind(struct bnx2x *bp, u32 addr)
{
	u32 val;

	pci_write_config_dword(bp->pdev, PCICFG_GRC_ADDRESS, addr);
	pci_read_config_dword(bp->pdev, PCICFG_GRC_DATA, &val);
	pci_write_config_dword(bp->pdev, PCICFG_GRC_ADDRESS,
			       PCICFG_VENDOR_ID_OFFSET);

	return val;
}

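/*
 * DMAE is the on-chip DMA engine used to copy blocks of data between host
 * memory and device (GRC) addresses.  A command is written into the DMAE
 * command memory and kicked via the per-channel GO register; completion is
 * then detected by polling a write-back word in host memory (the scheme
 * the helpers below use when DMAE_COMP_PCI is requested).
 */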
#define DMAE_DP_SRC_GRC		"grc src_addr [%08x]"
#define DMAE_DP_SRC_PCI		"pci src_addr [%x:%08x]"
#define DMAE_DP_DST_GRC		"grc dst_addr [%08x]"
#define DMAE_DP_DST_PCI		"pci dst_addr [%x:%08x]"
#define DMAE_DP_DST_NONE	"dst_addr [none]"

void bnx2x_dp_dmae(struct bnx2x *bp, struct dmae_command *dmae, int msglvl)
{
	u32 src_type = dmae->opcode & DMAE_COMMAND_SRC;

	switch (dmae->opcode & DMAE_COMMAND_DST) {
	case DMAE_CMD_DST_PCI:
		if (src_type == DMAE_CMD_SRC_PCI)
			DP(msglvl, "DMAE: opcode 0x%08x\n"
			   "src [%x:%08x], len [%d*4], dst [%x:%08x]\n"
			   "comp_addr [%x:%08x], comp_val 0x%08x\n",
			   dmae->opcode, dmae->src_addr_hi, dmae->src_addr_lo,
			   dmae->len, dmae->dst_addr_hi, dmae->dst_addr_lo,
			   dmae->comp_addr_hi, dmae->comp_addr_lo,
			   dmae->comp_val);
		else
			DP(msglvl, "DMAE: opcode 0x%08x\n"
			   "src [%08x], len [%d*4], dst [%x:%08x]\n"
			   "comp_addr [%x:%08x], comp_val 0x%08x\n",
			   dmae->opcode, dmae->src_addr_lo >> 2,
			   dmae->len, dmae->dst_addr_hi, dmae->dst_addr_lo,
			   dmae->comp_addr_hi, dmae->comp_addr_lo,
			   dmae->comp_val);
		break;
	case DMAE_CMD_DST_GRC:
		if (src_type == DMAE_CMD_SRC_PCI)
			DP(msglvl, "DMAE: opcode 0x%08x\n"
			   "src [%x:%08x], len [%d*4], dst_addr [%08x]\n"
			   "comp_addr [%x:%08x], comp_val 0x%08x\n",
			   dmae->opcode, dmae->src_addr_hi, dmae->src_addr_lo,
			   dmae->len, dmae->dst_addr_lo >> 2,
			   dmae->comp_addr_hi, dmae->comp_addr_lo,
			   dmae->comp_val);
		else
			DP(msglvl, "DMAE: opcode 0x%08x\n"
			   "src [%08x], len [%d*4], dst [%08x]\n"
			   "comp_addr [%x:%08x], comp_val 0x%08x\n",
			   dmae->opcode, dmae->src_addr_lo >> 2,
			   dmae->len, dmae->dst_addr_lo >> 2,
			   dmae->comp_addr_hi, dmae->comp_addr_lo,
			   dmae->comp_val);
		break;
	default:
		if (src_type == DMAE_CMD_SRC_PCI)
			DP(msglvl, "DMAE: opcode 0x%08x\n"
			   DP_LEVEL "src_addr [%x:%08x] len [%d * 4] "
				    "dst_addr [none]\n"
			   DP_LEVEL "comp_addr [%x:%08x] comp_val 0x%08x\n",
			   dmae->opcode, dmae->src_addr_hi, dmae->src_addr_lo,
			   dmae->len, dmae->comp_addr_hi, dmae->comp_addr_lo,
			   dmae->comp_val);
		else
			DP(msglvl, "DMAE: opcode 0x%08x\n"
			   DP_LEVEL "src_addr [%08x] len [%d * 4] "
				    "dst_addr [none]\n"
			   DP_LEVEL "comp_addr [%x:%08x] comp_val 0x%08x\n",
			   dmae->opcode, dmae->src_addr_lo >> 2,
			   dmae->len, dmae->comp_addr_hi, dmae->comp_addr_lo,
			   dmae->comp_val);
		break;
	}

}

const u32 dmae_reg_go_c[] = {
	DMAE_REG_GO_C0, DMAE_REG_GO_C1, DMAE_REG_GO_C2, DMAE_REG_GO_C3,
	DMAE_REG_GO_C4, DMAE_REG_GO_C5, DMAE_REG_GO_C6, DMAE_REG_GO_C7,
	DMAE_REG_GO_C8, DMAE_REG_GO_C9, DMAE_REG_GO_C10, DMAE_REG_GO_C11,
	DMAE_REG_GO_C12, DMAE_REG_GO_C13, DMAE_REG_GO_C14, DMAE_REG_GO_C15
};

/* copy command into DMAE command memory and set DMAE command go */
void bnx2x_post_dmae(struct bnx2x *bp, struct dmae_command *dmae, int idx)
{
	u32 cmd_offset;
	int i;

	cmd_offset = (DMAE_REG_CMD_MEM + sizeof(struct dmae_command) * idx);
	for (i = 0; i < (sizeof(struct dmae_command)/4); i++) {
		REG_WR(bp, cmd_offset + i*4, *(((u32 *)dmae) + i));

		DP(BNX2X_MSG_OFF, "DMAE cmd[%d].%d (0x%08x) : 0x%08x\n",
		   idx, i, cmd_offset + i*4, *(((u32 *)dmae) + i));
	}
	REG_WR(bp, dmae_reg_go_c[idx], 1);
}

u32 bnx2x_dmae_opcode_add_comp(u32 opcode, u8 comp_type)
{
	return opcode | ((comp_type << DMAE_COMMAND_C_DST_SHIFT) |
			 DMAE_CMD_C_ENABLE);
}

u32 bnx2x_dmae_opcode_clr_src_reset(u32 opcode)
{
	return opcode & ~DMAE_CMD_SRC_RESET;
}

u32 bnx2x_dmae_opcode(struct bnx2x *bp, u8 src_type, u8 dst_type,
		      bool with_comp, u8 comp_type)
{
	u32 opcode = 0;

	opcode |= ((src_type << DMAE_COMMAND_SRC_SHIFT) |
		   (dst_type << DMAE_COMMAND_DST_SHIFT));

	opcode |= (DMAE_CMD_SRC_RESET | DMAE_CMD_DST_RESET);

	opcode |= (BP_PORT(bp) ? DMAE_CMD_PORT_1 : DMAE_CMD_PORT_0);
	opcode |= ((BP_E1HVN(bp) << DMAE_CMD_E1HVN_SHIFT) |
		   (BP_E1HVN(bp) << DMAE_COMMAND_DST_VN_SHIFT));
	opcode |= (DMAE_COM_SET_ERR << DMAE_COMMAND_ERR_POLICY_SHIFT);

#ifdef __BIG_ENDIAN
	opcode |= DMAE_CMD_ENDIANITY_B_DW_SWAP;
#else
	opcode |= DMAE_CMD_ENDIANITY_DW_SWAP;
#endif
	if (with_comp)
		opcode = bnx2x_dmae_opcode_add_comp(opcode, comp_type);
	return opcode;
}

void bnx2x_prep_dmae_with_comp(struct bnx2x *bp, struct dmae_command *dmae,
			       u8 src_type, u8 dst_type)
{
	memset(dmae, 0, sizeof(struct dmae_command));

	/* set the opcode */
	dmae->opcode = bnx2x_dmae_opcode(bp, src_type, dst_type,
					 true, DMAE_COMP_PCI);

	/* fill in the completion parameters */
	dmae->comp_addr_lo = U64_LO(bnx2x_sp_mapping(bp, wb_comp));
	dmae->comp_addr_hi = U64_HI(bnx2x_sp_mapping(bp, wb_comp));
	dmae->comp_val = DMAE_COMP_VAL;
}

/* issue a DMAE command over the init channel and wait for completion */
int bnx2x_issue_dmae_with_comp(struct bnx2x *bp, struct dmae_command *dmae)
{
	u32 *wb_comp = bnx2x_sp(bp, wb_comp);
	int cnt = CHIP_REV_IS_SLOW(bp) ? (400000) : 40;
	int rc = 0;

	DP(BNX2X_MSG_OFF, "data before [0x%08x 0x%08x 0x%08x 0x%08x]\n",
	   bp->slowpath->wb_data[0], bp->slowpath->wb_data[1],
	   bp->slowpath->wb_data[2], bp->slowpath->wb_data[3]);

	/* lock the dmae channel */
	mutex_lock(&bp->dmae_mutex);

	/* reset completion */
	*wb_comp = 0;

	/* post the command on the channel used for initializations */
	bnx2x_post_dmae(bp, dmae, INIT_DMAE_C(bp));

	/* wait for completion */
	udelay(5);
	while ((*wb_comp & ~DMAE_PCI_ERR_FLAG) != DMAE_COMP_VAL) {
		DP(BNX2X_MSG_OFF, "wb_comp 0x%08x\n", *wb_comp);

		if (!cnt) {
			BNX2X_ERR("DMAE timeout!\n");
			rc = DMAE_TIMEOUT;
			goto unlock;
		}
		cnt--;
		udelay(50);
	}
	if (*wb_comp & DMAE_PCI_ERR_FLAG) {
		BNX2X_ERR("DMAE PCI error!\n");
		rc = DMAE_PCI_ERROR;
	}

	DP(BNX2X_MSG_OFF, "data after [0x%08x 0x%08x 0x%08x 0x%08x]\n",
	   bp->slowpath->wb_data[0], bp->slowpath->wb_data[1],
	   bp->slowpath->wb_data[2], bp->slowpath->wb_data[3]);

unlock:
	mutex_unlock(&bp->dmae_mutex);
	return rc;
}

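/*
 * bnx2x_write_dmae()/bnx2x_read_dmae() are the main block-copy entry
 * points.  Before the DMAE block is ready (early init), they fall back to
 * plain indirect register accesses, one dword at a time.
 */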
void bnx2x_write_dmae(struct bnx2x *bp, dma_addr_t dma_addr, u32 dst_addr,
		      u32 len32)
{
	struct dmae_command dmae;

	if (!bp->dmae_ready) {
		u32 *data = bnx2x_sp(bp, wb_data[0]);

		DP(BNX2X_MSG_OFF, "DMAE is not ready (dst_addr %08x len32 %d)"
		   " using indirect\n", dst_addr, len32);
		bnx2x_init_ind_wr(bp, dst_addr, data, len32);
		return;
	}

	/* set opcode and fixed command fields */
	bnx2x_prep_dmae_with_comp(bp, &dmae, DMAE_SRC_PCI, DMAE_DST_GRC);

	/* fill in addresses and len */
	dmae.src_addr_lo = U64_LO(dma_addr);
	dmae.src_addr_hi = U64_HI(dma_addr);
	dmae.dst_addr_lo = dst_addr >> 2;
	dmae.dst_addr_hi = 0;
	dmae.len = len32;

	bnx2x_dp_dmae(bp, &dmae, BNX2X_MSG_OFF);

	/* issue the command and wait for completion */
	bnx2x_issue_dmae_with_comp(bp, &dmae);
}

void bnx2x_read_dmae(struct bnx2x *bp, u32 src_addr, u32 len32)
{
	struct dmae_command dmae;

	if (!bp->dmae_ready) {
		u32 *data = bnx2x_sp(bp, wb_data[0]);
		int i;

		DP(BNX2X_MSG_OFF, "DMAE is not ready (src_addr %08x len32 %d)"
		   " using indirect\n", src_addr, len32);
		for (i = 0; i < len32; i++)
			data[i] = bnx2x_reg_rd_ind(bp, src_addr + i*4);
		return;
	}

	/* set opcode and fixed command fields */
	bnx2x_prep_dmae_with_comp(bp, &dmae, DMAE_SRC_GRC, DMAE_DST_PCI);

	/* fill in addresses and len */
	dmae.src_addr_lo = src_addr >> 2;
	dmae.src_addr_hi = 0;
	dmae.dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, wb_data));
	dmae.dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, wb_data));
	dmae.len = len32;

	bnx2x_dp_dmae(bp, &dmae, BNX2X_MSG_OFF);

	/* issue the command and wait for completion */
	bnx2x_issue_dmae_with_comp(bp, &dmae);
}

void bnx2x_write_dmae_phys_len(struct bnx2x *bp, dma_addr_t phys_addr,
			       u32 addr, u32 len)
{
	int dmae_wr_max = DMAE_LEN32_WR_MAX(bp);
	int offset = 0;

	while (len > dmae_wr_max) {
		bnx2x_write_dmae(bp, phys_addr + offset,
				 addr + offset, dmae_wr_max);
		offset += dmae_wr_max * 4;
		len -= dmae_wr_max;
	}

	bnx2x_write_dmae(bp, phys_addr + offset, addr + offset, len);
}

/* used only for slowpath so not inlined */
static void bnx2x_wb_wr(struct bnx2x *bp, int reg, u32 val_hi, u32 val_lo)
{
	u32 wb_write[2];

	wb_write[0] = val_hi;
	wb_write[1] = val_lo;
	REG_WR_DMAE(bp, reg, wb_write, 2);
}

#ifdef USE_WB_RD
static u64 bnx2x_wb_rd(struct bnx2x *bp, int reg)
{
	u32 wb_data[2];

	REG_RD_DMAE(bp, reg, wb_data, 2);

	return HILO_U64(wb_data[0], wb_data[1]);
}
#endif

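/*
 * Each STORM processor keeps an assert list in its internal memory.
 * bnx2x_mc_assert() walks the X/T/C/U lists in turn and prints every valid
 * entry; a COMMON_ASM_INVALID_ASSERT_OPCODE marker ends a list.  The return
 * value is the total number of asserts found.
 */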
static int bnx2x_mc_assert(struct bnx2x *bp)
{
	char last_idx;
	int i, rc = 0;
	u32 row0, row1, row2, row3;

	/* XSTORM */
	last_idx = REG_RD8(bp, BAR_XSTRORM_INTMEM +
			   XSTORM_ASSERT_LIST_INDEX_OFFSET);
	if (last_idx)
		BNX2X_ERR("XSTORM_ASSERT_LIST_INDEX 0x%x\n", last_idx);

	/* print the asserts */
	for (i = 0; i < STROM_ASSERT_ARRAY_SIZE; i++) {

		row0 = REG_RD(bp, BAR_XSTRORM_INTMEM +
			      XSTORM_ASSERT_LIST_OFFSET(i));
		row1 = REG_RD(bp, BAR_XSTRORM_INTMEM +
			      XSTORM_ASSERT_LIST_OFFSET(i) + 4);
		row2 = REG_RD(bp, BAR_XSTRORM_INTMEM +
			      XSTORM_ASSERT_LIST_OFFSET(i) + 8);
		row3 = REG_RD(bp, BAR_XSTRORM_INTMEM +
			      XSTORM_ASSERT_LIST_OFFSET(i) + 12);

		if (row0 != COMMON_ASM_INVALID_ASSERT_OPCODE) {
			BNX2X_ERR("XSTORM_ASSERT_INDEX 0x%x = 0x%08x"
				  " 0x%08x 0x%08x 0x%08x\n",
				  i, row3, row2, row1, row0);
			rc++;
		} else {
			break;
		}
	}

	/* TSTORM */
	last_idx = REG_RD8(bp, BAR_TSTRORM_INTMEM +
			   TSTORM_ASSERT_LIST_INDEX_OFFSET);
	if (last_idx)
		BNX2X_ERR("TSTORM_ASSERT_LIST_INDEX 0x%x\n", last_idx);

	/* print the asserts */
	for (i = 0; i < STROM_ASSERT_ARRAY_SIZE; i++) {

		row0 = REG_RD(bp, BAR_TSTRORM_INTMEM +
			      TSTORM_ASSERT_LIST_OFFSET(i));
		row1 = REG_RD(bp, BAR_TSTRORM_INTMEM +
			      TSTORM_ASSERT_LIST_OFFSET(i) + 4);
		row2 = REG_RD(bp, BAR_TSTRORM_INTMEM +
			      TSTORM_ASSERT_LIST_OFFSET(i) + 8);
		row3 = REG_RD(bp, BAR_TSTRORM_INTMEM +
			      TSTORM_ASSERT_LIST_OFFSET(i) + 12);

		if (row0 != COMMON_ASM_INVALID_ASSERT_OPCODE) {
			BNX2X_ERR("TSTORM_ASSERT_INDEX 0x%x = 0x%08x"
				  " 0x%08x 0x%08x 0x%08x\n",
				  i, row3, row2, row1, row0);
			rc++;
		} else {
			break;
		}
	}

	/* CSTORM */
	last_idx = REG_RD8(bp, BAR_CSTRORM_INTMEM +
			   CSTORM_ASSERT_LIST_INDEX_OFFSET);
	if (last_idx)
		BNX2X_ERR("CSTORM_ASSERT_LIST_INDEX 0x%x\n", last_idx);

	/* print the asserts */
	for (i = 0; i < STROM_ASSERT_ARRAY_SIZE; i++) {

		row0 = REG_RD(bp, BAR_CSTRORM_INTMEM +
			      CSTORM_ASSERT_LIST_OFFSET(i));
		row1 = REG_RD(bp, BAR_CSTRORM_INTMEM +
			      CSTORM_ASSERT_LIST_OFFSET(i) + 4);
		row2 = REG_RD(bp, BAR_CSTRORM_INTMEM +
			      CSTORM_ASSERT_LIST_OFFSET(i) + 8);
		row3 = REG_RD(bp, BAR_CSTRORM_INTMEM +
			      CSTORM_ASSERT_LIST_OFFSET(i) + 12);

		if (row0 != COMMON_ASM_INVALID_ASSERT_OPCODE) {
			BNX2X_ERR("CSTORM_ASSERT_INDEX 0x%x = 0x%08x"
				  " 0x%08x 0x%08x 0x%08x\n",
				  i, row3, row2, row1, row0);
			rc++;
		} else {
			break;
		}
	}

	/* USTORM */
	last_idx = REG_RD8(bp, BAR_USTRORM_INTMEM +
			   USTORM_ASSERT_LIST_INDEX_OFFSET);
	if (last_idx)
		BNX2X_ERR("USTORM_ASSERT_LIST_INDEX 0x%x\n", last_idx);

	/* print the asserts */
	for (i = 0; i < STROM_ASSERT_ARRAY_SIZE; i++) {

		row0 = REG_RD(bp, BAR_USTRORM_INTMEM +
			      USTORM_ASSERT_LIST_OFFSET(i));
		row1 = REG_RD(bp, BAR_USTRORM_INTMEM +
			      USTORM_ASSERT_LIST_OFFSET(i) + 4);
		row2 = REG_RD(bp, BAR_USTRORM_INTMEM +
			      USTORM_ASSERT_LIST_OFFSET(i) + 8);
		row3 = REG_RD(bp, BAR_USTRORM_INTMEM +
			      USTORM_ASSERT_LIST_OFFSET(i) + 12);

		if (row0 != COMMON_ASM_INVALID_ASSERT_OPCODE) {
			BNX2X_ERR("USTORM_ASSERT_INDEX 0x%x = 0x%08x"
				  " 0x%08x 0x%08x 0x%08x\n",
				  i, row3, row2, row1, row0);
			rc++;
		} else {
			break;
		}
	}

	return rc;
}

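/*
 * Dump the MCP (management CPU) firmware trace.  The trace location is
 * derived from the shmem base of the path this function belongs to and is
 * read out of the MCP scratchpad.
 */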
static void bnx2x_fw_dump(struct bnx2x *bp)
{
	u32 addr;
	u32 mark, offset;
	__be32 data[9];
	int word;
	u32 trace_shmem_base;
	if (BP_NOMCP(bp)) {
		BNX2X_ERR("NO MCP - can not dump\n");
		return;
	}

	if (BP_PATH(bp) == 0)
		trace_shmem_base = bp->common.shmem_base;
	else
		trace_shmem_base = SHMEM2_RD(bp, other_shmem_base_addr);
	addr = trace_shmem_base - 0x0800 + 4;
	mark = REG_RD(bp, addr);
	mark = (CHIP_IS_E1x(bp) ? MCP_REG_MCPR_SCRATCH : MCP_A_REG_MCPR_SCRATCH)
			+ ((mark + 0x3) & ~0x3) - 0x08000000;
	pr_err("begin fw dump (mark 0x%x)\n", mark);

	pr_err("");
	for (offset = mark; offset <= trace_shmem_base; offset += 0x8*4) {
		for (word = 0; word < 8; word++)
			data[word] = htonl(REG_RD(bp, offset + 4*word));
		data[8] = 0x0;
		pr_cont("%s", (char *)data);
	}
	for (offset = addr + 4; offset <= mark; offset += 0x8*4) {
		for (word = 0; word < 8; word++)
			data[word] = htonl(REG_RD(bp, offset + 4*word));
		data[8] = 0x0;
		pr_cont("%s", (char *)data);
	}
	pr_err("end of fw dump\n");
}

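/*
 * Dump driver and firmware state for post-mortem debugging: default and
 * per-queue status block indices, Rx/Tx producer/consumer pairs, the
 * firmware-side status block data and, under BNX2X_STOP_ON_ERROR, the raw
 * Rx/Tx rings as well.
 */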
void bnx2x_panic_dump(struct bnx2x *bp)
{
	int i;
	u16 j;
	struct hc_sp_status_block_data sp_sb_data;
	int func = BP_FUNC(bp);
#ifdef BNX2X_STOP_ON_ERROR
	u16 start = 0, end = 0;
#endif

	bp->stats_state = STATS_STATE_DISABLED;
	DP(BNX2X_MSG_STATS, "stats_state - DISABLED\n");

	BNX2X_ERR("begin crash dump -----------------\n");

	/* Indices */
	/* Common */
	BNX2X_ERR("def_idx(0x%x) def_att_idx(0x%x) attn_state(0x%x)"
		  " spq_prod_idx(0x%x)\n",
		  bp->def_idx, bp->def_att_idx,
		  bp->attn_state, bp->spq_prod_idx);
	BNX2X_ERR("DSB: attn bits(0x%x) ack(0x%x) id(0x%x) idx(0x%x)\n",
		  bp->def_status_blk->atten_status_block.attn_bits,
		  bp->def_status_blk->atten_status_block.attn_bits_ack,
		  bp->def_status_blk->atten_status_block.status_block_id,
		  bp->def_status_blk->atten_status_block.attn_bits_index);
	BNX2X_ERR("     def (");
	for (i = 0; i < HC_SP_SB_MAX_INDICES; i++)
		pr_cont("0x%x%s",
			bp->def_status_blk->sp_sb.index_values[i],
			(i == HC_SP_SB_MAX_INDICES - 1) ? ") " : " ");

	for (i = 0; i < sizeof(struct hc_sp_status_block_data)/sizeof(u32); i++)
		*((u32 *)&sp_sb_data + i) = REG_RD(bp, BAR_CSTRORM_INTMEM +
			CSTORM_SP_STATUS_BLOCK_DATA_OFFSET(func) +
			i*sizeof(u32));

	pr_cont("igu_sb_id(0x%x) igu_seg_id (0x%x) "
		"pf_id(0x%x) vnic_id(0x%x) "
		"vf_id(0x%x) vf_valid (0x%x)\n",
		sp_sb_data.igu_sb_id,
		sp_sb_data.igu_seg_id,
		sp_sb_data.p_func.pf_id,
		sp_sb_data.p_func.vnic_id,
		sp_sb_data.p_func.vf_id,
		sp_sb_data.p_func.vf_valid);


	for_each_queue(bp, i) {
		struct bnx2x_fastpath *fp = &bp->fp[i];
		int loop;
		struct hc_status_block_data_e2 sb_data_e2;
		struct hc_status_block_data_e1x sb_data_e1x;
		struct hc_status_block_sm *hc_sm_p =
			CHIP_IS_E2(bp) ?
			sb_data_e2.common.state_machine :
			sb_data_e1x.common.state_machine;
		struct hc_index_data *hc_index_p =
			CHIP_IS_E2(bp) ?
			sb_data_e2.index_data :
			sb_data_e1x.index_data;
		int data_size;
		u32 *sb_data_p;

		/* Rx */
		BNX2X_ERR("fp%d: rx_bd_prod(0x%x) rx_bd_cons(0x%x)"
			  " rx_comp_prod(0x%x)"
			  " rx_comp_cons(0x%x) *rx_cons_sb(0x%x)\n",
			  i, fp->rx_bd_prod, fp->rx_bd_cons,
			  fp->rx_comp_prod,
			  fp->rx_comp_cons, le16_to_cpu(*fp->rx_cons_sb));
		BNX2X_ERR("     rx_sge_prod(0x%x) last_max_sge(0x%x)"
			  " fp_hc_idx(0x%x)\n",
			  fp->rx_sge_prod, fp->last_max_sge,
			  le16_to_cpu(fp->fp_hc_idx));

		/* Tx */
		BNX2X_ERR("fp%d: tx_pkt_prod(0x%x) tx_pkt_cons(0x%x)"
			  " tx_bd_prod(0x%x) tx_bd_cons(0x%x)"
			  " *tx_cons_sb(0x%x)\n",
			  i, fp->tx_pkt_prod, fp->tx_pkt_cons, fp->tx_bd_prod,
			  fp->tx_bd_cons, le16_to_cpu(*fp->tx_cons_sb));

		loop = CHIP_IS_E2(bp) ?
			HC_SB_MAX_INDICES_E2 : HC_SB_MAX_INDICES_E1X;

		/* host sb data */

		BNX2X_ERR("     run indexes (");
		for (j = 0; j < HC_SB_MAX_SM; j++)
			pr_cont("0x%x%s",
				fp->sb_running_index[j],
				(j == HC_SB_MAX_SM - 1) ? ")" : " ");

		BNX2X_ERR("     indexes (");
		for (j = 0; j < loop; j++)
			pr_cont("0x%x%s",
				fp->sb_index_values[j],
				(j == loop - 1) ? ")" : " ");
		/* fw sb data */
		data_size = CHIP_IS_E2(bp) ?
			sizeof(struct hc_status_block_data_e2) :
			sizeof(struct hc_status_block_data_e1x);
		data_size /= sizeof(u32);
		sb_data_p = CHIP_IS_E2(bp) ?
			(u32 *)&sb_data_e2 :
			(u32 *)&sb_data_e1x;
		/* copy sb data in here */
		for (j = 0; j < data_size; j++)
			*(sb_data_p + j) = REG_RD(bp, BAR_CSTRORM_INTMEM +
				CSTORM_STATUS_BLOCK_DATA_OFFSET(fp->fw_sb_id) +
				j * sizeof(u32));

		if (CHIP_IS_E2(bp)) {
			pr_cont("pf_id(0x%x) vf_id (0x%x) vf_valid(0x%x) "
				"vnic_id(0x%x) same_igu_sb_1b(0x%x)\n",
				sb_data_e2.common.p_func.pf_id,
				sb_data_e2.common.p_func.vf_id,
				sb_data_e2.common.p_func.vf_valid,
				sb_data_e2.common.p_func.vnic_id,
				sb_data_e2.common.same_igu_sb_1b);
		} else {
			pr_cont("pf_id(0x%x) vf_id (0x%x) vf_valid(0x%x) "
				"vnic_id(0x%x) same_igu_sb_1b(0x%x)\n",
				sb_data_e1x.common.p_func.pf_id,
				sb_data_e1x.common.p_func.vf_id,
				sb_data_e1x.common.p_func.vf_valid,
				sb_data_e1x.common.p_func.vnic_id,
				sb_data_e1x.common.same_igu_sb_1b);
		}

		/* SB_SMs data */
		for (j = 0; j < HC_SB_MAX_SM; j++) {
			pr_cont("SM[%d] __flags (0x%x) "
				"igu_sb_id (0x%x) igu_seg_id(0x%x) "
				"time_to_expire (0x%x) "
				"timer_value(0x%x)\n", j,
				hc_sm_p[j].__flags,
				hc_sm_p[j].igu_sb_id,
				hc_sm_p[j].igu_seg_id,
				hc_sm_p[j].time_to_expire,
				hc_sm_p[j].timer_value);
		}

		/* Indices data */
		for (j = 0; j < loop; j++) {
			pr_cont("INDEX[%d] flags (0x%x) "
				"timeout (0x%x)\n", j,
				hc_index_p[j].flags,
				hc_index_p[j].timeout);
		}
	}

#ifdef BNX2X_STOP_ON_ERROR
	/* Rings */
	/* Rx */
	for_each_queue(bp, i) {
		struct bnx2x_fastpath *fp = &bp->fp[i];

		start = RX_BD(le16_to_cpu(*fp->rx_cons_sb) - 10);
		end = RX_BD(le16_to_cpu(*fp->rx_cons_sb) + 503);
		for (j = start; j != end; j = RX_BD(j + 1)) {
			u32 *rx_bd = (u32 *)&fp->rx_desc_ring[j];
			struct sw_rx_bd *sw_bd = &fp->rx_buf_ring[j];

			BNX2X_ERR("fp%d: rx_bd[%x]=[%x:%x] sw_bd=[%p]\n",
				  i, j, rx_bd[1], rx_bd[0], sw_bd->skb);
		}

		start = RX_SGE(fp->rx_sge_prod);
		end = RX_SGE(fp->last_max_sge);
		for (j = start; j != end; j = RX_SGE(j + 1)) {
			u32 *rx_sge = (u32 *)&fp->rx_sge_ring[j];
			struct sw_rx_page *sw_page = &fp->rx_page_ring[j];

			BNX2X_ERR("fp%d: rx_sge[%x]=[%x:%x] sw_page=[%p]\n",
				  i, j, rx_sge[1], rx_sge[0], sw_page->page);
		}

		start = RCQ_BD(fp->rx_comp_cons - 10);
		end = RCQ_BD(fp->rx_comp_cons + 503);
		for (j = start; j != end; j = RCQ_BD(j + 1)) {
			u32 *cqe = (u32 *)&fp->rx_comp_ring[j];

			BNX2X_ERR("fp%d: cqe[%x]=[%x:%x:%x:%x]\n",
				  i, j, cqe[0], cqe[1], cqe[2], cqe[3]);
		}
	}

	/* Tx */
	for_each_queue(bp, i) {
		struct bnx2x_fastpath *fp = &bp->fp[i];

		start = TX_BD(le16_to_cpu(*fp->tx_cons_sb) - 10);
		end = TX_BD(le16_to_cpu(*fp->tx_cons_sb) + 245);
		for (j = start; j != end; j = TX_BD(j + 1)) {
			struct sw_tx_bd *sw_bd = &fp->tx_buf_ring[j];

			BNX2X_ERR("fp%d: packet[%x]=[%p,%x]\n",
				  i, j, sw_bd->skb, sw_bd->first_bd);
		}

		start = TX_BD(fp->tx_bd_cons - 10);
		end = TX_BD(fp->tx_bd_cons + 254);
		for (j = start; j != end; j = TX_BD(j + 1)) {
			u32 *tx_bd = (u32 *)&fp->tx_desc_ring[j];

			BNX2X_ERR("fp%d: tx_bd[%x]=[%x:%x:%x:%x]\n",
				  i, j, tx_bd[0], tx_bd[1], tx_bd[2], tx_bd[3]);
		}
	}
#endif
	bnx2x_fw_dump(bp);
	bnx2x_mc_assert(bp);
	BNX2X_ERR("end crash dump -----------------\n");
}

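/*
 * Interrupt enable/disable comes in two flavors: E1x chips use the HC
 * (host coalescing) block while E2 uses the IGU (interrupt generation
 * unit).  bnx2x_int_enable()/bnx2x_int_disable() dispatch on
 * bp->common.int_block.
 */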
static void bnx2x_hc_int_enable(struct bnx2x *bp)
{
	int port = BP_PORT(bp);
	u32 addr = port ? HC_REG_CONFIG_1 : HC_REG_CONFIG_0;
	u32 val = REG_RD(bp, addr);
	int msix = (bp->flags & USING_MSIX_FLAG) ? 1 : 0;
	int msi = (bp->flags & USING_MSI_FLAG) ? 1 : 0;

	if (msix) {
		val &= ~(HC_CONFIG_0_REG_SINGLE_ISR_EN_0 |
			 HC_CONFIG_0_REG_INT_LINE_EN_0);
		val |= (HC_CONFIG_0_REG_MSI_MSIX_INT_EN_0 |
			HC_CONFIG_0_REG_ATTN_BIT_EN_0);
	} else if (msi) {
		val &= ~HC_CONFIG_0_REG_INT_LINE_EN_0;
		val |= (HC_CONFIG_0_REG_SINGLE_ISR_EN_0 |
			HC_CONFIG_0_REG_MSI_MSIX_INT_EN_0 |
			HC_CONFIG_0_REG_ATTN_BIT_EN_0);
	} else {
		val |= (HC_CONFIG_0_REG_SINGLE_ISR_EN_0 |
			HC_CONFIG_0_REG_MSI_MSIX_INT_EN_0 |
			HC_CONFIG_0_REG_INT_LINE_EN_0 |
			HC_CONFIG_0_REG_ATTN_BIT_EN_0);

		DP(NETIF_MSG_INTR, "write %x to HC %d (addr 0x%x)\n",
		   val, port, addr);

		REG_WR(bp, addr, val);

		val &= ~HC_CONFIG_0_REG_MSI_MSIX_INT_EN_0;
	}

	DP(NETIF_MSG_INTR, "write %x to HC %d (addr 0x%x) mode %s\n",
	   val, port, addr, (msix ? "MSI-X" : (msi ? "MSI" : "INTx")));

	REG_WR(bp, addr, val);
	/*
	 * Ensure that HC_CONFIG is written before leading/trailing edge config
	 */
	mmiowb();
	barrier();

	if (!CHIP_IS_E1(bp)) {
		/* init leading/trailing edge */
		if (IS_MF(bp)) {
			val = (0xee0f | (1 << (BP_E1HVN(bp) + 4)));
			if (bp->port.pmf)
				/* enable nig and gpio3 attention */
				val |= 0x1100;
		} else
			val = 0xffff;

		REG_WR(bp, HC_REG_TRAILING_EDGE_0 + port*8, val);
		REG_WR(bp, HC_REG_LEADING_EDGE_0 + port*8, val);
	}

	/* Make sure that interrupts are indeed enabled from here on */
	mmiowb();
}

static void bnx2x_igu_int_enable(struct bnx2x *bp)
{
	u32 val;
	int msix = (bp->flags & USING_MSIX_FLAG) ? 1 : 0;
	int msi = (bp->flags & USING_MSI_FLAG) ? 1 : 0;

	val = REG_RD(bp, IGU_REG_PF_CONFIGURATION);

	if (msix) {
		val &= ~(IGU_PF_CONF_INT_LINE_EN |
			 IGU_PF_CONF_SINGLE_ISR_EN);
		val |= (IGU_PF_CONF_FUNC_EN |
			IGU_PF_CONF_MSI_MSIX_EN |
			IGU_PF_CONF_ATTN_BIT_EN);
	} else if (msi) {
		val &= ~IGU_PF_CONF_INT_LINE_EN;
		val |= (IGU_PF_CONF_FUNC_EN |
			IGU_PF_CONF_MSI_MSIX_EN |
			IGU_PF_CONF_ATTN_BIT_EN |
			IGU_PF_CONF_SINGLE_ISR_EN);
	} else {
		val &= ~IGU_PF_CONF_MSI_MSIX_EN;
		val |= (IGU_PF_CONF_FUNC_EN |
			IGU_PF_CONF_INT_LINE_EN |
			IGU_PF_CONF_ATTN_BIT_EN |
			IGU_PF_CONF_SINGLE_ISR_EN);
	}

	DP(NETIF_MSG_INTR, "write 0x%x to IGU mode %s\n",
	   val, (msix ? "MSI-X" : (msi ? "MSI" : "INTx")));

	REG_WR(bp, IGU_REG_PF_CONFIGURATION, val);

	barrier();

	/* init leading/trailing edge */
	if (IS_MF(bp)) {
		val = (0xee0f | (1 << (BP_E1HVN(bp) + 4)));
		if (bp->port.pmf)
			/* enable nig and gpio3 attention */
			val |= 0x1100;
	} else
		val = 0xffff;

	REG_WR(bp, IGU_REG_TRAILING_EDGE_LATCH, val);
	REG_WR(bp, IGU_REG_LEADING_EDGE_LATCH, val);

	/* Make sure that interrupts are indeed enabled from here on */
	mmiowb();
}

void bnx2x_int_enable(struct bnx2x *bp)
{
	if (bp->common.int_block == INT_BLOCK_HC)
		bnx2x_hc_int_enable(bp);
	else
		bnx2x_igu_int_enable(bp);
}

static void bnx2x_hc_int_disable(struct bnx2x *bp)
{
	int port = BP_PORT(bp);
	u32 addr = port ? HC_REG_CONFIG_1 : HC_REG_CONFIG_0;
	u32 val = REG_RD(bp, addr);

	val &= ~(HC_CONFIG_0_REG_SINGLE_ISR_EN_0 |
		 HC_CONFIG_0_REG_MSI_MSIX_INT_EN_0 |
		 HC_CONFIG_0_REG_INT_LINE_EN_0 |
		 HC_CONFIG_0_REG_ATTN_BIT_EN_0);

	DP(NETIF_MSG_INTR, "write %x to HC %d (addr 0x%x)\n",
	   val, port, addr);

	/* flush all outstanding writes */
	mmiowb();

	REG_WR(bp, addr, val);
	if (REG_RD(bp, addr) != val)
		BNX2X_ERR("BUG! proper val not read from IGU!\n");
}

static void bnx2x_igu_int_disable(struct bnx2x *bp)
{
	u32 val = REG_RD(bp, IGU_REG_PF_CONFIGURATION);

	val &= ~(IGU_PF_CONF_MSI_MSIX_EN |
		 IGU_PF_CONF_INT_LINE_EN |
		 IGU_PF_CONF_ATTN_BIT_EN);

	DP(NETIF_MSG_INTR, "write %x to IGU\n", val);

	/* flush all outstanding writes */
	mmiowb();

	REG_WR(bp, IGU_REG_PF_CONFIGURATION, val);
	if (REG_RD(bp, IGU_REG_PF_CONFIGURATION) != val)
		BNX2X_ERR("BUG! proper val not read from IGU!\n");
}

void bnx2x_int_disable(struct bnx2x *bp)
{
	if (bp->common.int_block == INT_BLOCK_HC)
		bnx2x_hc_int_disable(bp);
	else
		bnx2x_igu_int_disable(bp);
}

void bnx2x_int_disable_sync(struct bnx2x *bp, int disable_hw)
{
	int msix = (bp->flags & USING_MSIX_FLAG) ? 1 : 0;
	int i, offset;

	/* disable interrupt handling */
	atomic_inc(&bp->intr_sem);
	smp_wmb(); /* Ensure that bp->intr_sem update is SMP-safe */

	if (disable_hw)
		/* prevent the HW from sending interrupts */
		bnx2x_int_disable(bp);

	/* make sure all ISRs are done */
	if (msix) {
		synchronize_irq(bp->msix_table[0].vector);
		offset = 1;
#ifdef BCM_CNIC
		offset++;
#endif
		for_each_queue(bp, i)
			synchronize_irq(bp->msix_table[i + offset].vector);
	} else
		synchronize_irq(bp->pdev->irq);

	/* make sure sp_task is not running */
	cancel_delayed_work(&bp->sp_task);
	flush_workqueue(bnx2x_wq);
}

/* fast path */

/*
 * General service functions
 */

/* Return true if the lock was successfully acquired */
static bool bnx2x_trylock_hw_lock(struct bnx2x *bp, u32 resource)
{
	u32 lock_status;
	u32 resource_bit = (1 << resource);
	int func = BP_FUNC(bp);
	u32 hw_lock_control_reg;

	DP(NETIF_MSG_HW, "Trying to take a lock on resource %d\n", resource);

	/* Validating that the resource is within range */
	if (resource > HW_LOCK_MAX_RESOURCE_VALUE) {
		DP(NETIF_MSG_HW,
		   "resource(0x%x) > HW_LOCK_MAX_RESOURCE_VALUE(0x%x)\n",
		   resource, HW_LOCK_MAX_RESOURCE_VALUE);
		return false;
	}

	if (func <= 5)
		hw_lock_control_reg = (MISC_REG_DRIVER_CONTROL_1 + func*8);
	else
		hw_lock_control_reg =
				(MISC_REG_DRIVER_CONTROL_7 + (func - 6)*8);

	/* Try to acquire the lock */
	REG_WR(bp, hw_lock_control_reg + 4, resource_bit);
	lock_status = REG_RD(bp, hw_lock_control_reg);
	if (lock_status & resource_bit)
		return true;

	DP(NETIF_MSG_HW, "Failed to get a lock on resource %d\n", resource);
	return false;
}


#ifdef BCM_CNIC
static void bnx2x_cnic_cfc_comp(struct bnx2x *bp, int cid);
#endif

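/*
 * Slow-path ("ramrod") completions arrive on the Rx completion queue.
 * bnx2x_sp_event() advances the fastpath state machine accordingly and
 * returns the consumed slow-path queue (SPQ) credit.
 */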
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00001333void bnx2x_sp_event(struct bnx2x_fastpath *fp,
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02001334 union eth_rx_cqe *rr_cqe)
1335{
1336 struct bnx2x *bp = fp->bp;
1337 int cid = SW_CID(rr_cqe->ramrod_cqe.conn_and_cmd_data);
1338 int command = CQE_CMD(rr_cqe->ramrod_cqe.conn_and_cmd_data);
1339
Eilon Greenstein34f80b02008-06-23 20:33:01 -07001340 DP(BNX2X_MSG_SP,
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02001341 "fp %d cid %d got ramrod #%d state is %x type is %d\n",
Eilon Greenstein0626b892009-02-12 08:38:14 +00001342 fp->index, cid, command, bp->state,
Eilon Greenstein34f80b02008-06-23 20:33:01 -07001343 rr_cqe->ramrod_cqe.ramrod_type);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02001344
Dmitry Kravkov523224a2010-10-06 03:23:26 +00001345 switch (command | fp->state) {
1346 case (RAMROD_CMD_ID_ETH_CLIENT_SETUP | BNX2X_FP_STATE_OPENING):
1347 DP(NETIF_MSG_IFUP, "got MULTI[%d] setup ramrod\n", cid);
1348 fp->state = BNX2X_FP_STATE_OPEN;
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02001349 break;
1350
Dmitry Kravkov523224a2010-10-06 03:23:26 +00001351 case (RAMROD_CMD_ID_ETH_HALT | BNX2X_FP_STATE_HALTING):
1352 DP(NETIF_MSG_IFDOWN, "got MULTI[%d] halt ramrod\n", cid);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02001353 fp->state = BNX2X_FP_STATE_HALTED;
1354 break;
1355
Dmitry Kravkov523224a2010-10-06 03:23:26 +00001356 case (RAMROD_CMD_ID_ETH_TERMINATE | BNX2X_FP_STATE_TERMINATING):
1357 DP(NETIF_MSG_IFDOWN, "got MULTI[%d] teminate ramrod\n", cid);
1358 fp->state = BNX2X_FP_STATE_TERMINATED;
Eliezer Tamir49d66772008-02-28 11:53:13 -08001359 break;
1360
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02001361 default:
Dmitry Kravkov523224a2010-10-06 03:23:26 +00001362 BNX2X_ERR("unexpected MC reply (%d) "
1363 "fp[%d] state is %x\n",
1364 command, fp->index, fp->state);
Eilon Greenstein34f80b02008-06-23 20:33:01 -07001365 break;
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02001366 }
Dmitry Kravkov523224a2010-10-06 03:23:26 +00001367
Dmitry Kravkov8fe23fb2010-10-06 03:27:41 +00001368 smp_mb__before_atomic_inc();
1369 atomic_inc(&bp->spq_left);
Dmitry Kravkov523224a2010-10-06 03:23:26 +00001370	/* push the change in fp->state towards the memory */
1371 smp_wmb();
1372
1373 return;
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02001374}
1375
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00001376irqreturn_t bnx2x_interrupt(int irq, void *dev_instance)
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02001377{
Eilon Greenstein555f6c72009-02-12 08:36:11 +00001378 struct bnx2x *bp = netdev_priv(dev_instance);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02001379 u16 status = bnx2x_ack_int(bp);
Eilon Greenstein34f80b02008-06-23 20:33:01 -07001380 u16 mask;
Eilon Greensteinca003922009-08-12 22:53:28 -07001381 int i;
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02001382
Eilon Greenstein34f80b02008-06-23 20:33:01 -07001383 /* Return here if interrupt is shared and it's not for us */
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02001384 if (unlikely(status == 0)) {
1385 DP(NETIF_MSG_INTR, "not our interrupt!\n");
1386 return IRQ_NONE;
1387 }
Eilon Greensteinf5372252009-02-12 08:38:30 +00001388 DP(NETIF_MSG_INTR, "got an interrupt status 0x%x\n", status);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02001389
Eilon Greenstein34f80b02008-06-23 20:33:01 -07001390 /* Return here if interrupt is disabled */
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02001391 if (unlikely(atomic_read(&bp->intr_sem) != 0)) {
1392 DP(NETIF_MSG_INTR, "called but intr_sem not 0, returning\n");
1393 return IRQ_HANDLED;
1394 }
1395
Eilon Greenstein3196a882008-08-13 15:58:49 -07001396#ifdef BNX2X_STOP_ON_ERROR
1397 if (unlikely(bp->panic))
1398 return IRQ_HANDLED;
1399#endif
1400
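	/* Status word layout, as read from the code below (not a spec
	 * quote): bit 0 is the slowpath/default SB, with BCM_CNIC bit 1
	 * belongs to CNIC, and fastpath queue i owns bit
	 * (1 + CNIC_CONTEXT_USE + i) - hence the mask computed below */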
Dmitry Kravkovf2e08992010-10-06 03:28:26 +00001401 for_each_queue(bp, i) {
Eilon Greensteinca003922009-08-12 22:53:28 -07001402 struct bnx2x_fastpath *fp = &bp->fp[i];
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02001403
Dmitry Kravkov523224a2010-10-06 03:23:26 +00001404 mask = 0x2 << (fp->index + CNIC_CONTEXT_USE);
Eilon Greensteinca003922009-08-12 22:53:28 -07001405 if (status & mask) {
Vladislav Zolotarov54b9dda2009-11-16 06:05:58 +00001406 /* Handle Rx and Tx according to SB id */
1407 prefetch(fp->rx_cons_sb);
Vladislav Zolotarov54b9dda2009-11-16 06:05:58 +00001408 prefetch(fp->tx_cons_sb);
Dmitry Kravkov523224a2010-10-06 03:23:26 +00001409 prefetch(&fp->sb_running_index[SM_RX_ID]);
Vladislav Zolotarov54b9dda2009-11-16 06:05:58 +00001410 napi_schedule(&bnx2x_fp(bp, fp->index, napi));
Eilon Greensteinca003922009-08-12 22:53:28 -07001411 status &= ~mask;
1412 }
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02001413 }
1414
Michael Chan993ac7b2009-10-10 13:46:56 +00001415#ifdef BCM_CNIC
Dmitry Kravkov523224a2010-10-06 03:23:26 +00001416 mask = 0x2;
Michael Chan993ac7b2009-10-10 13:46:56 +00001417 if (status & (mask | 0x1)) {
1418 struct cnic_ops *c_ops = NULL;
1419
1420 rcu_read_lock();
1421 c_ops = rcu_dereference(bp->cnic_ops);
1422 if (c_ops)
1423 c_ops->cnic_handler(bp->cnic_data, NULL);
1424 rcu_read_unlock();
1425
1426 status &= ~mask;
1427 }
1428#endif
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02001429
Eilon Greenstein34f80b02008-06-23 20:33:01 -07001430 if (unlikely(status & 0x1)) {
Eilon Greenstein1cf167f2009-01-14 21:22:18 -08001431 queue_delayed_work(bnx2x_wq, &bp->sp_task, 0);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02001432
1433 status &= ~0x1;
1434 if (!status)
1435 return IRQ_HANDLED;
1436 }
1437
Vladislav Zolotarovcdaa7cb2010-04-19 01:13:57 +00001438 if (unlikely(status))
1439 DP(NETIF_MSG_INTR, "got an unknown interrupt! (status 0x%x)\n",
Eilon Greenstein34f80b02008-06-23 20:33:01 -07001440 status);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02001441
1442 return IRQ_HANDLED;
1443}
1444
1445/* end of fast path */
1446
Yaniv Rosnerc18487e2008-06-23 20:27:52 -07001447
1448/* Link */
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02001449
1450/*
1451 * General service functions
1452 */
1453
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00001454int bnx2x_acquire_hw_lock(struct bnx2x *bp, u32 resource)
Eliezer Tamirf1410642008-02-28 11:51:50 -08001455{
Eliezer Tamirf1410642008-02-28 11:51:50 -08001456 u32 lock_status;
1457 u32 resource_bit = (1 << resource);
Yitchak Gertner4a37fb62008-08-13 15:50:23 -07001458 int func = BP_FUNC(bp);
1459 u32 hw_lock_control_reg;
Yaniv Rosnerc18487e2008-06-23 20:27:52 -07001460 int cnt;
Eliezer Tamirf1410642008-02-28 11:51:50 -08001461
1462 /* Validating that the resource is within range */
1463 if (resource > HW_LOCK_MAX_RESOURCE_VALUE) {
1464 DP(NETIF_MSG_HW,
1465 "resource(0x%x) > HW_LOCK_MAX_RESOURCE_VALUE(0x%x)\n",
1466 resource, HW_LOCK_MAX_RESOURCE_VALUE);
1467 return -EINVAL;
1468 }
1469
Yitchak Gertner4a37fb62008-08-13 15:50:23 -07001470 if (func <= 5) {
1471 hw_lock_control_reg = (MISC_REG_DRIVER_CONTROL_1 + func*8);
1472 } else {
1473 hw_lock_control_reg =
1474 (MISC_REG_DRIVER_CONTROL_7 + (func - 6)*8);
1475 }
1476
Eliezer Tamirf1410642008-02-28 11:51:50 -08001477 /* Validating that the resource is not already taken */
Yitchak Gertner4a37fb62008-08-13 15:50:23 -07001478 lock_status = REG_RD(bp, hw_lock_control_reg);
Eliezer Tamirf1410642008-02-28 11:51:50 -08001479 if (lock_status & resource_bit) {
1480 DP(NETIF_MSG_HW, "lock_status 0x%x resource_bit 0x%x\n",
1481 lock_status, resource_bit);
1482 return -EEXIST;
1483 }
1484
Eilon Greenstein46230476b2008-08-25 15:23:30 -07001485	/* Try for 5 seconds, every 5ms */
1486 for (cnt = 0; cnt < 1000; cnt++) {
Eliezer Tamirf1410642008-02-28 11:51:50 -08001487 /* Try to acquire the lock */
Yitchak Gertner4a37fb62008-08-13 15:50:23 -07001488 REG_WR(bp, hw_lock_control_reg + 4, resource_bit);
1489 lock_status = REG_RD(bp, hw_lock_control_reg);
Eliezer Tamirf1410642008-02-28 11:51:50 -08001490 if (lock_status & resource_bit)
1491 return 0;
1492
1493 msleep(5);
1494 }
1495 DP(NETIF_MSG_HW, "Timeout\n");
1496 return -EAGAIN;
1497}
1498
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00001499int bnx2x_release_hw_lock(struct bnx2x *bp, u32 resource)
Eliezer Tamirf1410642008-02-28 11:51:50 -08001500{
1501 u32 lock_status;
1502 u32 resource_bit = (1 << resource);
Yitchak Gertner4a37fb62008-08-13 15:50:23 -07001503 int func = BP_FUNC(bp);
1504 u32 hw_lock_control_reg;
Eliezer Tamirf1410642008-02-28 11:51:50 -08001505
Vladislav Zolotarov72fd0712010-04-19 01:13:12 +00001506 DP(NETIF_MSG_HW, "Releasing a lock on resource %d\n", resource);
1507
Eliezer Tamirf1410642008-02-28 11:51:50 -08001508 /* Validating that the resource is within range */
1509 if (resource > HW_LOCK_MAX_RESOURCE_VALUE) {
1510 DP(NETIF_MSG_HW,
1511 "resource(0x%x) > HW_LOCK_MAX_RESOURCE_VALUE(0x%x)\n",
1512 resource, HW_LOCK_MAX_RESOURCE_VALUE);
1513 return -EINVAL;
1514 }
1515
Yitchak Gertner4a37fb62008-08-13 15:50:23 -07001516 if (func <= 5) {
1517 hw_lock_control_reg = (MISC_REG_DRIVER_CONTROL_1 + func*8);
1518 } else {
1519 hw_lock_control_reg =
1520 (MISC_REG_DRIVER_CONTROL_7 + (func - 6)*8);
1521 }
1522
Eliezer Tamirf1410642008-02-28 11:51:50 -08001523 /* Validating that the resource is currently taken */
Yitchak Gertner4a37fb62008-08-13 15:50:23 -07001524 lock_status = REG_RD(bp, hw_lock_control_reg);
Eliezer Tamirf1410642008-02-28 11:51:50 -08001525 if (!(lock_status & resource_bit)) {
1526 DP(NETIF_MSG_HW, "lock_status 0x%x resource_bit 0x%x\n",
1527 lock_status, resource_bit);
1528 return -EFAULT;
1529 }
1530
Yitchak Gertner4a37fb62008-08-13 15:50:23 -07001531 REG_WR(bp, hw_lock_control_reg, resource_bit);
Eliezer Tamirf1410642008-02-28 11:51:50 -08001532 return 0;
1533}
1534
Yaniv Rosnerc18487e2008-06-23 20:27:52 -07001535
Eilon Greenstein4acac6a2009-02-12 08:36:52 +00001536int bnx2x_get_gpio(struct bnx2x *bp, int gpio_num, u8 port)
1537{
1538 /* The GPIO should be swapped if swap register is set and active */
1539 int gpio_port = (REG_RD(bp, NIG_REG_PORT_SWAP) &&
1540 REG_RD(bp, NIG_REG_STRAP_OVERRIDE)) ^ port;
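	/* i.e. when both NIG_REG_PORT_SWAP and NIG_REG_STRAP_OVERRIDE are
	 * set, the XOR flips the port index: port 0 addresses the GPIO
	 * block of port 1 and vice versa */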
1541 int gpio_shift = gpio_num +
1542 (gpio_port ? MISC_REGISTERS_GPIO_PORT_SHIFT : 0);
1543 u32 gpio_mask = (1 << gpio_shift);
1544 u32 gpio_reg;
1545 int value;
1546
1547 if (gpio_num > MISC_REGISTERS_GPIO_3) {
1548 BNX2X_ERR("Invalid GPIO %d\n", gpio_num);
1549 return -EINVAL;
1550 }
1551
1552 /* read GPIO value */
1553 gpio_reg = REG_RD(bp, MISC_REG_GPIO);
1554
1555 /* get the requested pin value */
1556 if ((gpio_reg & gpio_mask) == gpio_mask)
1557 value = 1;
1558 else
1559 value = 0;
1560
1561 DP(NETIF_MSG_LINK, "pin %d value 0x%x\n", gpio_num, value);
1562
1563 return value;
1564}
1565
Eilon Greenstein17de50b2008-08-13 15:56:59 -07001566int bnx2x_set_gpio(struct bnx2x *bp, int gpio_num, u32 mode, u8 port)
Eliezer Tamirf1410642008-02-28 11:51:50 -08001567{
1568 /* The GPIO should be swapped if swap register is set and active */
1569 int gpio_port = (REG_RD(bp, NIG_REG_PORT_SWAP) &&
Eilon Greenstein17de50b2008-08-13 15:56:59 -07001570 REG_RD(bp, NIG_REG_STRAP_OVERRIDE)) ^ port;
Eliezer Tamirf1410642008-02-28 11:51:50 -08001571 int gpio_shift = gpio_num +
1572 (gpio_port ? MISC_REGISTERS_GPIO_PORT_SHIFT : 0);
1573 u32 gpio_mask = (1 << gpio_shift);
1574 u32 gpio_reg;
1575
1576 if (gpio_num > MISC_REGISTERS_GPIO_3) {
1577 BNX2X_ERR("Invalid GPIO %d\n", gpio_num);
1578 return -EINVAL;
1579 }
1580
Yitchak Gertner4a37fb62008-08-13 15:50:23 -07001581 bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_GPIO);
Eliezer Tamirf1410642008-02-28 11:51:50 -08001582 /* read GPIO and mask except the float bits */
1583 gpio_reg = (REG_RD(bp, MISC_REG_GPIO) & MISC_REGISTERS_GPIO_FLOAT);
1584
1585 switch (mode) {
1586 case MISC_REGISTERS_GPIO_OUTPUT_LOW:
1587 DP(NETIF_MSG_LINK, "Set GPIO %d (shift %d) -> output low\n",
1588 gpio_num, gpio_shift);
1589 /* clear FLOAT and set CLR */
1590 gpio_reg &= ~(gpio_mask << MISC_REGISTERS_GPIO_FLOAT_POS);
1591 gpio_reg |= (gpio_mask << MISC_REGISTERS_GPIO_CLR_POS);
1592 break;
1593
1594 case MISC_REGISTERS_GPIO_OUTPUT_HIGH:
1595 DP(NETIF_MSG_LINK, "Set GPIO %d (shift %d) -> output high\n",
1596 gpio_num, gpio_shift);
1597 /* clear FLOAT and set SET */
1598 gpio_reg &= ~(gpio_mask << MISC_REGISTERS_GPIO_FLOAT_POS);
1599 gpio_reg |= (gpio_mask << MISC_REGISTERS_GPIO_SET_POS);
1600 break;
1601
Eilon Greenstein17de50b2008-08-13 15:56:59 -07001602 case MISC_REGISTERS_GPIO_INPUT_HI_Z:
Eliezer Tamirf1410642008-02-28 11:51:50 -08001603 DP(NETIF_MSG_LINK, "Set GPIO %d (shift %d) -> input\n",
1604 gpio_num, gpio_shift);
1605 /* set FLOAT */
1606 gpio_reg |= (gpio_mask << MISC_REGISTERS_GPIO_FLOAT_POS);
1607 break;
1608
1609 default:
1610 break;
1611 }
1612
1613 REG_WR(bp, MISC_REG_GPIO, gpio_reg);
Yitchak Gertner4a37fb62008-08-13 15:50:23 -07001614 bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_GPIO);
Eliezer Tamirf1410642008-02-28 11:51:50 -08001615
1616 return 0;
1617}
1618
Eilon Greenstein4acac6a2009-02-12 08:36:52 +00001619int bnx2x_set_gpio_int(struct bnx2x *bp, int gpio_num, u32 mode, u8 port)
1620{
1621 /* The GPIO should be swapped if swap register is set and active */
1622 int gpio_port = (REG_RD(bp, NIG_REG_PORT_SWAP) &&
1623 REG_RD(bp, NIG_REG_STRAP_OVERRIDE)) ^ port;
1624 int gpio_shift = gpio_num +
1625 (gpio_port ? MISC_REGISTERS_GPIO_PORT_SHIFT : 0);
1626 u32 gpio_mask = (1 << gpio_shift);
1627 u32 gpio_reg;
1628
1629 if (gpio_num > MISC_REGISTERS_GPIO_3) {
1630 BNX2X_ERR("Invalid GPIO %d\n", gpio_num);
1631 return -EINVAL;
1632 }
1633
1634 bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_GPIO);
1635 /* read GPIO int */
1636 gpio_reg = REG_RD(bp, MISC_REG_GPIO_INT);
1637
1638 switch (mode) {
1639 case MISC_REGISTERS_GPIO_INT_OUTPUT_CLR:
1640 DP(NETIF_MSG_LINK, "Clear GPIO INT %d (shift %d) -> "
1641 "output low\n", gpio_num, gpio_shift);
1642 /* clear SET and set CLR */
1643 gpio_reg &= ~(gpio_mask << MISC_REGISTERS_GPIO_INT_SET_POS);
1644 gpio_reg |= (gpio_mask << MISC_REGISTERS_GPIO_INT_CLR_POS);
1645 break;
1646
1647 case MISC_REGISTERS_GPIO_INT_OUTPUT_SET:
1648 DP(NETIF_MSG_LINK, "Set GPIO INT %d (shift %d) -> "
1649 "output high\n", gpio_num, gpio_shift);
1650 /* clear CLR and set SET */
1651 gpio_reg &= ~(gpio_mask << MISC_REGISTERS_GPIO_INT_CLR_POS);
1652 gpio_reg |= (gpio_mask << MISC_REGISTERS_GPIO_INT_SET_POS);
1653 break;
1654
1655 default:
1656 break;
1657 }
1658
1659 REG_WR(bp, MISC_REG_GPIO_INT, gpio_reg);
1660 bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_GPIO);
1661
1662 return 0;
1663}
1664
Eliezer Tamirf1410642008-02-28 11:51:50 -08001665static int bnx2x_set_spio(struct bnx2x *bp, int spio_num, u32 mode)
1666{
1667 u32 spio_mask = (1 << spio_num);
1668 u32 spio_reg;
1669
1670 if ((spio_num < MISC_REGISTERS_SPIO_4) ||
1671 (spio_num > MISC_REGISTERS_SPIO_7)) {
1672 BNX2X_ERR("Invalid SPIO %d\n", spio_num);
1673 return -EINVAL;
1674 }
1675
Yitchak Gertner4a37fb62008-08-13 15:50:23 -07001676 bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_SPIO);
Eliezer Tamirf1410642008-02-28 11:51:50 -08001677 /* read SPIO and mask except the float bits */
1678 spio_reg = (REG_RD(bp, MISC_REG_SPIO) & MISC_REGISTERS_SPIO_FLOAT);
1679
1680 switch (mode) {
Eilon Greenstein6378c022008-08-13 15:59:25 -07001681 case MISC_REGISTERS_SPIO_OUTPUT_LOW:
Eliezer Tamirf1410642008-02-28 11:51:50 -08001682 DP(NETIF_MSG_LINK, "Set SPIO %d -> output low\n", spio_num);
1683 /* clear FLOAT and set CLR */
1684 spio_reg &= ~(spio_mask << MISC_REGISTERS_SPIO_FLOAT_POS);
1685 spio_reg |= (spio_mask << MISC_REGISTERS_SPIO_CLR_POS);
1686 break;
1687
Eilon Greenstein6378c022008-08-13 15:59:25 -07001688 case MISC_REGISTERS_SPIO_OUTPUT_HIGH:
Eliezer Tamirf1410642008-02-28 11:51:50 -08001689 DP(NETIF_MSG_LINK, "Set SPIO %d -> output high\n", spio_num);
1690 /* clear FLOAT and set SET */
1691 spio_reg &= ~(spio_mask << MISC_REGISTERS_SPIO_FLOAT_POS);
1692 spio_reg |= (spio_mask << MISC_REGISTERS_SPIO_SET_POS);
1693 break;
1694
1695 case MISC_REGISTERS_SPIO_INPUT_HI_Z:
1696 DP(NETIF_MSG_LINK, "Set SPIO %d -> input\n", spio_num);
1697 /* set FLOAT */
1698 spio_reg |= (spio_mask << MISC_REGISTERS_SPIO_FLOAT_POS);
1699 break;
1700
1701 default:
1702 break;
1703 }
1704
1705 REG_WR(bp, MISC_REG_SPIO, spio_reg);
Yitchak Gertner4a37fb62008-08-13 15:50:23 -07001706 bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_SPIO);
Eliezer Tamirf1410642008-02-28 11:51:50 -08001707
1708 return 0;
1709}
1710
Yaniv Rosnera22f0782010-09-07 11:41:20 +00001711int bnx2x_get_link_cfg_idx(struct bnx2x *bp)
1712{
1713 u32 sel_phy_idx = 0;
1714 if (bp->link_vars.link_up) {
1715 sel_phy_idx = EXT_PHY1;
1716 /* In case link is SERDES, check if the EXT_PHY2 is the one */
1717 if ((bp->link_vars.link_status & LINK_STATUS_SERDES_LINK) &&
1718 (bp->link_params.phy[EXT_PHY2].supported & SUPPORTED_FIBRE))
1719 sel_phy_idx = EXT_PHY2;
1720 } else {
1721
1722 switch (bnx2x_phy_selection(&bp->link_params)) {
1723 case PORT_HW_CFG_PHY_SELECTION_HARDWARE_DEFAULT:
1724 case PORT_HW_CFG_PHY_SELECTION_FIRST_PHY:
1725 case PORT_HW_CFG_PHY_SELECTION_FIRST_PHY_PRIORITY:
1726 sel_phy_idx = EXT_PHY1;
1727 break;
1728 case PORT_HW_CFG_PHY_SELECTION_SECOND_PHY:
1729 case PORT_HW_CFG_PHY_SELECTION_SECOND_PHY_PRIORITY:
1730 sel_phy_idx = EXT_PHY2;
1731 break;
1732 }
1733 }
1734 /*
 1735	 * The selected active PHY is always the one after swapping (in case
 1736	 * PHY swapping is enabled). So when swapping is enabled, we need to
 1737	 * reverse the configuration
1738 */
1739
1740 if (bp->link_params.multi_phy_config &
1741 PORT_HW_CFG_PHY_SWAPPED_ENABLED) {
1742 if (sel_phy_idx == EXT_PHY1)
1743 sel_phy_idx = EXT_PHY2;
1744 else if (sel_phy_idx == EXT_PHY2)
1745 sel_phy_idx = EXT_PHY1;
1746 }
1747 return LINK_CONFIG_IDX(sel_phy_idx);
1748}
1749
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00001750void bnx2x_calc_fc_adv(struct bnx2x *bp)
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02001751{
Yaniv Rosnera22f0782010-09-07 11:41:20 +00001752 u8 cfg_idx = bnx2x_get_link_cfg_idx(bp);
Eilon Greensteinad33ea32009-01-14 21:24:57 -08001753 switch (bp->link_vars.ieee_fc &
1754 MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_MASK) {
Yaniv Rosnerc18487e2008-06-23 20:27:52 -07001755 case MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_NONE:
Yaniv Rosnera22f0782010-09-07 11:41:20 +00001756 bp->port.advertising[cfg_idx] &= ~(ADVERTISED_Asym_Pause |
Yaniv Rosnerc18487e2008-06-23 20:27:52 -07001757 ADVERTISED_Pause);
Eliezer Tamirf1410642008-02-28 11:51:50 -08001758 break;
Eilon Greenstein356e2382009-02-12 08:38:32 +00001759
Yaniv Rosnerc18487e2008-06-23 20:27:52 -07001760 case MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_BOTH:
Yaniv Rosnera22f0782010-09-07 11:41:20 +00001761 bp->port.advertising[cfg_idx] |= (ADVERTISED_Asym_Pause |
Yaniv Rosnerc18487e2008-06-23 20:27:52 -07001762 ADVERTISED_Pause);
Eliezer Tamirf1410642008-02-28 11:51:50 -08001763 break;
Eilon Greenstein356e2382009-02-12 08:38:32 +00001764
Yaniv Rosnerc18487e2008-06-23 20:27:52 -07001765 case MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_ASYMMETRIC:
Yaniv Rosnera22f0782010-09-07 11:41:20 +00001766 bp->port.advertising[cfg_idx] |= ADVERTISED_Asym_Pause;
Eliezer Tamirf1410642008-02-28 11:51:50 -08001767 break;
Eilon Greenstein356e2382009-02-12 08:38:32 +00001768
Eliezer Tamirf1410642008-02-28 11:51:50 -08001769 default:
Yaniv Rosnera22f0782010-09-07 11:41:20 +00001770 bp->port.advertising[cfg_idx] &= ~(ADVERTISED_Asym_Pause |
Yaniv Rosnerc18487e2008-06-23 20:27:52 -07001771 ADVERTISED_Pause);
Eliezer Tamirf1410642008-02-28 11:51:50 -08001772 break;
1773 }
1774}
1775
Eilon Greenstein2691d512009-08-12 08:22:08 +00001776
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00001777u8 bnx2x_initial_phy_init(struct bnx2x *bp, int load_mode)
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02001778{
Eilon Greenstein19680c42008-08-13 15:47:33 -07001779 if (!BP_NOMCP(bp)) {
1780 u8 rc;
Yaniv Rosnera22f0782010-09-07 11:41:20 +00001781 int cfx_idx = bnx2x_get_link_cfg_idx(bp);
1782 u16 req_line_speed = bp->link_params.req_line_speed[cfx_idx];
Eilon Greenstein19680c42008-08-13 15:47:33 -07001783 /* Initialize link parameters structure variables */
Yaniv Rosner8c99e7b2008-08-13 15:56:17 -07001784 /* It is recommended to turn off RX FC for jumbo frames
1785 for better performance */
Dmitry Kravkovf2e08992010-10-06 03:28:26 +00001786 if ((CHIP_IS_E1x(bp)) && (bp->dev->mtu > 5000))
David S. Millerc0700f92008-12-16 23:53:20 -08001787 bp->link_params.req_fc_auto_adv = BNX2X_FLOW_CTRL_TX;
Yaniv Rosner8c99e7b2008-08-13 15:56:17 -07001788 else
David S. Millerc0700f92008-12-16 23:53:20 -08001789 bp->link_params.req_fc_auto_adv = BNX2X_FLOW_CTRL_BOTH;
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02001790
Yitchak Gertner4a37fb62008-08-13 15:50:23 -07001791 bnx2x_acquire_phy_lock(bp);
Eilon Greensteinb5bf9062009-02-12 08:38:08 +00001792
Yaniv Rosnera22f0782010-09-07 11:41:20 +00001793 if (load_mode == LOAD_DIAG) {
Yaniv Rosnerde6eae12010-09-07 11:41:13 +00001794 bp->link_params.loopback_mode = LOOPBACK_XGXS;
Yaniv Rosnera22f0782010-09-07 11:41:20 +00001795 bp->link_params.req_line_speed[cfx_idx] = SPEED_10000;
1796 }
Eilon Greensteinb5bf9062009-02-12 08:38:08 +00001797
Eilon Greenstein19680c42008-08-13 15:47:33 -07001798 rc = bnx2x_phy_init(&bp->link_params, &bp->link_vars);
Eilon Greensteinb5bf9062009-02-12 08:38:08 +00001799
Yitchak Gertner4a37fb62008-08-13 15:50:23 -07001800 bnx2x_release_phy_lock(bp);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02001801
Eilon Greenstein3c96c682009-01-14 21:25:31 -08001802 bnx2x_calc_fc_adv(bp);
1803
Eilon Greensteinb5bf9062009-02-12 08:38:08 +00001804 if (CHIP_REV_IS_SLOW(bp) && bp->link_vars.link_up) {
1805 bnx2x_stats_handle(bp, STATS_EVENT_LINK_UP);
Eilon Greenstein19680c42008-08-13 15:47:33 -07001806 bnx2x_link_report(bp);
Eilon Greensteinb5bf9062009-02-12 08:38:08 +00001807 }
Yaniv Rosnera22f0782010-09-07 11:41:20 +00001808 bp->link_params.req_line_speed[cfx_idx] = req_line_speed;
Eilon Greenstein19680c42008-08-13 15:47:33 -07001809 return rc;
1810 }
Eilon Greensteinf5372252009-02-12 08:38:30 +00001811 BNX2X_ERR("Bootcode is missing - can not initialize link\n");
Eilon Greenstein19680c42008-08-13 15:47:33 -07001812 return -EINVAL;
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02001813}
1814
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00001815void bnx2x_link_set(struct bnx2x *bp)
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02001816{
Eilon Greenstein19680c42008-08-13 15:47:33 -07001817 if (!BP_NOMCP(bp)) {
Yitchak Gertner4a37fb62008-08-13 15:50:23 -07001818 bnx2x_acquire_phy_lock(bp);
Yaniv Rosner54c2fb72010-09-01 09:51:23 +00001819 bnx2x_link_reset(&bp->link_params, &bp->link_vars, 1);
Eilon Greenstein19680c42008-08-13 15:47:33 -07001820 bnx2x_phy_init(&bp->link_params, &bp->link_vars);
Yitchak Gertner4a37fb62008-08-13 15:50:23 -07001821 bnx2x_release_phy_lock(bp);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02001822
Eilon Greenstein19680c42008-08-13 15:47:33 -07001823 bnx2x_calc_fc_adv(bp);
1824 } else
Eilon Greensteinf5372252009-02-12 08:38:30 +00001825 BNX2X_ERR("Bootcode is missing - can not set link\n");
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02001826}
1827
Yaniv Rosnerc18487e2008-06-23 20:27:52 -07001828static void bnx2x__link_reset(struct bnx2x *bp)
1829{
Eilon Greenstein19680c42008-08-13 15:47:33 -07001830 if (!BP_NOMCP(bp)) {
Yitchak Gertner4a37fb62008-08-13 15:50:23 -07001831 bnx2x_acquire_phy_lock(bp);
Eilon Greenstein589abe32009-02-12 08:36:55 +00001832 bnx2x_link_reset(&bp->link_params, &bp->link_vars, 1);
Yitchak Gertner4a37fb62008-08-13 15:50:23 -07001833 bnx2x_release_phy_lock(bp);
Eilon Greenstein19680c42008-08-13 15:47:33 -07001834 } else
Eilon Greensteinf5372252009-02-12 08:38:30 +00001835 BNX2X_ERR("Bootcode is missing - can not reset link\n");
Yaniv Rosnerc18487e2008-06-23 20:27:52 -07001836}
1837
Yaniv Rosnera22f0782010-09-07 11:41:20 +00001838u8 bnx2x_link_test(struct bnx2x *bp, u8 is_serdes)
Yaniv Rosnerc18487e2008-06-23 20:27:52 -07001839{
Vladislav Zolotarov2145a922010-04-19 01:13:49 +00001840 u8 rc = 0;
Yaniv Rosnerc18487e2008-06-23 20:27:52 -07001841
Vladislav Zolotarov2145a922010-04-19 01:13:49 +00001842 if (!BP_NOMCP(bp)) {
1843 bnx2x_acquire_phy_lock(bp);
Yaniv Rosnera22f0782010-09-07 11:41:20 +00001844 rc = bnx2x_test_link(&bp->link_params, &bp->link_vars,
1845 is_serdes);
Vladislav Zolotarov2145a922010-04-19 01:13:49 +00001846 bnx2x_release_phy_lock(bp);
1847 } else
1848 BNX2X_ERR("Bootcode is missing - can not test link\n");
Yaniv Rosnerc18487e2008-06-23 20:27:52 -07001849
1850 return rc;
1851}
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02001852
Eilon Greenstein8a1c38d2009-02-12 08:36:40 +00001853static void bnx2x_init_port_minmax(struct bnx2x *bp)
Eilon Greenstein34f80b02008-06-23 20:33:01 -07001854{
Eilon Greenstein8a1c38d2009-02-12 08:36:40 +00001855 u32 r_param = bp->link_vars.line_speed / 8;
1856 u32 fair_periodic_timeout_usec;
1857 u32 t_fair;
Eilon Greenstein34f80b02008-06-23 20:33:01 -07001858
Eilon Greenstein8a1c38d2009-02-12 08:36:40 +00001859 memset(&(bp->cmng.rs_vars), 0,
1860 sizeof(struct rate_shaping_vars_per_port));
1861 memset(&(bp->cmng.fair_vars), 0, sizeof(struct fairness_vars_per_port));
Eilon Greenstein34f80b02008-06-23 20:33:01 -07001862
Eilon Greenstein8a1c38d2009-02-12 08:36:40 +00001863 /* 100 usec in SDM ticks = 25 since each tick is 4 usec */
1864 bp->cmng.rs_vars.rs_periodic_timeout = RS_PERIODIC_TIMEOUT_USEC / 4;
Eilon Greenstein34f80b02008-06-23 20:33:01 -07001865
Eilon Greenstein8a1c38d2009-02-12 08:36:40 +00001866	/* this is the threshold below which no timer arming will occur;
 1867	   the 1.25 coefficient makes the threshold a little bigger
 1868	   than the real time, to compensate for timer inaccuracy */
1869 bp->cmng.rs_vars.rs_threshold =
Eilon Greenstein34f80b02008-06-23 20:33:01 -07001870 (RS_PERIODIC_TIMEOUT_USEC * r_param * 5) / 4;
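	/* Illustrative numbers (assuming RS_PERIODIC_TIMEOUT_USEC == 100,
	 * per the comment above): at 10 Gbps r_param = 10000/8 = 1250
	 * bytes/usec, so rs_threshold = 100 * 1250 * 5/4 = 156250 bytes */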
1871
Eilon Greenstein8a1c38d2009-02-12 08:36:40 +00001872 /* resolution of fairness timer */
1873 fair_periodic_timeout_usec = QM_ARB_BYTES / r_param;
1874 /* for 10G it is 1000usec. for 1G it is 10000usec. */
1875 t_fair = T_FAIR_COEF / bp->link_vars.line_speed;
Eilon Greenstein34f80b02008-06-23 20:33:01 -07001876
Eilon Greenstein8a1c38d2009-02-12 08:36:40 +00001877 /* this is the threshold below which we won't arm the timer anymore */
1878 bp->cmng.fair_vars.fair_threshold = QM_ARB_BYTES;
Eilon Greenstein34f80b02008-06-23 20:33:01 -07001879
Eilon Greenstein8a1c38d2009-02-12 08:36:40 +00001880 /* we multiply by 1e3/8 to get bytes/msec.
 1881	   We don't want the credit to exceed
 1882	   t_fair*FAIR_MEM (the algorithm resolution) */
1883 bp->cmng.fair_vars.upper_bound = r_param * t_fair * FAIR_MEM;
1884 /* since each tick is 4 usec */
1885 bp->cmng.fair_vars.fairness_timeout = fair_periodic_timeout_usec / 4;
Eilon Greenstein34f80b02008-06-23 20:33:01 -07001886}
1887
Eilon Greenstein2691d512009-08-12 08:22:08 +00001888/* Calculates the sum of vn_min_rates.
1889 It's needed for further normalizing of the min_rates.
1890 Returns:
1891 sum of vn_min_rates.
1892 or
1893 0 - if all the min_rates are 0.
 1894    In the latter case the fairness algorithm should be deactivated.
1895 If not all min_rates are zero then those that are zeroes will be set to 1.
1896 */
1897static void bnx2x_calc_vn_weight_sum(struct bnx2x *bp)
1898{
1899 int all_zero = 1;
Eilon Greenstein2691d512009-08-12 08:22:08 +00001900 int vn;
1901
1902 bp->vn_weight_sum = 0;
1903 for (vn = VN_0; vn < E1HVN_MAX; vn++) {
Dmitry Kravkovf2e08992010-10-06 03:28:26 +00001904 u32 vn_cfg = bp->mf_config[vn];
Eilon Greenstein2691d512009-08-12 08:22:08 +00001905 u32 vn_min_rate = ((vn_cfg & FUNC_MF_CFG_MIN_BW_MASK) >>
1906 FUNC_MF_CFG_MIN_BW_SHIFT) * 100;
1907
1908 /* Skip hidden vns */
1909 if (vn_cfg & FUNC_MF_CFG_FUNC_HIDE)
1910 continue;
1911
1912 /* If min rate is zero - set it to 1 */
1913 if (!vn_min_rate)
1914 vn_min_rate = DEF_MIN_RATE;
1915 else
1916 all_zero = 0;
1917
1918 bp->vn_weight_sum += vn_min_rate;
1919 }
1920
1921 /* ... only if all min rates are zeros - disable fairness */
Eilon Greensteinb015e3d2009-10-15 00:17:20 -07001922 if (all_zero) {
1923 bp->cmng.flags.cmng_enables &=
1924 ~CMNG_FLAGS_PER_PORT_FAIRNESS_VN;
 1925		DP(NETIF_MSG_IFUP, "All MIN values are zeroes,"
 1926		   " fairness will be disabled\n");
1927 } else
1928 bp->cmng.flags.cmng_enables |=
1929 CMNG_FLAGS_PER_PORT_FAIRNESS_VN;
Eilon Greenstein2691d512009-08-12 08:22:08 +00001930}
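/* Illustrative example: two visible vns with MIN_BW config fields of 25 and
 * 75 yield vn_min_rates of 2500 and 7500, so vn_weight_sum = 10000 - the
 * scale the fairness comments below rely on.
 */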
1931
Dmitry Kravkovf2e08992010-10-06 03:28:26 +00001932static void bnx2x_init_vn_minmax(struct bnx2x *bp, int vn)
Eilon Greenstein34f80b02008-06-23 20:33:01 -07001933{
1934 struct rate_shaping_vars_per_vn m_rs_vn;
1935 struct fairness_vars_per_vn m_fair_vn;
Dmitry Kravkovf2e08992010-10-06 03:28:26 +00001936 u32 vn_cfg = bp->mf_config[vn];
1937 int func = 2*vn + BP_PORT(bp);
Eilon Greenstein34f80b02008-06-23 20:33:01 -07001938 u16 vn_min_rate, vn_max_rate;
1939 int i;
1940
1941 /* If function is hidden - set min and max to zeroes */
1942 if (vn_cfg & FUNC_MF_CFG_FUNC_HIDE) {
1943 vn_min_rate = 0;
1944 vn_max_rate = 0;
1945
1946 } else {
1947 vn_min_rate = ((vn_cfg & FUNC_MF_CFG_MIN_BW_MASK) >>
1948 FUNC_MF_CFG_MIN_BW_SHIFT) * 100;
Eilon Greensteinb015e3d2009-10-15 00:17:20 -07001949 /* If min rate is zero - set it to 1 */
Dmitry Kravkovf2e08992010-10-06 03:28:26 +00001950 if (bp->vn_weight_sum && (vn_min_rate == 0))
Eilon Greenstein34f80b02008-06-23 20:33:01 -07001951 vn_min_rate = DEF_MIN_RATE;
1952 vn_max_rate = ((vn_cfg & FUNC_MF_CFG_MAX_BW_MASK) >>
1953 FUNC_MF_CFG_MAX_BW_SHIFT) * 100;
1954 }
Eilon Greenstein8a1c38d2009-02-12 08:36:40 +00001955 DP(NETIF_MSG_IFUP,
Eilon Greensteinb015e3d2009-10-15 00:17:20 -07001956 "func %d: vn_min_rate %d vn_max_rate %d vn_weight_sum %d\n",
Eilon Greenstein8a1c38d2009-02-12 08:36:40 +00001957 func, vn_min_rate, vn_max_rate, bp->vn_weight_sum);
Eilon Greenstein34f80b02008-06-23 20:33:01 -07001958
1959 memset(&m_rs_vn, 0, sizeof(struct rate_shaping_vars_per_vn));
1960 memset(&m_fair_vn, 0, sizeof(struct fairness_vars_per_vn));
1961
1962 /* global vn counter - maximal Mbps for this vn */
1963 m_rs_vn.vn_counter.rate = vn_max_rate;
1964
1965 /* quota - number of bytes transmitted in this period */
1966 m_rs_vn.vn_counter.quota =
1967 (vn_max_rate * RS_PERIODIC_TIMEOUT_USEC) / 8;
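	/* e.g. (illustrative): vn_max_rate = 10000 Mbps and a 100 usec
	 * period give quota = 10000 * 100 / 8 = 125000 bytes per period */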
1968
Eilon Greenstein8a1c38d2009-02-12 08:36:40 +00001969 if (bp->vn_weight_sum) {
Eilon Greenstein34f80b02008-06-23 20:33:01 -07001970 /* credit for each period of the fairness algorithm:
1971 number of bytes in T_FAIR (the vn share the port rate).
Eilon Greenstein8a1c38d2009-02-12 08:36:40 +00001972 vn_weight_sum should not be larger than 10000, thus
1973 T_FAIR_COEF / (8 * vn_weight_sum) will always be greater
1974 than zero */
Eilon Greenstein34f80b02008-06-23 20:33:01 -07001975 m_fair_vn.vn_credit_delta =
Vladislav Zolotarovcdaa7cb2010-04-19 01:13:57 +00001976 max_t(u32, (vn_min_rate * (T_FAIR_COEF /
1977 (8 * bp->vn_weight_sum))),
1978 (bp->cmng.fair_vars.fair_threshold * 2));
1979 DP(NETIF_MSG_IFUP, "m_fair_vn.vn_credit_delta %d\n",
Eilon Greenstein34f80b02008-06-23 20:33:01 -07001980 m_fair_vn.vn_credit_delta);
1981 }
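	/* the max_t() above clamps the per-period credit to at least twice
	 * the fairness threshold, so even a vn with a tiny min rate gets a
	 * usable credit each fairness period */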
1982
Eilon Greenstein34f80b02008-06-23 20:33:01 -07001983 /* Store it to internal memory */
1984 for (i = 0; i < sizeof(struct rate_shaping_vars_per_vn)/4; i++)
1985 REG_WR(bp, BAR_XSTRORM_INTMEM +
1986 XSTORM_RATE_SHAPING_PER_VN_VARS_OFFSET(func) + i * 4,
1987 ((u32 *)(&m_rs_vn))[i]);
1988
1989 for (i = 0; i < sizeof(struct fairness_vars_per_vn)/4; i++)
1990 REG_WR(bp, BAR_XSTRORM_INTMEM +
1991 XSTORM_FAIRNESS_PER_VN_VARS_OFFSET(func) + i * 4,
1992 ((u32 *)(&m_fair_vn))[i]);
1993}
Dmitry Kravkov523224a2010-10-06 03:23:26 +00001994static int bnx2x_get_cmng_fns_mode(struct bnx2x *bp)
1995{
1996 if (CHIP_REV_IS_SLOW(bp))
1997 return CMNG_FNS_NONE;
Dmitry Kravkovfb3bff12010-10-06 03:26:40 +00001998 if (IS_MF(bp))
Dmitry Kravkov523224a2010-10-06 03:23:26 +00001999 return CMNG_FNS_MINMAX;
Eilon Greenstein34f80b02008-06-23 20:33:01 -07002000
Dmitry Kravkov523224a2010-10-06 03:23:26 +00002001 return CMNG_FNS_NONE;
2002}
2003
2004static void bnx2x_read_mf_cfg(struct bnx2x *bp)
2005{
2006 int vn;
2007
2008 if (BP_NOMCP(bp))
2009 return; /* what should be the default bvalue in this case */
2010
2011 for (vn = VN_0; vn < E1HVN_MAX; vn++) {
2012 int /*abs*/func = 2*vn + BP_PORT(bp);
Dmitry Kravkovf2e08992010-10-06 03:28:26 +00002013 bp->mf_config[vn] =
Dmitry Kravkov523224a2010-10-06 03:23:26 +00002014 MF_CFG_RD(bp, func_mf_config[func].config);
2015 }
2016}
2017
2018static void bnx2x_cmng_fns_init(struct bnx2x *bp, u8 read_cfg, u8 cmng_type)
2019{
2020
2021 if (cmng_type == CMNG_FNS_MINMAX) {
2022 int vn;
2023
2024 /* clear cmng_enables */
2025 bp->cmng.flags.cmng_enables = 0;
2026
2027 /* read mf conf from shmem */
2028 if (read_cfg)
2029 bnx2x_read_mf_cfg(bp);
2030
2031 /* Init rate shaping and fairness contexts */
2032 bnx2x_init_port_minmax(bp);
2033
2034 /* vn_weight_sum and enable fairness if not 0 */
2035 bnx2x_calc_vn_weight_sum(bp);
2036
2037 /* calculate and set min-max rate for each vn */
2038 for (vn = VN_0; vn < E1HVN_MAX; vn++)
2039 bnx2x_init_vn_minmax(bp, vn);
2040
2041 /* always enable rate shaping and fairness */
2042 bp->cmng.flags.cmng_enables |=
2043 CMNG_FLAGS_PER_PORT_RATE_SHAPING_VN;
2044 if (!bp->vn_weight_sum)
 2045			DP(NETIF_MSG_IFUP, "All MIN values are zeroes,"
 2046			   " fairness will be disabled\n");
2047 return;
2048 }
2049
2050 /* rate shaping and fairness are disabled */
2051 DP(NETIF_MSG_IFUP,
2052 "rate shaping and fairness are disabled\n");
2053}
2054
2055static inline void bnx2x_link_sync_notify(struct bnx2x *bp)
2056{
2057 int port = BP_PORT(bp);
2058 int func;
2059 int vn;
2060
2061 /* Set the attention towards other drivers on the same port */
2062 for (vn = VN_0; vn < E1HVN_MAX; vn++) {
2063 if (vn == BP_E1HVN(bp))
2064 continue;
2065
2066 func = ((vn << 1) | port);
2067 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_0 +
2068 (LINK_SYNC_ATTENTION_BIT_FUNC_0 + func)*4, 1);
2069 }
2070}
Eilon Greenstein8a1c38d2009-02-12 08:36:40 +00002071
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02002072/* This function is called upon link interrupt */
Yaniv Rosnerc18487e2008-06-23 20:27:52 -07002073static void bnx2x_link_attn(struct bnx2x *bp)
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02002074{
Vladislav Zolotarovd9e8b182010-04-19 01:15:08 +00002075 u32 prev_link_status = bp->link_vars.link_status;
Yitchak Gertnerbb2a0f72008-06-23 20:33:36 -07002076 /* Make sure that we are synced with the current statistics */
2077 bnx2x_stats_handle(bp, STATS_EVENT_STOP);
2078
Yaniv Rosnerc18487e2008-06-23 20:27:52 -07002079 bnx2x_link_update(&bp->link_params, &bp->link_vars);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02002080
Yitchak Gertnerbb2a0f72008-06-23 20:33:36 -07002081 if (bp->link_vars.link_up) {
2082
Eilon Greenstein1c063282009-02-12 08:36:43 +00002083 /* dropless flow control */
Dmitry Kravkovf2e08992010-10-06 03:28:26 +00002084 if (!CHIP_IS_E1(bp) && bp->dropless_fc) {
Eilon Greenstein1c063282009-02-12 08:36:43 +00002085 int port = BP_PORT(bp);
2086 u32 pause_enabled = 0;
2087
2088 if (bp->link_vars.flow_ctrl & BNX2X_FLOW_CTRL_TX)
2089 pause_enabled = 1;
2090
2091 REG_WR(bp, BAR_USTRORM_INTMEM +
Eilon Greensteinca003922009-08-12 22:53:28 -07002092 USTORM_ETH_PAUSE_ENABLED_OFFSET(port),
Eilon Greenstein1c063282009-02-12 08:36:43 +00002093 pause_enabled);
2094 }
2095
Yitchak Gertnerbb2a0f72008-06-23 20:33:36 -07002096 if (bp->link_vars.mac_type == MAC_TYPE_BMAC) {
2097 struct host_port_stats *pstats;
2098
2099 pstats = bnx2x_sp(bp, port_stats);
2100 /* reset old bmac stats */
2101 memset(&(pstats->mac_stx[0]), 0,
2102 sizeof(struct mac_stx));
2103 }
Eilon Greensteinf34d28e2009-10-15 00:18:08 -07002104 if (bp->state == BNX2X_STATE_OPEN)
Yitchak Gertnerbb2a0f72008-06-23 20:33:36 -07002105 bnx2x_stats_handle(bp, STATS_EVENT_LINK_UP);
2106 }
2107
Vladislav Zolotarovd9e8b182010-04-19 01:15:08 +00002108 /* indicate link status only if link status actually changed */
2109 if (prev_link_status != bp->link_vars.link_status)
2110 bnx2x_link_report(bp);
Eilon Greenstein34f80b02008-06-23 20:33:01 -07002111
Dmitry Kravkovf2e08992010-10-06 03:28:26 +00002112 if (IS_MF(bp))
2113 bnx2x_link_sync_notify(bp);
Eilon Greenstein34f80b02008-06-23 20:33:01 -07002114
Dmitry Kravkovf2e08992010-10-06 03:28:26 +00002115 if (bp->link_vars.link_up && bp->link_vars.line_speed) {
2116 int cmng_fns = bnx2x_get_cmng_fns_mode(bp);
Eilon Greenstein34f80b02008-06-23 20:33:01 -07002117
Dmitry Kravkovf2e08992010-10-06 03:28:26 +00002118 if (cmng_fns != CMNG_FNS_NONE) {
2119 bnx2x_cmng_fns_init(bp, false, cmng_fns);
2120 storm_memset_cmng(bp, &bp->cmng, BP_PORT(bp));
2121 } else
2122 /* rate shaping and fairness are disabled */
2123 DP(NETIF_MSG_IFUP,
2124 "single function mode without fairness\n");
Eilon Greenstein34f80b02008-06-23 20:33:01 -07002125 }
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02002126}
2127
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00002128void bnx2x__link_status_update(struct bnx2x *bp)
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02002129{
Eilon Greensteinf34d28e2009-10-15 00:18:08 -07002130 if ((bp->state != BNX2X_STATE_OPEN) || (bp->flags & MF_FUNC_DIS))
Yaniv Rosnerc18487e2008-06-23 20:27:52 -07002131 return;
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02002132
Yaniv Rosnerc18487e2008-06-23 20:27:52 -07002133 bnx2x_link_status_update(&bp->link_params, &bp->link_vars);
2134
Yitchak Gertnerbb2a0f72008-06-23 20:33:36 -07002135 if (bp->link_vars.link_up)
2136 bnx2x_stats_handle(bp, STATS_EVENT_LINK_UP);
2137 else
2138 bnx2x_stats_handle(bp, STATS_EVENT_STOP);
2139
Dmitry Kravkovf2e08992010-10-06 03:28:26 +00002140	/* the link status update could be the result of a DCC event,
 2141	   hence re-read the shmem mf configuration */
2142 bnx2x_read_mf_cfg(bp);
Eilon Greenstein2691d512009-08-12 08:22:08 +00002143
Yaniv Rosnerc18487e2008-06-23 20:27:52 -07002144 /* indicate link status */
2145 bnx2x_link_report(bp);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02002146}
2147
Eilon Greenstein34f80b02008-06-23 20:33:01 -07002148static void bnx2x_pmf_update(struct bnx2x *bp)
2149{
2150 int port = BP_PORT(bp);
2151 u32 val;
2152
2153 bp->port.pmf = 1;
2154 DP(NETIF_MSG_LINK, "pmf %d\n", bp->port.pmf);
2155
2156 /* enable nig attention */
2157 val = (0xff0f | (1 << (BP_E1HVN(bp) + 4)));
Dmitry Kravkovf2e08992010-10-06 03:28:26 +00002158 if (bp->common.int_block == INT_BLOCK_HC) {
2159 REG_WR(bp, HC_REG_TRAILING_EDGE_0 + port*8, val);
2160 REG_WR(bp, HC_REG_LEADING_EDGE_0 + port*8, val);
2161 } else if (CHIP_IS_E2(bp)) {
2162 REG_WR(bp, IGU_REG_TRAILING_EDGE_LATCH, val);
2163 REG_WR(bp, IGU_REG_LEADING_EDGE_LATCH, val);
2164 }
Yitchak Gertnerbb2a0f72008-06-23 20:33:36 -07002165
2166 bnx2x_stats_handle(bp, STATS_EVENT_PMF);
Eilon Greenstein34f80b02008-06-23 20:33:01 -07002167}
2168
Yaniv Rosnerc18487e2008-06-23 20:27:52 -07002169/* end of Link */
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02002170
2171/* slow path */
2172
2173/*
2174 * General service functions
2175 */
2176
Eilon Greenstein2691d512009-08-12 08:22:08 +00002177/* send the MCP a request, block until there is a reply */
Yaniv Rosnera22f0782010-09-07 11:41:20 +00002178u32 bnx2x_fw_command(struct bnx2x *bp, u32 command, u32 param)
Eilon Greenstein2691d512009-08-12 08:22:08 +00002179{
Dmitry Kravkovf2e08992010-10-06 03:28:26 +00002180 int mb_idx = BP_FW_MB_IDX(bp);
Eilon Greenstein2691d512009-08-12 08:22:08 +00002181 u32 seq = ++bp->fw_seq;
2182 u32 rc = 0;
2183 u32 cnt = 1;
2184 u8 delay = CHIP_REV_IS_SLOW(bp) ? 100 : 10;
2185
Eilon Greensteinc4ff7cb2009-10-15 00:18:27 -07002186 mutex_lock(&bp->fw_mb_mutex);
Dmitry Kravkovf2e08992010-10-06 03:28:26 +00002187 SHMEM_WR(bp, func_mb[mb_idx].drv_mb_param, param);
2188 SHMEM_WR(bp, func_mb[mb_idx].drv_mb_header, (command | seq));
2189
Eilon Greenstein2691d512009-08-12 08:22:08 +00002190 DP(BNX2X_MSG_MCP, "wrote command (%x) to FW MB\n", (command | seq));
2191
2192 do {
 2193		/* let the FW do its magic ... */
2194 msleep(delay);
2195
Dmitry Kravkovf2e08992010-10-06 03:28:26 +00002196 rc = SHMEM_RD(bp, func_mb[mb_idx].fw_mb_header);
Eilon Greenstein2691d512009-08-12 08:22:08 +00002197
Eilon Greensteinc4ff7cb2009-10-15 00:18:27 -07002198		/* Give the FW up to 5 seconds (500*10ms) */
2199 } while ((seq != (rc & FW_MSG_SEQ_NUMBER_MASK)) && (cnt++ < 500));
Eilon Greenstein2691d512009-08-12 08:22:08 +00002200
2201 DP(BNX2X_MSG_MCP, "[after %d ms] read (%x) seq is (%x) from FW MB\n",
2202 cnt*delay, rc, seq);
2203
2204 /* is this a reply to our command? */
2205 if (seq == (rc & FW_MSG_SEQ_NUMBER_MASK))
2206 rc &= FW_MSG_CODE_MASK;
2207 else {
2208 /* FW BUG! */
2209 BNX2X_ERR("FW failed to respond!\n");
2210 bnx2x_fw_dump(bp);
2211 rc = 0;
2212 }
Eilon Greensteinc4ff7cb2009-10-15 00:18:27 -07002213 mutex_unlock(&bp->fw_mb_mutex);
Eilon Greenstein2691d512009-08-12 08:22:08 +00002214
2215 return rc;
2216}
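/* Typical use, as in bnx2x_dcc_event() below:
 *	rc = bnx2x_fw_command(bp, DRV_MSG_CODE_DCC_OK, 0);
 * The return value is the FW response masked by FW_MSG_CODE_MASK, or 0 on
 * timeout.
 */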
2217
Dmitry Kravkov523224a2010-10-06 03:23:26 +00002218/* must be called under rtnl_lock */
2219void bnx2x_rxq_set_mac_filters(struct bnx2x *bp, u16 cl_id, u32 filters)
2220{
2221 u32 mask = (1 << cl_id);
2222
 2223	/* initial setting is BNX2X_ACCEPT_NONE */
2224 u8 drop_all_ucast = 1, drop_all_bcast = 1, drop_all_mcast = 1;
2225 u8 accp_all_ucast = 0, accp_all_bcast = 0, accp_all_mcast = 0;
2226 u8 unmatched_unicast = 0;
2227
2228 if (filters & BNX2X_PROMISCUOUS_MODE) {
 2229		/* promiscuous - accept all, drop none */
2230 drop_all_ucast = drop_all_bcast = drop_all_mcast = 0;
2231 accp_all_ucast = accp_all_bcast = accp_all_mcast = 1;
2232 }
2233 if (filters & BNX2X_ACCEPT_UNICAST) {
2234 /* accept matched ucast */
2235 drop_all_ucast = 0;
2236 }
2237 if (filters & BNX2X_ACCEPT_MULTICAST) {
2238 /* accept matched mcast */
2239 drop_all_mcast = 0;
2240 }
2241 if (filters & BNX2X_ACCEPT_ALL_UNICAST) {
 2242		/* accept all ucast */
2243 drop_all_ucast = 0;
2244 accp_all_ucast = 1;
2245 }
2246 if (filters & BNX2X_ACCEPT_ALL_MULTICAST) {
2247 /* accept all mcast */
2248 drop_all_mcast = 0;
2249 accp_all_mcast = 1;
2250 }
2251 if (filters & BNX2X_ACCEPT_BROADCAST) {
2252 /* accept (all) bcast */
2253 drop_all_bcast = 0;
2254 accp_all_bcast = 1;
2255 }
2256
2257 bp->mac_filters.ucast_drop_all = drop_all_ucast ?
2258 bp->mac_filters.ucast_drop_all | mask :
2259 bp->mac_filters.ucast_drop_all & ~mask;
2260
2261 bp->mac_filters.mcast_drop_all = drop_all_mcast ?
2262 bp->mac_filters.mcast_drop_all | mask :
2263 bp->mac_filters.mcast_drop_all & ~mask;
2264
2265 bp->mac_filters.bcast_drop_all = drop_all_bcast ?
2266 bp->mac_filters.bcast_drop_all | mask :
2267 bp->mac_filters.bcast_drop_all & ~mask;
2268
2269 bp->mac_filters.ucast_accept_all = accp_all_ucast ?
2270 bp->mac_filters.ucast_accept_all | mask :
2271 bp->mac_filters.ucast_accept_all & ~mask;
2272
2273 bp->mac_filters.mcast_accept_all = accp_all_mcast ?
2274 bp->mac_filters.mcast_accept_all | mask :
2275 bp->mac_filters.mcast_accept_all & ~mask;
2276
2277 bp->mac_filters.bcast_accept_all = accp_all_bcast ?
2278 bp->mac_filters.bcast_accept_all | mask :
2279 bp->mac_filters.bcast_accept_all & ~mask;
2280
2281 bp->mac_filters.unmatched_unicast = unmatched_unicast ?
2282 bp->mac_filters.unmatched_unicast | mask :
2283 bp->mac_filters.unmatched_unicast & ~mask;
2284}
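/* Illustrative call (flag names taken from the checks above):
 *	bnx2x_rxq_set_mac_filters(bp, cl_id, BNX2X_ACCEPT_UNICAST |
 *				  BNX2X_ACCEPT_MULTICAST |
 *				  BNX2X_ACCEPT_BROADCAST);
 * accepts only matched ucast/mcast plus broadcast for that client id.
 */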
2285
2286void bnx2x_func_init(struct bnx2x *bp, struct bnx2x_func_init_params *p)
2287{
2288 if (FUNC_CONFIG(p->func_flgs)) {
2289 struct tstorm_eth_function_common_config tcfg = {0};
2290
2291 /* tpa */
2292 if (p->func_flgs & FUNC_FLG_TPA)
2293 tcfg.config_flags |=
2294 TSTORM_ETH_FUNCTION_COMMON_CONFIG_ENABLE_TPA;
2295
2296 /* set rss flags */
2297 if (p->func_flgs & FUNC_FLG_RSS) {
2298 u16 rss_flgs = (p->rss->mode <<
2299 TSTORM_ETH_FUNCTION_COMMON_CONFIG_RSS_MODE_SHIFT);
2300
2301 if (p->rss->cap & RSS_IPV4_CAP)
2302 rss_flgs |= RSS_IPV4_CAP_MASK;
2303 if (p->rss->cap & RSS_IPV4_TCP_CAP)
2304 rss_flgs |= RSS_IPV4_TCP_CAP_MASK;
2305 if (p->rss->cap & RSS_IPV6_CAP)
2306 rss_flgs |= RSS_IPV6_CAP_MASK;
2307 if (p->rss->cap & RSS_IPV6_TCP_CAP)
2308 rss_flgs |= RSS_IPV6_TCP_CAP_MASK;
2309
2310 tcfg.config_flags |= rss_flgs;
2311 tcfg.rss_result_mask = p->rss->result_mask;
2312
2313 }
2314
2315 storm_memset_func_cfg(bp, &tcfg, p->func_id);
2316 }
2317
2318 /* Enable the function in the FW */
2319 storm_memset_vf_to_pf(bp, p->func_id, p->pf_id);
2320 storm_memset_func_en(bp, p->func_id, 1);
2321
2322 /* statistics */
2323 if (p->func_flgs & FUNC_FLG_STATS) {
2324 struct stats_indication_flags stats_flags = {0};
2325 stats_flags.collect_eth = 1;
2326
2327 storm_memset_xstats_flags(bp, &stats_flags, p->func_id);
2328 storm_memset_xstats_addr(bp, p->fw_stat_map, p->func_id);
2329
2330 storm_memset_tstats_flags(bp, &stats_flags, p->func_id);
2331 storm_memset_tstats_addr(bp, p->fw_stat_map, p->func_id);
2332
2333 storm_memset_ustats_flags(bp, &stats_flags, p->func_id);
2334 storm_memset_ustats_addr(bp, p->fw_stat_map, p->func_id);
2335
2336 storm_memset_cstats_flags(bp, &stats_flags, p->func_id);
2337 storm_memset_cstats_addr(bp, p->fw_stat_map, p->func_id);
2338 }
2339
2340 /* spq */
2341 if (p->func_flgs & FUNC_FLG_SPQ) {
2342 storm_memset_spq_addr(bp, p->spq_map, p->func_id);
2343 REG_WR(bp, XSEM_REG_FAST_MEMORY +
2344 XSTORM_SPQ_PROD_OFFSET(p->func_id), p->spq_prod);
2345 }
2346}
2347
2348static inline u16 bnx2x_get_cl_flags(struct bnx2x *bp,
2349 struct bnx2x_fastpath *fp)
2350{
2351 u16 flags = 0;
2352
2353 /* calculate queue flags */
2354 flags |= QUEUE_FLG_CACHE_ALIGN;
2355 flags |= QUEUE_FLG_HC;
Dmitry Kravkovfb3bff12010-10-06 03:26:40 +00002356 flags |= IS_MF(bp) ? QUEUE_FLG_OV : 0;
Dmitry Kravkov523224a2010-10-06 03:23:26 +00002357
2358#ifdef BCM_VLAN
2359 flags |= QUEUE_FLG_VLAN;
2360 DP(NETIF_MSG_IFUP, "vlan removal enabled\n");
2361#endif
2362
2363 if (!fp->disable_tpa)
2364 flags |= QUEUE_FLG_TPA;
2365
2366 flags |= QUEUE_FLG_STATS;
2367
2368 return flags;
2369}
2370
2371static void bnx2x_pf_rx_cl_prep(struct bnx2x *bp,
2372 struct bnx2x_fastpath *fp, struct rxq_pause_params *pause,
2373 struct bnx2x_rxq_init_params *rxq_init)
2374{
2375 u16 max_sge = 0;
2376 u16 sge_sz = 0;
2377 u16 tpa_agg_size = 0;
2378
2379 /* calculate queue flags */
2380 u16 flags = bnx2x_get_cl_flags(bp, fp);
2381
2382 if (!fp->disable_tpa) {
2383 pause->sge_th_hi = 250;
2384 pause->sge_th_lo = 150;
2385 tpa_agg_size = min_t(u32,
2386 (min_t(u32, 8, MAX_SKB_FRAGS) *
2387 SGE_PAGE_SIZE * PAGES_PER_SGE), 0xffff);
2388 max_sge = SGE_PAGE_ALIGN(bp->dev->mtu) >>
2389 SGE_PAGE_SHIFT;
2390 max_sge = ((max_sge + PAGES_PER_SGE - 1) &
2391 (~(PAGES_PER_SGE-1))) >> PAGES_PER_SGE_SHIFT;
2392 sge_sz = (u16)min_t(u32, SGE_PAGE_SIZE * PAGES_PER_SGE,
2393 0xffff);
2394 }
2395
2396 /* pause - not for e1 */
2397 if (!CHIP_IS_E1(bp)) {
2398 pause->bd_th_hi = 350;
2399 pause->bd_th_lo = 250;
2400 pause->rcq_th_hi = 350;
2401 pause->rcq_th_lo = 250;
2402 pause->sge_th_hi = 0;
2403 pause->sge_th_lo = 0;
2404 pause->pri_map = 1;
2405 }
2406
2407 /* rxq setup */
2408 rxq_init->flags = flags;
2409 rxq_init->cxt = &bp->context.vcxt[fp->cid].eth;
2410 rxq_init->dscr_map = fp->rx_desc_mapping;
2411 rxq_init->sge_map = fp->rx_sge_mapping;
2412 rxq_init->rcq_map = fp->rx_comp_mapping;
2413 rxq_init->rcq_np_map = fp->rx_comp_mapping + BCM_PAGE_SIZE;
2414 rxq_init->mtu = bp->dev->mtu;
2415 rxq_init->buf_sz = bp->rx_buf_size;
2416 rxq_init->cl_qzone_id = fp->cl_qzone_id;
2417 rxq_init->cl_id = fp->cl_id;
2418 rxq_init->spcl_id = fp->cl_id;
2419 rxq_init->stat_id = fp->cl_id;
2420 rxq_init->tpa_agg_sz = tpa_agg_size;
2421 rxq_init->sge_buf_sz = sge_sz;
2422 rxq_init->max_sges_pkt = max_sge;
2423 rxq_init->cache_line_log = BNX2X_RX_ALIGN_SHIFT;
2424 rxq_init->fw_sb_id = fp->fw_sb_id;
2425
2426 rxq_init->sb_cq_index = U_SB_ETH_RX_CQ_INDEX;
2427
2428 rxq_init->cid = HW_CID(bp, fp->cid);
2429
2430 rxq_init->hc_rate = bp->rx_ticks ? (1000000 / bp->rx_ticks) : 0;
2431}
2432
2433static void bnx2x_pf_tx_cl_prep(struct bnx2x *bp,
2434 struct bnx2x_fastpath *fp, struct bnx2x_txq_init_params *txq_init)
2435{
2436 u16 flags = bnx2x_get_cl_flags(bp, fp);
2437
2438 txq_init->flags = flags;
2439 txq_init->cxt = &bp->context.vcxt[fp->cid].eth;
2440 txq_init->dscr_map = fp->tx_desc_mapping;
2441 txq_init->stat_id = fp->cl_id;
2442 txq_init->cid = HW_CID(bp, fp->cid);
2443 txq_init->sb_cq_index = C_SB_ETH_TX_CQ_INDEX;
2444 txq_init->traffic_type = LLFC_TRAFFIC_TYPE_NW;
2445 txq_init->fw_sb_id = fp->fw_sb_id;
2446 txq_init->hc_rate = bp->tx_ticks ? (1000000 / bp->tx_ticks) : 0;
2447}
2448
2449void bnx2x_pf_init(struct bnx2x *bp)
2450{
2451 struct bnx2x_func_init_params func_init = {0};
2452 struct bnx2x_rss_params rss = {0};
2453 struct event_ring_data eq_data = { {0} };
2454 u16 flags;
2455
2456 /* pf specific setups */
2457 if (!CHIP_IS_E1(bp))
Dmitry Kravkovfb3bff12010-10-06 03:26:40 +00002458 storm_memset_ov(bp, bp->mf_ov, BP_FUNC(bp));
Dmitry Kravkov523224a2010-10-06 03:23:26 +00002459
Dmitry Kravkovf2e08992010-10-06 03:28:26 +00002460 if (CHIP_IS_E2(bp)) {
2461 /* reset IGU PF statistics: MSIX + ATTN */
2462 /* PF */
2463 REG_WR(bp, IGU_REG_STATISTIC_NUM_MESSAGE_SENT +
2464 BNX2X_IGU_STAS_MSG_VF_CNT*4 +
2465 (CHIP_MODE_IS_4_PORT(bp) ?
2466 BP_FUNC(bp) : BP_VN(bp))*4, 0);
2467 /* ATTN */
2468 REG_WR(bp, IGU_REG_STATISTIC_NUM_MESSAGE_SENT +
2469 BNX2X_IGU_STAS_MSG_VF_CNT*4 +
2470 BNX2X_IGU_STAS_MSG_PF_CNT*4 +
2471 (CHIP_MODE_IS_4_PORT(bp) ?
2472 BP_FUNC(bp) : BP_VN(bp))*4, 0);
2473 }
2474
Dmitry Kravkov523224a2010-10-06 03:23:26 +00002475 /* function setup flags */
2476 flags = (FUNC_FLG_STATS | FUNC_FLG_LEADING | FUNC_FLG_SPQ);
2477
Dmitry Kravkovf2e08992010-10-06 03:28:26 +00002478 if (CHIP_IS_E1x(bp))
2479 flags |= (bp->flags & TPA_ENABLE_FLAG) ? FUNC_FLG_TPA : 0;
2480 else
2481 flags |= FUNC_FLG_TPA;
Dmitry Kravkov523224a2010-10-06 03:23:26 +00002482
2483 /**
2484 * Although RSS is meaningless when there is a single HW queue we
2485 * still need it enabled in order to have HW Rx hash generated.
2486 *
2487 * if (is_eth_multi(bp))
2488 * flags |= FUNC_FLG_RSS;
2489 */
2490
2491 /* function setup */
2492 if (flags & FUNC_FLG_RSS) {
2493 rss.cap = (RSS_IPV4_CAP | RSS_IPV4_TCP_CAP |
2494 RSS_IPV6_CAP | RSS_IPV6_TCP_CAP);
2495 rss.mode = bp->multi_mode;
2496 rss.result_mask = MULTI_MASK;
2497 func_init.rss = &rss;
2498 }
2499
2500 func_init.func_flgs = flags;
2501 func_init.pf_id = BP_FUNC(bp);
2502 func_init.func_id = BP_FUNC(bp);
2503 func_init.fw_stat_map = bnx2x_sp_mapping(bp, fw_stats);
2504 func_init.spq_map = bp->spq_mapping;
2505 func_init.spq_prod = bp->spq_prod_idx;
2506
2507 bnx2x_func_init(bp, &func_init);
2508
2509 memset(&(bp->cmng), 0, sizeof(struct cmng_struct_per_port));
2510
2511 /*
 2512	  Congestion management values depend on the link rate.
 2513	  There is no active link yet, so the initial link rate is set to 10 Gbps.
 2514	  When the link comes up, the congestion management values are
 2515	  re-calculated according to the actual link rate.
2516 */
2517 bp->link_vars.line_speed = SPEED_10000;
2518 bnx2x_cmng_fns_init(bp, true, bnx2x_get_cmng_fns_mode(bp));
2519
2520 /* Only the PMF sets the HW */
2521 if (bp->port.pmf)
2522 storm_memset_cmng(bp, &bp->cmng, BP_PORT(bp));
2523
2524 /* no rx until link is up */
2525 bp->rx_mode = BNX2X_RX_MODE_NONE;
2526 bnx2x_set_storm_rx_mode(bp);
2527
2528 /* init Event Queue */
2529 eq_data.base_addr.hi = U64_HI(bp->eq_mapping);
2530 eq_data.base_addr.lo = U64_LO(bp->eq_mapping);
2531 eq_data.producer = bp->eq_prod;
2532 eq_data.index_id = HC_SP_INDEX_EQ_CONS;
2533 eq_data.sb_id = DEF_SB_ID;
2534 storm_memset_eq_data(bp, &eq_data, BP_FUNC(bp));
2535}
2536
2537
Eilon Greenstein2691d512009-08-12 08:22:08 +00002538static void bnx2x_e1h_disable(struct bnx2x *bp)
2539{
2540 int port = BP_PORT(bp);
Eilon Greenstein2691d512009-08-12 08:22:08 +00002541
2542 netif_tx_disable(bp->dev);
Eilon Greenstein2691d512009-08-12 08:22:08 +00002543
2544 REG_WR(bp, NIG_REG_LLH0_FUNC_EN + port*8, 0);
2545
Eilon Greenstein2691d512009-08-12 08:22:08 +00002546 netif_carrier_off(bp->dev);
2547}
2548
2549static void bnx2x_e1h_enable(struct bnx2x *bp)
2550{
2551 int port = BP_PORT(bp);
2552
2553 REG_WR(bp, NIG_REG_LLH0_FUNC_EN + port*8, 1);
2554
Eilon Greenstein2691d512009-08-12 08:22:08 +00002555	/* Tx queues should only be re-enabled */
2556 netif_tx_wake_all_queues(bp->dev);
2557
Eilon Greenstein061bc702009-10-15 00:18:47 -07002558 /*
2559 * Should not call netif_carrier_on since it will be called if the link
2560 * is up when checking for link state
2561 */
Eilon Greenstein2691d512009-08-12 08:22:08 +00002562}
2563
Eilon Greenstein2691d512009-08-12 08:22:08 +00002564static void bnx2x_dcc_event(struct bnx2x *bp, u32 dcc_event)
2565{
Eilon Greenstein2691d512009-08-12 08:22:08 +00002566 DP(BNX2X_MSG_MCP, "dcc_event 0x%x\n", dcc_event);
Eilon Greenstein2691d512009-08-12 08:22:08 +00002567
2568 if (dcc_event & DRV_STATUS_DCC_DISABLE_ENABLE_PF) {
2569
Eilon Greensteinf34d28e2009-10-15 00:18:08 -07002570 /*
2571 * This is the only place besides the function initialization
2572 * where the bp->flags can change so it is done without any
2573 * locks
2574 */
Dmitry Kravkovf2e08992010-10-06 03:28:26 +00002575 if (bp->mf_config[BP_VN(bp)] & FUNC_MF_CFG_FUNC_DISABLED) {
Eilon Greenstein2691d512009-08-12 08:22:08 +00002576 DP(NETIF_MSG_IFDOWN, "mf_cfg function disabled\n");
Eilon Greensteinf34d28e2009-10-15 00:18:08 -07002577 bp->flags |= MF_FUNC_DIS;
Eilon Greenstein2691d512009-08-12 08:22:08 +00002578
2579 bnx2x_e1h_disable(bp);
2580 } else {
2581 DP(NETIF_MSG_IFUP, "mf_cfg function enabled\n");
Eilon Greensteinf34d28e2009-10-15 00:18:08 -07002582 bp->flags &= ~MF_FUNC_DIS;
Eilon Greenstein2691d512009-08-12 08:22:08 +00002583
2584 bnx2x_e1h_enable(bp);
2585 }
2586 dcc_event &= ~DRV_STATUS_DCC_DISABLE_ENABLE_PF;
2587 }
2588 if (dcc_event & DRV_STATUS_DCC_BANDWIDTH_ALLOCATION) {
2589
Dmitry Kravkov523224a2010-10-06 03:23:26 +00002590 bnx2x_cmng_fns_init(bp, true, CMNG_FNS_MINMAX);
2591 bnx2x_link_sync_notify(bp);
2592 storm_memset_cmng(bp, &bp->cmng, BP_PORT(bp));
Eilon Greenstein2691d512009-08-12 08:22:08 +00002593 dcc_event &= ~DRV_STATUS_DCC_BANDWIDTH_ALLOCATION;
2594 }
2595
2596 /* Report results to MCP */
2597 if (dcc_event)
Yaniv Rosnera22f0782010-09-07 11:41:20 +00002598 bnx2x_fw_command(bp, DRV_MSG_CODE_DCC_FAILURE, 0);
Eilon Greenstein2691d512009-08-12 08:22:08 +00002599 else
Yaniv Rosnera22f0782010-09-07 11:41:20 +00002600 bnx2x_fw_command(bp, DRV_MSG_CODE_DCC_OK, 0);
Eilon Greenstein2691d512009-08-12 08:22:08 +00002601}
2602
Michael Chan28912902009-10-10 13:46:53 +00002603/* must be called under the spq lock */
2604static inline struct eth_spe *bnx2x_sp_get_next(struct bnx2x *bp)
2605{
2606 struct eth_spe *next_spe = bp->spq_prod_bd;
2607
2608 if (bp->spq_prod_bd == bp->spq_last_bd) {
2609 bp->spq_prod_bd = bp->spq;
2610 bp->spq_prod_idx = 0;
2611 DP(NETIF_MSG_TIMER, "end of spq\n");
2612 } else {
2613 bp->spq_prod_bd++;
2614 bp->spq_prod_idx++;
2615 }
2616 return next_spe;
2617}
2618
2619/* must be called under the spq lock */
2620static inline void bnx2x_sp_prod_update(struct bnx2x *bp)
2621{
2622 int func = BP_FUNC(bp);
2623
2624 /* Make sure that BD data is updated before writing the producer */
2625 wmb();
2626
Dmitry Kravkov523224a2010-10-06 03:23:26 +00002627 REG_WR16(bp, BAR_XSTRORM_INTMEM + XSTORM_SPQ_PROD_OFFSET(func),
Michael Chan28912902009-10-10 13:46:53 +00002628 bp->spq_prod_idx);
2629 mmiowb();
2630}
2631
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02002632/* the slow path queue is odd since completions arrive on the fastpath ring */
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00002633int bnx2x_sp_post(struct bnx2x *bp, int command, int cid,
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02002634 u32 data_hi, u32 data_lo, int common)
2635{
Michael Chan28912902009-10-10 13:46:53 +00002636 struct eth_spe *spe;
Dmitry Kravkov523224a2010-10-06 03:23:26 +00002637 u16 type;
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02002638
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02002639#ifdef BNX2X_STOP_ON_ERROR
2640 if (unlikely(bp->panic))
2641 return -EIO;
2642#endif
2643
Eilon Greenstein34f80b02008-06-23 20:33:01 -07002644 spin_lock_bh(&bp->spq_lock);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02002645
Dmitry Kravkov8fe23fb2010-10-06 03:27:41 +00002646 if (!atomic_read(&bp->spq_left)) {
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02002647 BNX2X_ERR("BUG! SPQ ring full!\n");
Eilon Greenstein34f80b02008-06-23 20:33:01 -07002648 spin_unlock_bh(&bp->spq_lock);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02002649 bnx2x_panic();
2650 return -EBUSY;
2651 }
Eliezer Tamirf1410642008-02-28 11:51:50 -08002652
Michael Chan28912902009-10-10 13:46:53 +00002653 spe = bnx2x_sp_get_next(bp);
2654
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02002655	/* CID needs port number to be encoded in it */
Michael Chan28912902009-10-10 13:46:53 +00002656 spe->hdr.conn_and_cmd_data =
Vladislav Zolotarovcdaa7cb2010-04-19 01:13:57 +00002657 cpu_to_le32((command << SPE_HDR_CMD_ID_SHIFT) |
2658 HW_CID(bp, cid));
Dmitry Kravkov523224a2010-10-06 03:23:26 +00002659
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02002660 if (common)
Dmitry Kravkov523224a2010-10-06 03:23:26 +00002661 /* Common ramrods:
2662 * FUNC_START, FUNC_STOP, CFC_DEL, STATS, SET_MAC
2663 * TRAFFIC_STOP, TRAFFIC_START
2664 */
2665 type = (NONE_CONNECTION_TYPE << SPE_HDR_CONN_TYPE_SHIFT)
2666 & SPE_HDR_CONN_TYPE;
2667 else
2668 /* ETH ramrods: SETUP, HALT */
2669 type = (ETH_CONNECTION_TYPE << SPE_HDR_CONN_TYPE_SHIFT)
2670 & SPE_HDR_CONN_TYPE;
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02002671
Dmitry Kravkov523224a2010-10-06 03:23:26 +00002672 type |= ((BP_FUNC(bp) << SPE_HDR_FUNCTION_ID_SHIFT) &
2673 SPE_HDR_FUNCTION_ID);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02002674
Dmitry Kravkov523224a2010-10-06 03:23:26 +00002675 spe->hdr.type = cpu_to_le16(type);
2676
2677 spe->data.update_data_addr.hi = cpu_to_le32(data_hi);
2678 spe->data.update_data_addr.lo = cpu_to_le32(data_lo);
2679
 2680	/* stats ramrod has its own slot on the spq */
2681 if (command != RAMROD_CMD_ID_COMMON_STAT_QUERY)
2682 /* It's ok if the actual decrement is issued towards the memory
 2683	 * somewhere between the spin_lock and spin_unlock. Thus no
 2684	 * further explicit memory barrier is needed.
2685 */
Dmitry Kravkov8fe23fb2010-10-06 03:27:41 +00002686 atomic_dec(&bp->spq_left);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02002687
Vladislav Zolotarovcdaa7cb2010-04-19 01:13:57 +00002688 DP(BNX2X_MSG_SP/*NETIF_MSG_TIMER*/,
Dmitry Kravkov523224a2010-10-06 03:23:26 +00002689 "SPQE[%x] (%x:%x) command %d hw_cid %x data (%x:%x) "
2690 "type(0x%x) left %x\n",
Vladislav Zolotarovcdaa7cb2010-04-19 01:13:57 +00002691 bp->spq_prod_idx, (u32)U64_HI(bp->spq_mapping),
2692 (u32)(U64_LO(bp->spq_mapping) +
2693 (void *)bp->spq_prod_bd - (void *)bp->spq), command,
Dmitry Kravkov8fe23fb2010-10-06 03:27:41 +00002694 HW_CID(bp, cid), data_hi, data_lo, type, atomic_read(&bp->spq_left));
Vladislav Zolotarovcdaa7cb2010-04-19 01:13:57 +00002695
Michael Chan28912902009-10-10 13:46:53 +00002696 bnx2x_sp_prod_update(bp);
Eilon Greenstein34f80b02008-06-23 20:33:01 -07002697 spin_unlock_bh(&bp->spq_lock);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02002698 return 0;
2699}
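/* Illustrative call (command names from bnx2x_sp_event() above):
 *	bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_HALT, fp->cid, 0, 0, 0);
 * posts a per-client ETH ramrod; common == 1 instead selects the
 * NONE_CONNECTION_TYPE ramrods (FUNC_START/STOP, CFC_DEL, STATS, ...).
 */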
2700
2701/* acquire split MCP access lock register */
Yitchak Gertner4a37fb62008-08-13 15:50:23 -07002702static int bnx2x_acquire_alr(struct bnx2x *bp)
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02002703{
Vladislav Zolotarov72fd0712010-04-19 01:13:12 +00002704 u32 j, val;
Eilon Greenstein34f80b02008-06-23 20:33:01 -07002705 int rc = 0;
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02002706
2707 might_sleep();
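	/* Poll the lock bit for up to 1000 * 5 ms = 5 seconds before
	 * giving up with -EBUSY.
	 */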
Vladislav Zolotarov72fd0712010-04-19 01:13:12 +00002708 for (j = 0; j < 1000; j++) {
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02002709 val = (1UL << 31);
2710 REG_WR(bp, GRCBASE_MCP + 0x9c, val);
2711 val = REG_RD(bp, GRCBASE_MCP + 0x9c);
2712 if (val & (1L << 31))
2713 break;
2714
2715 msleep(5);
2716 }
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02002717 if (!(val & (1L << 31))) {
Eilon Greenstein19680c42008-08-13 15:47:33 -07002718 BNX2X_ERR("Cannot acquire MCP access lock register\n");
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02002719 rc = -EBUSY;
2720 }
2721
2722 return rc;
2723}
2724
Yitchak Gertner4a37fb62008-08-13 15:50:23 -07002725/* release split MCP access lock register */
2726static void bnx2x_release_alr(struct bnx2x *bp)
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02002727{
Vladislav Zolotarov72fd0712010-04-19 01:13:12 +00002728 REG_WR(bp, GRCBASE_MCP + 0x9c, 0);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02002729}
2730
Dmitry Kravkov523224a2010-10-06 03:23:26 +00002731#define BNX2X_DEF_SB_ATT_IDX 0x0001
2732#define BNX2X_DEF_SB_IDX 0x0002
2733
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02002734static inline u16 bnx2x_update_dsb_idx(struct bnx2x *bp)
2735{
Dmitry Kravkov523224a2010-10-06 03:23:26 +00002736 struct host_sp_status_block *def_sb = bp->def_status_blk;
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02002737 u16 rc = 0;
2738
2739 barrier(); /* status block is written to by the chip */
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02002740 if (bp->def_att_idx != def_sb->atten_status_block.attn_bits_index) {
2741 bp->def_att_idx = def_sb->atten_status_block.attn_bits_index;
Dmitry Kravkov523224a2010-10-06 03:23:26 +00002742 rc |= BNX2X_DEF_SB_ATT_IDX;
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02002743 }
Dmitry Kravkov523224a2010-10-06 03:23:26 +00002744
2745 if (bp->def_idx != def_sb->sp_sb.running_index) {
2746 bp->def_idx = def_sb->sp_sb.running_index;
2747 rc |= BNX2X_DEF_SB_IDX;
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02002748 }
Dmitry Kravkov523224a2010-10-06 03:23:26 +00002749
2750	/* Do not reorder: indices reading should complete before handling */
2751 barrier();
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02002752 return rc;
2753}
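/* The returned mask drives bnx2x_sp_task() below: BNX2X_DEF_SB_ATT_IDX
 * requests attention handling, BNX2X_DEF_SB_IDX slow-path/EQ handling;
 * both bits may be set on a single invocation.
 */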
2754
2755/*
2756 * slow path service functions
2757 */
2758
2759static void bnx2x_attn_int_asserted(struct bnx2x *bp, u32 asserted)
2760{
Eilon Greenstein34f80b02008-06-23 20:33:01 -07002761 int port = BP_PORT(bp);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02002762 u32 aeu_addr = port ? MISC_REG_AEU_MASK_ATTN_FUNC_1 :
2763 MISC_REG_AEU_MASK_ATTN_FUNC_0;
Eliezer Tamir877e9aa2008-02-28 11:55:53 -08002764 u32 nig_int_mask_addr = port ? NIG_REG_MASK_INTERRUPT_PORT1 :
2765 NIG_REG_MASK_INTERRUPT_PORT0;
Eilon Greenstein3fcaf2e2008-08-13 15:50:45 -07002766 u32 aeu_mask;
Eilon Greenstein87942b42009-02-12 08:36:49 +00002767 u32 nig_mask = 0;
Dmitry Kravkovf2e08992010-10-06 03:28:26 +00002768 u32 reg_addr;
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02002769
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02002770 if (bp->attn_state & asserted)
2771 BNX2X_ERR("IGU ERROR\n");
2772
Eilon Greenstein3fcaf2e2008-08-13 15:50:45 -07002773 bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_PORT0_ATT_MASK + port);
2774 aeu_mask = REG_RD(bp, aeu_addr);
2775
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02002776 DP(NETIF_MSG_HW, "aeu_mask %x newly asserted %x\n",
Eilon Greenstein3fcaf2e2008-08-13 15:50:45 -07002777 aeu_mask, asserted);
Vladislav Zolotarov72fd0712010-04-19 01:13:12 +00002778 aeu_mask &= ~(asserted & 0x3ff);
Eilon Greenstein3fcaf2e2008-08-13 15:50:45 -07002779 DP(NETIF_MSG_HW, "new mask %x\n", aeu_mask);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02002780
Eilon Greenstein3fcaf2e2008-08-13 15:50:45 -07002781 REG_WR(bp, aeu_addr, aeu_mask);
2782 bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_PORT0_ATT_MASK + port);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02002783
Eilon Greenstein3fcaf2e2008-08-13 15:50:45 -07002784 DP(NETIF_MSG_HW, "attn_state %x\n", bp->attn_state);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02002785 bp->attn_state |= asserted;
Eilon Greenstein3fcaf2e2008-08-13 15:50:45 -07002786 DP(NETIF_MSG_HW, "new state %x\n", bp->attn_state);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02002787
2788 if (asserted & ATTN_HARD_WIRED_MASK) {
2789 if (asserted & ATTN_NIG_FOR_FUNC) {
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02002790
Eilon Greensteina5e9a7c2009-01-14 21:26:01 -08002791 bnx2x_acquire_phy_lock(bp);
2792
Eliezer Tamir877e9aa2008-02-28 11:55:53 -08002793 /* save nig interrupt mask */
Eilon Greenstein87942b42009-02-12 08:36:49 +00002794 nig_mask = REG_RD(bp, nig_int_mask_addr);
Eliezer Tamir877e9aa2008-02-28 11:55:53 -08002795 REG_WR(bp, nig_int_mask_addr, 0);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02002796
Yaniv Rosnerc18487e2008-06-23 20:27:52 -07002797 bnx2x_link_attn(bp);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02002798
2799 /* handle unicore attn? */
2800 }
2801 if (asserted & ATTN_SW_TIMER_4_FUNC)
2802 DP(NETIF_MSG_HW, "ATTN_SW_TIMER_4_FUNC!\n");
2803
2804 if (asserted & GPIO_2_FUNC)
2805 DP(NETIF_MSG_HW, "GPIO_2_FUNC!\n");
2806
2807 if (asserted & GPIO_3_FUNC)
2808 DP(NETIF_MSG_HW, "GPIO_3_FUNC!\n");
2809
2810 if (asserted & GPIO_4_FUNC)
2811 DP(NETIF_MSG_HW, "GPIO_4_FUNC!\n");
2812
2813 if (port == 0) {
2814 if (asserted & ATTN_GENERAL_ATTN_1) {
2815 DP(NETIF_MSG_HW, "ATTN_GENERAL_ATTN_1!\n");
2816 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_1, 0x0);
2817 }
2818 if (asserted & ATTN_GENERAL_ATTN_2) {
2819 DP(NETIF_MSG_HW, "ATTN_GENERAL_ATTN_2!\n");
2820 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_2, 0x0);
2821 }
2822 if (asserted & ATTN_GENERAL_ATTN_3) {
2823 DP(NETIF_MSG_HW, "ATTN_GENERAL_ATTN_3!\n");
2824 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_3, 0x0);
2825 }
2826 } else {
2827 if (asserted & ATTN_GENERAL_ATTN_4) {
2828 DP(NETIF_MSG_HW, "ATTN_GENERAL_ATTN_4!\n");
2829 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_4, 0x0);
2830 }
2831 if (asserted & ATTN_GENERAL_ATTN_5) {
2832 DP(NETIF_MSG_HW, "ATTN_GENERAL_ATTN_5!\n");
2833 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_5, 0x0);
2834 }
2835 if (asserted & ATTN_GENERAL_ATTN_6) {
2836 DP(NETIF_MSG_HW, "ATTN_GENERAL_ATTN_6!\n");
2837 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_6, 0x0);
2838 }
2839 }
2840
2841 } /* if hardwired */
2842
Dmitry Kravkovf2e08992010-10-06 03:28:26 +00002843 if (bp->common.int_block == INT_BLOCK_HC)
2844 reg_addr = (HC_REG_COMMAND_REG + port*32 +
2845 COMMAND_REG_ATTN_BITS_SET);
2846 else
2847 reg_addr = (BAR_IGU_INTMEM + IGU_CMD_ATTN_BIT_SET_UPPER*8);
2848
2849 DP(NETIF_MSG_HW, "about to mask 0x%08x at %s addr 0x%x\n", asserted,
2850 (bp->common.int_block == INT_BLOCK_HC) ? "HC" : "IGU", reg_addr);
2851 REG_WR(bp, reg_addr, asserted);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02002852
2853 /* now set back the mask */
Eilon Greensteina5e9a7c2009-01-14 21:26:01 -08002854 if (asserted & ATTN_NIG_FOR_FUNC) {
Eilon Greenstein87942b42009-02-12 08:36:49 +00002855 REG_WR(bp, nig_int_mask_addr, nig_mask);
Eilon Greensteina5e9a7c2009-01-14 21:26:01 -08002856 bnx2x_release_phy_lock(bp);
2857 }
Eliezer Tamir877e9aa2008-02-28 11:55:53 -08002858}
2859
Eilon Greensteinfd4ef402009-07-21 05:47:27 +00002860static inline void bnx2x_fan_failure(struct bnx2x *bp)
2861{
2862 int port = BP_PORT(bp);
Yaniv Rosnerb7737c92010-09-07 11:40:54 +00002863 u32 ext_phy_config;
Eilon Greensteinfd4ef402009-07-21 05:47:27 +00002864 /* mark the failure */
Yaniv Rosnerb7737c92010-09-07 11:40:54 +00002865 ext_phy_config =
2866 SHMEM_RD(bp,
2867 dev_info.port_hw_config[port].external_phy_config);
2868
2869 ext_phy_config &= ~PORT_HW_CFG_XGXS_EXT_PHY_TYPE_MASK;
2870 ext_phy_config |= PORT_HW_CFG_XGXS_EXT_PHY_TYPE_FAILURE;
Eilon Greensteinfd4ef402009-07-21 05:47:27 +00002871 SHMEM_WR(bp, dev_info.port_hw_config[port].external_phy_config,
Yaniv Rosnerb7737c92010-09-07 11:40:54 +00002872 ext_phy_config);
Eilon Greensteinfd4ef402009-07-21 05:47:27 +00002873
2874 /* log the failure */
Vladislav Zolotarovcdaa7cb2010-04-19 01:13:57 +00002875 netdev_err(bp->dev, "Fan Failure on Network Controller has caused"
2876	 " the driver to shut down the card to prevent permanent"
2877 " damage. Please contact OEM Support for assistance\n");
Eilon Greensteinfd4ef402009-07-21 05:47:27 +00002878}
Eilon Greensteinab6ad5a2009-08-12 08:24:29 +00002879
Eliezer Tamir877e9aa2008-02-28 11:55:53 -08002880static inline void bnx2x_attn_int_deasserted0(struct bnx2x *bp, u32 attn)
2881{
Eilon Greenstein34f80b02008-06-23 20:33:01 -07002882 int port = BP_PORT(bp);
Eliezer Tamir877e9aa2008-02-28 11:55:53 -08002883 int reg_offset;
Yaniv Rosnerd90d96b2010-09-07 11:41:04 +00002884 u32 val;
Eliezer Tamir877e9aa2008-02-28 11:55:53 -08002885
Eilon Greenstein34f80b02008-06-23 20:33:01 -07002886 reg_offset = (port ? MISC_REG_AEU_ENABLE1_FUNC_1_OUT_0 :
2887 MISC_REG_AEU_ENABLE1_FUNC_0_OUT_0);
Eliezer Tamir877e9aa2008-02-28 11:55:53 -08002888
Eilon Greenstein34f80b02008-06-23 20:33:01 -07002889 if (attn & AEU_INPUTS_ATTN_BITS_SPIO5) {
Eliezer Tamir877e9aa2008-02-28 11:55:53 -08002890
2891 val = REG_RD(bp, reg_offset);
2892 val &= ~AEU_INPUTS_ATTN_BITS_SPIO5;
2893 REG_WR(bp, reg_offset, val);
2894
2895 BNX2X_ERR("SPIO5 hw attention\n");
2896
Eilon Greensteinfd4ef402009-07-21 05:47:27 +00002897 /* Fan failure attention */
Yaniv Rosnerd90d96b2010-09-07 11:41:04 +00002898 bnx2x_hw_reset_phy(&bp->link_params);
Eilon Greensteinfd4ef402009-07-21 05:47:27 +00002899 bnx2x_fan_failure(bp);
Eliezer Tamir877e9aa2008-02-28 11:55:53 -08002900 }
Eilon Greenstein34f80b02008-06-23 20:33:01 -07002901
Eilon Greenstein589abe32009-02-12 08:36:55 +00002902 if (attn & (AEU_INPUTS_ATTN_BITS_GPIO3_FUNCTION_0 |
2903 AEU_INPUTS_ATTN_BITS_GPIO3_FUNCTION_1)) {
2904 bnx2x_acquire_phy_lock(bp);
2905 bnx2x_handle_module_detect_int(&bp->link_params);
2906 bnx2x_release_phy_lock(bp);
2907 }
2908
Eilon Greenstein34f80b02008-06-23 20:33:01 -07002909 if (attn & HW_INTERRUT_ASSERT_SET_0) {
2910
2911 val = REG_RD(bp, reg_offset);
2912 val &= ~(attn & HW_INTERRUT_ASSERT_SET_0);
2913 REG_WR(bp, reg_offset, val);
2914
2915 BNX2X_ERR("FATAL HW block attention set0 0x%x\n",
Eilon Greenstein0fc5d002009-08-12 08:24:05 +00002916 (u32)(attn & HW_INTERRUT_ASSERT_SET_0));
Eilon Greenstein34f80b02008-06-23 20:33:01 -07002917 bnx2x_panic();
2918 }
Eliezer Tamir877e9aa2008-02-28 11:55:53 -08002919}
2920
2921static inline void bnx2x_attn_int_deasserted1(struct bnx2x *bp, u32 attn)
2922{
2923 u32 val;
2924
Eilon Greenstein0626b892009-02-12 08:38:14 +00002925 if (attn & AEU_INPUTS_ATTN_BITS_DOORBELLQ_HW_INTERRUPT) {
Eliezer Tamir877e9aa2008-02-28 11:55:53 -08002926
2927 val = REG_RD(bp, DORQ_REG_DORQ_INT_STS_CLR);
2928 BNX2X_ERR("DB hw attention 0x%x\n", val);
2929 /* DORQ discard attention */
2930 if (val & 0x2)
2931 BNX2X_ERR("FATAL error from DORQ\n");
2932 }
Eilon Greenstein34f80b02008-06-23 20:33:01 -07002933
2934 if (attn & HW_INTERRUT_ASSERT_SET_1) {
2935
2936 int port = BP_PORT(bp);
2937 int reg_offset;
2938
2939 reg_offset = (port ? MISC_REG_AEU_ENABLE1_FUNC_1_OUT_1 :
2940 MISC_REG_AEU_ENABLE1_FUNC_0_OUT_1);
2941
2942 val = REG_RD(bp, reg_offset);
2943 val &= ~(attn & HW_INTERRUT_ASSERT_SET_1);
2944 REG_WR(bp, reg_offset, val);
2945
2946 BNX2X_ERR("FATAL HW block attention set1 0x%x\n",
Eilon Greenstein0fc5d002009-08-12 08:24:05 +00002947 (u32)(attn & HW_INTERRUT_ASSERT_SET_1));
Eilon Greenstein34f80b02008-06-23 20:33:01 -07002948 bnx2x_panic();
2949 }
Eliezer Tamir877e9aa2008-02-28 11:55:53 -08002950}
2951
2952static inline void bnx2x_attn_int_deasserted2(struct bnx2x *bp, u32 attn)
2953{
2954 u32 val;
2955
2956 if (attn & AEU_INPUTS_ATTN_BITS_CFC_HW_INTERRUPT) {
2957
2958 val = REG_RD(bp, CFC_REG_CFC_INT_STS_CLR);
2959 BNX2X_ERR("CFC hw attention 0x%x\n", val);
2960 /* CFC error attention */
2961 if (val & 0x2)
2962 BNX2X_ERR("FATAL error from CFC\n");
2963 }
2964
2965 if (attn & AEU_INPUTS_ATTN_BITS_PXP_HW_INTERRUPT) {
2966
2967 val = REG_RD(bp, PXP_REG_PXP_INT_STS_CLR_0);
2968 BNX2X_ERR("PXP hw attention 0x%x\n", val);
2969 /* RQ_USDMDP_FIFO_OVERFLOW */
2970 if (val & 0x18000)
2971 BNX2X_ERR("FATAL error from PXP\n");
Dmitry Kravkovf2e08992010-10-06 03:28:26 +00002972 if (CHIP_IS_E2(bp)) {
2973 val = REG_RD(bp, PXP_REG_PXP_INT_STS_CLR_1);
2974 BNX2X_ERR("PXP hw attention-1 0x%x\n", val);
2975 }
Eliezer Tamir877e9aa2008-02-28 11:55:53 -08002976 }
Eilon Greenstein34f80b02008-06-23 20:33:01 -07002977
2978 if (attn & HW_INTERRUT_ASSERT_SET_2) {
2979
2980 int port = BP_PORT(bp);
2981 int reg_offset;
2982
2983 reg_offset = (port ? MISC_REG_AEU_ENABLE1_FUNC_1_OUT_2 :
2984 MISC_REG_AEU_ENABLE1_FUNC_0_OUT_2);
2985
2986 val = REG_RD(bp, reg_offset);
2987 val &= ~(attn & HW_INTERRUT_ASSERT_SET_2);
2988 REG_WR(bp, reg_offset, val);
2989
2990 BNX2X_ERR("FATAL HW block attention set2 0x%x\n",
Eilon Greenstein0fc5d002009-08-12 08:24:05 +00002991 (u32)(attn & HW_INTERRUT_ASSERT_SET_2));
Eilon Greenstein34f80b02008-06-23 20:33:01 -07002992 bnx2x_panic();
2993 }
Eliezer Tamir877e9aa2008-02-28 11:55:53 -08002994}
2995
2996static inline void bnx2x_attn_int_deasserted3(struct bnx2x *bp, u32 attn)
2997{
Eilon Greenstein34f80b02008-06-23 20:33:01 -07002998 u32 val;
2999
Eliezer Tamir877e9aa2008-02-28 11:55:53 -08003000 if (attn & EVEREST_GEN_ATTN_IN_USE_MASK) {
3001
Eilon Greenstein34f80b02008-06-23 20:33:01 -07003002 if (attn & BNX2X_PMF_LINK_ASSERT) {
3003 int func = BP_FUNC(bp);
3004
3005 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_12 + func*4, 0);
Dmitry Kravkovf2e08992010-10-06 03:28:26 +00003006 bp->mf_config[BP_VN(bp)] = MF_CFG_RD(bp,
3007 func_mf_config[BP_ABS_FUNC(bp)].config);
3008 val = SHMEM_RD(bp,
3009 func_mb[BP_FW_MB_IDX(bp)].drv_status);
Eilon Greenstein2691d512009-08-12 08:22:08 +00003010 if (val & DRV_STATUS_DCC_EVENT_MASK)
3011 bnx2x_dcc_event(bp,
3012 (val & DRV_STATUS_DCC_EVENT_MASK));
Eilon Greenstein34f80b02008-06-23 20:33:01 -07003013 bnx2x__link_status_update(bp);
Eilon Greenstein2691d512009-08-12 08:22:08 +00003014 if ((bp->port.pmf == 0) && (val & DRV_STATUS_PMF))
Eilon Greenstein34f80b02008-06-23 20:33:01 -07003015 bnx2x_pmf_update(bp);
3016
3017 } else if (attn & BNX2X_MC_ASSERT_BITS) {
Eliezer Tamir877e9aa2008-02-28 11:55:53 -08003018
3019 BNX2X_ERR("MC assert!\n");
3020 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_10, 0);
3021 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_9, 0);
3022 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_8, 0);
3023 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_7, 0);
3024 bnx2x_panic();
3025
3026 } else if (attn & BNX2X_MCP_ASSERT) {
3027
3028 BNX2X_ERR("MCP assert!\n");
3029 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_11, 0);
Eilon Greenstein34f80b02008-06-23 20:33:01 -07003030 bnx2x_fw_dump(bp);
Eliezer Tamir877e9aa2008-02-28 11:55:53 -08003031
3032 } else
3033 BNX2X_ERR("Unknown HW assert! (attn 0x%x)\n", attn);
3034 }
3035
3036 if (attn & EVEREST_LATCHED_ATTN_IN_USE_MASK) {
Eilon Greenstein34f80b02008-06-23 20:33:01 -07003037 BNX2X_ERR("LATCHED attention 0x%08x (masked)\n", attn);
3038 if (attn & BNX2X_GRC_TIMEOUT) {
Dmitry Kravkovf2e08992010-10-06 03:28:26 +00003039 val = CHIP_IS_E1(bp) ? 0 :
3040 REG_RD(bp, MISC_REG_GRC_TIMEOUT_ATTN);
Eilon Greenstein34f80b02008-06-23 20:33:01 -07003041 BNX2X_ERR("GRC time-out 0x%08x\n", val);
3042 }
3043 if (attn & BNX2X_GRC_RSV) {
Dmitry Kravkovf2e08992010-10-06 03:28:26 +00003044 val = CHIP_IS_E1(bp) ? 0 :
3045 REG_RD(bp, MISC_REG_GRC_RSV_ATTN);
Eilon Greenstein34f80b02008-06-23 20:33:01 -07003046 BNX2X_ERR("GRC reserved 0x%08x\n", val);
3047 }
Eliezer Tamir877e9aa2008-02-28 11:55:53 -08003048 REG_WR(bp, MISC_REG_AEU_CLR_LATCH_SIGNAL, 0x7ff);
Eliezer Tamir877e9aa2008-02-28 11:55:53 -08003049 }
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02003050}
3051
Vladislav Zolotarov72fd0712010-04-19 01:13:12 +00003052#define BNX2X_MISC_GEN_REG MISC_REG_GENERIC_POR_1
3053#define LOAD_COUNTER_BITS 16 /* Number of bits for load counter */
3054#define LOAD_COUNTER_MASK (((u32)0x1 << LOAD_COUNTER_BITS) - 1)
3055#define RESET_DONE_FLAG_MASK (~LOAD_COUNTER_MASK)
3056#define RESET_DONE_FLAG_SHIFT LOAD_COUNTER_BITS
3057#define CHIP_PARITY_SUPPORTED(bp) (CHIP_IS_E1(bp) || CHIP_IS_E1H(bp))
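/* Resulting layout of BNX2X_MISC_GEN_REG as used below:
 *   bits [15:0] - load counter (LOAD_COUNTER_MASK = 0xffff)
 *   bit  [16]   - reset-in-progress flag
 * e.g. a raw value of 0x00010002 means a reset is in progress while two
 * functions are counted as loaded.
 */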
3058/*
3059 * should be run under rtnl lock
3060 */
3061static inline void bnx2x_set_reset_done(struct bnx2x *bp)
3062{
3063 u32 val = REG_RD(bp, BNX2X_MISC_GEN_REG);
3064 val &= ~(1 << RESET_DONE_FLAG_SHIFT);
3065 REG_WR(bp, BNX2X_MISC_GEN_REG, val);
3066 barrier();
3067 mmiowb();
3068}
3069
3070/*
3071 * should be run under rtnl lock
3072 */
3073static inline void bnx2x_set_reset_in_progress(struct bnx2x *bp)
3074{
3075 u32 val = REG_RD(bp, BNX2X_MISC_GEN_REG);
3076	val |= (1 << RESET_DONE_FLAG_SHIFT);
3077 REG_WR(bp, BNX2X_MISC_GEN_REG, val);
3078 barrier();
3079 mmiowb();
3080}
3081
3082/*
3083 * should be run under rtnl lock
3084 */
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00003085bool bnx2x_reset_is_done(struct bnx2x *bp)
Vladislav Zolotarov72fd0712010-04-19 01:13:12 +00003086{
3087 u32 val = REG_RD(bp, BNX2X_MISC_GEN_REG);
3088 DP(NETIF_MSG_HW, "GEN_REG_VAL=0x%08x\n", val);
3089 return (val & RESET_DONE_FLAG_MASK) ? false : true;
3090}
3091
3092/*
3093 * should be run under rtnl lock
3094 */
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00003095inline void bnx2x_inc_load_cnt(struct bnx2x *bp)
Vladislav Zolotarov72fd0712010-04-19 01:13:12 +00003096{
3097 u32 val1, val = REG_RD(bp, BNX2X_MISC_GEN_REG);
3098
3099 DP(NETIF_MSG_HW, "Old GEN_REG_VAL=0x%08x\n", val);
3100
3101 val1 = ((val & LOAD_COUNTER_MASK) + 1) & LOAD_COUNTER_MASK;
3102 REG_WR(bp, BNX2X_MISC_GEN_REG, (val & RESET_DONE_FLAG_MASK) | val1);
3103 barrier();
3104 mmiowb();
3105}
3106
3107/*
3108 * should be run under rtnl lock
3109 */
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00003110u32 bnx2x_dec_load_cnt(struct bnx2x *bp)
Vladislav Zolotarov72fd0712010-04-19 01:13:12 +00003111{
3112 u32 val1, val = REG_RD(bp, BNX2X_MISC_GEN_REG);
3113
3114 DP(NETIF_MSG_HW, "Old GEN_REG_VAL=0x%08x\n", val);
3115
3116 val1 = ((val & LOAD_COUNTER_MASK) - 1) & LOAD_COUNTER_MASK;
3117 REG_WR(bp, BNX2X_MISC_GEN_REG, (val & RESET_DONE_FLAG_MASK) | val1);
3118 barrier();
3119 mmiowb();
3120
3121 return val1;
3122}
3123
3124/*
3125 * should be run under rtnl lock
3126 */
3127static inline u32 bnx2x_get_load_cnt(struct bnx2x *bp)
3128{
3129 return REG_RD(bp, BNX2X_MISC_GEN_REG) & LOAD_COUNTER_MASK;
3130}
3131
3132static inline void bnx2x_clear_load_cnt(struct bnx2x *bp)
3133{
3134 u32 val = REG_RD(bp, BNX2X_MISC_GEN_REG);
3135 REG_WR(bp, BNX2X_MISC_GEN_REG, val & (~LOAD_COUNTER_MASK));
3136}
3137
3138static inline void _print_next_block(int idx, const char *blk)
3139{
3140 if (idx)
3141 pr_cont(", ");
3142 pr_cont("%s", blk);
3143}
3144
3145static inline int bnx2x_print_blocks_with_parity0(u32 sig, int par_num)
3146{
3147 int i = 0;
3148 u32 cur_bit = 0;
3149 for (i = 0; sig; i++) {
3150 cur_bit = ((u32)0x1 << i);
3151 if (sig & cur_bit) {
3152 switch (cur_bit) {
3153 case AEU_INPUTS_ATTN_BITS_BRB_PARITY_ERROR:
3154 _print_next_block(par_num++, "BRB");
3155 break;
3156 case AEU_INPUTS_ATTN_BITS_PARSER_PARITY_ERROR:
3157 _print_next_block(par_num++, "PARSER");
3158 break;
3159 case AEU_INPUTS_ATTN_BITS_TSDM_PARITY_ERROR:
3160 _print_next_block(par_num++, "TSDM");
3161 break;
3162 case AEU_INPUTS_ATTN_BITS_SEARCHER_PARITY_ERROR:
3163 _print_next_block(par_num++, "SEARCHER");
3164 break;
3165 case AEU_INPUTS_ATTN_BITS_TSEMI_PARITY_ERROR:
3166 _print_next_block(par_num++, "TSEMI");
3167 break;
3168 }
3169
3170 /* Clear the bit */
3171 sig &= ~cur_bit;
3172 }
3173 }
3174
3175 return par_num;
3176}
3177
3178static inline int bnx2x_print_blocks_with_parity1(u32 sig, int par_num)
3179{
3180 int i = 0;
3181 u32 cur_bit = 0;
3182 for (i = 0; sig; i++) {
3183 cur_bit = ((u32)0x1 << i);
3184 if (sig & cur_bit) {
3185 switch (cur_bit) {
3186 case AEU_INPUTS_ATTN_BITS_PBCLIENT_PARITY_ERROR:
3187 _print_next_block(par_num++, "PBCLIENT");
3188 break;
3189 case AEU_INPUTS_ATTN_BITS_QM_PARITY_ERROR:
3190 _print_next_block(par_num++, "QM");
3191 break;
3192 case AEU_INPUTS_ATTN_BITS_XSDM_PARITY_ERROR:
3193 _print_next_block(par_num++, "XSDM");
3194 break;
3195 case AEU_INPUTS_ATTN_BITS_XSEMI_PARITY_ERROR:
3196 _print_next_block(par_num++, "XSEMI");
3197 break;
3198 case AEU_INPUTS_ATTN_BITS_DOORBELLQ_PARITY_ERROR:
3199 _print_next_block(par_num++, "DOORBELLQ");
3200 break;
3201 case AEU_INPUTS_ATTN_BITS_VAUX_PCI_CORE_PARITY_ERROR:
3202 _print_next_block(par_num++, "VAUX PCI CORE");
3203 break;
3204 case AEU_INPUTS_ATTN_BITS_DEBUG_PARITY_ERROR:
3205 _print_next_block(par_num++, "DEBUG");
3206 break;
3207 case AEU_INPUTS_ATTN_BITS_USDM_PARITY_ERROR:
3208 _print_next_block(par_num++, "USDM");
3209 break;
3210 case AEU_INPUTS_ATTN_BITS_USEMI_PARITY_ERROR:
3211 _print_next_block(par_num++, "USEMI");
3212 break;
3213 case AEU_INPUTS_ATTN_BITS_UPB_PARITY_ERROR:
3214 _print_next_block(par_num++, "UPB");
3215 break;
3216 case AEU_INPUTS_ATTN_BITS_CSDM_PARITY_ERROR:
3217 _print_next_block(par_num++, "CSDM");
3218 break;
3219 }
3220
3221 /* Clear the bit */
3222 sig &= ~cur_bit;
3223 }
3224 }
3225
3226 return par_num;
3227}
3228
3229static inline int bnx2x_print_blocks_with_parity2(u32 sig, int par_num)
3230{
3231 int i = 0;
3232 u32 cur_bit = 0;
3233 for (i = 0; sig; i++) {
3234 cur_bit = ((u32)0x1 << i);
3235 if (sig & cur_bit) {
3236 switch (cur_bit) {
3237 case AEU_INPUTS_ATTN_BITS_CSEMI_PARITY_ERROR:
3238 _print_next_block(par_num++, "CSEMI");
3239 break;
3240 case AEU_INPUTS_ATTN_BITS_PXP_PARITY_ERROR:
3241 _print_next_block(par_num++, "PXP");
3242 break;
3243 case AEU_IN_ATTN_BITS_PXPPCICLOCKCLIENT_PARITY_ERROR:
3244 _print_next_block(par_num++,
3245 "PXPPCICLOCKCLIENT");
3246 break;
3247 case AEU_INPUTS_ATTN_BITS_CFC_PARITY_ERROR:
3248 _print_next_block(par_num++, "CFC");
3249 break;
3250 case AEU_INPUTS_ATTN_BITS_CDU_PARITY_ERROR:
3251 _print_next_block(par_num++, "CDU");
3252 break;
3253 case AEU_INPUTS_ATTN_BITS_IGU_PARITY_ERROR:
3254 _print_next_block(par_num++, "IGU");
3255 break;
3256 case AEU_INPUTS_ATTN_BITS_MISC_PARITY_ERROR:
3257 _print_next_block(par_num++, "MISC");
3258 break;
3259 }
3260
3261 /* Clear the bit */
3262 sig &= ~cur_bit;
3263 }
3264 }
3265
3266 return par_num;
3267}
3268
3269static inline int bnx2x_print_blocks_with_parity3(u32 sig, int par_num)
3270{
3271 int i = 0;
3272 u32 cur_bit = 0;
3273 for (i = 0; sig; i++) {
3274 cur_bit = ((u32)0x1 << i);
3275 if (sig & cur_bit) {
3276 switch (cur_bit) {
3277 case AEU_INPUTS_ATTN_BITS_MCP_LATCHED_ROM_PARITY:
3278 _print_next_block(par_num++, "MCP ROM");
3279 break;
3280 case AEU_INPUTS_ATTN_BITS_MCP_LATCHED_UMP_RX_PARITY:
3281 _print_next_block(par_num++, "MCP UMP RX");
3282 break;
3283 case AEU_INPUTS_ATTN_BITS_MCP_LATCHED_UMP_TX_PARITY:
3284 _print_next_block(par_num++, "MCP UMP TX");
3285 break;
3286 case AEU_INPUTS_ATTN_BITS_MCP_LATCHED_SCPAD_PARITY:
3287 _print_next_block(par_num++, "MCP SCPAD");
3288 break;
3289 }
3290
3291 /* Clear the bit */
3292 sig &= ~cur_bit;
3293 }
3294 }
3295
3296 return par_num;
3297}
3298
3299static inline bool bnx2x_parity_attn(struct bnx2x *bp, u32 sig0, u32 sig1,
3300 u32 sig2, u32 sig3)
3301{
3302 if ((sig0 & HW_PRTY_ASSERT_SET_0) || (sig1 & HW_PRTY_ASSERT_SET_1) ||
3303 (sig2 & HW_PRTY_ASSERT_SET_2) || (sig3 & HW_PRTY_ASSERT_SET_3)) {
3304 int par_num = 0;
3305 DP(NETIF_MSG_HW, "Was parity error: HW block parity attention: "
3306 "[0]:0x%08x [1]:0x%08x "
3307 "[2]:0x%08x [3]:0x%08x\n",
3308 sig0 & HW_PRTY_ASSERT_SET_0,
3309 sig1 & HW_PRTY_ASSERT_SET_1,
3310 sig2 & HW_PRTY_ASSERT_SET_2,
3311 sig3 & HW_PRTY_ASSERT_SET_3);
3312 printk(KERN_ERR"%s: Parity errors detected in blocks: ",
3313 bp->dev->name);
3314 par_num = bnx2x_print_blocks_with_parity0(
3315 sig0 & HW_PRTY_ASSERT_SET_0, par_num);
3316 par_num = bnx2x_print_blocks_with_parity1(
3317 sig1 & HW_PRTY_ASSERT_SET_1, par_num);
3318 par_num = bnx2x_print_blocks_with_parity2(
3319 sig2 & HW_PRTY_ASSERT_SET_2, par_num);
3320 par_num = bnx2x_print_blocks_with_parity3(
3321 sig3 & HW_PRTY_ASSERT_SET_3, par_num);
3322		pr_cont("\n");
3323 return true;
3324 } else
3325 return false;
3326}
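/* Example console output when, say, the BRB and QM blocks latch parity
 * errors:
 *
 *   eth0: Parity errors detected in blocks: BRB, QM
 */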
3327
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00003328bool bnx2x_chk_parity_attn(struct bnx2x *bp)
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02003329{
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02003330 struct attn_route attn;
Vladislav Zolotarov72fd0712010-04-19 01:13:12 +00003331 int port = BP_PORT(bp);
3332
3333 attn.sig[0] = REG_RD(bp,
3334 MISC_REG_AEU_AFTER_INVERT_1_FUNC_0 +
3335 port*4);
3336 attn.sig[1] = REG_RD(bp,
3337 MISC_REG_AEU_AFTER_INVERT_2_FUNC_0 +
3338 port*4);
3339 attn.sig[2] = REG_RD(bp,
3340 MISC_REG_AEU_AFTER_INVERT_3_FUNC_0 +
3341 port*4);
3342 attn.sig[3] = REG_RD(bp,
3343 MISC_REG_AEU_AFTER_INVERT_4_FUNC_0 +
3344 port*4);
3345
3346 return bnx2x_parity_attn(bp, attn.sig[0], attn.sig[1], attn.sig[2],
3347 attn.sig[3]);
3348}
3349
Dmitry Kravkovf2e08992010-10-06 03:28:26 +00003350
3351static inline void bnx2x_attn_int_deasserted4(struct bnx2x *bp, u32 attn)
3352{
3353 u32 val;
3354 if (attn & AEU_INPUTS_ATTN_BITS_PGLUE_HW_INTERRUPT) {
3355
3356 val = REG_RD(bp, PGLUE_B_REG_PGLUE_B_INT_STS_CLR);
3357 BNX2X_ERR("PGLUE hw attention 0x%x\n", val);
3358 if (val & PGLUE_B_PGLUE_B_INT_STS_REG_ADDRESS_ERROR)
3359 BNX2X_ERR("PGLUE_B_PGLUE_B_INT_STS_REG_"
3360 "ADDRESS_ERROR\n");
3361 if (val & PGLUE_B_PGLUE_B_INT_STS_REG_INCORRECT_RCV_BEHAVIOR)
3362 BNX2X_ERR("PGLUE_B_PGLUE_B_INT_STS_REG_"
3363 "INCORRECT_RCV_BEHAVIOR\n");
3364 if (val & PGLUE_B_PGLUE_B_INT_STS_REG_WAS_ERROR_ATTN)
3365 BNX2X_ERR("PGLUE_B_PGLUE_B_INT_STS_REG_"
3366 "WAS_ERROR_ATTN\n");
3367 if (val & PGLUE_B_PGLUE_B_INT_STS_REG_VF_LENGTH_VIOLATION_ATTN)
3368 BNX2X_ERR("PGLUE_B_PGLUE_B_INT_STS_REG_"
3369 "VF_LENGTH_VIOLATION_ATTN\n");
3370 if (val &
3371 PGLUE_B_PGLUE_B_INT_STS_REG_VF_GRC_SPACE_VIOLATION_ATTN)
3372 BNX2X_ERR("PGLUE_B_PGLUE_B_INT_STS_REG_"
3373 "VF_GRC_SPACE_VIOLATION_ATTN\n");
3374 if (val &
3375 PGLUE_B_PGLUE_B_INT_STS_REG_VF_MSIX_BAR_VIOLATION_ATTN)
3376 BNX2X_ERR("PGLUE_B_PGLUE_B_INT_STS_REG_"
3377 "VF_MSIX_BAR_VIOLATION_ATTN\n");
3378 if (val & PGLUE_B_PGLUE_B_INT_STS_REG_TCPL_ERROR_ATTN)
3379 BNX2X_ERR("PGLUE_B_PGLUE_B_INT_STS_REG_"
3380 "TCPL_ERROR_ATTN\n");
3381 if (val & PGLUE_B_PGLUE_B_INT_STS_REG_TCPL_IN_TWO_RCBS_ATTN)
3382 BNX2X_ERR("PGLUE_B_PGLUE_B_INT_STS_REG_"
3383 "TCPL_IN_TWO_RCBS_ATTN\n");
3384 if (val & PGLUE_B_PGLUE_B_INT_STS_REG_CSSNOOP_FIFO_OVERFLOW)
3385 BNX2X_ERR("PGLUE_B_PGLUE_B_INT_STS_REG_"
3386 "CSSNOOP_FIFO_OVERFLOW\n");
3387 }
3388 if (attn & AEU_INPUTS_ATTN_BITS_ATC_HW_INTERRUPT) {
3389 val = REG_RD(bp, ATC_REG_ATC_INT_STS_CLR);
3390 BNX2X_ERR("ATC hw attention 0x%x\n", val);
3391 if (val & ATC_ATC_INT_STS_REG_ADDRESS_ERROR)
3392 BNX2X_ERR("ATC_ATC_INT_STS_REG_ADDRESS_ERROR\n");
3393 if (val & ATC_ATC_INT_STS_REG_ATC_TCPL_TO_NOT_PEND)
3394 BNX2X_ERR("ATC_ATC_INT_STS_REG"
3395 "_ATC_TCPL_TO_NOT_PEND\n");
3396 if (val & ATC_ATC_INT_STS_REG_ATC_GPA_MULTIPLE_HITS)
3397 BNX2X_ERR("ATC_ATC_INT_STS_REG_"
3398 "ATC_GPA_MULTIPLE_HITS\n");
3399 if (val & ATC_ATC_INT_STS_REG_ATC_RCPL_TO_EMPTY_CNT)
3400 BNX2X_ERR("ATC_ATC_INT_STS_REG_"
3401 "ATC_RCPL_TO_EMPTY_CNT\n");
3402 if (val & ATC_ATC_INT_STS_REG_ATC_TCPL_ERROR)
3403 BNX2X_ERR("ATC_ATC_INT_STS_REG_ATC_TCPL_ERROR\n");
3404 if (val & ATC_ATC_INT_STS_REG_ATC_IREQ_LESS_THAN_STU)
3405 BNX2X_ERR("ATC_ATC_INT_STS_REG_"
3406 "ATC_IREQ_LESS_THAN_STU\n");
3407 }
3408
3409 if (attn & (AEU_INPUTS_ATTN_BITS_PGLUE_PARITY_ERROR |
3410 AEU_INPUTS_ATTN_BITS_ATC_PARITY_ERROR)) {
3411 BNX2X_ERR("FATAL parity attention set4 0x%x\n",
3412 (u32)(attn & (AEU_INPUTS_ATTN_BITS_PGLUE_PARITY_ERROR |
3413 AEU_INPUTS_ATTN_BITS_ATC_PARITY_ERROR)));
3414 }
3415
3416}
3417
Vladislav Zolotarov72fd0712010-04-19 01:13:12 +00003418static void bnx2x_attn_int_deasserted(struct bnx2x *bp, u32 deasserted)
3419{
3420 struct attn_route attn, *group_mask;
Eilon Greenstein34f80b02008-06-23 20:33:01 -07003421 int port = BP_PORT(bp);
Eliezer Tamir877e9aa2008-02-28 11:55:53 -08003422 int index;
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02003423 u32 reg_addr;
3424 u32 val;
Eilon Greenstein3fcaf2e2008-08-13 15:50:45 -07003425 u32 aeu_mask;
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02003426
3427 /* need to take HW lock because MCP or other port might also
3428 try to handle this event */
Yitchak Gertner4a37fb62008-08-13 15:50:23 -07003429 bnx2x_acquire_alr(bp);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02003430
Vladislav Zolotarov72fd0712010-04-19 01:13:12 +00003431 if (bnx2x_chk_parity_attn(bp)) {
3432 bp->recovery_state = BNX2X_RECOVERY_INIT;
3433 bnx2x_set_reset_in_progress(bp);
3434 schedule_delayed_work(&bp->reset_task, 0);
3435 /* Disable HW interrupts */
3436 bnx2x_int_disable(bp);
3437 bnx2x_release_alr(bp);
3438 /* In case of parity errors don't handle attentions so that
3439 * other function would "see" parity errors.
3440 */
3441 return;
3442 }
3443
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02003444 attn.sig[0] = REG_RD(bp, MISC_REG_AEU_AFTER_INVERT_1_FUNC_0 + port*4);
3445 attn.sig[1] = REG_RD(bp, MISC_REG_AEU_AFTER_INVERT_2_FUNC_0 + port*4);
3446 attn.sig[2] = REG_RD(bp, MISC_REG_AEU_AFTER_INVERT_3_FUNC_0 + port*4);
3447 attn.sig[3] = REG_RD(bp, MISC_REG_AEU_AFTER_INVERT_4_FUNC_0 + port*4);
Dmitry Kravkovf2e08992010-10-06 03:28:26 +00003448 if (CHIP_IS_E2(bp))
3449 attn.sig[4] =
3450 REG_RD(bp, MISC_REG_AEU_AFTER_INVERT_5_FUNC_0 + port*4);
3451 else
3452 attn.sig[4] = 0;
3453
3454 DP(NETIF_MSG_HW, "attn: %08x %08x %08x %08x %08x\n",
3455 attn.sig[0], attn.sig[1], attn.sig[2], attn.sig[3], attn.sig[4]);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02003456
3457 for (index = 0; index < MAX_DYNAMIC_ATTN_GRPS; index++) {
3458 if (deasserted & (1 << index)) {
Vladislav Zolotarov72fd0712010-04-19 01:13:12 +00003459 group_mask = &bp->attn_group[index];
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02003460
Dmitry Kravkovf2e08992010-10-06 03:28:26 +00003461 DP(NETIF_MSG_HW, "group[%d]: %08x %08x "
3462 "%08x %08x %08x\n",
3463 index,
3464 group_mask->sig[0], group_mask->sig[1],
3465 group_mask->sig[2], group_mask->sig[3],
3466 group_mask->sig[4]);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02003467
Dmitry Kravkovf2e08992010-10-06 03:28:26 +00003468 bnx2x_attn_int_deasserted4(bp,
3469 attn.sig[4] & group_mask->sig[4]);
Eliezer Tamir877e9aa2008-02-28 11:55:53 -08003470 bnx2x_attn_int_deasserted3(bp,
Vladislav Zolotarov72fd0712010-04-19 01:13:12 +00003471 attn.sig[3] & group_mask->sig[3]);
Eliezer Tamir877e9aa2008-02-28 11:55:53 -08003472 bnx2x_attn_int_deasserted1(bp,
Vladislav Zolotarov72fd0712010-04-19 01:13:12 +00003473 attn.sig[1] & group_mask->sig[1]);
Eliezer Tamir877e9aa2008-02-28 11:55:53 -08003474 bnx2x_attn_int_deasserted2(bp,
Vladislav Zolotarov72fd0712010-04-19 01:13:12 +00003475 attn.sig[2] & group_mask->sig[2]);
Eliezer Tamir877e9aa2008-02-28 11:55:53 -08003476 bnx2x_attn_int_deasserted0(bp,
Vladislav Zolotarov72fd0712010-04-19 01:13:12 +00003477 attn.sig[0] & group_mask->sig[0]);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02003478 }
3479 }
3480
Yitchak Gertner4a37fb62008-08-13 15:50:23 -07003481 bnx2x_release_alr(bp);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02003482
Dmitry Kravkovf2e08992010-10-06 03:28:26 +00003483 if (bp->common.int_block == INT_BLOCK_HC)
3484 reg_addr = (HC_REG_COMMAND_REG + port*32 +
3485 COMMAND_REG_ATTN_BITS_CLR);
3486 else
3487 reg_addr = (BAR_IGU_INTMEM + IGU_CMD_ATTN_BIT_CLR_UPPER*8);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02003488
3489 val = ~deasserted;
Dmitry Kravkovf2e08992010-10-06 03:28:26 +00003490 DP(NETIF_MSG_HW, "about to mask 0x%08x at %s addr 0x%x\n", val,
3491 (bp->common.int_block == INT_BLOCK_HC) ? "HC" : "IGU", reg_addr);
Eilon Greenstein5c862842008-08-13 15:51:48 -07003492 REG_WR(bp, reg_addr, val);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02003493
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02003494 if (~bp->attn_state & deasserted)
Eilon Greenstein3fcaf2e2008-08-13 15:50:45 -07003495 BNX2X_ERR("IGU ERROR\n");
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02003496
3497 reg_addr = port ? MISC_REG_AEU_MASK_ATTN_FUNC_1 :
3498 MISC_REG_AEU_MASK_ATTN_FUNC_0;
3499
Eilon Greenstein3fcaf2e2008-08-13 15:50:45 -07003500 bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_PORT0_ATT_MASK + port);
3501 aeu_mask = REG_RD(bp, reg_addr);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02003502
Eilon Greenstein3fcaf2e2008-08-13 15:50:45 -07003503 DP(NETIF_MSG_HW, "aeu_mask %x newly deasserted %x\n",
3504 aeu_mask, deasserted);
Vladislav Zolotarov72fd0712010-04-19 01:13:12 +00003505 aeu_mask |= (deasserted & 0x3ff);
Eilon Greenstein3fcaf2e2008-08-13 15:50:45 -07003506 DP(NETIF_MSG_HW, "new mask %x\n", aeu_mask);
3507
3508 REG_WR(bp, reg_addr, aeu_mask);
3509 bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_PORT0_ATT_MASK + port);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02003510
3511 DP(NETIF_MSG_HW, "attn_state %x\n", bp->attn_state);
3512 bp->attn_state &= ~deasserted;
3513 DP(NETIF_MSG_HW, "new state %x\n", bp->attn_state);
3514}
3515
3516static void bnx2x_attn_int(struct bnx2x *bp)
3517{
3518 /* read local copy of bits */
Eilon Greenstein68d59482009-01-14 21:27:36 -08003519 u32 attn_bits = le32_to_cpu(bp->def_status_blk->atten_status_block.
3520 attn_bits);
3521 u32 attn_ack = le32_to_cpu(bp->def_status_blk->atten_status_block.
3522 attn_bits_ack);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02003523 u32 attn_state = bp->attn_state;
3524
3525 /* look for changed bits */
3526 u32 asserted = attn_bits & ~attn_ack & ~attn_state;
3527 u32 deasserted = ~attn_bits & attn_ack & attn_state;
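	/* A bit is newly "asserted" when the chip raised it (attn_bits) but
	 * it is not yet acked nor tracked in attn_state; it is "deasserted"
	 * once the chip drops it while it is still acked and tracked as set.
	 */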
3528
3529 DP(NETIF_MSG_HW,
3530 "attn_bits %x attn_ack %x asserted %x deasserted %x\n",
3531 attn_bits, attn_ack, asserted, deasserted);
3532
3533 if (~(attn_bits ^ attn_ack) & (attn_bits ^ attn_state))
Eilon Greenstein34f80b02008-06-23 20:33:01 -07003534 BNX2X_ERR("BAD attention state\n");
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02003535
3536 /* handle bits that were raised */
3537 if (asserted)
3538 bnx2x_attn_int_asserted(bp, asserted);
3539
3540 if (deasserted)
3541 bnx2x_attn_int_deasserted(bp, deasserted);
3542}
3543
Dmitry Kravkov523224a2010-10-06 03:23:26 +00003544static inline void bnx2x_update_eq_prod(struct bnx2x *bp, u16 prod)
3545{
3546 /* No memory barriers */
3547 storm_memset_eq_prod(bp, prod, BP_FUNC(bp));
3548 mmiowb(); /* keep prod updates ordered */
3549}
3550
3551#ifdef BCM_CNIC
3552static int bnx2x_cnic_handle_cfc_del(struct bnx2x *bp, u32 cid,
3553 union event_ring_elem *elem)
3554{
3555 if (!bp->cnic_eth_dev.starting_cid ||
3556 cid < bp->cnic_eth_dev.starting_cid)
3557 return 1;
3558
3559 DP(BNX2X_MSG_SP, "got delete ramrod for CNIC CID %d\n", cid);
3560
3561 if (unlikely(elem->message.data.cfc_del_event.error)) {
3562 BNX2X_ERR("got delete ramrod for CNIC CID %d with error!\n",
3563 cid);
3564 bnx2x_panic_dump(bp);
3565 }
3566 bnx2x_cnic_cfc_comp(bp, cid);
3567 return 0;
3568}
3569#endif
3570
3571static void bnx2x_eq_int(struct bnx2x *bp)
3572{
3573 u16 hw_cons, sw_cons, sw_prod;
3574 union event_ring_elem *elem;
3575 u32 cid;
3576 u8 opcode;
3577 int spqe_cnt = 0;
3578
3579 hw_cons = le16_to_cpu(*bp->eq_cons_sb);
3580
3581 /* The hw_cos range is 1-255, 257 - the sw_cons range is 0-254, 256.
3582 * when we get the the next-page we nned to adjust so the loop
3583 * condition below will be met. The next element is the size of a
3584 * regular element and hence incrementing by 1
3585 */
3586 if ((hw_cons & EQ_DESC_MAX_PAGE) == EQ_DESC_MAX_PAGE)
3587 hw_cons++;
3588
3589	/* This function may never run in parallel with itself for a
3590	 * specific bp, thus there is no need for a "paired" read memory
3591 * barrier here.
3592 */
3593 sw_cons = bp->eq_cons;
3594 sw_prod = bp->eq_prod;
3595
3596 DP(BNX2X_MSG_SP, "EQ: hw_cons %u sw_cons %u bp->spq_left %u\n",
Dmitry Kravkov8fe23fb2010-10-06 03:27:41 +00003597 hw_cons, sw_cons, atomic_read(&bp->spq_left));
Dmitry Kravkov523224a2010-10-06 03:23:26 +00003598
3599 for (; sw_cons != hw_cons;
3600 sw_prod = NEXT_EQ_IDX(sw_prod), sw_cons = NEXT_EQ_IDX(sw_cons)) {
3601
3602
3603 elem = &bp->eq_ring[EQ_DESC(sw_cons)];
3604
3605 cid = SW_CID(elem->message.data.cfc_del_event.cid);
3606 opcode = elem->message.opcode;
3607
3608
3609 /* handle eq element */
3610 switch (opcode) {
3611 case EVENT_RING_OPCODE_STAT_QUERY:
3612 DP(NETIF_MSG_TIMER, "got statistics comp event\n");
3613 /* nothing to do with stats comp */
3614 continue;
3615
3616 case EVENT_RING_OPCODE_CFC_DEL:
3617 /* handle according to cid range */
3618 /*
3619 * we may want to verify here that the bp state is
3620 * HALTING
3621 */
3622 DP(NETIF_MSG_IFDOWN,
3623 "got delete ramrod for MULTI[%d]\n", cid);
3624#ifdef BCM_CNIC
3625 if (!bnx2x_cnic_handle_cfc_del(bp, cid, elem))
3626 goto next_spqe;
3627#endif
3628 bnx2x_fp(bp, cid, state) =
3629 BNX2X_FP_STATE_CLOSED;
3630
3631 goto next_spqe;
3632 }
3633
3634 switch (opcode | bp->state) {
3635 case (EVENT_RING_OPCODE_FUNCTION_START |
3636 BNX2X_STATE_OPENING_WAIT4_PORT):
3637 DP(NETIF_MSG_IFUP, "got setup ramrod\n");
3638 bp->state = BNX2X_STATE_FUNC_STARTED;
3639 break;
3640
3641 case (EVENT_RING_OPCODE_FUNCTION_STOP |
3642 BNX2X_STATE_CLOSING_WAIT4_HALT):
3643 DP(NETIF_MSG_IFDOWN, "got halt ramrod\n");
3644 bp->state = BNX2X_STATE_CLOSING_WAIT4_UNLOAD;
3645 break;
3646
3647 case (EVENT_RING_OPCODE_SET_MAC | BNX2X_STATE_OPEN):
3648 case (EVENT_RING_OPCODE_SET_MAC | BNX2X_STATE_DIAG):
3649 DP(NETIF_MSG_IFUP, "got set mac ramrod\n");
3650 bp->set_mac_pending = 0;
3651 break;
3652
3653 case (EVENT_RING_OPCODE_SET_MAC |
3654 BNX2X_STATE_CLOSING_WAIT4_HALT):
3655 DP(NETIF_MSG_IFDOWN, "got (un)set mac ramrod\n");
3656 bp->set_mac_pending = 0;
3657 break;
3658 default:
3659 /* unknown event log error and continue */
3660 BNX2X_ERR("Unknown EQ event %d\n",
3661 elem->message.opcode);
3662 }
3663next_spqe:
3664 spqe_cnt++;
3665 } /* for */
3666
Dmitry Kravkov8fe23fb2010-10-06 03:27:41 +00003667 smp_mb__before_atomic_inc();
3668 atomic_add(spqe_cnt, &bp->spq_left);
Dmitry Kravkov523224a2010-10-06 03:23:26 +00003669
3670 bp->eq_cons = sw_cons;
3671 bp->eq_prod = sw_prod;
3672 /* Make sure that above mem writes were issued towards the memory */
3673 smp_wmb();
3674
3675 /* update producer */
3676 bnx2x_update_eq_prod(bp, bp->eq_prod);
3677}
3678
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02003679static void bnx2x_sp_task(struct work_struct *work)
3680{
Eilon Greenstein1cf167f2009-01-14 21:22:18 -08003681 struct bnx2x *bp = container_of(work, struct bnx2x, sp_task.work);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02003682 u16 status;
3683
3684 /* Return here if interrupt is disabled */
3685 if (unlikely(atomic_read(&bp->intr_sem) != 0)) {
Eilon Greenstein3196a882008-08-13 15:58:49 -07003686 DP(NETIF_MSG_INTR, "called but intr_sem not 0, returning\n");
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02003687 return;
3688 }
3689
3690 status = bnx2x_update_dsb_idx(bp);
Eilon Greenstein34f80b02008-06-23 20:33:01 -07003691/* if (status == 0) */
3692/* BNX2X_ERR("spurious slowpath interrupt!\n"); */
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02003693
Vladislav Zolotarovcdaa7cb2010-04-19 01:13:57 +00003694 DP(NETIF_MSG_INTR, "got a slowpath interrupt (status 0x%x)\n", status);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02003695
Eliezer Tamir877e9aa2008-02-28 11:55:53 -08003696 /* HW attentions */
Dmitry Kravkov523224a2010-10-06 03:23:26 +00003697 if (status & BNX2X_DEF_SB_ATT_IDX) {
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02003698 bnx2x_attn_int(bp);
Dmitry Kravkov523224a2010-10-06 03:23:26 +00003699 status &= ~BNX2X_DEF_SB_ATT_IDX;
Vladislav Zolotarovcdaa7cb2010-04-19 01:13:57 +00003700 }
3701
Dmitry Kravkov523224a2010-10-06 03:23:26 +00003702 /* SP events: STAT_QUERY and others */
3703 if (status & BNX2X_DEF_SB_IDX) {
3704
3705 /* Handle EQ completions */
3706 bnx2x_eq_int(bp);
3707
3708 bnx2x_ack_sb(bp, bp->igu_dsb_id, USTORM_ID,
3709 le16_to_cpu(bp->def_idx), IGU_INT_NOP, 1);
3710
3711 status &= ~BNX2X_DEF_SB_IDX;
Vladislav Zolotarovcdaa7cb2010-04-19 01:13:57 +00003712 }
3713
3714 if (unlikely(status))
3715 DP(NETIF_MSG_INTR, "got an unknown interrupt! (status 0x%x)\n",
3716 status);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02003717
Dmitry Kravkov523224a2010-10-06 03:23:26 +00003718 bnx2x_ack_sb(bp, bp->igu_dsb_id, ATTENTION_ID,
3719 le16_to_cpu(bp->def_att_idx), IGU_INT_ENABLE, 1);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02003720}
3721
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00003722irqreturn_t bnx2x_msix_sp_int(int irq, void *dev_instance)
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02003723{
3724 struct net_device *dev = dev_instance;
3725 struct bnx2x *bp = netdev_priv(dev);
3726
3727 /* Return here if interrupt is disabled */
3728 if (unlikely(atomic_read(&bp->intr_sem) != 0)) {
Eilon Greenstein3196a882008-08-13 15:58:49 -07003729 DP(NETIF_MSG_INTR, "called but intr_sem not 0, returning\n");
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02003730 return IRQ_HANDLED;
3731 }
3732
Dmitry Kravkov523224a2010-10-06 03:23:26 +00003733 bnx2x_ack_sb(bp, bp->igu_dsb_id, USTORM_ID, 0,
3734 IGU_INT_DISABLE, 0);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02003735
3736#ifdef BNX2X_STOP_ON_ERROR
3737 if (unlikely(bp->panic))
3738 return IRQ_HANDLED;
3739#endif
3740
Michael Chan993ac7b2009-10-10 13:46:56 +00003741#ifdef BCM_CNIC
3742 {
3743 struct cnic_ops *c_ops;
3744
3745 rcu_read_lock();
3746 c_ops = rcu_dereference(bp->cnic_ops);
3747 if (c_ops)
3748 c_ops->cnic_handler(bp->cnic_data, NULL);
3749 rcu_read_unlock();
3750 }
3751#endif
Eilon Greenstein1cf167f2009-01-14 21:22:18 -08003752 queue_delayed_work(bnx2x_wq, &bp->sp_task, 0);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02003753
3754 return IRQ_HANDLED;
3755}
3756
3757/* end of slow path */
3758
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02003759static void bnx2x_timer(unsigned long data)
3760{
3761 struct bnx2x *bp = (struct bnx2x *) data;
3762
3763 if (!netif_running(bp->dev))
3764 return;
3765
3766 if (atomic_read(&bp->intr_sem) != 0)
Eliezer Tamirf1410642008-02-28 11:51:50 -08003767 goto timer_restart;
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02003768
3769 if (poll) {
3770 struct bnx2x_fastpath *fp = &bp->fp[0];
3771 int rc;
3772
Eilon Greenstein7961f792009-03-02 07:59:31 +00003773 bnx2x_tx_int(fp);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02003774 rc = bnx2x_rx_int(fp, 1000);
3775 }
3776
Eilon Greenstein34f80b02008-06-23 20:33:01 -07003777 if (!BP_NOMCP(bp)) {
Dmitry Kravkovf2e08992010-10-06 03:28:26 +00003778 int mb_idx = BP_FW_MB_IDX(bp);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02003779 u32 drv_pulse;
3780 u32 mcp_pulse;
3781
3782 ++bp->fw_drv_pulse_wr_seq;
3783 bp->fw_drv_pulse_wr_seq &= DRV_PULSE_SEQ_MASK;
3784 /* TBD - add SYSTEM_TIME */
3785 drv_pulse = bp->fw_drv_pulse_wr_seq;
Dmitry Kravkovf2e08992010-10-06 03:28:26 +00003786 SHMEM_WR(bp, func_mb[mb_idx].drv_pulse_mb, drv_pulse);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02003787
Dmitry Kravkovf2e08992010-10-06 03:28:26 +00003788 mcp_pulse = (SHMEM_RD(bp, func_mb[mb_idx].mcp_pulse_mb) &
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02003789 MCP_PULSE_SEQ_MASK);
3790 /* The delta between driver pulse and mcp response
3791 * should be 1 (before mcp response) or 0 (after mcp response)
3792 */
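		/* e.g. drv_pulse 0x0005 vs mcp_pulse 0x0004 is still healthy
		 * (the MCP simply has not answered yet), while 0x0005 vs
		 * 0x0002 triggers the lost-heartbeat error below.
		 */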
3793 if ((drv_pulse != mcp_pulse) &&
3794 (drv_pulse != ((mcp_pulse + 1) & MCP_PULSE_SEQ_MASK))) {
3795 /* someone lost a heartbeat... */
3796 BNX2X_ERR("drv_pulse (0x%x) != mcp_pulse (0x%x)\n",
3797 drv_pulse, mcp_pulse);
3798 }
3799 }
3800
Eilon Greensteinf34d28e2009-10-15 00:18:08 -07003801 if (bp->state == BNX2X_STATE_OPEN)
Yitchak Gertnerbb2a0f72008-06-23 20:33:36 -07003802 bnx2x_stats_handle(bp, STATS_EVENT_UPDATE);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02003803
Eliezer Tamirf1410642008-02-28 11:51:50 -08003804timer_restart:
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02003805 mod_timer(&bp->timer, jiffies + bp->current_interval);
3806}
3807
3808/* end of Statistics */
3809
3810/* nic init */
3811
3812/*
3813 * nic init service functions
3814 */
3815
Dmitry Kravkov523224a2010-10-06 03:23:26 +00003816static inline void bnx2x_fill(struct bnx2x *bp, u32 addr, int fill, u32 len)
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02003817{
Dmitry Kravkov523224a2010-10-06 03:23:26 +00003818 u32 i;
3819 if (!(len%4) && !(addr%4))
3820 for (i = 0; i < len; i += 4)
3821 REG_WR(bp, addr + i, fill);
3822 else
3823 for (i = 0; i < len; i++)
3824 REG_WR8(bp, addr + i, fill);
Eilon Greenstein34f80b02008-06-23 20:33:01 -07003825
Eilon Greenstein34f80b02008-06-23 20:33:01 -07003826}
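/* Note that the byte-wide fallback truncates 'fill' to its low byte
 * (REG_WR8), so a repeating 32-bit pattern is only preserved on the
 * dword-aligned path; the zeroing callers below are unaffected.
 */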
3827
Dmitry Kravkov523224a2010-10-06 03:23:26 +00003828/* helper: writes FP SP data to FW - data_size in dwords */
3829static inline void bnx2x_wr_fp_sb_data(struct bnx2x *bp,
3830 int fw_sb_id,
3831 u32 *sb_data_p,
3832 u32 data_size)
Eilon Greenstein34f80b02008-06-23 20:33:01 -07003833{
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02003834 int index;
Dmitry Kravkov523224a2010-10-06 03:23:26 +00003835 for (index = 0; index < data_size; index++)
3836 REG_WR(bp, BAR_CSTRORM_INTMEM +
3837 CSTORM_STATUS_BLOCK_DATA_OFFSET(fw_sb_id) +
3838 sizeof(u32)*index,
3839 *(sb_data_p + index));
Eilon Greenstein34f80b02008-06-23 20:33:01 -07003840}
3841
Dmitry Kravkov523224a2010-10-06 03:23:26 +00003842static inline void bnx2x_zero_fp_sb(struct bnx2x *bp, int fw_sb_id)
3843{
3844 u32 *sb_data_p;
3845 u32 data_size = 0;
Dmitry Kravkovf2e08992010-10-06 03:28:26 +00003846 struct hc_status_block_data_e2 sb_data_e2;
Dmitry Kravkov523224a2010-10-06 03:23:26 +00003847 struct hc_status_block_data_e1x sb_data_e1x;
3848
3849 /* disable the function first */
Dmitry Kravkovf2e08992010-10-06 03:28:26 +00003850 if (CHIP_IS_E2(bp)) {
3851 memset(&sb_data_e2, 0, sizeof(struct hc_status_block_data_e2));
3852 sb_data_e2.common.p_func.pf_id = HC_FUNCTION_DISABLED;
3853 sb_data_e2.common.p_func.vf_id = HC_FUNCTION_DISABLED;
3854 sb_data_e2.common.p_func.vf_valid = false;
3855 sb_data_p = (u32 *)&sb_data_e2;
3856 data_size = sizeof(struct hc_status_block_data_e2)/sizeof(u32);
3857 } else {
3858 memset(&sb_data_e1x, 0,
3859 sizeof(struct hc_status_block_data_e1x));
3860 sb_data_e1x.common.p_func.pf_id = HC_FUNCTION_DISABLED;
3861 sb_data_e1x.common.p_func.vf_id = HC_FUNCTION_DISABLED;
3862 sb_data_e1x.common.p_func.vf_valid = false;
3863 sb_data_p = (u32 *)&sb_data_e1x;
3864 data_size = sizeof(struct hc_status_block_data_e1x)/sizeof(u32);
3865 }
Dmitry Kravkov523224a2010-10-06 03:23:26 +00003866 bnx2x_wr_fp_sb_data(bp, fw_sb_id, sb_data_p, data_size);
3867
3868 bnx2x_fill(bp, BAR_CSTRORM_INTMEM +
3869 CSTORM_STATUS_BLOCK_OFFSET(fw_sb_id), 0,
3870 CSTORM_STATUS_BLOCK_SIZE);
3871 bnx2x_fill(bp, BAR_CSTRORM_INTMEM +
3872 CSTORM_SYNC_BLOCK_OFFSET(fw_sb_id), 0,
3873 CSTORM_SYNC_BLOCK_SIZE);
3874}
3875
3876/* helper: writes SP SB data to FW */
3877static inline void bnx2x_wr_sp_sb_data(struct bnx2x *bp,
3878 struct hc_sp_status_block_data *sp_sb_data)
Eilon Greenstein34f80b02008-06-23 20:33:01 -07003879{
3880 int func = BP_FUNC(bp);
Dmitry Kravkov523224a2010-10-06 03:23:26 +00003881 int i;
3882 for (i = 0; i < sizeof(struct hc_sp_status_block_data)/sizeof(u32); i++)
3883 REG_WR(bp, BAR_CSTRORM_INTMEM +
3884 CSTORM_SP_STATUS_BLOCK_DATA_OFFSET(func) +
3885 i*sizeof(u32),
3886 *((u32 *)sp_sb_data + i));
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02003887}
3888
Dmitry Kravkov523224a2010-10-06 03:23:26 +00003889static inline void bnx2x_zero_sp_sb(struct bnx2x *bp)
3890{
3891 int func = BP_FUNC(bp);
3892 struct hc_sp_status_block_data sp_sb_data;
3893 memset(&sp_sb_data, 0, sizeof(struct hc_sp_status_block_data));
3894
3895 sp_sb_data.p_func.pf_id = HC_FUNCTION_DISABLED;
3896 sp_sb_data.p_func.vf_id = HC_FUNCTION_DISABLED;
3897 sp_sb_data.p_func.vf_valid = false;
3898
3899 bnx2x_wr_sp_sb_data(bp, &sp_sb_data);
3900
3901 bnx2x_fill(bp, BAR_CSTRORM_INTMEM +
3902 CSTORM_SP_STATUS_BLOCK_OFFSET(func), 0,
3903 CSTORM_SP_STATUS_BLOCK_SIZE);
3904 bnx2x_fill(bp, BAR_CSTRORM_INTMEM +
3905 CSTORM_SP_SYNC_BLOCK_OFFSET(func), 0,
3906 CSTORM_SP_SYNC_BLOCK_SIZE);
3907
3908}
3909
3910
3911static inline
3912void bnx2x_setup_ndsb_state_machine(struct hc_status_block_sm *hc_sm,
3913 int igu_sb_id, int igu_seg_id)
3914{
3915 hc_sm->igu_sb_id = igu_sb_id;
3916 hc_sm->igu_seg_id = igu_seg_id;
3917 hc_sm->timer_value = 0xFF;
3918 hc_sm->time_to_expire = 0xFFFFFFFF;
3919}
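/* The 0xFF ticks / 0xFFFFFFFF expiry defaults effectively leave the state
 * machine disarmed; real coalescing values are programmed afterwards via
 * bnx2x_update_coalesce_sb_index().
 */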
3920
3921void bnx2x_init_sb(struct bnx2x *bp, dma_addr_t mapping, int vfid,
3922 u8 vf_valid, int fw_sb_id, int igu_sb_id)
3923{
3924 int igu_seg_id;
3925
Dmitry Kravkovf2e08992010-10-06 03:28:26 +00003926 struct hc_status_block_data_e2 sb_data_e2;
Dmitry Kravkov523224a2010-10-06 03:23:26 +00003927 struct hc_status_block_data_e1x sb_data_e1x;
3928 struct hc_status_block_sm *hc_sm_p;
3929 struct hc_index_data *hc_index_p;
3930 int data_size;
3931 u32 *sb_data_p;
3932
Dmitry Kravkovf2e08992010-10-06 03:28:26 +00003933 if (CHIP_INT_MODE_IS_BC(bp))
3934 igu_seg_id = HC_SEG_ACCESS_NORM;
3935 else
3936 igu_seg_id = IGU_SEG_ACCESS_NORM;
Dmitry Kravkov523224a2010-10-06 03:23:26 +00003937
3938 bnx2x_zero_fp_sb(bp, fw_sb_id);
3939
Dmitry Kravkovf2e08992010-10-06 03:28:26 +00003940 if (CHIP_IS_E2(bp)) {
3941 memset(&sb_data_e2, 0, sizeof(struct hc_status_block_data_e2));
3942 sb_data_e2.common.p_func.pf_id = BP_FUNC(bp);
3943 sb_data_e2.common.p_func.vf_id = vfid;
3944 sb_data_e2.common.p_func.vf_valid = vf_valid;
3945 sb_data_e2.common.p_func.vnic_id = BP_VN(bp);
3946 sb_data_e2.common.same_igu_sb_1b = true;
3947 sb_data_e2.common.host_sb_addr.hi = U64_HI(mapping);
3948 sb_data_e2.common.host_sb_addr.lo = U64_LO(mapping);
3949 hc_sm_p = sb_data_e2.common.state_machine;
3950 hc_index_p = sb_data_e2.index_data;
3951 sb_data_p = (u32 *)&sb_data_e2;
3952 data_size = sizeof(struct hc_status_block_data_e2)/sizeof(u32);
3953 } else {
3954 memset(&sb_data_e1x, 0,
3955 sizeof(struct hc_status_block_data_e1x));
3956 sb_data_e1x.common.p_func.pf_id = BP_FUNC(bp);
3957 sb_data_e1x.common.p_func.vf_id = 0xff;
3958 sb_data_e1x.common.p_func.vf_valid = false;
3959 sb_data_e1x.common.p_func.vnic_id = BP_VN(bp);
3960 sb_data_e1x.common.same_igu_sb_1b = true;
3961 sb_data_e1x.common.host_sb_addr.hi = U64_HI(mapping);
3962 sb_data_e1x.common.host_sb_addr.lo = U64_LO(mapping);
3963 hc_sm_p = sb_data_e1x.common.state_machine;
3964 hc_index_p = sb_data_e1x.index_data;
3965 sb_data_p = (u32 *)&sb_data_e1x;
3966 data_size = sizeof(struct hc_status_block_data_e1x)/sizeof(u32);
3967 }
Dmitry Kravkov523224a2010-10-06 03:23:26 +00003968
3969 bnx2x_setup_ndsb_state_machine(&hc_sm_p[SM_RX_ID],
3970 igu_sb_id, igu_seg_id);
3971 bnx2x_setup_ndsb_state_machine(&hc_sm_p[SM_TX_ID],
3972 igu_sb_id, igu_seg_id);
3973
3974 DP(NETIF_MSG_HW, "Init FW SB %d\n", fw_sb_id);
3975
3976	/* write indices to HW */
3977 bnx2x_wr_fp_sb_data(bp, fw_sb_id, sb_data_p, data_size);
3978}
3979
3980static void bnx2x_update_coalesce_sb_index(struct bnx2x *bp, u16 fw_sb_id,
3981 u8 sb_index, u8 disable, u16 usec)
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02003982{
Eilon Greenstein34f80b02008-06-23 20:33:01 -07003983 int port = BP_PORT(bp);
Dmitry Kravkov523224a2010-10-06 03:23:26 +00003984 u8 ticks = usec / BNX2X_BTR;
3985
3986 storm_memset_hc_timeout(bp, port, fw_sb_id, sb_index, ticks);
3987
3988 disable = disable ? 1 : (usec ? 0 : 1);
3989 storm_memset_hc_disable(bp, port, fw_sb_id, sb_index, disable);
3990}
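/* Timeouts are programmed in BNX2X_BTR-microsecond ticks; note that
 * requesting 0 usec implicitly disables the index even when 'disable'
 * is false (usec = 0 yields disable = 1 above).
 */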
3991
3992static void bnx2x_update_coalesce_sb(struct bnx2x *bp, u16 fw_sb_id,
3993 u16 tx_usec, u16 rx_usec)
3994{
3995 bnx2x_update_coalesce_sb_index(bp, fw_sb_id, U_SB_ETH_RX_CQ_INDEX,
3996 false, rx_usec);
3997 bnx2x_update_coalesce_sb_index(bp, fw_sb_id, C_SB_ETH_TX_CQ_INDEX,
3998 false, tx_usec);
3999}
Dmitry Kravkovf2e08992010-10-06 03:28:26 +00004000
Dmitry Kravkov523224a2010-10-06 03:23:26 +00004001static void bnx2x_init_def_sb(struct bnx2x *bp)
4002{
4003 struct host_sp_status_block *def_sb = bp->def_status_blk;
4004 dma_addr_t mapping = bp->def_status_blk_mapping;
4005 int igu_sp_sb_index;
4006 int igu_seg_id;
4007 int port = BP_PORT(bp);
Eilon Greenstein34f80b02008-06-23 20:33:01 -07004008 int func = BP_FUNC(bp);
Dmitry Kravkov523224a2010-10-06 03:23:26 +00004009 int reg_offset;
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02004010 u64 section;
Dmitry Kravkov523224a2010-10-06 03:23:26 +00004011 int index;
4012 struct hc_sp_status_block_data sp_sb_data;
4013 memset(&sp_sb_data, 0, sizeof(struct hc_sp_status_block_data));
4014
Dmitry Kravkovf2e08992010-10-06 03:28:26 +00004015 if (CHIP_INT_MODE_IS_BC(bp)) {
4016 igu_sp_sb_index = DEF_SB_IGU_ID;
4017 igu_seg_id = HC_SEG_ACCESS_DEF;
4018 } else {
4019 igu_sp_sb_index = bp->igu_dsb_id;
4020 igu_seg_id = IGU_SEG_ACCESS_DEF;
4021 }
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02004022
4023 /* ATTN */
Dmitry Kravkov523224a2010-10-06 03:23:26 +00004024 section = ((u64)mapping) + offsetof(struct host_sp_status_block,
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02004025 atten_status_block);
Dmitry Kravkov523224a2010-10-06 03:23:26 +00004026 def_sb->atten_status_block.status_block_id = igu_sp_sb_index;
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02004027
Eliezer Tamir49d66772008-02-28 11:53:13 -08004028 bp->attn_state = 0;
4029
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02004030 reg_offset = (port ? MISC_REG_AEU_ENABLE1_FUNC_1_OUT_0 :
4031 MISC_REG_AEU_ENABLE1_FUNC_0_OUT_0);
Eilon Greenstein34f80b02008-06-23 20:33:01 -07004032 for (index = 0; index < MAX_DYNAMIC_ATTN_GRPS; index++) {
Dmitry Kravkov523224a2010-10-06 03:23:26 +00004033 int sindex;
4034 /* take care of sig[0]..sig[4] */
4035 for (sindex = 0; sindex < 4; sindex++)
4036 bp->attn_group[index].sig[sindex] =
4037 REG_RD(bp, reg_offset + sindex*0x4 + 0x10*index);
Dmitry Kravkovf2e08992010-10-06 03:28:26 +00004038
4039 if (CHIP_IS_E2(bp))
4040 /*
4041 * enable5 is separate from the rest of the registers,
4042 * and therefore the address skip is 4
4043 * and not 16 between the different groups
4044 */
4045 bp->attn_group[index].sig[4] = REG_RD(bp,
4046 reg_offset + 0x10 + 0x4*index);
4047 else
4048 bp->attn_group[index].sig[4] = 0;
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02004049 }
4050
Dmitry Kravkovf2e08992010-10-06 03:28:26 +00004051 if (bp->common.int_block == INT_BLOCK_HC) {
4052 reg_offset = (port ? HC_REG_ATTN_MSG1_ADDR_L :
4053 HC_REG_ATTN_MSG0_ADDR_L);
4054
4055 REG_WR(bp, reg_offset, U64_LO(section));
4056 REG_WR(bp, reg_offset + 4, U64_HI(section));
4057 } else if (CHIP_IS_E2(bp)) {
4058 REG_WR(bp, IGU_REG_ATTN_MSG_ADDR_L, U64_LO(section));
4059 REG_WR(bp, IGU_REG_ATTN_MSG_ADDR_H, U64_HI(section));
4060 }
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02004061
Dmitry Kravkov523224a2010-10-06 03:23:26 +00004062 section = ((u64)mapping) + offsetof(struct host_sp_status_block,
4063 sp_sb);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02004064
Dmitry Kravkov523224a2010-10-06 03:23:26 +00004065 bnx2x_zero_sp_sb(bp);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02004066
Dmitry Kravkov523224a2010-10-06 03:23:26 +00004067 sp_sb_data.host_sb_addr.lo = U64_LO(section);
4068 sp_sb_data.host_sb_addr.hi = U64_HI(section);
4069 sp_sb_data.igu_sb_id = igu_sp_sb_index;
4070 sp_sb_data.igu_seg_id = igu_seg_id;
4071 sp_sb_data.p_func.pf_id = func;
Dmitry Kravkovf2e08992010-10-06 03:28:26 +00004072 sp_sb_data.p_func.vnic_id = BP_VN(bp);
Dmitry Kravkov523224a2010-10-06 03:23:26 +00004073 sp_sb_data.p_func.vf_id = 0xff;
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02004074
Dmitry Kravkov523224a2010-10-06 03:23:26 +00004075 bnx2x_wr_sp_sb_data(bp, &sp_sb_data);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02004076
Yitchak Gertnerbb2a0f72008-06-23 20:33:36 -07004077 bp->stats_pending = 0;
Yitchak Gertner66e855f2008-08-13 15:49:05 -07004078 bp->set_mac_pending = 0;
Yitchak Gertnerbb2a0f72008-06-23 20:33:36 -07004079
Dmitry Kravkov523224a2010-10-06 03:23:26 +00004080 bnx2x_ack_sb(bp, bp->igu_dsb_id, USTORM_ID, 0, IGU_INT_ENABLE, 0);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02004081}
4082
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00004083void bnx2x_update_coalesce(struct bnx2x *bp)
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02004084{
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02004085 int i;
4086
Dmitry Kravkov523224a2010-10-06 03:23:26 +00004087 for_each_queue(bp, i)
4088 bnx2x_update_coalesce_sb(bp, bp->fp[i].fw_sb_id,
4089 bp->rx_ticks, bp->tx_ticks);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02004090}
4091
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02004092static void bnx2x_init_sp_ring(struct bnx2x *bp)
4093{
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02004094 spin_lock_init(&bp->spq_lock);
Dmitry Kravkov8fe23fb2010-10-06 03:27:41 +00004095 atomic_set(&bp->spq_left, MAX_SPQ_PENDING);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02004096
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02004097 bp->spq_prod_idx = 0;
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02004098 bp->dsb_sp_prod = BNX2X_SP_DSB_INDEX;
4099 bp->spq_prod_bd = bp->spq;
4100 bp->spq_last_bd = bp->spq_prod_bd + MAX_SP_DESC_CNT;
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02004101}
4102
Dmitry Kravkov523224a2010-10-06 03:23:26 +00004103static void bnx2x_init_eq_ring(struct bnx2x *bp)
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02004104{
4105 int i;
Dmitry Kravkov523224a2010-10-06 03:23:26 +00004106 for (i = 1; i <= NUM_EQ_PAGES; i++) {
4107 union event_ring_elem *elem =
4108 &bp->eq_ring[EQ_DESC_CNT_PAGE * i - 1];
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02004109
Dmitry Kravkov523224a2010-10-06 03:23:26 +00004110 elem->next_page.addr.hi =
4111 cpu_to_le32(U64_HI(bp->eq_mapping +
4112 BCM_PAGE_SIZE * (i % NUM_EQ_PAGES)));
4113 elem->next_page.addr.lo =
4114 cpu_to_le32(U64_LO(bp->eq_mapping +
4115 BCM_PAGE_SIZE*(i % NUM_EQ_PAGES)));
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02004116 }
Dmitry Kravkov523224a2010-10-06 03:23:26 +00004117 bp->eq_cons = 0;
4118 bp->eq_prod = NUM_EQ_DESC;
4119 bp->eq_cons_sb = BNX2X_EQ_INDEX;
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02004120}
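/*
 * Illustrative sketch (not driver code): bnx2x_init_eq_ring() above
 * reserves the last descriptor of every event-queue page as a
 * "next page" pointer, chaining page 0 -> 1 -> ... -> N-1 -> 0 into a
 * ring.  Page size, page count and descriptor size are placeholders
 * for BCM_PAGE_SIZE, NUM_EQ_PAGES and sizeof(union event_ring_elem).
 */
#include <stdio.h>

#define PAGE_SZ       4096
#define NUM_PAGES     2
#define DESC_SZ       32                    /* assumed element size */
#define DESC_PER_PAGE (PAGE_SZ / DESC_SZ)

int main(void)
{
	unsigned long base = 0x100000;      /* stands in for bp->eq_mapping */

	for (int i = 1; i <= NUM_PAGES; i++) {
		int last = DESC_PER_PAGE * i - 1;   /* last slot of page i-1 */
		unsigned long next = base +
			(unsigned long)PAGE_SZ * (i % NUM_PAGES);

		printf("desc[%d] (end of page %d) -> next page @ 0x%lx\n",
		       last, i - 1, next);
	}
	return 0;
}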
4121
4122static void bnx2x_init_ind_table(struct bnx2x *bp)
4123{
Eilon Greenstein26c8fa42009-01-14 21:29:55 -08004124 int func = BP_FUNC(bp);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02004125 int i;
4126
Eilon Greenstein555f6c72009-02-12 08:36:11 +00004127 if (bp->multi_mode == ETH_RSS_MODE_DISABLED)
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02004128 return;
4129
Eilon Greenstein555f6c72009-02-12 08:36:11 +00004130 DP(NETIF_MSG_IFUP,
4131 "Initializing indirection table multi_mode %d\n", bp->multi_mode);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02004132 for (i = 0; i < TSTORM_INDIRECTION_TABLE_SIZE; i++)
Eilon Greenstein34f80b02008-06-23 20:33:01 -07004133 REG_WR8(bp, BAR_TSTRORM_INTMEM +
Eilon Greenstein26c8fa42009-01-14 21:29:55 -08004134 TSTORM_INDIRECTION_TABLE_OFFSET(func) + i,
Vladislav Zolotarov54b9dda2009-11-16 06:05:58 +00004135 bp->fp->cl_id + (i % bp->num_queues));
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02004136}
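/*
 * Illustrative sketch (not driver code): the indirection table above is
 * filled round-robin, so RSS slot i steers to client cl_id +
 * (i % num_queues) and the queues share the table evenly.  TABLE_SIZE
 * and NUM_QUEUES are placeholders for TSTORM_INDIRECTION_TABLE_SIZE and
 * bp->num_queues.
 */
#include <stdio.h>

#define TABLE_SIZE 128
#define NUM_QUEUES 4
#define BASE_CL_ID 0     /* stands in for bp->fp->cl_id */

int main(void)
{
	int hits[NUM_QUEUES] = { 0 };

	for (int i = 0; i < TABLE_SIZE; i++)
		hits[i % NUM_QUEUES]++;

	for (int q = 0; q < NUM_QUEUES; q++)
		printf("client %d gets %d of %d slots\n",
		       BASE_CL_ID + q, hits[q], TABLE_SIZE);
	return 0;
}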
4137
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00004138void bnx2x_set_storm_rx_mode(struct bnx2x *bp)
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02004139{
Eilon Greenstein34f80b02008-06-23 20:33:01 -07004140 int mode = bp->rx_mode;
Dmitry Kravkov523224a2010-10-06 03:23:26 +00004141 u16 cl_id;
4142
Eilon Greenstein581ce432009-07-29 00:20:04 +00004143 /* All but management unicast packets should pass to the host as well */
4144 u32 llh_mask =
4145 NIG_LLH0_BRB1_DRV_MASK_REG_LLH0_BRB1_DRV_MASK_BRCST |
4146 NIG_LLH0_BRB1_DRV_MASK_REG_LLH0_BRB1_DRV_MASK_MLCST |
4147 NIG_LLH0_BRB1_DRV_MASK_REG_LLH0_BRB1_DRV_MASK_VLAN |
4148 NIG_LLH0_BRB1_DRV_MASK_REG_LLH0_BRB1_DRV_MASK_NO_VLAN;
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02004149
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02004150 switch (mode) {
4151 case BNX2X_RX_MODE_NONE: /* no Rx */
Dmitry Kravkov523224a2010-10-06 03:23:26 +00004152 cl_id = BP_L_ID(bp);
4153 bnx2x_rxq_set_mac_filters(bp, cl_id, BNX2X_ACCEPT_NONE);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02004154 break;
Eilon Greenstein356e2382009-02-12 08:38:32 +00004155
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02004156 case BNX2X_RX_MODE_NORMAL:
Dmitry Kravkov523224a2010-10-06 03:23:26 +00004157 cl_id = BP_L_ID(bp);
4158 bnx2x_rxq_set_mac_filters(bp, cl_id,
4159 BNX2X_ACCEPT_UNICAST |
4160 BNX2X_ACCEPT_BROADCAST |
4161 BNX2X_ACCEPT_MULTICAST);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02004162 break;
Eilon Greenstein356e2382009-02-12 08:38:32 +00004163
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02004164 case BNX2X_RX_MODE_ALLMULTI:
Dmitry Kravkov523224a2010-10-06 03:23:26 +00004165 cl_id = BP_L_ID(bp);
4166 bnx2x_rxq_set_mac_filters(bp, cl_id,
4167 BNX2X_ACCEPT_UNICAST |
4168 BNX2X_ACCEPT_BROADCAST |
4169 BNX2X_ACCEPT_ALL_MULTICAST);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02004170 break;
Eilon Greenstein356e2382009-02-12 08:38:32 +00004171
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02004172 case BNX2X_RX_MODE_PROMISC:
Dmitry Kravkov523224a2010-10-06 03:23:26 +00004173 cl_id = BP_L_ID(bp);
4174 bnx2x_rxq_set_mac_filters(bp, cl_id, BNX2X_PROMISCUOUS_MODE);
4175
Eilon Greenstein581ce432009-07-29 00:20:04 +00004176 /* pass management unicast packets as well */
4177 llh_mask |= NIG_LLH0_BRB1_DRV_MASK_REG_LLH0_BRB1_DRV_MASK_UNCST;
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02004178 break;
Eilon Greenstein356e2382009-02-12 08:38:32 +00004179
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02004180 default:
Eilon Greenstein34f80b02008-06-23 20:33:01 -07004181 BNX2X_ERR("BAD rx mode (%d)\n", mode);
4182 break;
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02004183 }
4184
Eilon Greenstein581ce432009-07-29 00:20:04 +00004185 REG_WR(bp,
Dmitry Kravkov523224a2010-10-06 03:23:26 +00004186 BP_PORT(bp) ? NIG_REG_LLH1_BRB1_DRV_MASK :
4187 NIG_REG_LLH0_BRB1_DRV_MASK,
Eilon Greenstein581ce432009-07-29 00:20:04 +00004188 llh_mask);
4189
Dmitry Kravkov523224a2010-10-06 03:23:26 +00004190 DP(NETIF_MSG_IFUP, "rx mode %d\n"
4191 "drop_ucast 0x%x\ndrop_mcast 0x%x\ndrop_bcast 0x%x\n"
4192 "accp_ucast 0x%x\naccp_mcast 0x%x\naccp_bcast 0x%x\n", mode,
4193 bp->mac_filters.ucast_drop_all,
4194 bp->mac_filters.mcast_drop_all,
4195 bp->mac_filters.bcast_drop_all,
4196 bp->mac_filters.ucast_accept_all,
4197 bp->mac_filters.mcast_accept_all,
4198 bp->mac_filters.bcast_accept_all
4199 );
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02004200
Dmitry Kravkov523224a2010-10-06 03:23:26 +00004201 storm_memset_mac_filters(bp, &bp->mac_filters, BP_FUNC(bp));
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02004202}
4203
Eilon Greenstein471de712008-08-13 15:49:35 -07004204static void bnx2x_init_internal_common(struct bnx2x *bp)
4205{
4206 int i;
4207
Dmitry Kravkov523224a2010-10-06 03:23:26 +00004208 if (!CHIP_IS_E1(bp)) {
4209
4210 /* xstorm needs to know whether to add ovlan to packets or not,
4211 * in switch-independent we'll write 0 to here... */
4212 REG_WR8(bp, BAR_XSTRORM_INTMEM + XSTORM_FUNCTION_MODE_OFFSET,
Dmitry Kravkovfb3bff12010-10-06 03:26:40 +00004213 bp->mf_mode);
Dmitry Kravkov523224a2010-10-06 03:23:26 +00004214 REG_WR8(bp, BAR_TSTRORM_INTMEM + TSTORM_FUNCTION_MODE_OFFSET,
Dmitry Kravkovfb3bff12010-10-06 03:26:40 +00004215 bp->mf_mode);
Dmitry Kravkov523224a2010-10-06 03:23:26 +00004216 REG_WR8(bp, BAR_CSTRORM_INTMEM + CSTORM_FUNCTION_MODE_OFFSET,
Dmitry Kravkovfb3bff12010-10-06 03:26:40 +00004217 bp->mf_mode);
Dmitry Kravkov523224a2010-10-06 03:23:26 +00004218 REG_WR8(bp, BAR_USTRORM_INTMEM + USTORM_FUNCTION_MODE_OFFSET,
Dmitry Kravkovfb3bff12010-10-06 03:26:40 +00004219 bp->mf_mode);
Dmitry Kravkov523224a2010-10-06 03:23:26 +00004220 }
4221
Eilon Greenstein471de712008-08-13 15:49:35 -07004222 /* Zero this manually as its initialization is
4223 currently missing in the initTool */
4224 for (i = 0; i < (USTORM_AGG_DATA_SIZE >> 2); i++)
4225 REG_WR(bp, BAR_USTRORM_INTMEM +
4226 USTORM_AGG_DATA_OFFSET + i * 4, 0);
Dmitry Kravkovf2e08992010-10-06 03:28:26 +00004227 if (CHIP_IS_E2(bp)) {
4228 REG_WR8(bp, BAR_CSTRORM_INTMEM + CSTORM_IGU_MODE_OFFSET,
4229 CHIP_INT_MODE_IS_BC(bp) ?
4230 HC_IGU_BC_MODE : HC_IGU_NBC_MODE);
4231 }
Eilon Greenstein471de712008-08-13 15:49:35 -07004232}
4233
4234static void bnx2x_init_internal_port(struct bnx2x *bp)
4235{
Dmitry Kravkov523224a2010-10-06 03:23:26 +00004236 /* port */
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02004237}
4238
Eilon Greenstein471de712008-08-13 15:49:35 -07004239static void bnx2x_init_internal(struct bnx2x *bp, u32 load_code)
4240{
4241 switch (load_code) {
4242 case FW_MSG_CODE_DRV_LOAD_COMMON:
Dmitry Kravkovf2e08992010-10-06 03:28:26 +00004243 case FW_MSG_CODE_DRV_LOAD_COMMON_CHIP:
Eilon Greenstein471de712008-08-13 15:49:35 -07004244 bnx2x_init_internal_common(bp);
4245 /* no break */
4246
4247 case FW_MSG_CODE_DRV_LOAD_PORT:
4248 bnx2x_init_internal_port(bp);
4249 /* no break */
4250
4251 case FW_MSG_CODE_DRV_LOAD_FUNCTION:
Dmitry Kravkov523224a2010-10-06 03:23:26 +00004252 /* internal memory per function is
4253 initialized inside bnx2x_pf_init */
Eilon Greenstein471de712008-08-13 15:49:35 -07004254 break;
4255
4256 default:
4257 BNX2X_ERR("Unknown load_code (0x%x) from MCP\n", load_code);
4258 break;
4259 }
4260}
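/*
 * A minimal sketch of the deliberate switch fall-through used above:
 * a COMMON load runs the common, port and function stages, a PORT load
 * runs port and function, and a FUNCTION load runs only its own stage.
 */
#include <stdio.h>

enum load_code { LOAD_COMMON, LOAD_PORT, LOAD_FUNCTION };

static void init_internal(enum load_code code)
{
	switch (code) {
	case LOAD_COMMON:
		printf("common stage\n");
		/* no break */
	case LOAD_PORT:
		printf("port stage\n");
		/* no break */
	case LOAD_FUNCTION:
		printf("function stage\n");
		break;
	}
}

int main(void)
{
	init_internal(LOAD_COMMON);    /* all three stages */
	init_internal(LOAD_FUNCTION);  /* function stage only */
	return 0;
}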
4261
Dmitry Kravkov523224a2010-10-06 03:23:26 +00004262static void bnx2x_init_fp_sb(struct bnx2x *bp, int fp_idx)
4263{
4264 struct bnx2x_fastpath *fp = &bp->fp[fp_idx];
4265
4266 fp->state = BNX2X_FP_STATE_CLOSED;
4267
4268 fp->index = fp->cid = fp_idx;
4269 fp->cl_id = BP_L_ID(bp) + fp_idx;
4270 fp->fw_sb_id = bp->base_fw_ndsb + fp->cl_id + CNIC_CONTEXT_USE;
4271 fp->igu_sb_id = bp->igu_base_sb + fp_idx + CNIC_CONTEXT_USE;
4272 /* qZone id equals to FW (per path) client id */
4273 fp->cl_qzone_id = fp->cl_id +
Dmitry Kravkovf2e08992010-10-06 03:28:26 +00004274 BP_PORT(bp)*(CHIP_IS_E2(bp) ? ETH_MAX_RX_CLIENTS_E2 :
4275 ETH_MAX_RX_CLIENTS_E1H);
Dmitry Kravkov523224a2010-10-06 03:23:26 +00004276 /* init shortcut */
Dmitry Kravkovf2e08992010-10-06 03:28:26 +00004277 fp->ustorm_rx_prods_offset = CHIP_IS_E2(bp) ?
4278 USTORM_RX_PRODS_E2_OFFSET(fp->cl_qzone_id) :
Dmitry Kravkov523224a2010-10-06 03:23:26 +00004279 USTORM_RX_PRODS_E1X_OFFSET(BP_PORT(bp), fp->cl_id);
4280	/* Setup SB indices */
4281 fp->rx_cons_sb = BNX2X_RX_SB_INDEX;
4282 fp->tx_cons_sb = BNX2X_TX_SB_INDEX;
4283
4284 DP(NETIF_MSG_IFUP, "queue[%d]: bnx2x_init_sb(%p,%p) "
4285 "cl_id %d fw_sb %d igu_sb %d\n",
4286 fp_idx, bp, fp->status_blk.e1x_sb, fp->cl_id, fp->fw_sb_id,
4287 fp->igu_sb_id);
4288 bnx2x_init_sb(bp, fp->status_blk_mapping, BNX2X_VF_ID_INVALID, false,
4289 fp->fw_sb_id, fp->igu_sb_id);
4290
4291 bnx2x_update_fpsb_idx(fp);
4292}
4293
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00004294void bnx2x_nic_init(struct bnx2x *bp, u32 load_code)
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02004295{
4296 int i;
4297
Dmitry Kravkov523224a2010-10-06 03:23:26 +00004298 for_each_queue(bp, i)
4299 bnx2x_init_fp_sb(bp, i);
Michael Chan37b091b2009-10-10 13:46:55 +00004300#ifdef BCM_CNIC
Dmitry Kravkov523224a2010-10-06 03:23:26 +00004301
4302 bnx2x_init_sb(bp, bp->cnic_sb_mapping,
4303 BNX2X_VF_ID_INVALID, false,
4304 CNIC_SB_ID(bp), CNIC_IGU_SB_ID(bp));
4305
Michael Chan37b091b2009-10-10 13:46:55 +00004306#endif
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02004307
Eilon Greenstein16119782009-03-02 07:59:27 +00004308 /* ensure status block indices were read */
4309 rmb();
4310
Dmitry Kravkov523224a2010-10-06 03:23:26 +00004311 bnx2x_init_def_sb(bp);
Eilon Greenstein5c862842008-08-13 15:51:48 -07004312 bnx2x_update_dsb_idx(bp);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02004313 bnx2x_init_rx_rings(bp);
Dmitry Kravkov523224a2010-10-06 03:23:26 +00004314 bnx2x_init_tx_rings(bp);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02004315 bnx2x_init_sp_ring(bp);
Dmitry Kravkov523224a2010-10-06 03:23:26 +00004316 bnx2x_init_eq_ring(bp);
Eilon Greenstein471de712008-08-13 15:49:35 -07004317 bnx2x_init_internal(bp, load_code);
Dmitry Kravkov523224a2010-10-06 03:23:26 +00004318 bnx2x_pf_init(bp);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02004319 bnx2x_init_ind_table(bp);
Eilon Greenstein0ef00452009-01-14 21:31:08 -08004320 bnx2x_stats_init(bp);
4321
4322 /* At this point, we are ready for interrupts */
4323 atomic_set(&bp->intr_sem, 0);
4324
4325 /* flush all before enabling interrupts */
4326 mb();
4327 mmiowb();
4328
Eliezer Tamir615f8fd2008-02-28 11:54:54 -08004329 bnx2x_int_enable(bp);
Eilon Greensteineb8da202009-07-21 05:47:30 +00004330
4331 /* Check for SPIO5 */
4332 bnx2x_attn_int_deasserted0(bp,
4333 REG_RD(bp, MISC_REG_AEU_AFTER_INVERT_1_FUNC_0 + BP_PORT(bp)*4) &
4334 AEU_INPUTS_ATTN_BITS_SPIO5);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02004335}
4336
4337/* end of nic init */
4338
4339/*
4340 * gzip service functions
4341 */
4342
4343static int bnx2x_gunzip_init(struct bnx2x *bp)
4344{
FUJITA Tomonori1a983142010-04-04 01:51:03 +00004345 bp->gunzip_buf = dma_alloc_coherent(&bp->pdev->dev, FW_BUF_SIZE,
4346 &bp->gunzip_mapping, GFP_KERNEL);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02004347 if (bp->gunzip_buf == NULL)
4348 goto gunzip_nomem1;
4349
4350 bp->strm = kmalloc(sizeof(*bp->strm), GFP_KERNEL);
4351 if (bp->strm == NULL)
4352 goto gunzip_nomem2;
4353
4354 bp->strm->workspace = kmalloc(zlib_inflate_workspacesize(),
4355 GFP_KERNEL);
4356 if (bp->strm->workspace == NULL)
4357 goto gunzip_nomem3;
4358
4359 return 0;
4360
4361gunzip_nomem3:
4362 kfree(bp->strm);
4363 bp->strm = NULL;
4364
4365gunzip_nomem2:
FUJITA Tomonori1a983142010-04-04 01:51:03 +00004366 dma_free_coherent(&bp->pdev->dev, FW_BUF_SIZE, bp->gunzip_buf,
4367 bp->gunzip_mapping);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02004368 bp->gunzip_buf = NULL;
4369
4370gunzip_nomem1:
Vladislav Zolotarovcdaa7cb2010-04-19 01:13:57 +00004371	netdev_err(bp->dev, "Cannot allocate firmware buffer for"
4372			    " decompression\n");
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02004373 return -ENOMEM;
4374}
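/*
 * A minimal sketch of the goto-unwind idiom bnx2x_gunzip_init() uses:
 * each allocation failure jumps to a label that releases only what was
 * already allocated, in reverse order.  All names here are illustrative.
 */
#include <stdio.h>
#include <stdlib.h>

static int setup(void **buf, void **strm)
{
	*buf = malloc(64);
	if (*buf == NULL)
		goto nomem1;

	*strm = malloc(64);
	if (*strm == NULL)
		goto nomem2;

	return 0;

nomem2:
	free(*buf);
	*buf = NULL;
nomem1:
	fprintf(stderr, "cannot allocate buffers\n");
	return -1;
}

int main(void)
{
	void *buf, *strm;

	if (setup(&buf, &strm) == 0) {
		free(strm);
		free(buf);
	}
	return 0;
}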
4375
4376static void bnx2x_gunzip_end(struct bnx2x *bp)
4377{
4378 kfree(bp->strm->workspace);
4379
4380 kfree(bp->strm);
4381 bp->strm = NULL;
4382
4383 if (bp->gunzip_buf) {
FUJITA Tomonori1a983142010-04-04 01:51:03 +00004384 dma_free_coherent(&bp->pdev->dev, FW_BUF_SIZE, bp->gunzip_buf,
4385 bp->gunzip_mapping);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02004386 bp->gunzip_buf = NULL;
4387 }
4388}
4389
Vladislav Zolotarov94a78b72009-04-27 03:27:43 -07004390static int bnx2x_gunzip(struct bnx2x *bp, const u8 *zbuf, int len)
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02004391{
4392 int n, rc;
4393
4394 /* check gzip header */
Vladislav Zolotarov94a78b72009-04-27 03:27:43 -07004395 if ((zbuf[0] != 0x1f) || (zbuf[1] != 0x8b) || (zbuf[2] != Z_DEFLATED)) {
4396 BNX2X_ERR("Bad gzip header\n");
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02004397 return -EINVAL;
Vladislav Zolotarov94a78b72009-04-27 03:27:43 -07004398 }
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02004399
4400 n = 10;
4401
Eilon Greenstein34f80b02008-06-23 20:33:01 -07004402#define FNAME 0x8
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02004403
4404 if (zbuf[3] & FNAME)
4405 while ((zbuf[n++] != 0) && (n < len));
4406
Vladislav Zolotarov94a78b72009-04-27 03:27:43 -07004407 bp->strm->next_in = (typeof(bp->strm->next_in))zbuf + n;
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02004408 bp->strm->avail_in = len - n;
4409 bp->strm->next_out = bp->gunzip_buf;
4410 bp->strm->avail_out = FW_BUF_SIZE;
4411
4412 rc = zlib_inflateInit2(bp->strm, -MAX_WBITS);
4413 if (rc != Z_OK)
4414 return rc;
4415
4416 rc = zlib_inflate(bp->strm, Z_FINISH);
4417 if ((rc != Z_OK) && (rc != Z_STREAM_END))
Joe Perches7995c642010-02-17 15:01:52 +00004418 netdev_err(bp->dev, "Firmware decompression error: %s\n",
4419 bp->strm->msg);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02004420
4421 bp->gunzip_outlen = (FW_BUF_SIZE - bp->strm->avail_out);
4422 if (bp->gunzip_outlen & 0x3)
Vladislav Zolotarovcdaa7cb2010-04-19 01:13:57 +00004423 netdev_err(bp->dev, "Firmware decompression error:"
4424 " gunzip_outlen (%d) not aligned\n",
4425 bp->gunzip_outlen);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02004426 bp->gunzip_outlen >>= 2;
4427
4428 zlib_inflateEnd(bp->strm);
4429
4430 if (rc == Z_STREAM_END)
4431 return 0;
4432
4433 return rc;
4434}
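/*
 * Illustrative sketch (not driver code) of the gzip header handling in
 * bnx2x_gunzip() above: verify the 0x1f 0x8b magic and the deflate
 * method byte, start past the fixed 10-byte header, and when the FNAME
 * flag is set also skip the NUL-terminated original file name (with the
 * bounds check done before the read here).
 */
#include <stdio.h>

#define FNAME 0x8

static int deflate_start(const unsigned char *zbuf, int len)
{
	int n = 10;             /* fixed gzip header size */

	if (len < n || zbuf[0] != 0x1f || zbuf[1] != 0x8b || zbuf[2] != 8)
		return -1;      /* bad header; 8 == Z_DEFLATED */

	if (zbuf[3] & FNAME)
		while ((n < len) && (zbuf[n++] != 0))
			;
	return n;               /* offset of the raw deflate stream */
}

int main(void)
{
	const unsigned char hdr[] = {
		0x1f, 0x8b, 8, FNAME,   /* magic, method, flags */
		0, 0, 0, 0, 0, 3,       /* mtime, xfl, os */
		'f', 'w', 0,            /* original file name */
	};

	printf("deflate data starts at offset %d\n",
	       deflate_start(hdr, (int)sizeof(hdr)));
	return 0;
}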
4435
4436/* nic load/unload */
4437
4438/*
Eilon Greenstein34f80b02008-06-23 20:33:01 -07004439 * General service functions
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02004440 */
4441
4442/* send a NIG loopback debug packet */
4443static void bnx2x_lb_pckt(struct bnx2x *bp)
4444{
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02004445 u32 wb_write[3];
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02004446
4447 /* Ethernet source and destination addresses */
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02004448 wb_write[0] = 0x55555555;
4449 wb_write[1] = 0x55555555;
Eilon Greenstein34f80b02008-06-23 20:33:01 -07004450 wb_write[2] = 0x20; /* SOP */
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02004451 REG_WR_DMAE(bp, NIG_REG_DEBUG_PACKET_LB, wb_write, 3);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02004452
4453 /* NON-IP protocol */
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02004454 wb_write[0] = 0x09000000;
4455 wb_write[1] = 0x55555555;
Eilon Greenstein34f80b02008-06-23 20:33:01 -07004456 wb_write[2] = 0x10; /* EOP, eop_bvalid = 0 */
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02004457 REG_WR_DMAE(bp, NIG_REG_DEBUG_PACKET_LB, wb_write, 3);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02004458}
4459
4460/* some of the internal memories
4461 * are not directly readable from the driver;
4462 * to test them we send debug packets
4463 */
4464static int bnx2x_int_mem_test(struct bnx2x *bp)
4465{
4466 int factor;
4467 int count, i;
4468 u32 val = 0;
4469
Eilon Greensteinad8d3942008-06-23 20:29:02 -07004470 if (CHIP_REV_IS_FPGA(bp))
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02004471 factor = 120;
Eilon Greensteinad8d3942008-06-23 20:29:02 -07004472 else if (CHIP_REV_IS_EMUL(bp))
4473 factor = 200;
4474 else
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02004475 factor = 1;
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02004476
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02004477 /* Disable inputs of parser neighbor blocks */
4478 REG_WR(bp, TSDM_REG_ENABLE_IN1, 0x0);
4479 REG_WR(bp, TCM_REG_PRS_IFEN, 0x0);
4480 REG_WR(bp, CFC_REG_DEBUG0, 0x1);
Eilon Greenstein3196a882008-08-13 15:58:49 -07004481 REG_WR(bp, NIG_REG_PRS_REQ_IN_EN, 0x0);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02004482
4483 /* Write 0 to parser credits for CFC search request */
4484 REG_WR(bp, PRS_REG_CFC_SEARCH_INITIAL_CREDIT, 0x0);
4485
4486 /* send Ethernet packet */
4487 bnx2x_lb_pckt(bp);
4488
4489	/* TODO: do I reset the NIG statistic? */
4490 /* Wait until NIG register shows 1 packet of size 0x10 */
4491 count = 1000 * factor;
4492 while (count) {
Eilon Greenstein34f80b02008-06-23 20:33:01 -07004493
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02004494 bnx2x_read_dmae(bp, NIG_REG_STAT2_BRB_OCTET, 2);
4495 val = *bnx2x_sp(bp, wb_data[0]);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02004496 if (val == 0x10)
4497 break;
4498
4499 msleep(10);
4500 count--;
4501 }
4502 if (val != 0x10) {
4503 BNX2X_ERR("NIG timeout val = 0x%x\n", val);
4504 return -1;
4505 }
4506
4507 /* Wait until PRS register shows 1 packet */
4508 count = 1000 * factor;
4509 while (count) {
4510 val = REG_RD(bp, PRS_REG_NUM_OF_PACKETS);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02004511 if (val == 1)
4512 break;
4513
4514 msleep(10);
4515 count--;
4516 }
4517 if (val != 0x1) {
4518 BNX2X_ERR("PRS timeout val = 0x%x\n", val);
4519 return -2;
4520 }
4521
4522 /* Reset and init BRB, PRS */
Eilon Greenstein34f80b02008-06-23 20:33:01 -07004523 REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_CLEAR, 0x03);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02004524 msleep(50);
Eilon Greenstein34f80b02008-06-23 20:33:01 -07004525 REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_SET, 0x03);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02004526 msleep(50);
Vladislav Zolotarov94a78b72009-04-27 03:27:43 -07004527 bnx2x_init_block(bp, BRB1_BLOCK, COMMON_STAGE);
4528 bnx2x_init_block(bp, PRS_BLOCK, COMMON_STAGE);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02004529
4530 DP(NETIF_MSG_HW, "part2\n");
4531
4532 /* Disable inputs of parser neighbor blocks */
4533 REG_WR(bp, TSDM_REG_ENABLE_IN1, 0x0);
4534 REG_WR(bp, TCM_REG_PRS_IFEN, 0x0);
4535 REG_WR(bp, CFC_REG_DEBUG0, 0x1);
Eilon Greenstein3196a882008-08-13 15:58:49 -07004536 REG_WR(bp, NIG_REG_PRS_REQ_IN_EN, 0x0);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02004537
4538 /* Write 0 to parser credits for CFC search request */
4539 REG_WR(bp, PRS_REG_CFC_SEARCH_INITIAL_CREDIT, 0x0);
4540
4541 /* send 10 Ethernet packets */
4542 for (i = 0; i < 10; i++)
4543 bnx2x_lb_pckt(bp);
4544
4545 /* Wait until NIG register shows 10 + 1
4546 packets of size 11*0x10 = 0xb0 */
4547 count = 1000 * factor;
4548 while (count) {
Eilon Greenstein34f80b02008-06-23 20:33:01 -07004549
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02004550 bnx2x_read_dmae(bp, NIG_REG_STAT2_BRB_OCTET, 2);
4551 val = *bnx2x_sp(bp, wb_data[0]);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02004552 if (val == 0xb0)
4553 break;
4554
4555 msleep(10);
4556 count--;
4557 }
4558 if (val != 0xb0) {
4559 BNX2X_ERR("NIG timeout val = 0x%x\n", val);
4560 return -3;
4561 }
4562
4563 /* Wait until PRS register shows 2 packets */
4564 val = REG_RD(bp, PRS_REG_NUM_OF_PACKETS);
4565 if (val != 2)
4566 BNX2X_ERR("PRS timeout val = 0x%x\n", val);
4567
4568 /* Write 1 to parser credits for CFC search request */
4569 REG_WR(bp, PRS_REG_CFC_SEARCH_INITIAL_CREDIT, 0x1);
4570
4571 /* Wait until PRS register shows 3 packets */
4572 msleep(10 * factor);
4574 val = REG_RD(bp, PRS_REG_NUM_OF_PACKETS);
4575 if (val != 3)
4576 BNX2X_ERR("PRS timeout val = 0x%x\n", val);
4577
4578 /* clear NIG EOP FIFO */
4579 for (i = 0; i < 11; i++)
4580 REG_RD(bp, NIG_REG_INGRESS_EOP_LB_FIFO);
4581 val = REG_RD(bp, NIG_REG_INGRESS_EOP_LB_EMPTY);
4582 if (val != 1) {
4583 BNX2X_ERR("clear of NIG failed\n");
4584 return -4;
4585 }
4586
4587 /* Reset and init BRB, PRS, NIG */
4588 REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_CLEAR, 0x03);
4589 msleep(50);
4590 REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_SET, 0x03);
4591 msleep(50);
Vladislav Zolotarov94a78b72009-04-27 03:27:43 -07004592 bnx2x_init_block(bp, BRB1_BLOCK, COMMON_STAGE);
4593 bnx2x_init_block(bp, PRS_BLOCK, COMMON_STAGE);
Michael Chan37b091b2009-10-10 13:46:55 +00004594#ifndef BCM_CNIC
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02004595 /* set NIC mode */
4596 REG_WR(bp, PRS_REG_NIC_MODE, 1);
4597#endif
4598
4599 /* Enable inputs of parser neighbor blocks */
4600 REG_WR(bp, TSDM_REG_ENABLE_IN1, 0x7fffffff);
4601 REG_WR(bp, TCM_REG_PRS_IFEN, 0x1);
4602 REG_WR(bp, CFC_REG_DEBUG0, 0x0);
Eilon Greenstein3196a882008-08-13 15:58:49 -07004603 REG_WR(bp, NIG_REG_PRS_REQ_IN_EN, 0x1);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02004604
4605 DP(NETIF_MSG_HW, "done\n");
4606
4607 return 0; /* OK */
4608}
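/*
 * A minimal sketch of the bounded-poll pattern the memory test above
 * repeats: read until the expected value appears or the budget (scaled
 * by the emulation/FPGA factor) runs out.  reg_read() is a stand-in
 * for the DMAE/GRC reads; the driver sleeps 10ms between polls.
 */
#include <stdio.h>

static unsigned int fake_reg;

static unsigned int reg_read(void)
{
	return ++fake_reg;      /* pretend the value settles over time */
}

static int poll_for(unsigned int expect, int factor)
{
	int count = 1000 * factor;
	unsigned int val = 0;

	while (count--) {
		val = reg_read();
		if (val == expect)
			return 0;
		/* msleep(10) in the driver */
	}
	printf("timeout, last val = 0x%x\n", val);
	return -1;
}

int main(void)
{
	if (poll_for(0x10, 1) == 0)
		printf("register settled at 0x10\n");
	return 0;
}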
4609
4610static void enable_blocks_attention(struct bnx2x *bp)
4611{
4612 REG_WR(bp, PXP_REG_PXP_INT_MASK_0, 0);
Dmitry Kravkovf2e08992010-10-06 03:28:26 +00004613 if (CHIP_IS_E2(bp))
4614 REG_WR(bp, PXP_REG_PXP_INT_MASK_1, 0x40);
4615 else
4616 REG_WR(bp, PXP_REG_PXP_INT_MASK_1, 0);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02004617 REG_WR(bp, DORQ_REG_DORQ_INT_MASK, 0);
4618 REG_WR(bp, CFC_REG_CFC_INT_MASK, 0);
Dmitry Kravkovf2e08992010-10-06 03:28:26 +00004619 /*
4620	 * mask read length error interrupts in brb for parser
4621	 * (parsing unit and 'checksum and crc' unit);
4622	 * these errors are legal (PU reads a fixed length and CAC can cause
4623	 * a read length error on truncated packets)
4624 */
4625 REG_WR(bp, BRB1_REG_BRB1_INT_MASK, 0xFC00);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02004626 REG_WR(bp, QM_REG_QM_INT_MASK, 0);
4627 REG_WR(bp, TM_REG_TM_INT_MASK, 0);
4628 REG_WR(bp, XSDM_REG_XSDM_INT_MASK_0, 0);
4629 REG_WR(bp, XSDM_REG_XSDM_INT_MASK_1, 0);
4630 REG_WR(bp, XCM_REG_XCM_INT_MASK, 0);
Eilon Greenstein34f80b02008-06-23 20:33:01 -07004631/* REG_WR(bp, XSEM_REG_XSEM_INT_MASK_0, 0); */
4632/* REG_WR(bp, XSEM_REG_XSEM_INT_MASK_1, 0); */
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02004633 REG_WR(bp, USDM_REG_USDM_INT_MASK_0, 0);
4634 REG_WR(bp, USDM_REG_USDM_INT_MASK_1, 0);
4635 REG_WR(bp, UCM_REG_UCM_INT_MASK, 0);
Eilon Greenstein34f80b02008-06-23 20:33:01 -07004636/* REG_WR(bp, USEM_REG_USEM_INT_MASK_0, 0); */
4637/* REG_WR(bp, USEM_REG_USEM_INT_MASK_1, 0); */
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02004638 REG_WR(bp, GRCBASE_UPB + PB_REG_PB_INT_MASK, 0);
4639 REG_WR(bp, CSDM_REG_CSDM_INT_MASK_0, 0);
4640 REG_WR(bp, CSDM_REG_CSDM_INT_MASK_1, 0);
4641 REG_WR(bp, CCM_REG_CCM_INT_MASK, 0);
Eilon Greenstein34f80b02008-06-23 20:33:01 -07004642/* REG_WR(bp, CSEM_REG_CSEM_INT_MASK_0, 0); */
4643/* REG_WR(bp, CSEM_REG_CSEM_INT_MASK_1, 0); */
4644 if (CHIP_REV_IS_FPGA(bp))
4645 REG_WR(bp, PXP2_REG_PXP2_INT_MASK_0, 0x580000);
Dmitry Kravkovf2e08992010-10-06 03:28:26 +00004646 else if (CHIP_IS_E2(bp))
4647 REG_WR(bp, PXP2_REG_PXP2_INT_MASK_0,
4648 (PXP2_PXP2_INT_MASK_0_REG_PGL_CPL_OF
4649 | PXP2_PXP2_INT_MASK_0_REG_PGL_CPL_AFT
4650 | PXP2_PXP2_INT_MASK_0_REG_PGL_PCIE_ATTN
4651 | PXP2_PXP2_INT_MASK_0_REG_PGL_READ_BLOCKED
4652 | PXP2_PXP2_INT_MASK_0_REG_PGL_WRITE_BLOCKED));
Eilon Greenstein34f80b02008-06-23 20:33:01 -07004653 else
4654 REG_WR(bp, PXP2_REG_PXP2_INT_MASK_0, 0x480000);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02004655 REG_WR(bp, TSDM_REG_TSDM_INT_MASK_0, 0);
4656 REG_WR(bp, TSDM_REG_TSDM_INT_MASK_1, 0);
4657 REG_WR(bp, TCM_REG_TCM_INT_MASK, 0);
Eilon Greenstein34f80b02008-06-23 20:33:01 -07004658/* REG_WR(bp, TSEM_REG_TSEM_INT_MASK_0, 0); */
4659/* REG_WR(bp, TSEM_REG_TSEM_INT_MASK_1, 0); */
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02004660 REG_WR(bp, CDU_REG_CDU_INT_MASK, 0);
4661 REG_WR(bp, DMAE_REG_DMAE_INT_MASK, 0);
Eilon Greenstein34f80b02008-06-23 20:33:01 -07004662/* REG_WR(bp, MISC_REG_MISC_INT_MASK, 0); */
4663 REG_WR(bp, PBF_REG_PBF_INT_MASK, 0X18); /* bit 3,4 masked */
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02004664}
4665
Vladislav Zolotarov72fd0712010-04-19 01:13:12 +00004666static const struct {
4667 u32 addr;
4668 u32 mask;
4669} bnx2x_parity_mask[] = {
Dmitry Kravkovf2e08992010-10-06 03:28:26 +00004670 {PXP_REG_PXP_PRTY_MASK, 0x3ffffff},
4671 {PXP2_REG_PXP2_PRTY_MASK_0, 0xffffffff},
4672 {PXP2_REG_PXP2_PRTY_MASK_1, 0x7f},
4673 {HC_REG_HC_PRTY_MASK, 0x7},
4674 {MISC_REG_MISC_PRTY_MASK, 0x1},
Vladislav Zolotarov72fd0712010-04-19 01:13:12 +00004675 {QM_REG_QM_PRTY_MASK, 0x0},
4676 {DORQ_REG_DORQ_PRTY_MASK, 0x0},
4677 {GRCBASE_UPB + PB_REG_PB_PRTY_MASK, 0x0},
4678 {GRCBASE_XPB + PB_REG_PB_PRTY_MASK, 0x0},
4679 {SRC_REG_SRC_PRTY_MASK, 0x4}, /* bit 2 */
4680 {CDU_REG_CDU_PRTY_MASK, 0x0},
4681 {CFC_REG_CFC_PRTY_MASK, 0x0},
4682 {DBG_REG_DBG_PRTY_MASK, 0x0},
4683 {DMAE_REG_DMAE_PRTY_MASK, 0x0},
4684 {BRB1_REG_BRB1_PRTY_MASK, 0x0},
4685 {PRS_REG_PRS_PRTY_MASK, (1<<6)},/* bit 6 */
4686 {TSDM_REG_TSDM_PRTY_MASK, 0x18},/* bit 3,4 */
4687 {CSDM_REG_CSDM_PRTY_MASK, 0x8}, /* bit 3 */
4688 {USDM_REG_USDM_PRTY_MASK, 0x38},/* bit 3,4,5 */
4689 {XSDM_REG_XSDM_PRTY_MASK, 0x8}, /* bit 3 */
4690 {TSEM_REG_TSEM_PRTY_MASK_0, 0x0},
4691 {TSEM_REG_TSEM_PRTY_MASK_1, 0x0},
4692 {USEM_REG_USEM_PRTY_MASK_0, 0x0},
4693 {USEM_REG_USEM_PRTY_MASK_1, 0x0},
4694 {CSEM_REG_CSEM_PRTY_MASK_0, 0x0},
4695 {CSEM_REG_CSEM_PRTY_MASK_1, 0x0},
4696 {XSEM_REG_XSEM_PRTY_MASK_0, 0x0},
4697 {XSEM_REG_XSEM_PRTY_MASK_1, 0x0}
4698};
4699
4700static void enable_blocks_parity(struct bnx2x *bp)
4701{
Nikitas Angelinascbd9da72010-09-08 11:20:37 +00004702 int i;
Vladislav Zolotarov72fd0712010-04-19 01:13:12 +00004703
Nikitas Angelinascbd9da72010-09-08 11:20:37 +00004704 for (i = 0; i < ARRAY_SIZE(bnx2x_parity_mask); i++)
Vladislav Zolotarov72fd0712010-04-19 01:13:12 +00004705 REG_WR(bp, bnx2x_parity_mask[i].addr,
4706 bnx2x_parity_mask[i].mask);
4707}
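/*
 * A minimal sketch of the table-driven pattern above: the per-block
 * parity masks are data, so one loop programs every block and adding a
 * block means adding a table row, not code.  Addresses and masks below
 * are placeholders; the register write is stubbed with printf.
 */
#include <stdio.h>

#define ARRAY_SIZE(x) (sizeof(x) / sizeof((x)[0]))

static const struct {
	unsigned int addr;
	unsigned int mask;
} parity_mask[] = {
	{ 0x103090, 0x3ffffff },        /* placeholder rows */
	{ 0x120588, 0x0 },
	{ 0x1040d8, 0x4 },              /* keep bit 2 masked */
};

int main(void)
{
	for (unsigned int i = 0; i < ARRAY_SIZE(parity_mask); i++)
		printf("REG_WR(0x%06x, 0x%x)\n",
		       parity_mask[i].addr, parity_mask[i].mask);
	return 0;
}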
4708
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02004709
Eilon Greenstein81f75bb2009-01-22 03:37:31 +00004710static void bnx2x_reset_common(struct bnx2x *bp)
4711{
4712 /* reset_common */
4713 REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_CLEAR,
4714 0xd3ffff7f);
4715 REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_2_CLEAR, 0x1403);
4716}
4717
Eilon Greenstein573f2032009-08-12 08:24:14 +00004718static void bnx2x_init_pxp(struct bnx2x *bp)
4719{
4720 u16 devctl;
4721 int r_order, w_order;
4722
4723 pci_read_config_word(bp->pdev,
4724 bp->pcie_cap + PCI_EXP_DEVCTL, &devctl);
4725 DP(NETIF_MSG_HW, "read 0x%x from devctl\n", devctl);
4726 w_order = ((devctl & PCI_EXP_DEVCTL_PAYLOAD) >> 5);
4727 if (bp->mrrs == -1)
4728 r_order = ((devctl & PCI_EXP_DEVCTL_READRQ) >> 12);
4729 else {
4730 DP(NETIF_MSG_HW, "force read order to %d\n", bp->mrrs);
4731 r_order = bp->mrrs;
4732 }
4733
4734 bnx2x_init_pxp_arb(bp, r_order, w_order);
4735}
Eilon Greensteinfd4ef402009-07-21 05:47:27 +00004736
4737static void bnx2x_setup_fan_failure_detection(struct bnx2x *bp)
4738{
Vladislav Zolotarov2145a922010-04-19 01:13:49 +00004739 int is_required;
Eilon Greensteinfd4ef402009-07-21 05:47:27 +00004740 u32 val;
Vladislav Zolotarov2145a922010-04-19 01:13:49 +00004741 int port;
Eilon Greensteinfd4ef402009-07-21 05:47:27 +00004742
Vladislav Zolotarov2145a922010-04-19 01:13:49 +00004743 if (BP_NOMCP(bp))
4744 return;
4745
4746 is_required = 0;
Eilon Greensteinfd4ef402009-07-21 05:47:27 +00004747 val = SHMEM_RD(bp, dev_info.shared_hw_config.config2) &
4748 SHARED_HW_CFG_FAN_FAILURE_MASK;
4749
4750 if (val == SHARED_HW_CFG_FAN_FAILURE_ENABLED)
4751 is_required = 1;
4752
4753 /*
4754 * The fan failure mechanism is usually related to the PHY type since
4755 * the power consumption of the board is affected by the PHY. Currently,
4756 * fan is required for most designs with SFX7101, BCM8727 and BCM8481.
4757 */
4758 else if (val == SHARED_HW_CFG_FAN_FAILURE_PHY_TYPE)
4759 for (port = PORT_0; port < PORT_MAX; port++) {
Eilon Greensteinfd4ef402009-07-21 05:47:27 +00004760 is_required |=
Yaniv Rosnerd90d96b2010-09-07 11:41:04 +00004761 bnx2x_fan_failure_det_req(
4762 bp,
4763 bp->common.shmem_base,
Yaniv Rosnera22f0782010-09-07 11:41:20 +00004764 bp->common.shmem2_base,
Yaniv Rosnerd90d96b2010-09-07 11:41:04 +00004765 port);
Eilon Greensteinfd4ef402009-07-21 05:47:27 +00004766 }
4767
4768 DP(NETIF_MSG_HW, "fan detection setting: %d\n", is_required);
4769
4770 if (is_required == 0)
4771 return;
4772
4773 /* Fan failure is indicated by SPIO 5 */
4774 bnx2x_set_spio(bp, MISC_REGISTERS_SPIO_5,
4775 MISC_REGISTERS_SPIO_INPUT_HI_Z);
4776
4777 /* set to active low mode */
4778 val = REG_RD(bp, MISC_REG_SPIO_INT);
4779 val |= ((1 << MISC_REGISTERS_SPIO_5) <<
Vladislav Zolotarovcdaa7cb2010-04-19 01:13:57 +00004780 MISC_REGISTERS_SPIO_INT_OLD_SET_POS);
Eilon Greensteinfd4ef402009-07-21 05:47:27 +00004781 REG_WR(bp, MISC_REG_SPIO_INT, val);
4782
4783 /* enable interrupt to signal the IGU */
4784 val = REG_RD(bp, MISC_REG_SPIO_EVENT_EN);
4785 val |= (1 << MISC_REGISTERS_SPIO_5);
4786 REG_WR(bp, MISC_REG_SPIO_EVENT_EN, val);
4787}
4788
Dmitry Kravkovf2e08992010-10-06 03:28:26 +00004789static void bnx2x_pretend_func(struct bnx2x *bp, u8 pretend_func_num)
4790{
4791 u32 offset = 0;
4792
4793 if (CHIP_IS_E1(bp))
4794 return;
4795 if (CHIP_IS_E1H(bp) && (pretend_func_num >= E1H_FUNC_MAX))
4796 return;
4797
4798 switch (BP_ABS_FUNC(bp)) {
4799 case 0:
4800 offset = PXP2_REG_PGL_PRETEND_FUNC_F0;
4801 break;
4802 case 1:
4803 offset = PXP2_REG_PGL_PRETEND_FUNC_F1;
4804 break;
4805 case 2:
4806 offset = PXP2_REG_PGL_PRETEND_FUNC_F2;
4807 break;
4808 case 3:
4809 offset = PXP2_REG_PGL_PRETEND_FUNC_F3;
4810 break;
4811 case 4:
4812 offset = PXP2_REG_PGL_PRETEND_FUNC_F4;
4813 break;
4814 case 5:
4815 offset = PXP2_REG_PGL_PRETEND_FUNC_F5;
4816 break;
4817 case 6:
4818 offset = PXP2_REG_PGL_PRETEND_FUNC_F6;
4819 break;
4820 case 7:
4821 offset = PXP2_REG_PGL_PRETEND_FUNC_F7;
4822 break;
4823 default:
4824 return;
4825 }
4826
4827 REG_WR(bp, offset, pretend_func_num);
4828 REG_RD(bp, offset);
4829 DP(NETIF_MSG_HW, "Pretending to func %d\n", pretend_func_num);
4830}
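/*
 * A minimal sketch of the write-then-read-back at the end of
 * bnx2x_pretend_func() above: re-reading the register after the write
 * flushes the posted write, so the new pretend identity is in effect
 * before the caller touches the split ILT registers.  A volatile
 * variable stands in for the MMIO register here.
 */
#include <stdio.h>

static volatile unsigned int pretend_reg;

static void pretend_func(unsigned int func_num)
{
	unsigned int flush;

	pretend_reg = func_num;   /* like REG_WR(bp, offset, func_num) */
	flush = pretend_reg;      /* like the flushing REG_RD(bp, offset) */
	(void)flush;
	printf("pretending to func %u\n", func_num);
}

int main(void)
{
	pretend_func(6);          /* e.g. BP_PATH(bp) + 6 for the TM workaround */
	pretend_func(0);          /* restore own identity */
	return 0;
}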
4831
4832static void bnx2x_pf_disable(struct bnx2x *bp)
4833{
4834 u32 val = REG_RD(bp, IGU_REG_PF_CONFIGURATION);
4835 val &= ~IGU_PF_CONF_FUNC_EN;
4836
4837 REG_WR(bp, IGU_REG_PF_CONFIGURATION, val);
4838 REG_WR(bp, PGLUE_B_REG_INTERNAL_PFID_ENABLE_MASTER, 0);
4839 REG_WR(bp, CFC_REG_WEAK_ENABLE_PF, 0);
4840}
4841
Dmitry Kravkov523224a2010-10-06 03:23:26 +00004842static int bnx2x_init_hw_common(struct bnx2x *bp, u32 load_code)
Eilon Greenstein34f80b02008-06-23 20:33:01 -07004843{
4844 u32 val, i;
4845
Dmitry Kravkovf2e08992010-10-06 03:28:26 +00004846 DP(BNX2X_MSG_MCP, "starting common init func %d\n", BP_ABS_FUNC(bp));
Eilon Greenstein34f80b02008-06-23 20:33:01 -07004847
Eilon Greenstein81f75bb2009-01-22 03:37:31 +00004848 bnx2x_reset_common(bp);
Eilon Greenstein34f80b02008-06-23 20:33:01 -07004849 REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_SET, 0xffffffff);
4850 REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_2_SET, 0xfffc);
4851
Vladislav Zolotarov94a78b72009-04-27 03:27:43 -07004852 bnx2x_init_block(bp, MISC_BLOCK, COMMON_STAGE);
Dmitry Kravkovf2e08992010-10-06 03:28:26 +00004853 if (!CHIP_IS_E1(bp))
Dmitry Kravkovfb3bff12010-10-06 03:26:40 +00004854 REG_WR(bp, MISC_REG_E1HMF_MODE, IS_MF(bp));
Eilon Greenstein34f80b02008-06-23 20:33:01 -07004855
Dmitry Kravkovf2e08992010-10-06 03:28:26 +00004856 if (CHIP_IS_E2(bp)) {
4857 u8 fid;
4858
4859 /**
4860		 * In 4-port or 2-port mode we need to turn off master-enable
4861		 * for everyone; after that, turn it back on for self.
4862		 * So, we disregard multi-function or not, and always disable
4863		 * for all functions on the given path; this means 0,2,4,6 for
4864		 * path 0 and 1,3,5,7 for path 1
4865 */
4866 for (fid = BP_PATH(bp); fid < E2_FUNC_MAX*2; fid += 2) {
4867 if (fid == BP_ABS_FUNC(bp)) {
4868 REG_WR(bp,
4869 PGLUE_B_REG_INTERNAL_PFID_ENABLE_MASTER,
4870 1);
4871 continue;
4872 }
4873
4874 bnx2x_pretend_func(bp, fid);
4875 /* clear pf enable */
4876 bnx2x_pf_disable(bp);
4877 bnx2x_pretend_func(bp, BP_ABS_FUNC(bp));
4878 }
4879 }
Eilon Greenstein34f80b02008-06-23 20:33:01 -07004880
Vladislav Zolotarov94a78b72009-04-27 03:27:43 -07004881 bnx2x_init_block(bp, PXP_BLOCK, COMMON_STAGE);
Eilon Greenstein34f80b02008-06-23 20:33:01 -07004882 if (CHIP_IS_E1(bp)) {
4883 /* enable HW interrupt from PXP on USDM overflow
4884 bit 16 on INT_MASK_0 */
4885 REG_WR(bp, PXP_REG_PXP_INT_MASK_0, 0);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02004886 }
4887
Vladislav Zolotarov94a78b72009-04-27 03:27:43 -07004888 bnx2x_init_block(bp, PXP2_BLOCK, COMMON_STAGE);
Eilon Greenstein34f80b02008-06-23 20:33:01 -07004889 bnx2x_init_pxp(bp);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02004890
4891#ifdef __BIG_ENDIAN
Eilon Greenstein34f80b02008-06-23 20:33:01 -07004892 REG_WR(bp, PXP2_REG_RQ_QM_ENDIAN_M, 1);
4893 REG_WR(bp, PXP2_REG_RQ_TM_ENDIAN_M, 1);
4894 REG_WR(bp, PXP2_REG_RQ_SRC_ENDIAN_M, 1);
4895 REG_WR(bp, PXP2_REG_RQ_CDU_ENDIAN_M, 1);
4896 REG_WR(bp, PXP2_REG_RQ_DBG_ENDIAN_M, 1);
Eilon Greenstein8badd272009-02-12 08:36:15 +00004897 /* make sure this value is 0 */
4898 REG_WR(bp, PXP2_REG_RQ_HC_ENDIAN_M, 0);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02004899
Eilon Greenstein34f80b02008-06-23 20:33:01 -07004900/* REG_WR(bp, PXP2_REG_RD_PBF_SWAP_MODE, 1); */
4901 REG_WR(bp, PXP2_REG_RD_QM_SWAP_MODE, 1);
4902 REG_WR(bp, PXP2_REG_RD_TM_SWAP_MODE, 1);
4903 REG_WR(bp, PXP2_REG_RD_SRC_SWAP_MODE, 1);
4904 REG_WR(bp, PXP2_REG_RD_CDURD_SWAP_MODE, 1);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02004905#endif
4906
Dmitry Kravkov523224a2010-10-06 03:23:26 +00004907 bnx2x_ilt_init_page_size(bp, INITOP_SET);
4908
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02004909
Eilon Greenstein34f80b02008-06-23 20:33:01 -07004910 if (CHIP_REV_IS_FPGA(bp) && CHIP_IS_E1H(bp))
4911 REG_WR(bp, PXP2_REG_PGL_TAGS_LIMIT, 0x1);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02004912
Eilon Greenstein34f80b02008-06-23 20:33:01 -07004913	/* let the HW do its magic ... */
4914 msleep(100);
4915 /* finish PXP init */
4916 val = REG_RD(bp, PXP2_REG_RQ_CFG_DONE);
4917 if (val != 1) {
4918 BNX2X_ERR("PXP2 CFG failed\n");
4919 return -EBUSY;
4920 }
4921 val = REG_RD(bp, PXP2_REG_RD_INIT_DONE);
4922 if (val != 1) {
4923 BNX2X_ERR("PXP2 RD_INIT failed\n");
4924 return -EBUSY;
4925 }
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02004926
Dmitry Kravkovf2e08992010-10-06 03:28:26 +00004927 /* Timers bug workaround E2 only. We need to set the entire ILT to
4928 * have entries with value "0" and valid bit on.
4929 * This needs to be done by the first PF that is loaded in a path
4930 * (i.e. common phase)
4931 */
4932 if (CHIP_IS_E2(bp)) {
4933 struct ilt_client_info ilt_cli;
4934 struct bnx2x_ilt ilt;
4935 memset(&ilt_cli, 0, sizeof(struct ilt_client_info));
4936 memset(&ilt, 0, sizeof(struct bnx2x_ilt));
4937
4938		/* initialize dummy TM client */
4939 ilt_cli.start = 0;
4940 ilt_cli.end = ILT_NUM_PAGE_ENTRIES - 1;
4941 ilt_cli.client_num = ILT_CLIENT_TM;
4942
4943 /* Step 1: set zeroes to all ilt page entries with valid bit on
4944 * Step 2: set the timers first/last ilt entry to point
4945 * to the entire range to prevent ILT range error for 3rd/4th
4946		 * vnic (this code assumes existence of the vnic)
4947 *
4948 * both steps performed by call to bnx2x_ilt_client_init_op()
4949 * with dummy TM client
4950 *
4951 * we must use pretend since PXP2_REG_RQ_##blk##_FIRST_ILT
4952		 * and its sibling are split registers
4953 */
4954 bnx2x_pretend_func(bp, (BP_PATH(bp) + 6));
4955 bnx2x_ilt_client_init_op_ilt(bp, &ilt, &ilt_cli, INITOP_CLEAR);
4956 bnx2x_pretend_func(bp, BP_ABS_FUNC(bp));
4957
4958 REG_WR(bp, PXP2_REG_RQ_DRAM_ALIGN, BNX2X_PXP_DRAM_ALIGN);
4959 REG_WR(bp, PXP2_REG_RQ_DRAM_ALIGN_RD, BNX2X_PXP_DRAM_ALIGN);
4960 REG_WR(bp, PXP2_REG_RQ_DRAM_ALIGN_SEL, 1);
4961 }
4962
4963
Eilon Greenstein34f80b02008-06-23 20:33:01 -07004964 REG_WR(bp, PXP2_REG_RQ_DISABLE_INPUTS, 0);
4965 REG_WR(bp, PXP2_REG_RD_DISABLE_INPUTS, 0);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02004966
Dmitry Kravkovf2e08992010-10-06 03:28:26 +00004967 if (CHIP_IS_E2(bp)) {
4968 int factor = CHIP_REV_IS_EMUL(bp) ? 1000 :
4969 (CHIP_REV_IS_FPGA(bp) ? 400 : 0);
4970 bnx2x_init_block(bp, PGLUE_B_BLOCK, COMMON_STAGE);
4971
4972 bnx2x_init_block(bp, ATC_BLOCK, COMMON_STAGE);
4973
4974		/* let the HW do its magic ... */
4975 do {
4976 msleep(200);
4977 val = REG_RD(bp, ATC_REG_ATC_INIT_DONE);
4978 } while (factor-- && (val != 1));
4979
4980 if (val != 1) {
4981 BNX2X_ERR("ATC_INIT failed\n");
4982 return -EBUSY;
4983 }
4984 }
4985
Vladislav Zolotarov94a78b72009-04-27 03:27:43 -07004986 bnx2x_init_block(bp, DMAE_BLOCK, COMMON_STAGE);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02004987
Eilon Greenstein34f80b02008-06-23 20:33:01 -07004988 /* clean the DMAE memory */
4989 bp->dmae_ready = 1;
4990 bnx2x_init_fill(bp, TSEM_REG_PRAM, 0, 8);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02004991
Vladislav Zolotarov94a78b72009-04-27 03:27:43 -07004992 bnx2x_init_block(bp, TCM_BLOCK, COMMON_STAGE);
4993 bnx2x_init_block(bp, UCM_BLOCK, COMMON_STAGE);
4994 bnx2x_init_block(bp, CCM_BLOCK, COMMON_STAGE);
4995 bnx2x_init_block(bp, XCM_BLOCK, COMMON_STAGE);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02004996
Eilon Greenstein34f80b02008-06-23 20:33:01 -07004997 bnx2x_read_dmae(bp, XSEM_REG_PASSIVE_BUFFER, 3);
4998 bnx2x_read_dmae(bp, CSEM_REG_PASSIVE_BUFFER, 3);
4999 bnx2x_read_dmae(bp, TSEM_REG_PASSIVE_BUFFER, 3);
5000 bnx2x_read_dmae(bp, USEM_REG_PASSIVE_BUFFER, 3);
5001
Vladislav Zolotarov94a78b72009-04-27 03:27:43 -07005002 bnx2x_init_block(bp, QM_BLOCK, COMMON_STAGE);
Michael Chan37b091b2009-10-10 13:46:55 +00005003
Dmitry Kravkovf2e08992010-10-06 03:28:26 +00005004 if (CHIP_MODE_IS_4_PORT(bp))
5005 bnx2x_init_block(bp, QM_4PORT_BLOCK, COMMON_STAGE);
Dmitry Kravkov523224a2010-10-06 03:23:26 +00005006 /* QM queues pointers table */
5007 bnx2x_qm_init_ptr_table(bp, bp->qm_cid_count, INITOP_SET);
Michael Chan37b091b2009-10-10 13:46:55 +00005008
Eilon Greenstein34f80b02008-06-23 20:33:01 -07005009 /* soft reset pulse */
5010 REG_WR(bp, QM_REG_SOFT_RESET, 1);
5011 REG_WR(bp, QM_REG_SOFT_RESET, 0);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02005012
Michael Chan37b091b2009-10-10 13:46:55 +00005013#ifdef BCM_CNIC
Vladislav Zolotarov94a78b72009-04-27 03:27:43 -07005014 bnx2x_init_block(bp, TIMERS_BLOCK, COMMON_STAGE);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02005015#endif
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02005016
Vladislav Zolotarov94a78b72009-04-27 03:27:43 -07005017 bnx2x_init_block(bp, DQ_BLOCK, COMMON_STAGE);
Dmitry Kravkov523224a2010-10-06 03:23:26 +00005018 REG_WR(bp, DORQ_REG_DPM_CID_OFST, BNX2X_DB_SHIFT);
5019
Eilon Greenstein34f80b02008-06-23 20:33:01 -07005020 if (!CHIP_REV_IS_SLOW(bp)) {
5021 /* enable hw interrupt from doorbell Q */
5022 REG_WR(bp, DORQ_REG_DORQ_INT_MASK, 0);
5023 }
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02005024
Vladislav Zolotarov94a78b72009-04-27 03:27:43 -07005025 bnx2x_init_block(bp, BRB1_BLOCK, COMMON_STAGE);
Dmitry Kravkovf2e08992010-10-06 03:28:26 +00005026 if (CHIP_MODE_IS_4_PORT(bp)) {
5027 REG_WR(bp, BRB1_REG_FULL_LB_XOFF_THRESHOLD, 248);
5028 REG_WR(bp, BRB1_REG_FULL_LB_XON_THRESHOLD, 328);
5029 }
5030
Vladislav Zolotarov94a78b72009-04-27 03:27:43 -07005031 bnx2x_init_block(bp, PRS_BLOCK, COMMON_STAGE);
Eilon Greenstein26c8fa42009-01-14 21:29:55 -08005032 REG_WR(bp, PRS_REG_A_PRSU_20, 0xf);
Michael Chan37b091b2009-10-10 13:46:55 +00005033#ifndef BCM_CNIC
Eilon Greenstein3196a882008-08-13 15:58:49 -07005034 /* set NIC mode */
5035 REG_WR(bp, PRS_REG_NIC_MODE, 1);
Michael Chan37b091b2009-10-10 13:46:55 +00005036#endif
Dmitry Kravkovf2e08992010-10-06 03:28:26 +00005037 if (!CHIP_IS_E1(bp))
Dmitry Kravkovfb3bff12010-10-06 03:26:40 +00005038 REG_WR(bp, PRS_REG_E1HOV_MODE, IS_MF(bp));
Dmitry Kravkovf2e08992010-10-06 03:28:26 +00005039 if (CHIP_IS_E2(bp)) {
5040 /* Bit-map indicating which L2 hdrs may appear after the
5041 basic Ethernet header */
5042 int has_ovlan = IS_MF(bp);
5043 REG_WR(bp, PRS_REG_HDRS_AFTER_BASIC, (has_ovlan ? 7 : 6));
5044 REG_WR(bp, PRS_REG_MUST_HAVE_HDRS, (has_ovlan ? 1 : 0));
5045 }
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02005046
Vladislav Zolotarov94a78b72009-04-27 03:27:43 -07005047 bnx2x_init_block(bp, TSDM_BLOCK, COMMON_STAGE);
5048 bnx2x_init_block(bp, CSDM_BLOCK, COMMON_STAGE);
5049 bnx2x_init_block(bp, USDM_BLOCK, COMMON_STAGE);
5050 bnx2x_init_block(bp, XSDM_BLOCK, COMMON_STAGE);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02005051
Eilon Greensteinca003922009-08-12 22:53:28 -07005052 bnx2x_init_fill(bp, TSEM_REG_FAST_MEMORY, 0, STORM_INTMEM_SIZE(bp));
5053 bnx2x_init_fill(bp, USEM_REG_FAST_MEMORY, 0, STORM_INTMEM_SIZE(bp));
5054 bnx2x_init_fill(bp, CSEM_REG_FAST_MEMORY, 0, STORM_INTMEM_SIZE(bp));
5055 bnx2x_init_fill(bp, XSEM_REG_FAST_MEMORY, 0, STORM_INTMEM_SIZE(bp));
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02005056
Vladislav Zolotarov94a78b72009-04-27 03:27:43 -07005057 bnx2x_init_block(bp, TSEM_BLOCK, COMMON_STAGE);
5058 bnx2x_init_block(bp, USEM_BLOCK, COMMON_STAGE);
5059 bnx2x_init_block(bp, CSEM_BLOCK, COMMON_STAGE);
5060 bnx2x_init_block(bp, XSEM_BLOCK, COMMON_STAGE);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02005061
Dmitry Kravkovf2e08992010-10-06 03:28:26 +00005062 if (CHIP_MODE_IS_4_PORT(bp))
5063 bnx2x_init_block(bp, XSEM_4PORT_BLOCK, COMMON_STAGE);
5064
Eilon Greenstein34f80b02008-06-23 20:33:01 -07005065 /* sync semi rtc */
5066 REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_CLEAR,
5067 0x80000000);
5068 REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_SET,
5069 0x80000000);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02005070
Vladislav Zolotarov94a78b72009-04-27 03:27:43 -07005071 bnx2x_init_block(bp, UPB_BLOCK, COMMON_STAGE);
5072 bnx2x_init_block(bp, XPB_BLOCK, COMMON_STAGE);
5073 bnx2x_init_block(bp, PBF_BLOCK, COMMON_STAGE);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02005074
Dmitry Kravkovf2e08992010-10-06 03:28:26 +00005075 if (CHIP_IS_E2(bp)) {
5076 int has_ovlan = IS_MF(bp);
5077 REG_WR(bp, PBF_REG_HDRS_AFTER_BASIC, (has_ovlan ? 7 : 6));
5078 REG_WR(bp, PBF_REG_MUST_HAVE_HDRS, (has_ovlan ? 1 : 0));
5079 }
5080
Eilon Greenstein34f80b02008-06-23 20:33:01 -07005081 REG_WR(bp, SRC_REG_SOFT_RST, 1);
Tom Herbertc68ed252010-04-23 00:10:52 -07005082 for (i = SRC_REG_KEYRSS0_0; i <= SRC_REG_KEYRSS1_9; i += 4)
5083 REG_WR(bp, i, random32());
Vladislav Zolotarov94a78b72009-04-27 03:27:43 -07005084 bnx2x_init_block(bp, SRCH_BLOCK, COMMON_STAGE);
Michael Chan37b091b2009-10-10 13:46:55 +00005085#ifdef BCM_CNIC
5086 REG_WR(bp, SRC_REG_KEYSEARCH_0, 0x63285672);
5087 REG_WR(bp, SRC_REG_KEYSEARCH_1, 0x24b8f2cc);
5088 REG_WR(bp, SRC_REG_KEYSEARCH_2, 0x223aef9b);
5089 REG_WR(bp, SRC_REG_KEYSEARCH_3, 0x26001e3a);
5090 REG_WR(bp, SRC_REG_KEYSEARCH_4, 0x7ae91116);
5091 REG_WR(bp, SRC_REG_KEYSEARCH_5, 0x5ce5230b);
5092 REG_WR(bp, SRC_REG_KEYSEARCH_6, 0x298d8adf);
5093 REG_WR(bp, SRC_REG_KEYSEARCH_7, 0x6eb0ff09);
5094 REG_WR(bp, SRC_REG_KEYSEARCH_8, 0x1830f82f);
5095 REG_WR(bp, SRC_REG_KEYSEARCH_9, 0x01e46be7);
5096#endif
Eilon Greenstein34f80b02008-06-23 20:33:01 -07005097 REG_WR(bp, SRC_REG_SOFT_RST, 0);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02005098
Eilon Greenstein34f80b02008-06-23 20:33:01 -07005099 if (sizeof(union cdu_context) != 1024)
5100 /* we currently assume that a context is 1024 bytes */
Vladislav Zolotarovcdaa7cb2010-04-19 01:13:57 +00005101 dev_alert(&bp->pdev->dev, "please adjust the size "
5102 "of cdu_context(%ld)\n",
Joe Perches7995c642010-02-17 15:01:52 +00005103 (long)sizeof(union cdu_context));
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02005104
Vladislav Zolotarov94a78b72009-04-27 03:27:43 -07005105 bnx2x_init_block(bp, CDU_BLOCK, COMMON_STAGE);
Eilon Greenstein34f80b02008-06-23 20:33:01 -07005106 val = (4 << 24) + (0 << 12) + 1024;
5107 REG_WR(bp, CDU_REG_CDU_GLOBAL_PARAMS, val);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02005108
Vladislav Zolotarov94a78b72009-04-27 03:27:43 -07005109 bnx2x_init_block(bp, CFC_BLOCK, COMMON_STAGE);
Eilon Greenstein34f80b02008-06-23 20:33:01 -07005110 REG_WR(bp, CFC_REG_INIT_REG, 0x7FF);
Eilon Greenstein8d9c5f32009-02-15 23:24:08 -08005111 /* enable context validation interrupt from CFC */
5112 REG_WR(bp, CFC_REG_CFC_INT_MASK, 0);
5113
5114 /* set the thresholds to prevent CFC/CDU race */
5115 REG_WR(bp, CFC_REG_DEBUG0, 0x20020000);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02005116
Vladislav Zolotarov94a78b72009-04-27 03:27:43 -07005117 bnx2x_init_block(bp, HC_BLOCK, COMMON_STAGE);
Dmitry Kravkovf2e08992010-10-06 03:28:26 +00005118
5119 if (CHIP_IS_E2(bp) && BP_NOMCP(bp))
5120 REG_WR(bp, IGU_REG_RESET_MEMORIES, 0x36);
5121
5122 bnx2x_init_block(bp, IGU_BLOCK, COMMON_STAGE);
Vladislav Zolotarov94a78b72009-04-27 03:27:43 -07005123 bnx2x_init_block(bp, MISC_AEU_BLOCK, COMMON_STAGE);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02005124
Vladislav Zolotarov94a78b72009-04-27 03:27:43 -07005125 bnx2x_init_block(bp, PXPCS_BLOCK, COMMON_STAGE);
Eilon Greenstein34f80b02008-06-23 20:33:01 -07005126 /* Reset PCIE errors for debug */
5127 REG_WR(bp, 0x2814, 0xffffffff);
5128 REG_WR(bp, 0x3820, 0xffffffff);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02005129
Dmitry Kravkovf2e08992010-10-06 03:28:26 +00005130 if (CHIP_IS_E2(bp)) {
5131 REG_WR(bp, PCICFG_OFFSET + PXPCS_TL_CONTROL_5,
5132 (PXPCS_TL_CONTROL_5_ERR_UNSPPORT1 |
5133 PXPCS_TL_CONTROL_5_ERR_UNSPPORT));
5134 REG_WR(bp, PCICFG_OFFSET + PXPCS_TL_FUNC345_STAT,
5135 (PXPCS_TL_FUNC345_STAT_ERR_UNSPPORT4 |
5136 PXPCS_TL_FUNC345_STAT_ERR_UNSPPORT3 |
5137 PXPCS_TL_FUNC345_STAT_ERR_UNSPPORT2));
5138 REG_WR(bp, PCICFG_OFFSET + PXPCS_TL_FUNC678_STAT,
5139 (PXPCS_TL_FUNC678_STAT_ERR_UNSPPORT7 |
5140 PXPCS_TL_FUNC678_STAT_ERR_UNSPPORT6 |
5141 PXPCS_TL_FUNC678_STAT_ERR_UNSPPORT5));
5142 }
5143
Vladislav Zolotarov94a78b72009-04-27 03:27:43 -07005144 bnx2x_init_block(bp, EMAC0_BLOCK, COMMON_STAGE);
Vladislav Zolotarov94a78b72009-04-27 03:27:43 -07005145 bnx2x_init_block(bp, EMAC1_BLOCK, COMMON_STAGE);
Vladislav Zolotarov94a78b72009-04-27 03:27:43 -07005146 bnx2x_init_block(bp, DBU_BLOCK, COMMON_STAGE);
Vladislav Zolotarov94a78b72009-04-27 03:27:43 -07005147 bnx2x_init_block(bp, DBG_BLOCK, COMMON_STAGE);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02005148
Vladislav Zolotarov94a78b72009-04-27 03:27:43 -07005149 bnx2x_init_block(bp, NIG_BLOCK, COMMON_STAGE);
Dmitry Kravkovf2e08992010-10-06 03:28:26 +00005150 if (!CHIP_IS_E1(bp)) {
Dmitry Kravkovfb3bff12010-10-06 03:26:40 +00005151 REG_WR(bp, NIG_REG_LLH_MF_MODE, IS_MF(bp));
5152 REG_WR(bp, NIG_REG_LLH_E1HOV_MODE, IS_MF(bp));
Eilon Greenstein34f80b02008-06-23 20:33:01 -07005153 }
Dmitry Kravkovf2e08992010-10-06 03:28:26 +00005154 if (CHIP_IS_E2(bp)) {
5155 /* Bit-map indicating which L2 hdrs may appear after the
5156 basic Ethernet header */
5157 REG_WR(bp, NIG_REG_P0_HDRS_AFTER_BASIC, (IS_MF(bp) ? 7 : 6));
5158 }
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02005159
Eilon Greenstein34f80b02008-06-23 20:33:01 -07005160 if (CHIP_REV_IS_SLOW(bp))
5161 msleep(200);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02005162
Eilon Greenstein34f80b02008-06-23 20:33:01 -07005163 /* finish CFC init */
5164 val = reg_poll(bp, CFC_REG_LL_INIT_DONE, 1, 100, 10);
5165 if (val != 1) {
5166 BNX2X_ERR("CFC LL_INIT failed\n");
5167 return -EBUSY;
5168 }
5169 val = reg_poll(bp, CFC_REG_AC_INIT_DONE, 1, 100, 10);
5170 if (val != 1) {
5171 BNX2X_ERR("CFC AC_INIT failed\n");
5172 return -EBUSY;
5173 }
5174 val = reg_poll(bp, CFC_REG_CAM_INIT_DONE, 1, 100, 10);
5175 if (val != 1) {
5176 BNX2X_ERR("CFC CAM_INIT failed\n");
5177 return -EBUSY;
5178 }
5179 REG_WR(bp, CFC_REG_DEBUG0, 0);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02005180
Dmitry Kravkovf2e08992010-10-06 03:28:26 +00005181 if (CHIP_IS_E1(bp)) {
5182 /* read NIG statistic
5183		   to see if this is our first bring-up since power-up */
5184 bnx2x_read_dmae(bp, NIG_REG_STAT2_BRB_OCTET, 2);
5185 val = *bnx2x_sp(bp, wb_data[0]);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02005186
Dmitry Kravkovf2e08992010-10-06 03:28:26 +00005187 /* do internal memory self test */
5188 if ((val == 0) && bnx2x_int_mem_test(bp)) {
5189 BNX2X_ERR("internal mem self test failed\n");
5190 return -EBUSY;
5191 }
Eilon Greenstein34f80b02008-06-23 20:33:01 -07005192 }
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02005193
Yaniv Rosnerd90d96b2010-09-07 11:41:04 +00005194 bp->port.need_hw_lock = bnx2x_hw_lock_required(bp,
Yaniv Rosnera22f0782010-09-07 11:41:20 +00005195 bp->common.shmem_base,
5196 bp->common.shmem2_base);
Eliezer Tamirf1410642008-02-28 11:51:50 -08005197
Eilon Greensteinfd4ef402009-07-21 05:47:27 +00005198 bnx2x_setup_fan_failure_detection(bp);
5199
Eilon Greenstein34f80b02008-06-23 20:33:01 -07005200 /* clear PXP2 attentions */
5201 REG_RD(bp, PXP2_REG_PXP2_INT_STS_CLR_0);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02005202
Eilon Greenstein34f80b02008-06-23 20:33:01 -07005203 enable_blocks_attention(bp);
Vladislav Zolotarov72fd0712010-04-19 01:13:12 +00005204 if (CHIP_PARITY_SUPPORTED(bp))
5205 enable_blocks_parity(bp);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02005206
Yaniv Rosner6bbca912008-08-13 15:57:28 -07005207 if (!BP_NOMCP(bp)) {
Dmitry Kravkovf2e08992010-10-06 03:28:26 +00005208 /* In E2 2-PORT mode, same ext phy is used for the two paths */
5209 if ((load_code == FW_MSG_CODE_DRV_LOAD_COMMON_CHIP) ||
5210 CHIP_IS_E1x(bp)) {
5211 u32 shmem_base[2], shmem2_base[2];
5212 shmem_base[0] = bp->common.shmem_base;
5213 shmem2_base[0] = bp->common.shmem2_base;
5214 if (CHIP_IS_E2(bp)) {
5215 shmem_base[1] =
5216 SHMEM2_RD(bp, other_shmem_base_addr);
5217 shmem2_base[1] =
5218 SHMEM2_RD(bp, other_shmem2_base_addr);
5219 }
5220 bnx2x_acquire_phy_lock(bp);
5221 bnx2x_common_init_phy(bp, shmem_base, shmem2_base,
5222 bp->common.chip_id);
5223 bnx2x_release_phy_lock(bp);
5224 }
Yaniv Rosner6bbca912008-08-13 15:57:28 -07005225 } else
5226 BNX2X_ERR("Bootcode is missing - can not initialize link\n");
5227
Eilon Greenstein34f80b02008-06-23 20:33:01 -07005228 return 0;
5229}
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02005230
Dmitry Kravkov523224a2010-10-06 03:23:26 +00005231static int bnx2x_init_hw_port(struct bnx2x *bp)
Eilon Greenstein34f80b02008-06-23 20:33:01 -07005232{
5233 int port = BP_PORT(bp);
Vladislav Zolotarov94a78b72009-04-27 03:27:43 -07005234 int init_stage = port ? PORT1_STAGE : PORT0_STAGE;
Eilon Greenstein1c063282009-02-12 08:36:43 +00005235 u32 low, high;
Eilon Greenstein34f80b02008-06-23 20:33:01 -07005236 u32 val;
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02005237
Vladislav Zolotarovcdaa7cb2010-04-19 01:13:57 +00005238 DP(BNX2X_MSG_MCP, "starting port init port %d\n", port);
Eilon Greenstein34f80b02008-06-23 20:33:01 -07005239
5240 REG_WR(bp, NIG_REG_MASK_INTERRUPT_PORT0 + port*4, 0);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02005241
Vladislav Zolotarov94a78b72009-04-27 03:27:43 -07005242 bnx2x_init_block(bp, PXP_BLOCK, init_stage);
Vladislav Zolotarov94a78b72009-04-27 03:27:43 -07005243 bnx2x_init_block(bp, PXP2_BLOCK, init_stage);
Eilon Greensteinca003922009-08-12 22:53:28 -07005244
Dmitry Kravkovf2e08992010-10-06 03:28:26 +00005245	/* Timers bug workaround: the common phase disables the pf_master bit
5246	 * in pglue; we need to enable it here before any DMAE accesses are
5247	 * attempted. Therefore we manually add the enable-master in the
5248	 * port phase (it also happens in the function phase)
5249 */
5250 if (CHIP_IS_E2(bp))
5251 REG_WR(bp, PGLUE_B_REG_INTERNAL_PFID_ENABLE_MASTER, 1);
5252
Eilon Greensteinca003922009-08-12 22:53:28 -07005253 bnx2x_init_block(bp, TCM_BLOCK, init_stage);
5254 bnx2x_init_block(bp, UCM_BLOCK, init_stage);
5255 bnx2x_init_block(bp, CCM_BLOCK, init_stage);
Vladislav Zolotarov94a78b72009-04-27 03:27:43 -07005256 bnx2x_init_block(bp, XCM_BLOCK, init_stage);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02005257
Dmitry Kravkov523224a2010-10-06 03:23:26 +00005258 /* QM cid (connection) count */
5259 bnx2x_qm_init_cid_count(bp, bp->qm_cid_count, INITOP_SET);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02005260
Dmitry Kravkov523224a2010-10-06 03:23:26 +00005261#ifdef BCM_CNIC
Vladislav Zolotarov94a78b72009-04-27 03:27:43 -07005262 bnx2x_init_block(bp, TIMERS_BLOCK, init_stage);
Michael Chan37b091b2009-10-10 13:46:55 +00005263 REG_WR(bp, TM_REG_LIN0_SCAN_TIME + port*4, 20);
5264 REG_WR(bp, TM_REG_LIN0_MAX_ACTIVE_CID + port*4, 31);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02005265#endif
Vladislav Zolotarovcdaa7cb2010-04-19 01:13:57 +00005266
Vladislav Zolotarov94a78b72009-04-27 03:27:43 -07005267 bnx2x_init_block(bp, DQ_BLOCK, init_stage);
Eilon Greenstein1c063282009-02-12 08:36:43 +00005268
Dmitry Kravkovf2e08992010-10-06 03:28:26 +00005269 if (CHIP_MODE_IS_4_PORT(bp))
5270 bnx2x_init_block(bp, QM_4PORT_BLOCK, init_stage);
Eilon Greenstein1c063282009-02-12 08:36:43 +00005271
Dmitry Kravkovf2e08992010-10-06 03:28:26 +00005272 if (CHIP_IS_E1(bp) || CHIP_IS_E1H(bp)) {
5273 bnx2x_init_block(bp, BRB1_BLOCK, init_stage);
5274 if (CHIP_REV_IS_SLOW(bp) && CHIP_IS_E1(bp)) {
5275 /* no pause for emulation and FPGA */
5276 low = 0;
5277 high = 513;
5278 } else {
5279 if (IS_MF(bp))
5280 low = ((bp->flags & ONE_PORT_FLAG) ? 160 : 246);
5281 else if (bp->dev->mtu > 4096) {
5282 if (bp->flags & ONE_PORT_FLAG)
5283 low = 160;
5284 else {
5285 val = bp->dev->mtu;
5286 /* (24*1024 + val*4)/256 */
5287 low = 96 + (val/64) +
5288 ((val % 64) ? 1 : 0);
5289 }
5290 } else
5291 low = ((bp->flags & ONE_PORT_FLAG) ? 80 : 160);
5292 high = low + 56; /* 14*1024/256 */
5293 }
5294 REG_WR(bp, BRB1_REG_PAUSE_LOW_THRESHOLD_0 + port*4, low);
5295 REG_WR(bp, BRB1_REG_PAUSE_HIGH_THRESHOLD_0 + port*4, high);
5296 }
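	/*
	 * Worked example, assuming the formula above: an mtu of 9000 on a
	 * multi-port board gives low = 96 + 9000/64 + 1 = 237, i.e.
	 * (24*1024 + 9000*4)/256 rounded up, and high = 237 + 56 = 293,
	 * both in 256-byte units.
	 */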
5297
5298 if (CHIP_MODE_IS_4_PORT(bp)) {
5299 REG_WR(bp, BRB1_REG_PAUSE_0_XOFF_THRESHOLD_0 + port*8, 248);
5300 REG_WR(bp, BRB1_REG_PAUSE_0_XON_THRESHOLD_0 + port*8, 328);
5301 REG_WR(bp, (BP_PORT(bp) ? BRB1_REG_MAC_GUARANTIED_1 :
5302 BRB1_REG_MAC_GUARANTIED_0), 40);
5303 }

	bnx2x_init_block(bp, PRS_BLOCK, init_stage);

	bnx2x_init_block(bp, TSDM_BLOCK, init_stage);
	bnx2x_init_block(bp, CSDM_BLOCK, init_stage);
	bnx2x_init_block(bp, USDM_BLOCK, init_stage);
	bnx2x_init_block(bp, XSDM_BLOCK, init_stage);

	bnx2x_init_block(bp, TSEM_BLOCK, init_stage);
	bnx2x_init_block(bp, USEM_BLOCK, init_stage);
	bnx2x_init_block(bp, CSEM_BLOCK, init_stage);
	bnx2x_init_block(bp, XSEM_BLOCK, init_stage);
	if (CHIP_MODE_IS_4_PORT(bp))
		bnx2x_init_block(bp, XSEM_4PORT_BLOCK, init_stage);

	bnx2x_init_block(bp, UPB_BLOCK, init_stage);
	bnx2x_init_block(bp, XPB_BLOCK, init_stage);

	bnx2x_init_block(bp, PBF_BLOCK, init_stage);

	if (!CHIP_IS_E2(bp)) {
		/* configure PBF to work without PAUSE mtu 9000 */
		REG_WR(bp, PBF_REG_P0_PAUSE_ENABLE + port*4, 0);

		/* update threshold */
		REG_WR(bp, PBF_REG_P0_ARB_THRSH + port*4, (9040/16));
		/* update init credit */
		REG_WR(bp, PBF_REG_P0_INIT_CRD + port*4, (9040/16) + 553 - 22);

		/* probe changes */
		REG_WR(bp, PBF_REG_INIT_P0 + port*4, 1);
		udelay(50);
		REG_WR(bp, PBF_REG_INIT_P0 + port*4, 0);
	}

#ifdef BCM_CNIC
	bnx2x_init_block(bp, SRCH_BLOCK, init_stage);
#endif
	bnx2x_init_block(bp, CDU_BLOCK, init_stage);
	bnx2x_init_block(bp, CFC_BLOCK, init_stage);

	if (CHIP_IS_E1(bp)) {
		REG_WR(bp, HC_REG_LEADING_EDGE_0 + port*8, 0);
		REG_WR(bp, HC_REG_TRAILING_EDGE_0 + port*8, 0);
	}
	bnx2x_init_block(bp, HC_BLOCK, init_stage);

	bnx2x_init_block(bp, IGU_BLOCK, init_stage);

	bnx2x_init_block(bp, MISC_AEU_BLOCK, init_stage);
	/* init aeu_mask_attn_func_0/1:
	 *  - SF mode: bits 3-7 are masked. only bits 0-2 are in use
	 *  - MF mode: bit 3 is masked. bits 0-2 are in use as in SF
	 *    bits 4-7 are used for "per vn group attention" */
	REG_WR(bp, MISC_REG_AEU_MASK_ATTN_FUNC_0 + port*4,
	       (IS_MF(bp) ? 0xF7 : 0x7));

	bnx2x_init_block(bp, PXPCS_BLOCK, init_stage);
	bnx2x_init_block(bp, EMAC0_BLOCK, init_stage);
	bnx2x_init_block(bp, EMAC1_BLOCK, init_stage);
	bnx2x_init_block(bp, DBU_BLOCK, init_stage);
	bnx2x_init_block(bp, DBG_BLOCK, init_stage);

	bnx2x_init_block(bp, NIG_BLOCK, init_stage);

	REG_WR(bp, NIG_REG_XGXS_SERDES0_MODE_SEL + port*4, 1);

	if (!CHIP_IS_E1(bp)) {
		/* 0x2 disable mf_ov, 0x1 enable */
		REG_WR(bp, NIG_REG_LLH0_BRB1_DRV_MASK_MF + port*4,
		       (IS_MF(bp) ? 0x1 : 0x2));

		if (CHIP_IS_E2(bp)) {
			val = 0;
			switch (bp->mf_mode) {
			case MULTI_FUNCTION_SD:
				val = 1;
				break;
			case MULTI_FUNCTION_SI:
				val = 2;
				break;
			}

			REG_WR(bp, (BP_PORT(bp) ? NIG_REG_LLH1_CLS_TYPE :
						  NIG_REG_LLH0_CLS_TYPE), val);
		}
		{
			REG_WR(bp, NIG_REG_LLFC_ENABLE_0 + port*4, 0);
			REG_WR(bp, NIG_REG_LLFC_OUT_EN_0 + port*4, 0);
			REG_WR(bp, NIG_REG_PAUSE_ENABLE_0 + port*4, 1);
		}
	}

	bnx2x_init_block(bp, MCP_BLOCK, init_stage);
	bnx2x_init_block(bp, DMAE_BLOCK, init_stage);
	bp->port.need_hw_lock = bnx2x_hw_lock_required(bp,
						       bp->common.shmem_base,
						       bp->common.shmem2_base);
	if (bnx2x_fan_failure_det_req(bp, bp->common.shmem_base,
				      bp->common.shmem2_base, port)) {
		u32 reg_addr = (port ? MISC_REG_AEU_ENABLE1_FUNC_1_OUT_0 :
				       MISC_REG_AEU_ENABLE1_FUNC_0_OUT_0);
		val = REG_RD(bp, reg_addr);
		val |= AEU_INPUTS_ATTN_BITS_SPIO5;
		REG_WR(bp, reg_addr, val);
	}
	bnx2x__link_reset(bp);

	return 0;
}

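/* Program a single ILT entry: pick the chip-specific on-chip address
 * table base and write both halves of the host DMA address with one
 * wide-bus write.
 */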
static void bnx2x_ilt_wr(struct bnx2x *bp, u32 index, dma_addr_t addr)
{
	int reg;

	if (CHIP_IS_E1(bp))
		reg = PXP2_REG_RQ_ONCHIP_AT + index*8;
	else
		reg = PXP2_REG_RQ_ONCHIP_AT_B0 + index*8;

	bnx2x_wb_wr(bp, reg, ONCHIP_ADDR1(addr), ONCHIP_ADDR2(addr));
}

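/* PF flavor of the generic IGU status block clear helper */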
static inline void bnx2x_igu_clear_sb(struct bnx2x *bp, u8 idu_sb_id)
{
	bnx2x_igu_clear_sb_gen(bp, idu_sb_id, true /*PF*/);
}

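/* Zero all ILT entries that belong to the given function */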
static inline void bnx2x_clear_func_ilt(struct bnx2x *bp, u32 func)
{
	u32 i, base = FUNC_ILT_BASE(func);
	for (i = base; i < base + ILT_PER_FUNC; i++)
		bnx2x_ilt_wr(bp, i, 0);
}

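/* Function-level HW init: program this function's ILT lines, enable it
 * in PGLUE/IGU, run the per-function init stage for all blocks and set
 * up the IGU producers.
 */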
static int bnx2x_init_hw_func(struct bnx2x *bp)
{
	int port = BP_PORT(bp);
	int func = BP_FUNC(bp);
	struct bnx2x_ilt *ilt = BP_ILT(bp);
	u16 cdu_ilt_start;
	u32 addr, val;
	int i;

	DP(BNX2X_MSG_MCP, "starting func init func %d\n", func);

	/* set MSI reconfigure capability */
	if (bp->common.int_block == INT_BLOCK_HC) {
		addr = (port ? HC_REG_CONFIG_1 : HC_REG_CONFIG_0);
		val = REG_RD(bp, addr);
		val |= HC_CONFIG_0_REG_MSI_ATTN_EN_0;
		REG_WR(bp, addr, val);
	}

	ilt = BP_ILT(bp);
	cdu_ilt_start = ilt->clients[ILT_CLIENT_CDU].start;

	for (i = 0; i < L2_ILT_LINES(bp); i++) {
		ilt->lines[cdu_ilt_start + i].page =
			bp->context.vcxt + (ILT_PAGE_CIDS * i);
		ilt->lines[cdu_ilt_start + i].page_mapping =
			bp->context.cxt_mapping + (CDU_ILT_PAGE_SZ * i);
		/* cdu ilt pages are allocated manually so there's no need to
		   set the size */
	}
	bnx2x_ilt_init_op(bp, INITOP_SET);
#ifdef BCM_CNIC
	bnx2x_src_init_t2(bp, bp->t2, bp->t2_mapping, SRC_CONN_NUM);

	/* T1 hash bits value determines the T1 number of entries */
	REG_WR(bp, SRC_REG_NUMBER_HASH_BITS0 + port*4, SRC_HASH_BITS);
#endif

#ifndef BCM_CNIC
	/* set NIC mode */
	REG_WR(bp, PRS_REG_NIC_MODE, 1);
#endif  /* BCM_CNIC */

	if (CHIP_IS_E2(bp)) {
		u32 pf_conf = IGU_PF_CONF_FUNC_EN;

		/* Turn on a single ISR mode in IGU if driver is going to use
		 * INT#x or MSI
		 */
		if (!(bp->flags & USING_MSIX_FLAG))
			pf_conf |= IGU_PF_CONF_SINGLE_ISR_EN;
		/*
		 * Timers workaround bug: function init part.
		 * Need to wait 20msec after initializing ILT,
		 * needed to make sure there are no requests in
		 * one of the PXP internal queues with "old" ILT addresses
		 */
		msleep(20);
		/*
		 * Master enable - Due to WB DMAE writes performed before this
		 * register is re-initialized as part of the regular function
		 * init
		 */
		REG_WR(bp, PGLUE_B_REG_INTERNAL_PFID_ENABLE_MASTER, 1);
		/* Enable the function in IGU */
		REG_WR(bp, IGU_REG_PF_CONFIGURATION, pf_conf);
	}

	bp->dmae_ready = 1;

	bnx2x_init_block(bp, PGLUE_B_BLOCK, FUNC0_STAGE + func);

	if (CHIP_IS_E2(bp))
		REG_WR(bp, PGLUE_B_REG_WAS_ERROR_PF_7_0_CLR, func);

	bnx2x_init_block(bp, MISC_BLOCK, FUNC0_STAGE + func);
	bnx2x_init_block(bp, TCM_BLOCK, FUNC0_STAGE + func);
	bnx2x_init_block(bp, UCM_BLOCK, FUNC0_STAGE + func);
	bnx2x_init_block(bp, CCM_BLOCK, FUNC0_STAGE + func);
	bnx2x_init_block(bp, XCM_BLOCK, FUNC0_STAGE + func);
	bnx2x_init_block(bp, TSEM_BLOCK, FUNC0_STAGE + func);
	bnx2x_init_block(bp, USEM_BLOCK, FUNC0_STAGE + func);
	bnx2x_init_block(bp, CSEM_BLOCK, FUNC0_STAGE + func);
	bnx2x_init_block(bp, XSEM_BLOCK, FUNC0_STAGE + func);

	if (CHIP_IS_E2(bp)) {
		REG_WR(bp, BAR_XSTRORM_INTMEM + XSTORM_PATH_ID_OFFSET,
		       BP_PATH(bp));
		REG_WR(bp, BAR_CSTRORM_INTMEM + CSTORM_PATH_ID_OFFSET,
		       BP_PATH(bp));
	}

	if (CHIP_MODE_IS_4_PORT(bp))
		bnx2x_init_block(bp, XSEM_4PORT_BLOCK, FUNC0_STAGE + func);

	if (CHIP_IS_E2(bp))
		REG_WR(bp, QM_REG_PF_EN, 1);

	bnx2x_init_block(bp, QM_BLOCK, FUNC0_STAGE + func);

	if (CHIP_MODE_IS_4_PORT(bp))
		bnx2x_init_block(bp, QM_4PORT_BLOCK, FUNC0_STAGE + func);

	bnx2x_init_block(bp, TIMERS_BLOCK, FUNC0_STAGE + func);
	bnx2x_init_block(bp, DQ_BLOCK, FUNC0_STAGE + func);
	bnx2x_init_block(bp, BRB1_BLOCK, FUNC0_STAGE + func);
	bnx2x_init_block(bp, PRS_BLOCK, FUNC0_STAGE + func);
	bnx2x_init_block(bp, TSDM_BLOCK, FUNC0_STAGE + func);
	bnx2x_init_block(bp, CSDM_BLOCK, FUNC0_STAGE + func);
	bnx2x_init_block(bp, USDM_BLOCK, FUNC0_STAGE + func);
	bnx2x_init_block(bp, XSDM_BLOCK, FUNC0_STAGE + func);
	bnx2x_init_block(bp, UPB_BLOCK, FUNC0_STAGE + func);
	bnx2x_init_block(bp, XPB_BLOCK, FUNC0_STAGE + func);
	bnx2x_init_block(bp, PBF_BLOCK, FUNC0_STAGE + func);
	if (CHIP_IS_E2(bp))
		REG_WR(bp, PBF_REG_DISABLE_PF, 0);

	bnx2x_init_block(bp, CDU_BLOCK, FUNC0_STAGE + func);

	bnx2x_init_block(bp, CFC_BLOCK, FUNC0_STAGE + func);

	if (CHIP_IS_E2(bp))
		REG_WR(bp, CFC_REG_WEAK_ENABLE_PF, 1);

	if (IS_MF(bp)) {
		REG_WR(bp, NIG_REG_LLH0_FUNC_EN + port*8, 1);
		REG_WR(bp, NIG_REG_LLH0_FUNC_VLAN_ID + port*8, bp->mf_ov);
	}

	bnx2x_init_block(bp, MISC_AEU_BLOCK, FUNC0_STAGE + func);

	/* HC init per function */
	if (bp->common.int_block == INT_BLOCK_HC) {
		if (CHIP_IS_E1H(bp)) {
			REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_12 + func*4, 0);

			REG_WR(bp, HC_REG_LEADING_EDGE_0 + port*8, 0);
			REG_WR(bp, HC_REG_TRAILING_EDGE_0 + port*8, 0);
		}
		bnx2x_init_block(bp, HC_BLOCK, FUNC0_STAGE + func);

	} else {
		int num_segs, sb_idx, prod_offset;

		REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_12 + func*4, 0);

		if (CHIP_IS_E2(bp)) {
			REG_WR(bp, IGU_REG_LEADING_EDGE_LATCH, 0);
			REG_WR(bp, IGU_REG_TRAILING_EDGE_LATCH, 0);
		}

		bnx2x_init_block(bp, IGU_BLOCK, FUNC0_STAGE + func);

		if (CHIP_IS_E2(bp)) {
			int dsb_idx = 0;
			/**
			 * Producer memory:
			 * E2 mode: address 0-135 match to the mapping memory;
			 * 136 - PF0 default prod; 137 - PF1 default prod;
			 * 138 - PF2 default prod; 139 - PF3 default prod;
			 * 140 - PF0 attn prod;    141 - PF1 attn prod;
			 * 142 - PF2 attn prod;    143 - PF3 attn prod;
			 * 144-147 reserved.
			 *
			 * E1.5 mode - In backward compatible mode;
			 * for non default SB; each even line in the memory
			 * holds the U producer and each odd line hold
			 * the C producer. The first 128 producers are for
			 * NDSB (PF0 - 0-31; PF1 - 32-63 and so on). The last 20
			 * producers are for the DSB for each PF.
			 * Each PF has five segments: (the order inside each
			 * segment is PF0; PF1; PF2; PF3) - 128-131 U prods;
			 * 132-135 C prods; 136-139 X prods; 140-143 T prods;
			 * 144-147 attn prods;
			 */
			/* non-default-status-blocks */
			num_segs = CHIP_INT_MODE_IS_BC(bp) ?
				IGU_BC_NDSB_NUM_SEGS : IGU_NORM_NDSB_NUM_SEGS;
			for (sb_idx = 0; sb_idx < bp->igu_sb_cnt; sb_idx++) {
				prod_offset = (bp->igu_base_sb + sb_idx) *
					num_segs;

				for (i = 0; i < num_segs; i++) {
					addr = IGU_REG_PROD_CONS_MEMORY +
							(prod_offset + i) * 4;
					REG_WR(bp, addr, 0);
				}
				/* send consumer update with value 0 */
				bnx2x_ack_sb(bp, bp->igu_base_sb + sb_idx,
					     USTORM_ID, 0, IGU_INT_NOP, 1);
				bnx2x_igu_clear_sb(bp,
						   bp->igu_base_sb + sb_idx);
			}

			/* default-status-blocks */
			num_segs = CHIP_INT_MODE_IS_BC(bp) ?
				IGU_BC_DSB_NUM_SEGS : IGU_NORM_DSB_NUM_SEGS;

			if (CHIP_MODE_IS_4_PORT(bp))
				dsb_idx = BP_FUNC(bp);
			else
				dsb_idx = BP_E1HVN(bp);

			prod_offset = (CHIP_INT_MODE_IS_BC(bp) ?
				       IGU_BC_BASE_DSB_PROD + dsb_idx :
				       IGU_NORM_BASE_DSB_PROD + dsb_idx);

			for (i = 0; i < (num_segs * E1HVN_MAX);
			     i += E1HVN_MAX) {
				addr = IGU_REG_PROD_CONS_MEMORY +
							(prod_offset + i)*4;
				REG_WR(bp, addr, 0);
			}
			/* send consumer update with 0 */
			if (CHIP_INT_MODE_IS_BC(bp)) {
				bnx2x_ack_sb(bp, bp->igu_dsb_id,
					     USTORM_ID, 0, IGU_INT_NOP, 1);
				bnx2x_ack_sb(bp, bp->igu_dsb_id,
					     CSTORM_ID, 0, IGU_INT_NOP, 1);
				bnx2x_ack_sb(bp, bp->igu_dsb_id,
					     XSTORM_ID, 0, IGU_INT_NOP, 1);
				bnx2x_ack_sb(bp, bp->igu_dsb_id,
					     TSTORM_ID, 0, IGU_INT_NOP, 1);
				bnx2x_ack_sb(bp, bp->igu_dsb_id,
					     ATTENTION_ID, 0, IGU_INT_NOP, 1);
			} else {
				bnx2x_ack_sb(bp, bp->igu_dsb_id,
					     USTORM_ID, 0, IGU_INT_NOP, 1);
				bnx2x_ack_sb(bp, bp->igu_dsb_id,
					     ATTENTION_ID, 0, IGU_INT_NOP, 1);
			}
			bnx2x_igu_clear_sb(bp, bp->igu_dsb_id);

			/* !!! these should become driver const once
			   rf-tool supports split-68 const */
			REG_WR(bp, IGU_REG_SB_INT_BEFORE_MASK_LSB, 0);
			REG_WR(bp, IGU_REG_SB_INT_BEFORE_MASK_MSB, 0);
			REG_WR(bp, IGU_REG_SB_MASK_LSB, 0);
			REG_WR(bp, IGU_REG_SB_MASK_MSB, 0);
			REG_WR(bp, IGU_REG_PBA_STATUS_LSB, 0);
			REG_WR(bp, IGU_REG_PBA_STATUS_MSB, 0);
		}
	}

	/* Reset PCIE errors for debug */
	REG_WR(bp, 0x2114, 0xffffffff);
	REG_WR(bp, 0x2120, 0xffffffff);

	bnx2x_init_block(bp, EMAC0_BLOCK, FUNC0_STAGE + func);
	bnx2x_init_block(bp, EMAC1_BLOCK, FUNC0_STAGE + func);
	bnx2x_init_block(bp, DBU_BLOCK, FUNC0_STAGE + func);
	bnx2x_init_block(bp, DBG_BLOCK, FUNC0_STAGE + func);
	bnx2x_init_block(bp, MCP_BLOCK, FUNC0_STAGE + func);
	bnx2x_init_block(bp, DMAE_BLOCK, FUNC0_STAGE + func);

	bnx2x_phy_probe(&bp->link_params);
	return 0;
}

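/* Run the HW init stages this function is responsible for; the MCP
 * load_code decides how far up the chain (common/port/function) we
 * start, hence the deliberate switch fall-through below.
 */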
int bnx2x_init_hw(struct bnx2x *bp, u32 load_code)
{
	int rc = 0;

	DP(BNX2X_MSG_MCP, "function %d load_code %x\n",
	   BP_ABS_FUNC(bp), load_code);

	bp->dmae_ready = 0;
	mutex_init(&bp->dmae_mutex);
	rc = bnx2x_gunzip_init(bp);
	if (rc)
		return rc;

	switch (load_code) {
	case FW_MSG_CODE_DRV_LOAD_COMMON:
	case FW_MSG_CODE_DRV_LOAD_COMMON_CHIP:
		rc = bnx2x_init_hw_common(bp, load_code);
		if (rc)
			goto init_hw_err;
		/* no break */

	case FW_MSG_CODE_DRV_LOAD_PORT:
		rc = bnx2x_init_hw_port(bp);
		if (rc)
			goto init_hw_err;
		/* no break */

	case FW_MSG_CODE_DRV_LOAD_FUNCTION:
		rc = bnx2x_init_hw_func(bp);
		if (rc)
			goto init_hw_err;
		break;

	default:
		BNX2X_ERR("Unknown load_code (0x%x) from MCP\n", load_code);
		break;
	}

	if (!BP_NOMCP(bp)) {
		int mb_idx = BP_FW_MB_IDX(bp);

		bp->fw_drv_pulse_wr_seq =
		   (SHMEM_RD(bp, func_mb[mb_idx].drv_pulse_mb) &
		    DRV_PULSE_SEQ_MASK);
		DP(BNX2X_MSG_MCP, "drv_pulse 0x%x\n", bp->fw_drv_pulse_wr_seq);
	}

init_hw_err:
	bnx2x_gunzip_end(bp);

	return rc;
}

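/* Free all host memory allocated in bnx2x_alloc_mem(): fastpath status
 * blocks and rings first, then slowpath, context, ILT, CNIC and EQ
 * areas.
 */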
void bnx2x_free_mem(struct bnx2x *bp)
{

#define BNX2X_PCI_FREE(x, y, size) \
	do { \
		if (x) { \
			dma_free_coherent(&bp->pdev->dev, size, (void *)x, y); \
			x = NULL; \
			y = 0; \
		} \
	} while (0)

#define BNX2X_FREE(x) \
	do { \
		if (x) { \
			kfree((void *)x); \
			x = NULL; \
		} \
	} while (0)

	int i;

	/* fastpath */
	/* Common */
	for_each_queue(bp, i) {
		/* status blocks */
		if (CHIP_IS_E2(bp))
			BNX2X_PCI_FREE(bnx2x_fp(bp, i, status_blk.e2_sb),
				       bnx2x_fp(bp, i, status_blk_mapping),
				       sizeof(struct host_hc_status_block_e2));
		else
			BNX2X_PCI_FREE(bnx2x_fp(bp, i, status_blk.e1x_sb),
				       bnx2x_fp(bp, i, status_blk_mapping),
				       sizeof(struct host_hc_status_block_e1x));
	}
	/* Rx */
	for_each_queue(bp, i) {

		/* fastpath rx rings: rx_buf rx_desc rx_comp */
		BNX2X_FREE(bnx2x_fp(bp, i, rx_buf_ring));
		BNX2X_PCI_FREE(bnx2x_fp(bp, i, rx_desc_ring),
			       bnx2x_fp(bp, i, rx_desc_mapping),
			       sizeof(struct eth_rx_bd) * NUM_RX_BD);

		BNX2X_PCI_FREE(bnx2x_fp(bp, i, rx_comp_ring),
			       bnx2x_fp(bp, i, rx_comp_mapping),
			       sizeof(struct eth_fast_path_rx_cqe) *
			       NUM_RCQ_BD);

		/* SGE ring */
		BNX2X_FREE(bnx2x_fp(bp, i, rx_page_ring));
		BNX2X_PCI_FREE(bnx2x_fp(bp, i, rx_sge_ring),
			       bnx2x_fp(bp, i, rx_sge_mapping),
			       BCM_PAGE_SIZE * NUM_RX_SGE_PAGES);
	}
	/* Tx */
	for_each_queue(bp, i) {

		/* fastpath tx rings: tx_buf tx_desc */
		BNX2X_FREE(bnx2x_fp(bp, i, tx_buf_ring));
		BNX2X_PCI_FREE(bnx2x_fp(bp, i, tx_desc_ring),
			       bnx2x_fp(bp, i, tx_desc_mapping),
			       sizeof(union eth_tx_bd_types) * NUM_TX_BD);
	}
	/* end of fastpath */

	BNX2X_PCI_FREE(bp->def_status_blk, bp->def_status_blk_mapping,
		       sizeof(struct host_sp_status_block));

	BNX2X_PCI_FREE(bp->slowpath, bp->slowpath_mapping,
		       sizeof(struct bnx2x_slowpath));

	BNX2X_PCI_FREE(bp->context.vcxt, bp->context.cxt_mapping,
		       bp->context.size);

	bnx2x_ilt_mem_op(bp, ILT_MEMOP_FREE);

	BNX2X_FREE(bp->ilt->lines);
#ifdef BCM_CNIC
	if (CHIP_IS_E2(bp))
		BNX2X_PCI_FREE(bp->cnic_sb.e2_sb, bp->cnic_sb_mapping,
			       sizeof(struct host_hc_status_block_e2));
	else
		BNX2X_PCI_FREE(bp->cnic_sb.e1x_sb, bp->cnic_sb_mapping,
			       sizeof(struct host_hc_status_block_e1x));
	BNX2X_PCI_FREE(bp->t2, bp->t2_mapping, SRC_T2_SZ);
#endif
	BNX2X_PCI_FREE(bp->spq, bp->spq_mapping, BCM_PAGE_SIZE);

	BNX2X_PCI_FREE(bp->eq_ring, bp->eq_mapping,
		       BCM_PAGE_SIZE * NUM_EQ_PAGES);

#undef BNX2X_PCI_FREE
#undef BNX2X_FREE
}

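/* Cache pointers to the index/running-index arrays of a fastpath status
 * block; the layout differs between E2 and E1x status blocks.
 */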
static inline void set_sb_shortcuts(struct bnx2x *bp, int index)
{
	union host_hc_status_block status_blk = bnx2x_fp(bp, index, status_blk);
	if (CHIP_IS_E2(bp)) {
		bnx2x_fp(bp, index, sb_index_values) =
			(__le16 *)status_blk.e2_sb->sb.index_values;
		bnx2x_fp(bp, index, sb_running_index) =
			(__le16 *)status_blk.e2_sb->sb.running_index;
	} else {
		bnx2x_fp(bp, index, sb_index_values) =
			(__le16 *)status_blk.e1x_sb->sb.index_values;
		bnx2x_fp(bp, index, sb_running_index) =
			(__le16 *)status_blk.e1x_sb->sb.running_index;
	}
}

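/* Allocate all host memory needed to run the chip; any failure unwinds
 * through alloc_mem_err and frees whatever was already allocated.
 */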
int bnx2x_alloc_mem(struct bnx2x *bp)
{

#define BNX2X_PCI_ALLOC(x, y, size) \
	do { \
		x = dma_alloc_coherent(&bp->pdev->dev, size, y, GFP_KERNEL); \
		if (x == NULL) \
			goto alloc_mem_err; \
		memset(x, 0, size); \
	} while (0)

#define BNX2X_ALLOC(x, size) \
	do { \
		x = kzalloc(size, GFP_KERNEL); \
		if (x == NULL) \
			goto alloc_mem_err; \
	} while (0)

	int i;

	/* fastpath */
	/* Common */
	for_each_queue(bp, i) {
		union host_hc_status_block *sb = &bnx2x_fp(bp, i, status_blk);
		bnx2x_fp(bp, i, bp) = bp;
		/* status blocks */
		if (CHIP_IS_E2(bp))
			BNX2X_PCI_ALLOC(sb->e2_sb,
				&bnx2x_fp(bp, i, status_blk_mapping),
				sizeof(struct host_hc_status_block_e2));
		else
			BNX2X_PCI_ALLOC(sb->e1x_sb,
				&bnx2x_fp(bp, i, status_blk_mapping),
				sizeof(struct host_hc_status_block_e1x));

		set_sb_shortcuts(bp, i);
	}
	/* Rx */
	for_each_queue(bp, i) {

		/* fastpath rx rings: rx_buf rx_desc rx_comp */
		BNX2X_ALLOC(bnx2x_fp(bp, i, rx_buf_ring),
			    sizeof(struct sw_rx_bd) * NUM_RX_BD);
		BNX2X_PCI_ALLOC(bnx2x_fp(bp, i, rx_desc_ring),
				&bnx2x_fp(bp, i, rx_desc_mapping),
				sizeof(struct eth_rx_bd) * NUM_RX_BD);

		BNX2X_PCI_ALLOC(bnx2x_fp(bp, i, rx_comp_ring),
				&bnx2x_fp(bp, i, rx_comp_mapping),
				sizeof(struct eth_fast_path_rx_cqe) *
				NUM_RCQ_BD);

		/* SGE ring */
		BNX2X_ALLOC(bnx2x_fp(bp, i, rx_page_ring),
			    sizeof(struct sw_rx_page) * NUM_RX_SGE);
		BNX2X_PCI_ALLOC(bnx2x_fp(bp, i, rx_sge_ring),
				&bnx2x_fp(bp, i, rx_sge_mapping),
				BCM_PAGE_SIZE * NUM_RX_SGE_PAGES);
	}
	/* Tx */
	for_each_queue(bp, i) {

		/* fastpath tx rings: tx_buf tx_desc */
		BNX2X_ALLOC(bnx2x_fp(bp, i, tx_buf_ring),
			    sizeof(struct sw_tx_bd) * NUM_TX_BD);
		BNX2X_PCI_ALLOC(bnx2x_fp(bp, i, tx_desc_ring),
				&bnx2x_fp(bp, i, tx_desc_mapping),
				sizeof(union eth_tx_bd_types) * NUM_TX_BD);
	}
	/* end of fastpath */

#ifdef BCM_CNIC
	if (CHIP_IS_E2(bp))
		BNX2X_PCI_ALLOC(bp->cnic_sb.e2_sb, &bp->cnic_sb_mapping,
				sizeof(struct host_hc_status_block_e2));
	else
		BNX2X_PCI_ALLOC(bp->cnic_sb.e1x_sb, &bp->cnic_sb_mapping,
				sizeof(struct host_hc_status_block_e1x));

	/* allocate searcher T2 table */
	BNX2X_PCI_ALLOC(bp->t2, &bp->t2_mapping, SRC_T2_SZ);
#endif

	BNX2X_PCI_ALLOC(bp->def_status_blk, &bp->def_status_blk_mapping,
			sizeof(struct host_sp_status_block));

	BNX2X_PCI_ALLOC(bp->slowpath, &bp->slowpath_mapping,
			sizeof(struct bnx2x_slowpath));

	bp->context.size = sizeof(union cdu_context) * bp->l2_cid_count;
	BNX2X_PCI_ALLOC(bp->context.vcxt, &bp->context.cxt_mapping,
			bp->context.size);

	BNX2X_ALLOC(bp->ilt->lines, sizeof(struct ilt_line) * ILT_MAX_LINES);

	if (bnx2x_ilt_mem_op(bp, ILT_MEMOP_ALLOC))
		goto alloc_mem_err;

	/* Slow path ring */
	BNX2X_PCI_ALLOC(bp->spq, &bp->spq_mapping, BCM_PAGE_SIZE);

	/* EQ */
	BNX2X_PCI_ALLOC(bp->eq_ring, &bp->eq_mapping,
			BCM_PAGE_SIZE * NUM_EQ_PAGES);
	return 0;

alloc_mem_err:
	bnx2x_free_mem(bp);
	return -ENOMEM;

#undef BNX2X_PCI_ALLOC
#undef BNX2X_ALLOC
}

/*
 * Init service functions
 */
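/* Post the FUNCTION_START ramrod and wait for the FW to move the
 * function state to BNX2X_STATE_FUNC_STARTED.
 */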
int bnx2x_func_start(struct bnx2x *bp)
{
	bnx2x_sp_post(bp, RAMROD_CMD_ID_COMMON_FUNCTION_START, 0, 0, 0, 1);

	/* Wait for completion */
	return bnx2x_wait_ramrod(bp, BNX2X_STATE_FUNC_STARTED, 0, &(bp->state),
				 WAIT_RAMROD_COMMON);
}

int bnx2x_func_stop(struct bnx2x *bp)
{
	bnx2x_sp_post(bp, RAMROD_CMD_ID_COMMON_FUNCTION_STOP, 0, 0, 0, 1);

	/* Wait for completion */
	return bnx2x_wait_ramrod(bp, BNX2X_STATE_CLOSING_WAIT4_UNLOAD,
				 0, &(bp->state), WAIT_RAMROD_COMMON);
}

/**
 * Sets a MAC in a CAM for a few L2 Clients for E1x chips
 *
 * @param bp driver descriptor
 * @param set set or clear an entry (1 or 0)
 * @param mac pointer to a buffer containing a MAC
 * @param cl_bit_vec bit vector of clients to register a MAC for
 * @param cam_offset offset in a CAM to use
 * @param is_bcast is the set MAC a broadcast address (for E1 only)
 */
static void bnx2x_set_mac_addr_gen(struct bnx2x *bp, int set, u8 *mac,
				   u32 cl_bit_vec, u8 cam_offset,
				   u8 is_bcast)
{
	struct mac_configuration_cmd *config =
		(struct mac_configuration_cmd *)bnx2x_sp(bp, mac_config);
	int ramrod_flags = WAIT_RAMROD_COMMON;

	bp->set_mac_pending = 1;
	smp_wmb();

	config->hdr.length = 1;
	config->hdr.offset = cam_offset;
	config->hdr.client_id = 0xff;
	config->hdr.reserved1 = 0;

	/* primary MAC */
	config->config_table[0].msb_mac_addr =
					swab16(*(u16 *)&mac[0]);
	config->config_table[0].middle_mac_addr =
					swab16(*(u16 *)&mac[2]);
	config->config_table[0].lsb_mac_addr =
					swab16(*(u16 *)&mac[4]);
	config->config_table[0].clients_bit_vector =
					cpu_to_le32(cl_bit_vec);
	config->config_table[0].vlan_id = 0;
	config->config_table[0].pf_id = BP_FUNC(bp);
	if (set)
		SET_FLAG(config->config_table[0].flags,
			 MAC_CONFIGURATION_ENTRY_ACTION_TYPE,
			 T_ETH_MAC_COMMAND_SET);
	else
		SET_FLAG(config->config_table[0].flags,
			 MAC_CONFIGURATION_ENTRY_ACTION_TYPE,
			 T_ETH_MAC_COMMAND_INVALIDATE);

	if (is_bcast)
		SET_FLAG(config->config_table[0].flags,
			 MAC_CONFIGURATION_ENTRY_BROADCAST, 1);

	DP(NETIF_MSG_IFUP, "%s MAC (%04x:%04x:%04x) PF_ID %d CLID mask %d\n",
	   (set ? "setting" : "clearing"),
	   config->config_table[0].msb_mac_addr,
	   config->config_table[0].middle_mac_addr,
	   config->config_table[0].lsb_mac_addr, BP_FUNC(bp), cl_bit_vec);

	bnx2x_sp_post(bp, RAMROD_CMD_ID_COMMON_SET_MAC, 0,
		      U64_HI(bnx2x_sp_mapping(bp, mac_config)),
		      U64_LO(bnx2x_sp_mapping(bp, mac_config)), 1);

	/* Wait for a completion */
	bnx2x_wait_ramrod(bp, 0, 0, &bp->set_mac_pending, ramrod_flags);
}

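/* Spin (optionally polling the EQ/RCQ for the completion ourselves)
 * until *state_p reaches the requested ramrod state; returns -EBUSY on
 * timeout and -EIO if the driver panicked in the meantime.
 */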
int bnx2x_wait_ramrod(struct bnx2x *bp, int state, int idx,
		      int *state_p, int flags)
{
	/* can take a while if any port is running */
	int cnt = 5000;
	u8 poll = flags & WAIT_RAMROD_POLL;
	u8 common = flags & WAIT_RAMROD_COMMON;

	DP(NETIF_MSG_IFUP, "%s for state to become %x on IDX [%d]\n",
	   poll ? "polling" : "waiting", state, idx);

	might_sleep();
	while (cnt--) {
		if (poll) {
			if (common)
				bnx2x_eq_int(bp);
			else {
				bnx2x_rx_int(bp->fp, 10);
				/* if index is different from 0
				 * the reply for some commands will
				 * be on the non default queue
				 */
				if (idx)
					bnx2x_rx_int(&bp->fp[idx], 10);
			}
		}

		mb(); /* state is changed by bnx2x_sp_event() */
		if (*state_p == state) {
#ifdef BNX2X_STOP_ON_ERROR
			DP(NETIF_MSG_IFUP, "exit (cnt %d)\n", 5000 - cnt);
#endif
			return 0;
		}

		msleep(1);

		if (bp->panic)
			return -EIO;
	}

	/* timeout! */
	BNX2X_ERR("timeout %s for state %x on IDX [%d]\n",
		  poll ? "polling" : "waiting", state, idx);
#ifdef BNX2X_STOP_ON_ERROR
	bnx2x_panic();
#endif

	return -EBUSY;
}

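/* Map a relative CAM line to this function's absolute CAM offset; the
 * stride depends on chip generation and port mode.
 */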
u8 bnx2x_e1h_cam_offset(struct bnx2x *bp, u8 rel_offset)
{
	if (CHIP_IS_E1H(bp))
		return E1H_FUNC_MAX * rel_offset + BP_FUNC(bp);
	else if (CHIP_MODE_IS_4_PORT(bp))
		return BP_FUNC(bp) * 32 + rel_offset;
	else
		return BP_VN(bp) * 32 + rel_offset;
}

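/* Set or clear the primary MAC of this function; on E1 the broadcast
 * MAC lives in the following CAM entry and is programmed as well.
 */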
void bnx2x_set_eth_mac(struct bnx2x *bp, int set)
{
	u8 cam_offset = (CHIP_IS_E1(bp) ? (BP_PORT(bp) ? 32 : 0) :
			 bnx2x_e1h_cam_offset(bp, CAM_ETH_LINE));

	/* networking MAC */
	bnx2x_set_mac_addr_gen(bp, set, bp->dev->dev_addr,
			       (1 << bp->fp->cl_id), cam_offset, 0);

	if (CHIP_IS_E1(bp)) {
		/* broadcast MAC */
		u8 bcast[ETH_ALEN] = { 0xff, 0xff, 0xff, 0xff, 0xff, 0xff };
		bnx2x_set_mac_addr_gen(bp, set, bcast, 0, cam_offset + 1, 1);
	}
}

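/* Program the net_device multicast list into the E1 CAM, invalidating
 * any leftover entries from a previously longer list.
 */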
static void bnx2x_set_e1_mc_list(struct bnx2x *bp, u8 offset)
{
	int i = 0, old;
	struct net_device *dev = bp->dev;
	struct netdev_hw_addr *ha;
	struct mac_configuration_cmd *config_cmd = bnx2x_sp(bp, mcast_config);
	dma_addr_t config_cmd_map = bnx2x_sp_mapping(bp, mcast_config);

	netdev_for_each_mc_addr(ha, dev) {
		/* copy mac */
		config_cmd->config_table[i].msb_mac_addr =
			swab16(*(u16 *)&bnx2x_mc_addr(ha)[0]);
		config_cmd->config_table[i].middle_mac_addr =
			swab16(*(u16 *)&bnx2x_mc_addr(ha)[2]);
		config_cmd->config_table[i].lsb_mac_addr =
			swab16(*(u16 *)&bnx2x_mc_addr(ha)[4]);

		config_cmd->config_table[i].vlan_id = 0;
		config_cmd->config_table[i].pf_id = BP_FUNC(bp);
		config_cmd->config_table[i].clients_bit_vector =
			cpu_to_le32(1 << BP_L_ID(bp));

		SET_FLAG(config_cmd->config_table[i].flags,
			 MAC_CONFIGURATION_ENTRY_ACTION_TYPE,
			 T_ETH_MAC_COMMAND_SET);

		DP(NETIF_MSG_IFUP,
		   "setting MCAST[%d] (%04x:%04x:%04x)\n", i,
		   config_cmd->config_table[i].msb_mac_addr,
		   config_cmd->config_table[i].middle_mac_addr,
		   config_cmd->config_table[i].lsb_mac_addr);
		i++;
	}
	old = config_cmd->hdr.length;
	if (old > i) {
		for (; i < old; i++) {
			if (CAM_IS_INVALID(config_cmd->config_table[i])) {
				/* already invalidated */
				break;
			}
			/* invalidate */
			SET_FLAG(config_cmd->config_table[i].flags,
				 MAC_CONFIGURATION_ENTRY_ACTION_TYPE,
				 T_ETH_MAC_COMMAND_INVALIDATE);
		}
	}

	config_cmd->hdr.length = i;
	config_cmd->hdr.offset = offset;
	config_cmd->hdr.client_id = 0xff;
	config_cmd->hdr.reserved1 = 0;

	bp->set_mac_pending = 1;
	smp_wmb();

	bnx2x_sp_post(bp, RAMROD_CMD_ID_COMMON_SET_MAC, 0,
		      U64_HI(config_cmd_map), U64_LO(config_cmd_map), 1);
}

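/* Invalidate the entire E1 multicast CAM list and wait for the SET_MAC
 * ramrod to complete.
 */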
static void bnx2x_invlidate_e1_mc_list(struct bnx2x *bp)
{
	int i;
	struct mac_configuration_cmd *config_cmd = bnx2x_sp(bp, mcast_config);
	dma_addr_t config_cmd_map = bnx2x_sp_mapping(bp, mcast_config);
	int ramrod_flags = WAIT_RAMROD_COMMON;

	bp->set_mac_pending = 1;
	smp_wmb();

	for (i = 0; i < config_cmd->hdr.length; i++)
		SET_FLAG(config_cmd->config_table[i].flags,
			 MAC_CONFIGURATION_ENTRY_ACTION_TYPE,
			 T_ETH_MAC_COMMAND_INVALIDATE);

	bnx2x_sp_post(bp, RAMROD_CMD_ID_COMMON_SET_MAC, 0,
		      U64_HI(config_cmd_map), U64_LO(config_cmd_map), 1);

	/* Wait for a completion */
	bnx2x_wait_ramrod(bp, 0, 0, &bp->set_mac_pending,
			  ramrod_flags);
}

#ifdef BCM_CNIC
/**
 * Set iSCSI MAC(s) at the next entries in the CAM after the ETH
 * MAC(s). This function will wait until the ramrod completion
 * returns.
 *
 * @param bp driver handle
 * @param set set or clear the CAM entry
 *
 * @return 0 if success, -ENODEV if ramrod doesn't return.
 */
int bnx2x_set_iscsi_eth_mac_addr(struct bnx2x *bp, int set)
{
	u8 cam_offset = (CHIP_IS_E1(bp) ? ((BP_PORT(bp) ? 32 : 0) + 2) :
			 bnx2x_e1h_cam_offset(bp, CAM_ISCSI_ETH_LINE));
	u32 iscsi_l2_cl_id = BNX2X_ISCSI_ETH_CL_ID;
	u32 cl_bit_vec = (1 << iscsi_l2_cl_id);

	/* Send a SET_MAC ramrod */
	bnx2x_set_mac_addr_gen(bp, set, bp->iscsi_mac, cl_bit_vec,
			       cam_offset, 0);
	return 0;
}
#endif

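/* Translate the driver's client init parameters into the FW
 * client_init_ramrod_data layout: general, Rx, Tx and flow control
 * sections.
 */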
static void bnx2x_fill_cl_init_data(struct bnx2x *bp,
				    struct bnx2x_client_init_params *params,
				    u8 activate,
				    struct client_init_ramrod_data *data)
{
	/* Clear the buffer */
	memset(data, 0, sizeof(*data));

	/* general */
	data->general.client_id = params->rxq_params.cl_id;
	data->general.statistics_counter_id = params->rxq_params.stat_id;
	data->general.statistics_en_flg =
		(params->rxq_params.flags & QUEUE_FLG_STATS) ? 1 : 0;
	data->general.activate_flg = activate;
	data->general.sp_client_id = params->rxq_params.spcl_id;

	/* Rx data */
	data->rx.tpa_en_flg =
		(params->rxq_params.flags & QUEUE_FLG_TPA) ? 1 : 0;
	data->rx.vmqueue_mode_en_flg = 0;
	data->rx.cache_line_alignment_log_size =
		params->rxq_params.cache_line_log;
	data->rx.enable_dynamic_hc =
		(params->rxq_params.flags & QUEUE_FLG_DHC) ? 1 : 0;
	data->rx.max_sges_for_packet = params->rxq_params.max_sges_pkt;
	data->rx.client_qzone_id = params->rxq_params.cl_qzone_id;
	data->rx.max_agg_size = params->rxq_params.tpa_agg_sz;

	/* We don't set drop flags */
	data->rx.drop_ip_cs_err_flg = 0;
	data->rx.drop_tcp_cs_err_flg = 0;
	data->rx.drop_ttl0_flg = 0;
	data->rx.drop_udp_cs_err_flg = 0;

	data->rx.inner_vlan_removal_enable_flg =
		(params->rxq_params.flags & QUEUE_FLG_VLAN) ? 1 : 0;
	data->rx.outer_vlan_removal_enable_flg =
		(params->rxq_params.flags & QUEUE_FLG_OV) ? 1 : 0;
	data->rx.status_block_id = params->rxq_params.fw_sb_id;
	data->rx.rx_sb_index_number = params->rxq_params.sb_cq_index;
	data->rx.bd_buff_size = cpu_to_le16(params->rxq_params.buf_sz);
	data->rx.sge_buff_size = cpu_to_le16(params->rxq_params.sge_buf_sz);
	data->rx.mtu = cpu_to_le16(params->rxq_params.mtu);
	data->rx.bd_page_base.lo =
		cpu_to_le32(U64_LO(params->rxq_params.dscr_map));
	data->rx.bd_page_base.hi =
		cpu_to_le32(U64_HI(params->rxq_params.dscr_map));
	data->rx.sge_page_base.lo =
		cpu_to_le32(U64_LO(params->rxq_params.sge_map));
	data->rx.sge_page_base.hi =
		cpu_to_le32(U64_HI(params->rxq_params.sge_map));
	data->rx.cqe_page_base.lo =
		cpu_to_le32(U64_LO(params->rxq_params.rcq_map));
	data->rx.cqe_page_base.hi =
		cpu_to_le32(U64_HI(params->rxq_params.rcq_map));
	data->rx.is_leading_rss =
		(params->ramrod_params.flags & CLIENT_IS_LEADING_RSS) ? 1 : 0;
	data->rx.is_approx_mcast = data->rx.is_leading_rss;

	/* Tx data */
	data->tx.enforce_security_flg = 0; /* VF specific */
	data->tx.tx_status_block_id = params->txq_params.fw_sb_id;
	data->tx.tx_sb_index_number = params->txq_params.sb_cq_index;
	data->tx.mtu = 0; /* VF specific */
	data->tx.tx_bd_page_base.lo =
		cpu_to_le32(U64_LO(params->txq_params.dscr_map));
	data->tx.tx_bd_page_base.hi =
		cpu_to_le32(U64_HI(params->txq_params.dscr_map));

	/* flow control data */
	data->fc.cqe_pause_thr_low = cpu_to_le16(params->pause.rcq_th_lo);
	data->fc.cqe_pause_thr_high = cpu_to_le16(params->pause.rcq_th_hi);
	data->fc.bd_pause_thr_low = cpu_to_le16(params->pause.bd_th_lo);
	data->fc.bd_pause_thr_high = cpu_to_le16(params->pause.bd_th_hi);
	data->fc.sge_pause_thr_low = cpu_to_le16(params->pause.sge_th_lo);
	data->fc.sge_pause_thr_high = cpu_to_le16(params->pause.sge_th_hi);
	data->fc.rx_cos_mask = cpu_to_le16(params->pause.pri_map);

	data->fc.safc_group_num = params->txq_params.cos;
	data->fc.safc_group_en_flg =
		(params->txq_params.flags & QUEUE_FLG_COS) ? 1 : 0;
	data->fc.traffic_type = LLFC_TRAFFIC_TYPE_NW;
}

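/* Stamp the CDU validation values into a connection context so the HW
 * recognizes it as a valid Ethernet connection.
 */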
static inline void bnx2x_set_ctx_validation(struct eth_context *cxt, u32 cid)
{
	/* ustorm cxt validation */
	cxt->ustorm_ag_context.cdu_usage =
		CDU_RSRVD_VALUE_TYPE_A(cid, CDU_REGION_NUMBER_UCM_AG,
				       ETH_CONNECTION_TYPE);
	/* xcontext validation */
	cxt->xstorm_ag_context.cdu_reserved =
		CDU_RSRVD_VALUE_TYPE_A(cid, CDU_REGION_NUMBER_XCM_AG,
				       ETH_CONNECTION_TYPE);
}

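/* Configure HC coalescing and context validation for a new client, fill
 * the ramrod data, post CLIENT_SETUP and wait for the expected state.
 */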
int bnx2x_setup_fw_client(struct bnx2x *bp,
			  struct bnx2x_client_init_params *params,
			  u8 activate,
			  struct client_init_ramrod_data *data,
			  dma_addr_t data_mapping)
{
	u16 hc_usec;
	int ramrod = RAMROD_CMD_ID_ETH_CLIENT_SETUP;
	int ramrod_flags = 0, rc;

	/* HC and context validation values */
	hc_usec = params->txq_params.hc_rate ?
		1000000 / params->txq_params.hc_rate : 0;
	bnx2x_update_coalesce_sb_index(bp,
			params->txq_params.fw_sb_id,
			params->txq_params.sb_cq_index,
			!(params->txq_params.flags & QUEUE_FLG_HC),
			hc_usec);

	*(params->ramrod_params.pstate) = BNX2X_FP_STATE_OPENING;

	hc_usec = params->rxq_params.hc_rate ?
		1000000 / params->rxq_params.hc_rate : 0;
	bnx2x_update_coalesce_sb_index(bp,
			params->rxq_params.fw_sb_id,
			params->rxq_params.sb_cq_index,
			!(params->rxq_params.flags & QUEUE_FLG_HC),
			hc_usec);

	bnx2x_set_ctx_validation(params->rxq_params.cxt,
				 params->rxq_params.cid);

	/* zero stats */
	if (params->txq_params.flags & QUEUE_FLG_STATS)
		storm_memset_xstats_zero(bp, BP_PORT(bp),
					 params->txq_params.stat_id);

	if (params->rxq_params.flags & QUEUE_FLG_STATS) {
		storm_memset_ustats_zero(bp, BP_PORT(bp),
					 params->rxq_params.stat_id);
		storm_memset_tstats_zero(bp, BP_PORT(bp),
					 params->rxq_params.stat_id);
	}

	/* Fill the ramrod data */
	bnx2x_fill_cl_init_data(bp, params, activate, data);

	/* SETUP ramrod.
	 *
	 * bnx2x_sp_post() takes a spin_lock thus no other explicit memory
	 * barrier except from mmiowb() is needed to impose a
	 * proper ordering of memory operations.
	 */
	mmiowb();

	bnx2x_sp_post(bp, ramrod, params->ramrod_params.cid,
		      U64_HI(data_mapping), U64_LO(data_mapping), 0);

	/* Wait for completion */
	rc = bnx2x_wait_ramrod(bp, params->ramrod_params.state,
			       params->ramrod_params.index,
			       params->ramrod_params.pstate,
			       ramrod_flags);
	return rc;
}

/**
 * Configure interrupt mode according to current configuration.
 * In case of MSI-X it will also try to enable MSI-X.
 *
 * @param bp
 *
 * @return int
 */
static int __devinit bnx2x_set_int_mode(struct bnx2x *bp)
{
	int rc = 0;

	switch (bp->int_mode) {
	case INT_MODE_MSI:
		bnx2x_enable_msi(bp);
		/* falling through... */
	case INT_MODE_INTx:
		bp->num_queues = 1;
		DP(NETIF_MSG_IFUP, "set number of queues to 1\n");
		break;
	default:
		/* Set number of queues according to bp->multi_mode value */
		bnx2x_set_num_queues(bp);

		DP(NETIF_MSG_IFUP, "set number of queues to %d\n",
		   bp->num_queues);

		/* if we can't use MSI-X we only need one fp,
		 * so try to enable MSI-X with the requested number of fp's
		 * and fallback to MSI or legacy INTx with one fp
		 */
		rc = bnx2x_enable_msix(bp);
		if (rc) {
			/* failed to enable MSI-X */
			if (bp->multi_mode)
				DP(NETIF_MSG_IFUP,
				   "Multi requested but failed to "
				   "enable MSI-X (%d), "
				   "set number of queues to %d\n",
				   bp->num_queues,
				   1);
			bp->num_queues = 1;

			if (!(bp->flags & DISABLE_MSI_FLAG))
				bnx2x_enable_msi(bp);
		}

		break;
	}

	return rc;
}

/* must be called prior to any HW initializations */
static inline u16 bnx2x_cid_ilt_lines(struct bnx2x *bp)
{
	return L2_ILT_LINES(bp);
}

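/* Describe the per-function ILT layout: line ranges, page sizes and
 * skip flags for the CDU, QM, SRC and TM clients, to be consumed by the
 * ILT init ops.
 */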
void bnx2x_ilt_set_info(struct bnx2x *bp)
{
	struct ilt_client_info *ilt_client;
	struct bnx2x_ilt *ilt = BP_ILT(bp);
	u16 line = 0;

	ilt->start_line = FUNC_ILT_BASE(BP_FUNC(bp));
	DP(BNX2X_MSG_SP, "ilt starts at line %d\n", ilt->start_line);

	/* CDU */
	ilt_client = &ilt->clients[ILT_CLIENT_CDU];
	ilt_client->client_num = ILT_CLIENT_CDU;
	ilt_client->page_size = CDU_ILT_PAGE_SZ;
	ilt_client->flags = ILT_CLIENT_SKIP_MEM;
	ilt_client->start = line;
	line += L2_ILT_LINES(bp);
#ifdef BCM_CNIC
	line += CNIC_ILT_LINES;
#endif
	ilt_client->end = line - 1;

	DP(BNX2X_MSG_SP, "ilt client[CDU]: start %d, end %d, psz 0x%x, "
			 "flags 0x%x, hw psz %d\n",
	   ilt_client->start,
	   ilt_client->end,
	   ilt_client->page_size,
	   ilt_client->flags,
	   ilog2(ilt_client->page_size >> 12));

	/* QM */
	if (QM_INIT(bp->qm_cid_count)) {
		ilt_client = &ilt->clients[ILT_CLIENT_QM];
		ilt_client->client_num = ILT_CLIENT_QM;
		ilt_client->page_size = QM_ILT_PAGE_SZ;
		ilt_client->flags = 0;
		ilt_client->start = line;

		/* 4 bytes for each cid */
		line += DIV_ROUND_UP(bp->qm_cid_count * QM_QUEUES_PER_FUNC * 4,
				     QM_ILT_PAGE_SZ);

		ilt_client->end = line - 1;

		DP(BNX2X_MSG_SP, "ilt client[QM]: start %d, end %d, psz 0x%x, "
				 "flags 0x%x, hw psz %d\n",
		   ilt_client->start,
		   ilt_client->end,
		   ilt_client->page_size,
		   ilt_client->flags,
		   ilog2(ilt_client->page_size >> 12));

	}
	/* SRC */
	ilt_client = &ilt->clients[ILT_CLIENT_SRC];
#ifdef BCM_CNIC
	ilt_client->client_num = ILT_CLIENT_SRC;
	ilt_client->page_size = SRC_ILT_PAGE_SZ;
	ilt_client->flags = 0;
	ilt_client->start = line;
	line += SRC_ILT_LINES;
	ilt_client->end = line - 1;

	DP(BNX2X_MSG_SP, "ilt client[SRC]: start %d, end %d, psz 0x%x, "
			 "flags 0x%x, hw psz %d\n",
	   ilt_client->start,
	   ilt_client->end,
	   ilt_client->page_size,
	   ilt_client->flags,
	   ilog2(ilt_client->page_size >> 12));

#else
	ilt_client->flags = (ILT_CLIENT_SKIP_INIT | ILT_CLIENT_SKIP_MEM);
#endif

	/* TM */
	ilt_client = &ilt->clients[ILT_CLIENT_TM];
#ifdef BCM_CNIC
	ilt_client->client_num = ILT_CLIENT_TM;
	ilt_client->page_size = TM_ILT_PAGE_SZ;
	ilt_client->flags = 0;
	ilt_client->start = line;
	line += TM_ILT_LINES;
	ilt_client->end = line - 1;

	DP(BNX2X_MSG_SP, "ilt client[TM]: start %d, end %d, psz 0x%x, "
			 "flags 0x%x, hw psz %d\n",
	   ilt_client->start,
	   ilt_client->end,
	   ilt_client->page_size,
	   ilt_client->flags,
	   ilog2(ilt_client->page_size >> 12));

#else
	ilt_client->flags = (ILT_CLIENT_SKIP_INIT | ILT_CLIENT_SKIP_MEM);
#endif
}

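/* Enable the queue's IGU status block, build client init parameters and
 * delegate to bnx2x_setup_fw_client().
 */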
int bnx2x_setup_client(struct bnx2x *bp, struct bnx2x_fastpath *fp,
		       int is_leading)
{
	struct bnx2x_client_init_params params = { {0} };
	int rc;

	bnx2x_ack_sb(bp, fp->igu_sb_id, USTORM_ID, 0,
		     IGU_INT_ENABLE, 0);

	params.ramrod_params.pstate = &fp->state;
	params.ramrod_params.state = BNX2X_FP_STATE_OPEN;
	params.ramrod_params.index = fp->index;
	params.ramrod_params.cid = fp->cid;

	if (is_leading)
		params.ramrod_params.flags |= CLIENT_IS_LEADING_RSS;

	bnx2x_pf_rx_cl_prep(bp, fp, &params.pause, &params.rxq_params);

	bnx2x_pf_tx_cl_prep(bp, fp, &params.txq_params);

	rc = bnx2x_setup_fw_client(bp, &params, 1,
				   bnx2x_sp(bp, client_init_data),
				   bnx2x_sp_mapping(bp, client_init_data));
	return rc;
}

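/* Tear a FW client down in three ramrod steps - HALT, TERMINATE and
 * CFC_DEL - waiting for each state transition in turn.
 */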
int bnx2x_stop_fw_client(struct bnx2x *bp, struct bnx2x_client_ramrod_params *p)
{
	int rc;

	int poll_flag = p->poll ? WAIT_RAMROD_POLL : 0;

	/* halt the connection */
	*p->pstate = BNX2X_FP_STATE_HALTING;
	bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_HALT, p->cid, 0,
						  p->cl_id, 0);

	/* Wait for completion */
	rc = bnx2x_wait_ramrod(bp, BNX2X_FP_STATE_HALTED, p->index,
			       p->pstate, poll_flag);
	if (rc) /* timeout */
		return rc;

	*p->pstate = BNX2X_FP_STATE_TERMINATING;
	bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_TERMINATE, p->cid, 0,
						       p->cl_id, 0);
	/* Wait for completion */
	rc = bnx2x_wait_ramrod(bp, BNX2X_FP_STATE_TERMINATED, p->index,
			       p->pstate, poll_flag);
	if (rc) /* timeout */
		return rc;

	/* delete cfc entry */
	bnx2x_sp_post(bp, RAMROD_CMD_ID_COMMON_CFC_DEL, p->cid, 0, 0, 1);

	/* Wait for completion */
	rc = bnx2x_wait_ramrod(bp, BNX2X_FP_STATE_CLOSED, p->index,
			       p->pstate, WAIT_RAMROD_COMMON);
	return rc;
}

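/* Convenience wrapper: stop the FW client behind fastpath @index */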
static int bnx2x_stop_client(struct bnx2x *bp, int index)
{
	struct bnx2x_client_ramrod_params client_stop = {0};
	struct bnx2x_fastpath *fp = &bp->fp[index];

	client_stop.index = index;
	client_stop.cid = fp->cid;
	client_stop.cl_id = fp->cl_id;
	client_stop.pstate = &(fp->state);
	client_stop.poll = 0;

	return bnx2x_stop_fw_client(bp, &client_stop);
}

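/* Undo the function-level init: disable the function in the FW and
 * HC/IGU, stop the CNIC timer scan, clear its ILT lines and finally
 * disable the PF on E2.
 */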
Eilon Greenstein34f80b02008-06-23 20:33:01 -07006645static void bnx2x_reset_func(struct bnx2x *bp)
6646{
6647 int port = BP_PORT(bp);
6648 int func = BP_FUNC(bp);
Dmitry Kravkovf2e08992010-10-06 03:28:26 +00006649 int i;
Dmitry Kravkov523224a2010-10-06 03:23:26 +00006650 int pfunc_offset_fp = offsetof(struct hc_sb_data, p_func) +
Dmitry Kravkovf2e08992010-10-06 03:28:26 +00006651 (CHIP_IS_E2(bp) ?
6652 offsetof(struct hc_status_block_data_e2, common) :
6653 offsetof(struct hc_status_block_data_e1x, common));
Dmitry Kravkov523224a2010-10-06 03:23:26 +00006654 int pfunc_offset_sp = offsetof(struct hc_sp_status_block_data, p_func);
6655 int pfid_offset = offsetof(struct pci_entity, pf_id);
6656
6657 /* Disable the function in the FW */
6658 REG_WR8(bp, BAR_XSTRORM_INTMEM + XSTORM_FUNC_EN_OFFSET(func), 0);
6659 REG_WR8(bp, BAR_CSTRORM_INTMEM + CSTORM_FUNC_EN_OFFSET(func), 0);
6660 REG_WR8(bp, BAR_TSTRORM_INTMEM + TSTORM_FUNC_EN_OFFSET(func), 0);
6661 REG_WR8(bp, BAR_USTRORM_INTMEM + USTORM_FUNC_EN_OFFSET(func), 0);
6662
6663 /* FP SBs */
6664 for_each_queue(bp, i) {
6665 struct bnx2x_fastpath *fp = &bp->fp[i];
6666 REG_WR8(bp,
6667 BAR_CSTRORM_INTMEM +
6668 CSTORM_STATUS_BLOCK_DATA_OFFSET(fp->fw_sb_id)
6669 + pfunc_offset_fp + pfid_offset,
6670 HC_FUNCTION_DISABLED);
6671 }
6672
6673 /* SP SB */
6674 REG_WR8(bp,
6675 BAR_CSTRORM_INTMEM +
6676 CSTORM_SP_STATUS_BLOCK_DATA_OFFSET(func) +
6677 pfunc_offset_sp + pfid_offset,
6678 HC_FUNCTION_DISABLED);
6679
6680
6681 for (i = 0; i < XSTORM_SPQ_DATA_SIZE / 4; i++)
6682 REG_WR(bp, BAR_XSTRORM_INTMEM + XSTORM_SPQ_DATA_OFFSET(func),
6683 0);
Eliezer Tamir49d66772008-02-28 11:53:13 -08006684
Eilon Greenstein34f80b02008-06-23 20:33:01 -07006685 /* Configure IGU */
Dmitry Kravkovf2e08992010-10-06 03:28:26 +00006686 if (bp->common.int_block == INT_BLOCK_HC) {
6687 REG_WR(bp, HC_REG_LEADING_EDGE_0 + port*8, 0);
6688 REG_WR(bp, HC_REG_TRAILING_EDGE_0 + port*8, 0);
6689 } else {
6690 REG_WR(bp, IGU_REG_LEADING_EDGE_LATCH, 0);
6691 REG_WR(bp, IGU_REG_TRAILING_EDGE_LATCH, 0);
6692 }
Eilon Greenstein34f80b02008-06-23 20:33:01 -07006693
Michael Chan37b091b2009-10-10 13:46:55 +00006694#ifdef BCM_CNIC
6695 /* Disable Timer scan */
6696 REG_WR(bp, TM_REG_EN_LINEAR0_TIMER + port*4, 0);
6697 /*
6698	 * Wait for at least 10ms and up to 2 seconds for the timers scan to
6699 * complete
6700 */
6701 for (i = 0; i < 200; i++) {
6702 msleep(10);
6703 if (!REG_RD(bp, TM_REG_LIN0_SCAN_ON + port*4))
6704 break;
6705 }
6706#endif
Eilon Greenstein34f80b02008-06-23 20:33:01 -07006707 /* Clear ILT */
Dmitry Kravkovf2e08992010-10-06 03:28:26 +00006708 bnx2x_clear_func_ilt(bp, func);
6709
6710 /* Timers workaround bug for E2: if this is vnic-3,
6711 * we need to set the entire ilt range for this timers.
6712 */
6713 if (CHIP_IS_E2(bp) && BP_VN(bp) == 3) {
6714 struct ilt_client_info ilt_cli;
6715 /* use dummy TM client */
6716 memset(&ilt_cli, 0, sizeof(struct ilt_client_info));
6717 ilt_cli.start = 0;
6718 ilt_cli.end = ILT_NUM_PAGE_ENTRIES - 1;
6719 ilt_cli.client_num = ILT_CLIENT_TM;
6720
6721 bnx2x_ilt_boundry_init_op(bp, &ilt_cli, 0, INITOP_CLEAR);
6722 }
6723
6724	/* this assumes that reset_port() was called before reset_func() */
6725 if (CHIP_IS_E2(bp))
6726 bnx2x_pf_disable(bp);
Dmitry Kravkov523224a2010-10-06 03:23:26 +00006727
6728 bp->dmae_ready = 0;
Eilon Greenstein34f80b02008-06-23 20:33:01 -07006729}
6730
6731static void bnx2x_reset_port(struct bnx2x *bp)
6732{
6733 int port = BP_PORT(bp);
6734 u32 val;
6735
6736 REG_WR(bp, NIG_REG_MASK_INTERRUPT_PORT0 + port*4, 0);
6737
6738 /* Do not rcv packets to BRB */
6739 REG_WR(bp, NIG_REG_LLH0_BRB1_DRV_MASK + port*4, 0x0);
6740 /* Do not direct rcv packets that are not for MCP to the BRB */
6741 REG_WR(bp, (port ? NIG_REG_LLH1_BRB1_NOT_MCP :
6742 NIG_REG_LLH0_BRB1_NOT_MCP), 0x0);
6743
6744 /* Configure AEU */
6745 REG_WR(bp, MISC_REG_AEU_MASK_ATTN_FUNC_0 + port*4, 0);
6746
6747 msleep(100);
6748 /* Check for BRB port occupancy */
6749 val = REG_RD(bp, BRB1_REG_PORT_NUM_OCC_BLOCKS_0 + port*4);
6750 if (val)
6751 DP(NETIF_MSG_IFDOWN,
Eilon Greenstein33471622008-08-13 15:59:08 -07006752		   "BRB1 is not empty, %d blocks are occupied\n", val);
Eilon Greenstein34f80b02008-06-23 20:33:01 -07006753
6754 /* TODO: Close Doorbell port? */
6755}
6756
Eilon Greenstein34f80b02008-06-23 20:33:01 -07006757static void bnx2x_reset_chip(struct bnx2x *bp, u32 reset_code)
6758{
6759 DP(BNX2X_MSG_MCP, "function %d reset_code %x\n",
Dmitry Kravkovf2e08992010-10-06 03:28:26 +00006760 BP_ABS_FUNC(bp), reset_code);
Eilon Greenstein34f80b02008-06-23 20:33:01 -07006761
6762 switch (reset_code) {
6763 case FW_MSG_CODE_DRV_UNLOAD_COMMON:
6764 bnx2x_reset_port(bp);
6765 bnx2x_reset_func(bp);
6766 bnx2x_reset_common(bp);
6767 break;
6768
6769 case FW_MSG_CODE_DRV_UNLOAD_PORT:
6770 bnx2x_reset_port(bp);
6771 bnx2x_reset_func(bp);
6772 break;
6773
6774 case FW_MSG_CODE_DRV_UNLOAD_FUNCTION:
6775 bnx2x_reset_func(bp);
6776 break;
6777
6778 default:
6779 BNX2X_ERR("Unknown reset_code (0x%x) from MCP\n", reset_code);
6780 break;
6781 }
6782}
6783
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00006784void bnx2x_chip_cleanup(struct bnx2x *bp, int unload_mode)
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02006785{
Vladislav Zolotarovda5a6622008-08-13 15:50:00 -07006786 int port = BP_PORT(bp);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02006787 u32 reset_code = 0;
Vladislav Zolotarovda5a6622008-08-13 15:50:00 -07006788 int i, cnt, rc;
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02006789
Eilon Greenstein555f6c72009-02-12 08:36:11 +00006790 /* Wait until tx fastpath tasks complete */
Vladislav Zolotarov54b9dda2009-11-16 06:05:58 +00006791 for_each_queue(bp, i) {
Eliezer Tamir228241e2008-02-28 11:56:57 -08006792 struct bnx2x_fastpath *fp = &bp->fp[i];
6793
Eilon Greenstein34f80b02008-06-23 20:33:01 -07006794 cnt = 1000;
Vladislav Zolotarove8b5fc52009-01-26 12:36:42 -08006795 while (bnx2x_has_tx_work_unload(fp)) {
Vladislav Zolotarovda5a6622008-08-13 15:50:00 -07006796
Eilon Greenstein34f80b02008-06-23 20:33:01 -07006797 if (!cnt) {
6798 BNX2X_ERR("timeout waiting for queue[%d]\n",
6799 i);
6800#ifdef BNX2X_STOP_ON_ERROR
6801 bnx2x_panic();
6802 return -EBUSY;
6803#else
6804 break;
6805#endif
6806 }
6807 cnt--;
Vladislav Zolotarovda5a6622008-08-13 15:50:00 -07006808 msleep(1);
Eilon Greenstein34f80b02008-06-23 20:33:01 -07006809 }
Eliezer Tamir228241e2008-02-28 11:56:57 -08006810 }
Vladislav Zolotarovda5a6622008-08-13 15:50:00 -07006811 /* Give HW time to discard old tx messages */
6812 msleep(1);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02006813
Yitchak Gertner65abd742008-08-25 15:26:24 -07006814 if (CHIP_IS_E1(bp)) {
Dmitry Kravkov523224a2010-10-06 03:23:26 +00006815 /* invalidate mc list,
6816 * wait and poll (interrupts are off)
6817 */
6818 bnx2x_invlidate_e1_mc_list(bp);
6819 bnx2x_set_eth_mac(bp, 0);
Yitchak Gertner65abd742008-08-25 15:26:24 -07006820
Dmitry Kravkov523224a2010-10-06 03:23:26 +00006821 } else {
Yitchak Gertner65abd742008-08-25 15:26:24 -07006822 REG_WR(bp, NIG_REG_LLH0_FUNC_EN + port*8, 0);
6823
Dmitry Kravkov523224a2010-10-06 03:23:26 +00006824 bnx2x_set_eth_mac(bp, 0);
Yitchak Gertner65abd742008-08-25 15:26:24 -07006825
6826 for (i = 0; i < MC_HASH_SIZE; i++)
6827 REG_WR(bp, MC_HASH_OFFSET(bp, i), 0);
6828 }
Dmitry Kravkov523224a2010-10-06 03:23:26 +00006829
Michael Chan993ac7b2009-10-10 13:46:56 +00006830#ifdef BCM_CNIC
6831 /* Clear iSCSI L2 MAC */
6832 mutex_lock(&bp->cnic_mutex);
6833 if (bp->cnic_flags & BNX2X_CNIC_FLAG_MAC_SET) {
6834 bnx2x_set_iscsi_eth_mac_addr(bp, 0);
6835 bp->cnic_flags &= ~BNX2X_CNIC_FLAG_MAC_SET;
6836 }
6837 mutex_unlock(&bp->cnic_mutex);
6838#endif
Yitchak Gertner65abd742008-08-25 15:26:24 -07006839
Vladislav Zolotarovda5a6622008-08-13 15:50:00 -07006840 if (unload_mode == UNLOAD_NORMAL)
6841 reset_code = DRV_MSG_CODE_UNLOAD_REQ_WOL_DIS;
Eliezer Tamir228241e2008-02-28 11:56:57 -08006842
Eilon Greenstein7d0446c2009-07-29 00:20:10 +00006843 else if (bp->flags & NO_WOL_FLAG)
Vladislav Zolotarovda5a6622008-08-13 15:50:00 -07006844 reset_code = DRV_MSG_CODE_UNLOAD_REQ_WOL_MCP;
Vladislav Zolotarovda5a6622008-08-13 15:50:00 -07006845
Eilon Greenstein7d0446c2009-07-29 00:20:10 +00006846 else if (bp->wol) {
Vladislav Zolotarovda5a6622008-08-13 15:50:00 -07006847 u32 emac_base = port ? GRCBASE_EMAC1 : GRCBASE_EMAC0;
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02006848 u8 *mac_addr = bp->dev->dev_addr;
Eilon Greenstein34f80b02008-06-23 20:33:01 -07006849 u32 val;
Eilon Greenstein34f80b02008-06-23 20:33:01 -07006850		/* The MAC address is written to entries 1-4 to
6851		   preserve entry 0, which is used by the PMF */
Vladislav Zolotarovda5a6622008-08-13 15:50:00 -07006852 u8 entry = (BP_E1HVN(bp) + 1)*8;
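			/* Editor's note (hedged): each MAC_MATCH CAM entry is
			 * two 32-bit registers (8 bytes), so vn 0 lands at
			 * byte offset 8 (entry 1) and vn 3 at offset 32
			 * (entry 4) - entry 0 is never touched here.
			 */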
6853
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02006854 val = (mac_addr[0] << 8) | mac_addr[1];
Eilon Greenstein3196a882008-08-13 15:58:49 -07006855 EMAC_WR(bp, EMAC_REG_EMAC_MAC_MATCH + entry, val);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02006856
6857 val = (mac_addr[2] << 24) | (mac_addr[3] << 16) |
6858 (mac_addr[4] << 8) | mac_addr[5];
Eilon Greenstein3196a882008-08-13 15:58:49 -07006859 EMAC_WR(bp, EMAC_REG_EMAC_MAC_MATCH + entry + 4, val);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02006860
6861 reset_code = DRV_MSG_CODE_UNLOAD_REQ_WOL_EN;
Eliezer Tamir228241e2008-02-28 11:56:57 -08006862
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02006863 } else
6864 reset_code = DRV_MSG_CODE_UNLOAD_REQ_WOL_DIS;
6865
Eilon Greenstein34f80b02008-06-23 20:33:01 -07006866	/* Close multi and leading connections;
6867	   completions for ramrods are collected in a synchronous way */
Dmitry Kravkov523224a2010-10-06 03:23:26 +00006868 for_each_queue(bp, i)
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02006869
Dmitry Kravkov523224a2010-10-06 03:23:26 +00006870 if (bnx2x_stop_client(bp, i))
Vladislav Zolotarovda5a6622008-08-13 15:50:00 -07006871#ifdef BNX2X_STOP_ON_ERROR
Dmitry Kravkov523224a2010-10-06 03:23:26 +00006872 return;
6873#else
6874 goto unload_error;
6875#endif
6876
6877 rc = bnx2x_func_stop(bp);
6878 if (rc) {
6879 BNX2X_ERR("Function stop failed!\n");
6880#ifdef BNX2X_STOP_ON_ERROR
6881 return;
Vladislav Zolotarovda5a6622008-08-13 15:50:00 -07006882#else
6883 goto unload_error;
Eilon Greenstein34f80b02008-06-23 20:33:01 -07006884#endif
Eliezer Tamir228241e2008-02-28 11:56:57 -08006885 }
Dmitry Kravkov523224a2010-10-06 03:23:26 +00006886#ifndef BNX2X_STOP_ON_ERROR
Eliezer Tamir228241e2008-02-28 11:56:57 -08006887unload_error:
Dmitry Kravkov523224a2010-10-06 03:23:26 +00006888#endif
Eilon Greenstein34f80b02008-06-23 20:33:01 -07006889 if (!BP_NOMCP(bp))
Yaniv Rosnera22f0782010-09-07 11:41:20 +00006890 reset_code = bnx2x_fw_command(bp, reset_code, 0);
Eilon Greenstein34f80b02008-06-23 20:33:01 -07006891 else {
Dmitry Kravkovf2e08992010-10-06 03:28:26 +00006892 DP(NETIF_MSG_IFDOWN, "NO MCP - load counts[%d] "
6893 "%d, %d, %d\n", BP_PATH(bp),
6894 load_count[BP_PATH(bp)][0],
6895 load_count[BP_PATH(bp)][1],
6896 load_count[BP_PATH(bp)][2]);
6897 load_count[BP_PATH(bp)][0]--;
6898 load_count[BP_PATH(bp)][1 + port]--;
6899 DP(NETIF_MSG_IFDOWN, "NO MCP - new load counts[%d] "
6900 "%d, %d, %d\n", BP_PATH(bp),
6901 load_count[BP_PATH(bp)][0], load_count[BP_PATH(bp)][1],
6902 load_count[BP_PATH(bp)][2]);
6903 if (load_count[BP_PATH(bp)][0] == 0)
Eilon Greenstein34f80b02008-06-23 20:33:01 -07006904 reset_code = FW_MSG_CODE_DRV_UNLOAD_COMMON;
Dmitry Kravkovf2e08992010-10-06 03:28:26 +00006905 else if (load_count[BP_PATH(bp)][1 + port] == 0)
Eilon Greenstein34f80b02008-06-23 20:33:01 -07006906 reset_code = FW_MSG_CODE_DRV_UNLOAD_PORT;
6907 else
6908 reset_code = FW_MSG_CODE_DRV_UNLOAD_FUNCTION;
6909 }
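		/* Editor's note (hedged assumption): bnx2x_nic_load() is
		 * expected to mirror this NO-MCP bookkeeping on the way up,
		 * roughly:
		 *	load_count[BP_PATH(bp)][0]++;
		 *	load_count[BP_PATH(bp)][1 + port]++;
		 * so a counter reaching zero here identifies the last
		 * function on the path/port and selects the matching
		 * COMMON/PORT/FUNCTION unload reset_code.
		 */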
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02006910
Eilon Greenstein34f80b02008-06-23 20:33:01 -07006911 if ((reset_code == FW_MSG_CODE_DRV_UNLOAD_COMMON) ||
6912 (reset_code == FW_MSG_CODE_DRV_UNLOAD_PORT))
6913 bnx2x__link_reset(bp);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02006914
Dmitry Kravkov523224a2010-10-06 03:23:26 +00006915 /* Disable HW interrupts, NAPI */
6916 bnx2x_netif_stop(bp, 1);
6917
6918 /* Release IRQs */
Dmitry Kravkovd6214d72010-10-06 03:32:10 +00006919 bnx2x_free_irq(bp);
Dmitry Kravkov523224a2010-10-06 03:23:26 +00006920
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02006921 /* Reset the chip */
Eliezer Tamir228241e2008-02-28 11:56:57 -08006922 bnx2x_reset_chip(bp, reset_code);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02006923
6924 /* Report UNLOAD_DONE to MCP */
Eilon Greenstein34f80b02008-06-23 20:33:01 -07006925 if (!BP_NOMCP(bp))
Yaniv Rosnera22f0782010-09-07 11:41:20 +00006926 bnx2x_fw_command(bp, DRV_MSG_CODE_UNLOAD_DONE, 0);
Eilon Greenstein356e2382009-02-12 08:38:32 +00006927
Vladislav Zolotarov72fd0712010-04-19 01:13:12 +00006928}
6929
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00006930void bnx2x_disable_close_the_gate(struct bnx2x *bp)
Vladislav Zolotarov72fd0712010-04-19 01:13:12 +00006931{
6932 u32 val;
6933
6934 DP(NETIF_MSG_HW, "Disabling \"close the gates\"\n");
6935
6936 if (CHIP_IS_E1(bp)) {
6937 int port = BP_PORT(bp);
6938 u32 addr = port ? MISC_REG_AEU_MASK_ATTN_FUNC_1 :
6939 MISC_REG_AEU_MASK_ATTN_FUNC_0;
6940
6941 val = REG_RD(bp, addr);
6942 val &= ~(0x300);
6943 REG_WR(bp, addr, val);
6944 } else if (CHIP_IS_E1H(bp)) {
6945 val = REG_RD(bp, MISC_REG_AEU_GENERAL_MASK);
6946 val &= ~(MISC_AEU_GENERAL_MASK_REG_AEU_PXP_CLOSE_MASK |
6947 MISC_AEU_GENERAL_MASK_REG_AEU_NIG_CLOSE_MASK);
6948 REG_WR(bp, MISC_REG_AEU_GENERAL_MASK, val);
6949 }
6950}
6951
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02006952
Vladislav Zolotarov72fd0712010-04-19 01:13:12 +00006953/* Close gates #2, #3 and #4: */
6954static void bnx2x_set_234_gates(struct bnx2x *bp, bool close)
6955{
6956 u32 val, addr;
6957
6958 /* Gates #2 and #4a are closed/opened for "not E1" only */
6959 if (!CHIP_IS_E1(bp)) {
6960 /* #4 */
6961 val = REG_RD(bp, PXP_REG_HST_DISCARD_DOORBELLS);
6962 REG_WR(bp, PXP_REG_HST_DISCARD_DOORBELLS,
6963 close ? (val | 0x1) : (val & (~(u32)1)));
6964 /* #2 */
6965 val = REG_RD(bp, PXP_REG_HST_DISCARD_INTERNAL_WRITES);
6966 REG_WR(bp, PXP_REG_HST_DISCARD_INTERNAL_WRITES,
6967 close ? (val | 0x1) : (val & (~(u32)1)));
6968 }
6969
6970 /* #3 */
6971 addr = BP_PORT(bp) ? HC_REG_CONFIG_1 : HC_REG_CONFIG_0;
6972 val = REG_RD(bp, addr);
6973 REG_WR(bp, addr, (!close) ? (val | 0x1) : (val & (~(u32)1)));
6974
6975 DP(NETIF_MSG_HW, "%s gates #2, #3 and #4\n",
6976 close ? "closing" : "opening");
6977 mmiowb();
6978}
6979
6980#define SHARED_MF_CLP_MAGIC 0x80000000 /* `magic' bit */
6981
6982static void bnx2x_clp_reset_prep(struct bnx2x *bp, u32 *magic_val)
6983{
6984 /* Do some magic... */
6985 u32 val = MF_CFG_RD(bp, shared_mf_config.clp_mb);
6986 *magic_val = val & SHARED_MF_CLP_MAGIC;
6987 MF_CFG_WR(bp, shared_mf_config.clp_mb, val | SHARED_MF_CLP_MAGIC);
6988}
6989
6990/* Restore the value of the `magic' bit.
6991 *
6992 * @param bp Device handle.
6993 * @param magic_val Old value of the `magic' bit.
6994 */
6995static void bnx2x_clp_reset_done(struct bnx2x *bp, u32 magic_val)
6996{
6997 /* Restore the `magic' bit value... */
6998 /* u32 val = SHMEM_RD(bp, mf_cfg.shared_mf_config.clp_mb);
6999 SHMEM_WR(bp, mf_cfg.shared_mf_config.clp_mb,
7000 (val & (~SHARED_MF_CLP_MAGIC)) | magic_val); */
7001 u32 val = MF_CFG_RD(bp, shared_mf_config.clp_mb);
7002 MF_CFG_WR(bp, shared_mf_config.clp_mb,
7003 (val & (~SHARED_MF_CLP_MAGIC)) | magic_val);
7004}
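
/*
 * Editor's sketch (hypothetical wrapper, hedged): the intended pairing of
 * the two CLP helpers above around an MCP reset - exactly what
 * bnx2x_reset_mcp_prep()/bnx2x_reset_mcp_comp() below do on non-E1 chips.
 */
static inline void bnx2x_clp_magic_usage(struct bnx2x *bp)
{
	u32 magic_val;

	bnx2x_clp_reset_prep(bp, &magic_val);	/* save the `magic' bit */
	/* ... MCP/chip reset takes place here ... */
	bnx2x_clp_reset_done(bp, magic_val);	/* put the saved bit back */
}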
7005
7006/* Prepares for MCP reset: takes care of CLP configurations.
7007 *
7008 * @param bp
7009 * @param magic_val Old value of 'magic' bit.
7010 */
7011static void bnx2x_reset_mcp_prep(struct bnx2x *bp, u32 *magic_val)
7012{
7013 u32 shmem;
7014 u32 validity_offset;
7015
7016 DP(NETIF_MSG_HW, "Starting\n");
7017
7018 /* Set `magic' bit in order to save MF config */
7019 if (!CHIP_IS_E1(bp))
7020 bnx2x_clp_reset_prep(bp, magic_val);
7021
7022 /* Get shmem offset */
7023 shmem = REG_RD(bp, MISC_REG_SHARED_MEM_ADDR);
7024 validity_offset = offsetof(struct shmem_region, validity_map[0]);
7025
7026 /* Clear validity map flags */
7027 if (shmem > 0)
7028 REG_WR(bp, shmem + validity_offset, 0);
7029}
7030
7031#define MCP_TIMEOUT 5000 /* 5 seconds (in ms) */
7032#define MCP_ONE_TIMEOUT 100 /* 100 ms */
7033
7034/* Waits for MCP_ONE_TIMEOUT or MCP_ONE_TIMEOUT*10,
7035 * depending on the HW type.
7036 *
7037 * @param bp
7038 */
7039static inline void bnx2x_mcp_wait_one(struct bnx2x *bp)
7040{
7041 /* special handling for emulation and FPGA,
7042 wait 10 times longer */
7043 if (CHIP_REV_IS_SLOW(bp))
7044 msleep(MCP_ONE_TIMEOUT*10);
7045 else
7046 msleep(MCP_ONE_TIMEOUT);
7047}
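
/* Editor's note (arithmetic from the defines above, hedged):
 * bnx2x_reset_mcp_comp() below polls MCP_TIMEOUT / MCP_ONE_TIMEOUT =
 * 5000 / 100 = 50 times, i.e. a budget of ~5 seconds on real silicon and
 * ~50 seconds on emulation/FPGA, where each bnx2x_mcp_wait_one() sleeps
 * ten times longer.
 */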
7048
7049static int bnx2x_reset_mcp_comp(struct bnx2x *bp, u32 magic_val)
7050{
7051 u32 shmem, cnt, validity_offset, val;
7052 int rc = 0;
7053
7054 msleep(100);
7055
7056 /* Get shmem offset */
7057 shmem = REG_RD(bp, MISC_REG_SHARED_MEM_ADDR);
7058 if (shmem == 0) {
7059		BNX2X_ERR("Shmem base address read returned 0\n");
7060 rc = -ENOTTY;
7061 goto exit_lbl;
7062 }
7063
7064 validity_offset = offsetof(struct shmem_region, validity_map[0]);
7065
7066 /* Wait for MCP to come up */
7067 for (cnt = 0; cnt < (MCP_TIMEOUT / MCP_ONE_TIMEOUT); cnt++) {
7068		/* TBD: it's best to check the validity map of the last port;
7069		 * currently it checks port 0.
7070 */
7071 val = REG_RD(bp, shmem + validity_offset);
7072 DP(NETIF_MSG_HW, "shmem 0x%x validity map(0x%x)=0x%x\n", shmem,
7073 shmem + validity_offset, val);
7074
7075 /* check that shared memory is valid. */
7076 if ((val & (SHR_MEM_VALIDITY_DEV_INFO | SHR_MEM_VALIDITY_MB))
7077 == (SHR_MEM_VALIDITY_DEV_INFO | SHR_MEM_VALIDITY_MB))
7078 break;
7079
7080 bnx2x_mcp_wait_one(bp);
7081 }
7082
7083 DP(NETIF_MSG_HW, "Cnt=%d Shmem validity map 0x%x\n", cnt, val);
7084
7085 /* Check that shared memory is valid. This indicates that MCP is up. */
7086 if ((val & (SHR_MEM_VALIDITY_DEV_INFO | SHR_MEM_VALIDITY_MB)) !=
7087 (SHR_MEM_VALIDITY_DEV_INFO | SHR_MEM_VALIDITY_MB)) {
7088		BNX2X_ERR("Shmem signature not present. MCP is not up!\n");
7089 rc = -ENOTTY;
7090 goto exit_lbl;
7091 }
7092
7093exit_lbl:
7094 /* Restore the `magic' bit value */
7095 if (!CHIP_IS_E1(bp))
7096 bnx2x_clp_reset_done(bp, magic_val);
7097
7098 return rc;
7099}
7100
7101static void bnx2x_pxp_prep(struct bnx2x *bp)
7102{
7103 if (!CHIP_IS_E1(bp)) {
7104 REG_WR(bp, PXP2_REG_RD_START_INIT, 0);
7105 REG_WR(bp, PXP2_REG_RQ_RBC_DONE, 0);
7106 REG_WR(bp, PXP2_REG_RQ_CFG_DONE, 0);
7107 mmiowb();
7108 }
7109}
7110
7111/*
7112 * Reset the whole chip except for:
7113 * - PCIE core
7114 * - PCI Glue, PSWHST, PXP/PXP2 RF (all controlled by
7115 * one reset bit)
7116 * - IGU
7117 * - MISC (including AEU)
7118 * - GRC
7119 * - RBCN, RBCP
7120 */
7121static void bnx2x_process_kill_chip_reset(struct bnx2x *bp)
7122{
7123 u32 not_reset_mask1, reset_mask1, not_reset_mask2, reset_mask2;
7124
7125 not_reset_mask1 =
7126 MISC_REGISTERS_RESET_REG_1_RST_HC |
7127 MISC_REGISTERS_RESET_REG_1_RST_PXPV |
7128 MISC_REGISTERS_RESET_REG_1_RST_PXP;
7129
7130 not_reset_mask2 =
7131 MISC_REGISTERS_RESET_REG_2_RST_MDIO |
7132 MISC_REGISTERS_RESET_REG_2_RST_EMAC0_HARD_CORE |
7133 MISC_REGISTERS_RESET_REG_2_RST_EMAC1_HARD_CORE |
7134 MISC_REGISTERS_RESET_REG_2_RST_MISC_CORE |
7135 MISC_REGISTERS_RESET_REG_2_RST_RBCN |
7136 MISC_REGISTERS_RESET_REG_2_RST_GRC |
7137 MISC_REGISTERS_RESET_REG_2_RST_MCP_N_RESET_REG_HARD_CORE |
7138 MISC_REGISTERS_RESET_REG_2_RST_MCP_N_HARD_CORE_RST_B;
7139
7140 reset_mask1 = 0xffffffff;
7141
7142 if (CHIP_IS_E1(bp))
7143 reset_mask2 = 0xffff;
7144 else
7145 reset_mask2 = 0x1ffff;
7146
7147 REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_CLEAR,
7148 reset_mask1 & (~not_reset_mask1));
7149 REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_2_CLEAR,
7150 reset_mask2 & (~not_reset_mask2));
7151
7152 barrier();
7153 mmiowb();
7154
7155 REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_SET, reset_mask1);
7156 REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_2_SET, reset_mask2);
7157 mmiowb();
7158}
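
/* Editor's note (hedged, inferred from usage in this file): writing a 1 to
 * a bit of MISC_REGISTERS_RESET_REG_x_CLEAR asserts reset for that block,
 * while writing the same bit to ..._SET releases it - compare
 * bnx2x_undi_unload() below, which resets the device via _CLEAR and then
 * takes the NIG back out of reset via _SET.
 */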
7159
7160static int bnx2x_process_kill(struct bnx2x *bp)
7161{
7162 int cnt = 1000;
7163 u32 val = 0;
7164 u32 sr_cnt, blk_cnt, port_is_idle_0, port_is_idle_1, pgl_exp_rom2;
7165
7166
7167 /* Empty the Tetris buffer, wait for 1s */
7168 do {
7169 sr_cnt = REG_RD(bp, PXP2_REG_RD_SR_CNT);
7170 blk_cnt = REG_RD(bp, PXP2_REG_RD_BLK_CNT);
7171 port_is_idle_0 = REG_RD(bp, PXP2_REG_RD_PORT_IS_IDLE_0);
7172 port_is_idle_1 = REG_RD(bp, PXP2_REG_RD_PORT_IS_IDLE_1);
7173 pgl_exp_rom2 = REG_RD(bp, PXP2_REG_PGL_EXP_ROM2);
7174 if ((sr_cnt == 0x7e) && (blk_cnt == 0xa0) &&
7175 ((port_is_idle_0 & 0x1) == 0x1) &&
7176 ((port_is_idle_1 & 0x1) == 0x1) &&
7177 (pgl_exp_rom2 == 0xffffffff))
7178 break;
7179 msleep(1);
7180 } while (cnt-- > 0);
7181
7182 if (cnt <= 0) {
7183		DP(NETIF_MSG_HW, "Tetris buffer didn't empty or there"
7184		   " are still outstanding"
7185		   " read requests after 1s!\n");
7186 DP(NETIF_MSG_HW, "sr_cnt=0x%08x, blk_cnt=0x%08x,"
7187 " port_is_idle_0=0x%08x,"
7188 " port_is_idle_1=0x%08x, pgl_exp_rom2=0x%08x\n",
7189 sr_cnt, blk_cnt, port_is_idle_0, port_is_idle_1,
7190 pgl_exp_rom2);
7191 return -EAGAIN;
7192 }
7193
7194 barrier();
7195
7196 /* Close gates #2, #3 and #4 */
7197 bnx2x_set_234_gates(bp, true);
7198
7199 /* TBD: Indicate that "process kill" is in progress to MCP */
7200
7201 /* Clear "unprepared" bit */
7202 REG_WR(bp, MISC_REG_UNPREPARED, 0);
7203 barrier();
7204
7205 /* Make sure all is written to the chip before the reset */
7206 mmiowb();
7207
7208 /* Wait for 1ms to empty GLUE and PCI-E core queues,
7209 * PSWHST, GRC and PSWRD Tetris buffer.
7210 */
7211 msleep(1);
7212
7213	/* Prepare for chip reset: */
7214 /* MCP */
7215 bnx2x_reset_mcp_prep(bp, &val);
7216
7217 /* PXP */
7218 bnx2x_pxp_prep(bp);
7219 barrier();
7220
7221 /* reset the chip */
7222 bnx2x_process_kill_chip_reset(bp);
7223 barrier();
7224
7225 /* Recover after reset: */
7226 /* MCP */
7227 if (bnx2x_reset_mcp_comp(bp, val))
7228 return -EAGAIN;
7229
7230 /* PXP */
7231 bnx2x_pxp_prep(bp);
7232
7233 /* Open the gates #2, #3 and #4 */
7234 bnx2x_set_234_gates(bp, false);
7235
7236	/* TBD: IGU/AEU preparation - bring the AEU/IGU back to a
7237	 * reset state, re-enable attentions. */
7238
7239 return 0;
7240}
7241
7242static int bnx2x_leader_reset(struct bnx2x *bp)
7243{
7244 int rc = 0;
7245 /* Try to recover after the failure */
7246 if (bnx2x_process_kill(bp)) {
7247		printk(KERN_ERR "%s: Something bad happened! Aii!\n",
7248 bp->dev->name);
7249 rc = -EAGAIN;
7250 goto exit_leader_reset;
7251 }
7252
7253 /* Clear "reset is in progress" bit and update the driver state */
7254 bnx2x_set_reset_done(bp);
7255 bp->recovery_state = BNX2X_RECOVERY_DONE;
7256
7257exit_leader_reset:
7258 bp->is_leader = 0;
7259 bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_RESERVED_08);
7260 smp_wmb();
7261 return rc;
7262}
7263
Vladislav Zolotarov72fd0712010-04-19 01:13:12 +00007264/* Assumption: runs under rtnl lock. This together with the fact
7265 * that it's called only from bnx2x_reset_task() ensure that it
7266 * will never be called when netif_running(bp->dev) is false.
7267 */
7268static void bnx2x_parity_recover(struct bnx2x *bp)
7269{
7270 DP(NETIF_MSG_HW, "Handling parity\n");
7271 while (1) {
7272 switch (bp->recovery_state) {
7273 case BNX2X_RECOVERY_INIT:
7274 DP(NETIF_MSG_HW, "State is BNX2X_RECOVERY_INIT\n");
7275 /* Try to get a LEADER_LOCK HW lock */
7276 if (bnx2x_trylock_hw_lock(bp,
7277 HW_LOCK_RESOURCE_RESERVED_08))
7278 bp->is_leader = 1;
7279
7280 /* Stop the driver */
7281 /* If interface has been removed - break */
7282 if (bnx2x_nic_unload(bp, UNLOAD_RECOVERY))
7283 return;
7284
7285 bp->recovery_state = BNX2X_RECOVERY_WAIT;
7286 /* Ensure "is_leader" and "recovery_state"
7287 * update values are seen on other CPUs
7288 */
7289 smp_wmb();
7290 break;
7291
7292 case BNX2X_RECOVERY_WAIT:
7293 DP(NETIF_MSG_HW, "State is BNX2X_RECOVERY_WAIT\n");
7294 if (bp->is_leader) {
7295 u32 load_counter = bnx2x_get_load_cnt(bp);
7296 if (load_counter) {
7297 /* Wait until all other functions get
7298 * down.
7299 */
7300 schedule_delayed_work(&bp->reset_task,
7301 HZ/10);
7302 return;
7303 } else {
7304 /* If all other functions got down -
7305 * try to bring the chip back to
7306 * normal. In any case it's an exit
7307 * point for a leader.
7308 */
7309 if (bnx2x_leader_reset(bp) ||
7310 bnx2x_nic_load(bp, LOAD_NORMAL)) {
7311					printk(KERN_ERR "%s: Recovery "
7312 "has failed. Power cycle is "
7313 "needed.\n", bp->dev->name);
7314 /* Disconnect this device */
7315 netif_device_detach(bp->dev);
7316 /* Block ifup for all function
7317 * of this ASIC until
7318 * "process kill" or power
7319 * cycle.
7320 */
7321 bnx2x_set_reset_in_progress(bp);
7322 /* Shut down the power */
7323 bnx2x_set_power_state(bp,
7324 PCI_D3hot);
7325 return;
7326 }
7327
7328 return;
7329 }
7330 } else { /* non-leader */
7331 if (!bnx2x_reset_is_done(bp)) {
7332 /* Try to get a LEADER_LOCK HW lock as
7333 * long as a former leader may have
7334 * been unloaded by the user or
7335 * released a leadership by another
7336 * reason.
7337 */
7338 if (bnx2x_trylock_hw_lock(bp,
7339 HW_LOCK_RESOURCE_RESERVED_08)) {
7340 /* I'm a leader now! Restart a
7341 * switch case.
7342 */
7343 bp->is_leader = 1;
7344 break;
7345 }
7346
7347 schedule_delayed_work(&bp->reset_task,
7348 HZ/10);
7349 return;
7350
7351 } else { /* A leader has completed
7352 * the "process kill". It's an exit
7353 * point for a non-leader.
7354 */
7355 bnx2x_nic_load(bp, LOAD_NORMAL);
7356 bp->recovery_state =
7357 BNX2X_RECOVERY_DONE;
7358 smp_wmb();
7359 return;
7360 }
7361 }
7362 default:
7363 return;
7364 }
7365 }
7366}
7367
7368/* bnx2x_nic_unload() flushes the bnx2x_wq, thus reset task is
7369 * scheduled on a general queue in order to prevent a dead lock.
7370 */
Eilon Greenstein34f80b02008-06-23 20:33:01 -07007371static void bnx2x_reset_task(struct work_struct *work)
7372{
Vladislav Zolotarov72fd0712010-04-19 01:13:12 +00007373 struct bnx2x *bp = container_of(work, struct bnx2x, reset_task.work);
Eilon Greenstein34f80b02008-06-23 20:33:01 -07007374
7375#ifdef BNX2X_STOP_ON_ERROR
7376 BNX2X_ERR("reset task called but STOP_ON_ERROR defined"
7377 " so reset not done to allow debug dump,\n"
Vladislav Zolotarov72fd0712010-04-19 01:13:12 +00007378		  " you will need to reboot when done\n");
Eilon Greenstein34f80b02008-06-23 20:33:01 -07007379 return;
7380#endif
7381
7382 rtnl_lock();
7383
7384 if (!netif_running(bp->dev))
7385 goto reset_task_exit;
7386
Vladislav Zolotarov72fd0712010-04-19 01:13:12 +00007387 if (unlikely(bp->recovery_state != BNX2X_RECOVERY_DONE))
7388 bnx2x_parity_recover(bp);
7389 else {
7390 bnx2x_nic_unload(bp, UNLOAD_NORMAL);
7391 bnx2x_nic_load(bp, LOAD_NORMAL);
7392 }
Eilon Greenstein34f80b02008-06-23 20:33:01 -07007393
7394reset_task_exit:
7395 rtnl_unlock();
7396}
7397
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02007398/* end of nic load/unload */
7399
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02007400/*
7401 * Init service functions
7402 */
7403
Dmitry Kravkovf2e08992010-10-06 03:28:26 +00007404u32 bnx2x_get_pretend_reg(struct bnx2x *bp)
Eilon Greensteinf1ef27e2009-02-12 08:36:23 +00007405{
Dmitry Kravkovf2e08992010-10-06 03:28:26 +00007406 u32 base = PXP2_REG_PGL_PRETEND_FUNC_F0;
7407 u32 stride = PXP2_REG_PGL_PRETEND_FUNC_F1 - base;
7408 return base + (BP_ABS_FUNC(bp)) * stride;
Eilon Greensteinf1ef27e2009-02-12 08:36:23 +00007409}
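
/* Editor's worked example (hedged): the pretend registers sit at a fixed
 * stride, so with stride = PXP2_REG_PGL_PRETEND_FUNC_F1 - ..._F0, absolute
 * function 5 maps to ..._F0 + 5 * stride, i.e. its own _F5 register.
 */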
7410
Dmitry Kravkovf2e08992010-10-06 03:28:26 +00007411static void bnx2x_undi_int_disable_e1h(struct bnx2x *bp)
Eilon Greensteinf1ef27e2009-02-12 08:36:23 +00007412{
Dmitry Kravkovf2e08992010-10-06 03:28:26 +00007413 u32 reg = bnx2x_get_pretend_reg(bp);
Eilon Greensteinf1ef27e2009-02-12 08:36:23 +00007414
7415 /* Flush all outstanding writes */
7416 mmiowb();
7417
7418 /* Pretend to be function 0 */
7419 REG_WR(bp, reg, 0);
Dmitry Kravkovf2e08992010-10-06 03:28:26 +00007420 REG_RD(bp, reg); /* Flush the GRC transaction (in the chip) */
Eilon Greensteinf1ef27e2009-02-12 08:36:23 +00007421
7422 /* From now we are in the "like-E1" mode */
7423 bnx2x_int_disable(bp);
7424
7425 /* Flush all outstanding writes */
7426 mmiowb();
7427
Dmitry Kravkovf2e08992010-10-06 03:28:26 +00007428 /* Restore the original function */
7429 REG_WR(bp, reg, BP_ABS_FUNC(bp));
7430 REG_RD(bp, reg);
Eilon Greensteinf1ef27e2009-02-12 08:36:23 +00007431}
7432
Dmitry Kravkovf2e08992010-10-06 03:28:26 +00007433static inline void bnx2x_undi_int_disable(struct bnx2x *bp)
Eilon Greensteinf1ef27e2009-02-12 08:36:23 +00007434{
Dmitry Kravkovf2e08992010-10-06 03:28:26 +00007435 if (CHIP_IS_E1(bp))
Eilon Greensteinf1ef27e2009-02-12 08:36:23 +00007436 bnx2x_int_disable(bp);
Dmitry Kravkovf2e08992010-10-06 03:28:26 +00007437 else
7438 bnx2x_undi_int_disable_e1h(bp);
Eilon Greensteinf1ef27e2009-02-12 08:36:23 +00007439}
7440
Eilon Greenstein34f80b02008-06-23 20:33:01 -07007441static void __devinit bnx2x_undi_unload(struct bnx2x *bp)
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02007442{
Eilon Greenstein34f80b02008-06-23 20:33:01 -07007443 u32 val;
7444
7445 /* Check if there is any driver already loaded */
7446 val = REG_RD(bp, MISC_REG_UNPREPARED);
7447 if (val == 0x1) {
7448		/* Check if it is the UNDI driver:
7449		 * UNDI initializes the CID offset for the normal doorbell to 0x7
7450 */
Yitchak Gertner4a37fb62008-08-13 15:50:23 -07007451 bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_UNDI);
Eilon Greenstein34f80b02008-06-23 20:33:01 -07007452 val = REG_RD(bp, DORQ_REG_NORM_CID_OFST);
7453 if (val == 0x7) {
7454 u32 reset_code = DRV_MSG_CODE_UNLOAD_REQ_WOL_DIS;
Dmitry Kravkovf2e08992010-10-06 03:28:26 +00007455 /* save our pf_num */
7456 int orig_pf_num = bp->pf_num;
Vladislav Zolotarovda5a6622008-08-13 15:50:00 -07007457 u32 swap_en;
7458 u32 swap_val;
Eilon Greenstein34f80b02008-06-23 20:33:01 -07007459
Eilon Greensteinb4661732009-01-14 06:43:56 +00007460 /* clear the UNDI indication */
7461 REG_WR(bp, DORQ_REG_NORM_CID_OFST, 0);
7462
Eilon Greenstein34f80b02008-06-23 20:33:01 -07007463 BNX2X_DEV_INFO("UNDI is active! reset device\n");
7464
7465 /* try unload UNDI on port 0 */
Dmitry Kravkovf2e08992010-10-06 03:28:26 +00007466 bp->pf_num = 0;
Vladislav Zolotarovda5a6622008-08-13 15:50:00 -07007467 bp->fw_seq =
Dmitry Kravkovf2e08992010-10-06 03:28:26 +00007468 (SHMEM_RD(bp, func_mb[bp->pf_num].drv_mb_header) &
Vladislav Zolotarovda5a6622008-08-13 15:50:00 -07007469 DRV_MSG_SEQ_NUMBER_MASK);
Yaniv Rosnera22f0782010-09-07 11:41:20 +00007470 reset_code = bnx2x_fw_command(bp, reset_code, 0);
Eilon Greenstein34f80b02008-06-23 20:33:01 -07007471
7472 /* if UNDI is loaded on the other port */
7473 if (reset_code != FW_MSG_CODE_DRV_UNLOAD_COMMON) {
7474
Vladislav Zolotarovda5a6622008-08-13 15:50:00 -07007475 /* send "DONE" for previous unload */
Yaniv Rosnera22f0782010-09-07 11:41:20 +00007476 bnx2x_fw_command(bp,
7477 DRV_MSG_CODE_UNLOAD_DONE, 0);
Vladislav Zolotarovda5a6622008-08-13 15:50:00 -07007478
7479 /* unload UNDI on port 1 */
Dmitry Kravkovf2e08992010-10-06 03:28:26 +00007480 bp->pf_num = 1;
Vladislav Zolotarovda5a6622008-08-13 15:50:00 -07007481 bp->fw_seq =
Dmitry Kravkovf2e08992010-10-06 03:28:26 +00007482 (SHMEM_RD(bp, func_mb[bp->pf_num].drv_mb_header) &
Vladislav Zolotarovda5a6622008-08-13 15:50:00 -07007483 DRV_MSG_SEQ_NUMBER_MASK);
7484 reset_code = DRV_MSG_CODE_UNLOAD_REQ_WOL_DIS;
Eilon Greenstein34f80b02008-06-23 20:33:01 -07007485
Yaniv Rosnera22f0782010-09-07 11:41:20 +00007486 bnx2x_fw_command(bp, reset_code, 0);
Eilon Greenstein34f80b02008-06-23 20:33:01 -07007487 }
7488
Eilon Greensteinb4661732009-01-14 06:43:56 +00007489 /* now it's safe to release the lock */
7490 bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_UNDI);
7491
Dmitry Kravkovf2e08992010-10-06 03:28:26 +00007492 bnx2x_undi_int_disable(bp);
Vladislav Zolotarovda5a6622008-08-13 15:50:00 -07007493
7494 /* close input traffic and wait for it */
7495 /* Do not rcv packets to BRB */
7496 REG_WR(bp,
7497 (BP_PORT(bp) ? NIG_REG_LLH1_BRB1_DRV_MASK :
7498 NIG_REG_LLH0_BRB1_DRV_MASK), 0x0);
7499 /* Do not direct rcv packets that are not for MCP to
7500 * the BRB */
7501 REG_WR(bp,
7502 (BP_PORT(bp) ? NIG_REG_LLH1_BRB1_NOT_MCP :
7503 NIG_REG_LLH0_BRB1_NOT_MCP), 0x0);
7504 /* clear AEU */
7505 REG_WR(bp,
7506 (BP_PORT(bp) ? MISC_REG_AEU_MASK_ATTN_FUNC_1 :
7507 MISC_REG_AEU_MASK_ATTN_FUNC_0), 0);
7508 msleep(10);
7509
7510 /* save NIG port swap info */
7511 swap_val = REG_RD(bp, NIG_REG_PORT_SWAP);
7512 swap_en = REG_RD(bp, NIG_REG_STRAP_OVERRIDE);
Eilon Greenstein34f80b02008-06-23 20:33:01 -07007513 /* reset device */
7514 REG_WR(bp,
7515 GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_CLEAR,
Vladislav Zolotarovda5a6622008-08-13 15:50:00 -07007516 0xd3ffffff);
Eilon Greenstein34f80b02008-06-23 20:33:01 -07007517 REG_WR(bp,
7518 GRCBASE_MISC + MISC_REGISTERS_RESET_REG_2_CLEAR,
7519 0x1403);
Vladislav Zolotarovda5a6622008-08-13 15:50:00 -07007520 /* take the NIG out of reset and restore swap values */
7521 REG_WR(bp,
7522 GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_SET,
7523 MISC_REGISTERS_RESET_REG_1_RST_NIG);
7524 REG_WR(bp, NIG_REG_PORT_SWAP, swap_val);
7525 REG_WR(bp, NIG_REG_STRAP_OVERRIDE, swap_en);
7526
7527 /* send unload done to the MCP */
Yaniv Rosnera22f0782010-09-07 11:41:20 +00007528 bnx2x_fw_command(bp, DRV_MSG_CODE_UNLOAD_DONE, 0);
Vladislav Zolotarovda5a6622008-08-13 15:50:00 -07007529
7530 /* restore our func and fw_seq */
Dmitry Kravkovf2e08992010-10-06 03:28:26 +00007531 bp->pf_num = orig_pf_num;
Vladislav Zolotarovda5a6622008-08-13 15:50:00 -07007532 bp->fw_seq =
Dmitry Kravkovf2e08992010-10-06 03:28:26 +00007533 (SHMEM_RD(bp, func_mb[bp->pf_num].drv_mb_header) &
Vladislav Zolotarovda5a6622008-08-13 15:50:00 -07007534 DRV_MSG_SEQ_NUMBER_MASK);
Eilon Greensteinb4661732009-01-14 06:43:56 +00007535
7536 } else
7537 bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_UNDI);
Eilon Greenstein34f80b02008-06-23 20:33:01 -07007538 }
7539}
7540
7541static void __devinit bnx2x_get_common_hwinfo(struct bnx2x *bp)
7542{
7543 u32 val, val2, val3, val4, id;
Eilon Greenstein72ce58c2008-08-13 15:52:46 -07007544 u16 pmc;
Eilon Greenstein34f80b02008-06-23 20:33:01 -07007545
7546 /* Get the chip revision id and number. */
7547 /* chip num:16-31, rev:12-15, metal:4-11, bond_id:0-3 */
7548 val = REG_RD(bp, MISC_REG_CHIP_NUM);
7549 id = ((val & 0xffff) << 16);
7550 val = REG_RD(bp, MISC_REG_CHIP_REV);
7551 id |= ((val & 0xf) << 12);
7552 val = REG_RD(bp, MISC_REG_CHIP_METAL);
7553 id |= ((val & 0xff) << 4);
Eilon Greenstein5a40e082009-01-14 06:44:04 +00007554 val = REG_RD(bp, MISC_REG_BOND_ID);
Eilon Greenstein34f80b02008-06-23 20:33:01 -07007555 id |= (val & 0xf);
7556 bp->common.chip_id = id;
Dmitry Kravkov523224a2010-10-06 03:23:26 +00007557
7558 /* Set doorbell size */
7559 bp->db_size = (1 << BNX2X_DB_SHIFT);
7560
Dmitry Kravkovf2e08992010-10-06 03:28:26 +00007561 if (CHIP_IS_E2(bp)) {
7562 val = REG_RD(bp, MISC_REG_PORT4MODE_EN_OVWR);
7563 if ((val & 1) == 0)
7564 val = REG_RD(bp, MISC_REG_PORT4MODE_EN);
7565 else
7566 val = (val >> 1) & 1;
7567 BNX2X_DEV_INFO("chip is in %s\n", val ? "4_PORT_MODE" :
7568 "2_PORT_MODE");
7569 bp->common.chip_port_mode = val ? CHIP_4_PORT_MODE :
7570 CHIP_2_PORT_MODE;
7571
7572 if (CHIP_MODE_IS_4_PORT(bp))
7573 bp->pfid = (bp->pf_num >> 1); /* 0..3 */
7574 else
7575 bp->pfid = (bp->pf_num & 0x6); /* 0, 2, 4, 6 */
7576 } else {
7577 bp->common.chip_port_mode = CHIP_PORT_MODE_NONE; /* N/A */
7578 bp->pfid = bp->pf_num; /* 0..7 */
7579 }
7580
Dmitry Kravkov523224a2010-10-06 03:23:26 +00007581 /*
7582 * set base FW non-default (fast path) status block id, this value is
7583 * used to initialize the fw_sb_id saved on the fp/queue structure to
7584 * determine the id used by the FW.
7585 */
Dmitry Kravkovf2e08992010-10-06 03:28:26 +00007586 if (CHIP_IS_E1x(bp))
7587 bp->base_fw_ndsb = BP_PORT(bp) * FP_SB_MAX_E1x;
7588 else /* E2 */
7589 bp->base_fw_ndsb = BP_PORT(bp) * FP_SB_MAX_E2;
7590
7591 bp->link_params.chip_id = bp->common.chip_id;
7592 BNX2X_DEV_INFO("chip ID is 0x%x\n", id);
Dmitry Kravkov523224a2010-10-06 03:23:26 +00007593
Eilon Greenstein1c063282009-02-12 08:36:43 +00007594 val = (REG_RD(bp, 0x2874) & 0x55);
7595 if ((bp->common.chip_id & 0x1) ||
7596 (CHIP_IS_E1(bp) && val) || (CHIP_IS_E1H(bp) && (val == 0x55))) {
7597 bp->flags |= ONE_PORT_FLAG;
7598 BNX2X_DEV_INFO("single port device\n");
7599 }
7600
Eilon Greenstein34f80b02008-06-23 20:33:01 -07007601 val = REG_RD(bp, MCP_REG_MCPR_NVM_CFG4);
7602 bp->common.flash_size = (NVRAM_1MB_SIZE <<
7603 (val & MCPR_NVM_CFG4_FLASH_SIZE));
7604 BNX2X_DEV_INFO("flash_size 0x%x (%d)\n",
7605 bp->common.flash_size, bp->common.flash_size);
7606
7607 bp->common.shmem_base = REG_RD(bp, MISC_REG_SHARED_MEM_ADDR);
Dmitry Kravkovf2e08992010-10-06 03:28:26 +00007608 bp->common.shmem2_base = REG_RD(bp, (BP_PATH(bp) ?
7609 MISC_REG_GENERIC_CR_1 :
7610 MISC_REG_GENERIC_CR_0));
Eilon Greenstein34f80b02008-06-23 20:33:01 -07007611 bp->link_params.shmem_base = bp->common.shmem_base;
Yaniv Rosnera22f0782010-09-07 11:41:20 +00007612 bp->link_params.shmem2_base = bp->common.shmem2_base;
Eilon Greenstein2691d512009-08-12 08:22:08 +00007613 BNX2X_DEV_INFO("shmem offset 0x%x shmem2 offset 0x%x\n",
7614 bp->common.shmem_base, bp->common.shmem2_base);
Eilon Greenstein34f80b02008-06-23 20:33:01 -07007615
Dmitry Kravkovf2e08992010-10-06 03:28:26 +00007616 if (!bp->common.shmem_base) {
Eilon Greenstein34f80b02008-06-23 20:33:01 -07007617 BNX2X_DEV_INFO("MCP not active\n");
7618 bp->flags |= NO_MCP_FLAG;
7619 return;
7620 }
7621
7622 val = SHMEM_RD(bp, validity_map[BP_PORT(bp)]);
7623 if ((val & (SHR_MEM_VALIDITY_DEV_INFO | SHR_MEM_VALIDITY_MB))
7624 != (SHR_MEM_VALIDITY_DEV_INFO | SHR_MEM_VALIDITY_MB))
Dmitry Kravkovf2e08992010-10-06 03:28:26 +00007625 BNX2X_ERR("BAD MCP validity signature\n");
Eilon Greenstein34f80b02008-06-23 20:33:01 -07007626
7627 bp->common.hw_config = SHMEM_RD(bp, dev_info.shared_hw_config.config);
Eilon Greenstein35b19ba2009-02-12 08:36:47 +00007628 BNX2X_DEV_INFO("hw_config 0x%08x\n", bp->common.hw_config);
Eilon Greenstein34f80b02008-06-23 20:33:01 -07007629
7630 bp->link_params.hw_led_mode = ((bp->common.hw_config &
7631 SHARED_HW_CFG_LED_MODE_MASK) >>
7632 SHARED_HW_CFG_LED_MODE_SHIFT);
7633
Eilon Greensteinc2c8b032009-02-12 08:37:14 +00007634 bp->link_params.feature_config_flags = 0;
7635 val = SHMEM_RD(bp, dev_info.shared_feature_config.config);
7636 if (val & SHARED_FEAT_CFG_OVERRIDE_PREEMPHASIS_CFG_ENABLED)
7637 bp->link_params.feature_config_flags |=
7638 FEATURE_CONFIG_OVERRIDE_PREEMPHASIS_ENABLED;
7639 else
7640 bp->link_params.feature_config_flags &=
7641 ~FEATURE_CONFIG_OVERRIDE_PREEMPHASIS_ENABLED;
7642
Eilon Greenstein34f80b02008-06-23 20:33:01 -07007643 val = SHMEM_RD(bp, dev_info.bc_rev) >> 8;
7644 bp->common.bc_ver = val;
7645 BNX2X_DEV_INFO("bc_ver %X\n", val);
7646 if (val < BNX2X_BC_VER) {
7647		/* for now only warn;
7648		 * later we might need to enforce this */
Dmitry Kravkovf2e08992010-10-06 03:28:26 +00007649 BNX2X_ERR("This driver needs bc_ver %X but found %X, "
7650 "please upgrade BC\n", BNX2X_BC_VER, val);
Eilon Greenstein34f80b02008-06-23 20:33:01 -07007651 }
Eilon Greenstein4d295db2009-07-21 05:47:47 +00007652 bp->link_params.feature_config_flags |=
Yaniv Rosnera22f0782010-09-07 11:41:20 +00007653 (val >= REQ_BC_VER_4_VRFY_FIRST_PHY_OPT_MDL) ?
Eilon Greenstein4d295db2009-07-21 05:47:47 +00007654 FEATURE_CONFIG_BC_SUPPORTS_OPT_MDL_VRFY : 0;
Yaniv Rosnera22f0782010-09-07 11:41:20 +00007655 bp->link_params.feature_config_flags |=
7656 (val >= REQ_BC_VER_4_VRFY_SPECIFIC_PHY_OPT_MDL) ?
7657 FEATURE_CONFIG_BC_SUPPORTS_DUAL_PHY_OPT_MDL_VRFY : 0;
Eilon Greenstein72ce58c2008-08-13 15:52:46 -07007658
7659 if (BP_E1HVN(bp) == 0) {
7660 pci_read_config_word(bp->pdev, bp->pm_cap + PCI_PM_PMC, &pmc);
7661 bp->flags |= (pmc & PCI_PM_CAP_PME_D3cold) ? 0 : NO_WOL_FLAG;
7662 } else {
7663 /* no WOL capability for E1HVN != 0 */
7664 bp->flags |= NO_WOL_FLAG;
7665 }
7666 BNX2X_DEV_INFO("%sWoL capable\n",
Eilon Greensteinf5372252009-02-12 08:38:30 +00007667 (bp->flags & NO_WOL_FLAG) ? "not " : "");
Eilon Greenstein34f80b02008-06-23 20:33:01 -07007668
7669 val = SHMEM_RD(bp, dev_info.shared_hw_config.part_num);
7670 val2 = SHMEM_RD(bp, dev_info.shared_hw_config.part_num[4]);
7671 val3 = SHMEM_RD(bp, dev_info.shared_hw_config.part_num[8]);
7672 val4 = SHMEM_RD(bp, dev_info.shared_hw_config.part_num[12]);
7673
Vladislav Zolotarovcdaa7cb2010-04-19 01:13:57 +00007674 dev_info(&bp->pdev->dev, "part number %X-%X-%X-%X\n",
7675 val, val2, val3, val4);
Eilon Greenstein34f80b02008-06-23 20:33:01 -07007676}
7677
Dmitry Kravkovf2e08992010-10-06 03:28:26 +00007678#define IGU_FID(val) GET_FIELD((val), IGU_REG_MAPPING_MEMORY_FID)
7679#define IGU_VEC(val) GET_FIELD((val), IGU_REG_MAPPING_MEMORY_VECTOR)
7680
7681static void __devinit bnx2x_get_igu_cam_info(struct bnx2x *bp)
7682{
7683 int pfid = BP_FUNC(bp);
7684 int vn = BP_E1HVN(bp);
7685 int igu_sb_id;
7686 u32 val;
7687 u8 fid;
7688
7689 bp->igu_base_sb = 0xff;
7690 bp->igu_sb_cnt = 0;
7691 if (CHIP_INT_MODE_IS_BC(bp)) {
7692 bp->igu_sb_cnt = min_t(u8, FP_SB_MAX_E1x,
7693 bp->l2_cid_count);
7694
7695 bp->igu_base_sb = (CHIP_MODE_IS_4_PORT(bp) ? pfid : vn) *
7696 FP_SB_MAX_E1x;
7697
7698 bp->igu_dsb_id = E1HVN_MAX * FP_SB_MAX_E1x +
7699 (CHIP_MODE_IS_4_PORT(bp) ? pfid : vn);
7700
7701 return;
7702 }
7703
7704 /* IGU in normal mode - read CAM */
7705 for (igu_sb_id = 0; igu_sb_id < IGU_REG_MAPPING_MEMORY_SIZE;
7706 igu_sb_id++) {
7707 val = REG_RD(bp, IGU_REG_MAPPING_MEMORY + igu_sb_id * 4);
7708 if (!(val & IGU_REG_MAPPING_MEMORY_VALID))
7709 continue;
7710 fid = IGU_FID(val);
7711 if ((fid & IGU_FID_ENCODE_IS_PF)) {
7712 if ((fid & IGU_FID_PF_NUM_MASK) != pfid)
7713 continue;
7714 if (IGU_VEC(val) == 0)
7715 /* default status block */
7716 bp->igu_dsb_id = igu_sb_id;
7717 else {
7718 if (bp->igu_base_sb == 0xff)
7719 bp->igu_base_sb = igu_sb_id;
7720 bp->igu_sb_cnt++;
7721 }
7722 }
7723 }
7724 bp->igu_sb_cnt = min_t(u8, bp->igu_sb_cnt, bp->l2_cid_count);
7725 if (bp->igu_sb_cnt == 0)
7726 BNX2X_ERR("CAM configuration error\n");
7727}
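
/* Editor's note (hedged decode of the CAM scan above): each IGU CAM word
 * carries a VALID bit, an FID field (an IS_PF flag plus a PF number or a
 * VF id) and a vector number; for a PF, vector 0 is its default status
 * block and every further valid vector contributes one fastpath status
 * block counted in igu_sb_cnt.
 */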
7728
Eilon Greenstein34f80b02008-06-23 20:33:01 -07007729static void __devinit bnx2x_link_settings_supported(struct bnx2x *bp,
7730 u32 switch_cfg)
7731{
Yaniv Rosnera22f0782010-09-07 11:41:20 +00007732 int cfg_size = 0, idx, port = BP_PORT(bp);
7733
7734 /* Aggregation of supported attributes of all external phys */
7735 bp->port.supported[0] = 0;
7736 bp->port.supported[1] = 0;
Yaniv Rosnerb7737c92010-09-07 11:40:54 +00007737 switch (bp->link_params.num_phys) {
7738 case 1:
Yaniv Rosnera22f0782010-09-07 11:41:20 +00007739 bp->port.supported[0] = bp->link_params.phy[INT_PHY].supported;
7740 cfg_size = 1;
7741 break;
Yaniv Rosnerb7737c92010-09-07 11:40:54 +00007742 case 2:
Yaniv Rosnera22f0782010-09-07 11:41:20 +00007743 bp->port.supported[0] = bp->link_params.phy[EXT_PHY1].supported;
7744 cfg_size = 1;
7745 break;
7746 case 3:
7747 if (bp->link_params.multi_phy_config &
7748 PORT_HW_CFG_PHY_SWAPPED_ENABLED) {
7749 bp->port.supported[1] =
7750 bp->link_params.phy[EXT_PHY1].supported;
7751 bp->port.supported[0] =
7752 bp->link_params.phy[EXT_PHY2].supported;
7753 } else {
7754 bp->port.supported[0] =
7755 bp->link_params.phy[EXT_PHY1].supported;
7756 bp->port.supported[1] =
7757 bp->link_params.phy[EXT_PHY2].supported;
7758 }
7759 cfg_size = 2;
7760 break;
Yaniv Rosnerb7737c92010-09-07 11:40:54 +00007761 }
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02007762
Yaniv Rosnera22f0782010-09-07 11:41:20 +00007763 if (!(bp->port.supported[0] || bp->port.supported[1])) {
Yaniv Rosnerb7737c92010-09-07 11:40:54 +00007764		BNX2X_ERR("NVRAM config error. BAD phy config. "
Yaniv Rosnera22f0782010-09-07 11:41:20 +00007765 "PHY1 config 0x%x, PHY2 config 0x%x\n",
Yaniv Rosnerb7737c92010-09-07 11:40:54 +00007766 SHMEM_RD(bp,
Yaniv Rosnera22f0782010-09-07 11:41:20 +00007767 dev_info.port_hw_config[port].external_phy_config),
7768 SHMEM_RD(bp,
7769 dev_info.port_hw_config[port].external_phy_config2));
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02007770 return;
7771 }
7772
Yaniv Rosnerb7737c92010-09-07 11:40:54 +00007773 switch (switch_cfg) {
7774 case SWITCH_CFG_1G:
Eilon Greenstein34f80b02008-06-23 20:33:01 -07007775 bp->port.phy_addr = REG_RD(bp, NIG_REG_SERDES0_CTRL_PHY_ADDR +
7776 port*0x10);
7777 BNX2X_DEV_INFO("phy_addr 0x%x\n", bp->port.phy_addr);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02007778 break;
7779
7780 case SWITCH_CFG_10G:
Eilon Greenstein34f80b02008-06-23 20:33:01 -07007781 bp->port.phy_addr = REG_RD(bp, NIG_REG_XGXS0_CTRL_PHY_ADDR +
7782 port*0x18);
7783 BNX2X_DEV_INFO("phy_addr 0x%x\n", bp->port.phy_addr);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02007784
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02007785 break;
7786
7787 default:
7788 BNX2X_ERR("BAD switch_cfg link_config 0x%x\n",
Yaniv Rosnera22f0782010-09-07 11:41:20 +00007789 bp->port.link_config[0]);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02007790 return;
7791 }
Yaniv Rosnera22f0782010-09-07 11:41:20 +00007792 /* mask what we support according to speed_cap_mask per configuration */
7793 for (idx = 0; idx < cfg_size; idx++) {
7794 if (!(bp->link_params.speed_cap_mask[idx] &
Yaniv Rosnerc18487e2008-06-23 20:27:52 -07007795 PORT_HW_CFG_SPEED_CAPABILITY_D0_10M_HALF))
Yaniv Rosnera22f0782010-09-07 11:41:20 +00007796 bp->port.supported[idx] &= ~SUPPORTED_10baseT_Half;
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02007797
Yaniv Rosnera22f0782010-09-07 11:41:20 +00007798 if (!(bp->link_params.speed_cap_mask[idx] &
Yaniv Rosnerc18487e2008-06-23 20:27:52 -07007799 PORT_HW_CFG_SPEED_CAPABILITY_D0_10M_FULL))
Yaniv Rosnera22f0782010-09-07 11:41:20 +00007800 bp->port.supported[idx] &= ~SUPPORTED_10baseT_Full;
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02007801
Yaniv Rosnera22f0782010-09-07 11:41:20 +00007802 if (!(bp->link_params.speed_cap_mask[idx] &
Yaniv Rosnerc18487e2008-06-23 20:27:52 -07007803 PORT_HW_CFG_SPEED_CAPABILITY_D0_100M_HALF))
Yaniv Rosnera22f0782010-09-07 11:41:20 +00007804 bp->port.supported[idx] &= ~SUPPORTED_100baseT_Half;
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02007805
Yaniv Rosnera22f0782010-09-07 11:41:20 +00007806 if (!(bp->link_params.speed_cap_mask[idx] &
Yaniv Rosnerc18487e2008-06-23 20:27:52 -07007807 PORT_HW_CFG_SPEED_CAPABILITY_D0_100M_FULL))
Yaniv Rosnera22f0782010-09-07 11:41:20 +00007808 bp->port.supported[idx] &= ~SUPPORTED_100baseT_Full;
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02007809
Yaniv Rosnera22f0782010-09-07 11:41:20 +00007810 if (!(bp->link_params.speed_cap_mask[idx] &
Yaniv Rosnerc18487e2008-06-23 20:27:52 -07007811 PORT_HW_CFG_SPEED_CAPABILITY_D0_1G))
Yaniv Rosnera22f0782010-09-07 11:41:20 +00007812 bp->port.supported[idx] &= ~(SUPPORTED_1000baseT_Half |
Eilon Greenstein34f80b02008-06-23 20:33:01 -07007813 SUPPORTED_1000baseT_Full);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02007814
Yaniv Rosnera22f0782010-09-07 11:41:20 +00007815 if (!(bp->link_params.speed_cap_mask[idx] &
Yaniv Rosnerc18487e2008-06-23 20:27:52 -07007816 PORT_HW_CFG_SPEED_CAPABILITY_D0_2_5G))
Yaniv Rosnera22f0782010-09-07 11:41:20 +00007817 bp->port.supported[idx] &= ~SUPPORTED_2500baseX_Full;
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02007818
Yaniv Rosnera22f0782010-09-07 11:41:20 +00007819 if (!(bp->link_params.speed_cap_mask[idx] &
Yaniv Rosnerc18487e2008-06-23 20:27:52 -07007820 PORT_HW_CFG_SPEED_CAPABILITY_D0_10G))
Yaniv Rosnera22f0782010-09-07 11:41:20 +00007821 bp->port.supported[idx] &= ~SUPPORTED_10000baseT_Full;
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02007822
Yaniv Rosnera22f0782010-09-07 11:41:20 +00007823 }
7824
7825 BNX2X_DEV_INFO("supported 0x%x 0x%x\n", bp->port.supported[0],
7826 bp->port.supported[1]);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02007827}
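
/* Editor's worked example (hedged): if speed_cap_mask[0] carries only
 * PORT_HW_CFG_SPEED_CAPABILITY_D0_1G and ..._D0_10G, the masking loop
 * above strips everything from bp->port.supported[0] except
 * SUPPORTED_1000baseT_Half/Full and SUPPORTED_10000baseT_Full (plus bits
 * the loop never masks, such as SUPPORTED_Autoneg).
 */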
7828
Eilon Greenstein34f80b02008-06-23 20:33:01 -07007829static void __devinit bnx2x_link_settings_requested(struct bnx2x *bp)
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02007830{
Yaniv Rosnera22f0782010-09-07 11:41:20 +00007831 u32 link_config, idx, cfg_size = 0;
7832 bp->port.advertising[0] = 0;
7833 bp->port.advertising[1] = 0;
7834 switch (bp->link_params.num_phys) {
7835 case 1:
7836 case 2:
7837 cfg_size = 1;
7838 break;
7839 case 3:
7840 cfg_size = 2;
7841 break;
7842 }
7843 for (idx = 0; idx < cfg_size; idx++) {
7844 bp->link_params.req_duplex[idx] = DUPLEX_FULL;
7845 link_config = bp->port.link_config[idx];
7846 switch (link_config & PORT_FEATURE_LINK_SPEED_MASK) {
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02007847 case PORT_FEATURE_LINK_SPEED_AUTO:
Yaniv Rosnera22f0782010-09-07 11:41:20 +00007848 if (bp->port.supported[idx] & SUPPORTED_Autoneg) {
7849 bp->link_params.req_line_speed[idx] =
7850 SPEED_AUTO_NEG;
7851 bp->port.advertising[idx] |=
7852 bp->port.supported[idx];
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02007853 } else {
Yaniv Rosnera22f0782010-09-07 11:41:20 +00007854 /* force 10G, no AN */
7855 bp->link_params.req_line_speed[idx] =
7856 SPEED_10000;
7857 bp->port.advertising[idx] |=
7858 (ADVERTISED_10000baseT_Full |
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02007859 ADVERTISED_FIBRE);
Yaniv Rosnera22f0782010-09-07 11:41:20 +00007860 continue;
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02007861 }
7862 break;
7863
7864 case PORT_FEATURE_LINK_SPEED_10M_FULL:
Yaniv Rosnera22f0782010-09-07 11:41:20 +00007865 if (bp->port.supported[idx] & SUPPORTED_10baseT_Full) {
7866 bp->link_params.req_line_speed[idx] =
7867 SPEED_10;
7868 bp->port.advertising[idx] |=
7869 (ADVERTISED_10baseT_Full |
Eilon Greenstein34f80b02008-06-23 20:33:01 -07007870 ADVERTISED_TP);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02007871 } else {
Vladislav Zolotarovcdaa7cb2010-04-19 01:13:57 +00007872 BNX2X_ERROR("NVRAM config error. "
7873 "Invalid link_config 0x%x"
7874 " speed_cap_mask 0x%x\n",
Yaniv Rosnera22f0782010-09-07 11:41:20 +00007875 link_config,
7876 bp->link_params.speed_cap_mask[idx]);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02007877 return;
7878 }
7879 break;
7880
7881 case PORT_FEATURE_LINK_SPEED_10M_HALF:
Yaniv Rosnera22f0782010-09-07 11:41:20 +00007882 if (bp->port.supported[idx] & SUPPORTED_10baseT_Half) {
7883 bp->link_params.req_line_speed[idx] =
7884 SPEED_10;
7885 bp->link_params.req_duplex[idx] =
7886 DUPLEX_HALF;
7887 bp->port.advertising[idx] |=
7888 (ADVERTISED_10baseT_Half |
Eilon Greenstein34f80b02008-06-23 20:33:01 -07007889 ADVERTISED_TP);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02007890 } else {
Vladislav Zolotarovcdaa7cb2010-04-19 01:13:57 +00007891 BNX2X_ERROR("NVRAM config error. "
7892 "Invalid link_config 0x%x"
7893 " speed_cap_mask 0x%x\n",
Yaniv Rosnera22f0782010-09-07 11:41:20 +00007894 link_config,
7895 bp->link_params.speed_cap_mask[idx]);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02007896 return;
7897 }
7898 break;
7899
7900 case PORT_FEATURE_LINK_SPEED_100M_FULL:
Yaniv Rosnera22f0782010-09-07 11:41:20 +00007901 if (bp->port.supported[idx] & SUPPORTED_100baseT_Full) {
7902 bp->link_params.req_line_speed[idx] =
7903 SPEED_100;
7904 bp->port.advertising[idx] |=
7905 (ADVERTISED_100baseT_Full |
Eilon Greenstein34f80b02008-06-23 20:33:01 -07007906 ADVERTISED_TP);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02007907 } else {
Vladislav Zolotarovcdaa7cb2010-04-19 01:13:57 +00007908 BNX2X_ERROR("NVRAM config error. "
7909 "Invalid link_config 0x%x"
7910 " speed_cap_mask 0x%x\n",
Yaniv Rosnera22f0782010-09-07 11:41:20 +00007911 link_config,
7912 bp->link_params.speed_cap_mask[idx]);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02007913 return;
7914 }
7915 break;
7916
7917 case PORT_FEATURE_LINK_SPEED_100M_HALF:
Yaniv Rosnera22f0782010-09-07 11:41:20 +00007918 if (bp->port.supported[idx] & SUPPORTED_100baseT_Half) {
7919 bp->link_params.req_line_speed[idx] = SPEED_100;
7920 bp->link_params.req_duplex[idx] = DUPLEX_HALF;
7921 bp->port.advertising[idx] |=
7922 (ADVERTISED_100baseT_Half |
Eilon Greenstein34f80b02008-06-23 20:33:01 -07007923 ADVERTISED_TP);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02007924 } else {
Vladislav Zolotarovcdaa7cb2010-04-19 01:13:57 +00007925 BNX2X_ERROR("NVRAM config error. "
7926 "Invalid link_config 0x%x"
7927 " speed_cap_mask 0x%x\n",
Yaniv Rosnera22f0782010-09-07 11:41:20 +00007928 link_config,
7929 bp->link_params.speed_cap_mask[idx]);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02007930 return;
7931 }
7932 break;
7933
7934 case PORT_FEATURE_LINK_SPEED_1G:
Yaniv Rosnera22f0782010-09-07 11:41:20 +00007935 if (bp->port.supported[idx] &
7936 SUPPORTED_1000baseT_Full) {
7937 bp->link_params.req_line_speed[idx] =
7938 SPEED_1000;
7939 bp->port.advertising[idx] |=
7940 (ADVERTISED_1000baseT_Full |
Eilon Greenstein34f80b02008-06-23 20:33:01 -07007941 ADVERTISED_TP);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02007942 } else {
Vladislav Zolotarovcdaa7cb2010-04-19 01:13:57 +00007943 BNX2X_ERROR("NVRAM config error. "
7944 "Invalid link_config 0x%x"
7945 " speed_cap_mask 0x%x\n",
Yaniv Rosnera22f0782010-09-07 11:41:20 +00007946 link_config,
7947 bp->link_params.speed_cap_mask[idx]);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02007948 return;
7949 }
7950 break;
7951
7952 case PORT_FEATURE_LINK_SPEED_2_5G:
Yaniv Rosnera22f0782010-09-07 11:41:20 +00007953 if (bp->port.supported[idx] &
7954 SUPPORTED_2500baseX_Full) {
7955 bp->link_params.req_line_speed[idx] =
7956 SPEED_2500;
7957 bp->port.advertising[idx] |=
7958 (ADVERTISED_2500baseX_Full |
Eilon Greenstein34f80b02008-06-23 20:33:01 -07007959 ADVERTISED_TP);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02007960 } else {
Vladislav Zolotarovcdaa7cb2010-04-19 01:13:57 +00007961 BNX2X_ERROR("NVRAM config error. "
7962 "Invalid link_config 0x%x"
7963 " speed_cap_mask 0x%x\n",
Yaniv Rosnera22f0782010-09-07 11:41:20 +00007964 link_config,
7965 bp->link_params.speed_cap_mask[idx]);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02007966 return;
7967 }
7968 break;
7969
7970 case PORT_FEATURE_LINK_SPEED_10G_CX4:
7971 case PORT_FEATURE_LINK_SPEED_10G_KX4:
7972 case PORT_FEATURE_LINK_SPEED_10G_KR:
Yaniv Rosnera22f0782010-09-07 11:41:20 +00007973 if (bp->port.supported[idx] &
7974 SUPPORTED_10000baseT_Full) {
7975 bp->link_params.req_line_speed[idx] =
7976 SPEED_10000;
7977 bp->port.advertising[idx] |=
7978 (ADVERTISED_10000baseT_Full |
Eilon Greenstein34f80b02008-06-23 20:33:01 -07007979 ADVERTISED_FIBRE);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02007980 } else {
Vladislav Zolotarovcdaa7cb2010-04-19 01:13:57 +00007981 BNX2X_ERROR("NVRAM config error. "
7982 "Invalid link_config 0x%x"
7983 " speed_cap_mask 0x%x\n",
Yaniv Rosnera22f0782010-09-07 11:41:20 +00007984 link_config,
7985 bp->link_params.speed_cap_mask[idx]);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02007986 return;
7987 }
7988 break;
7989
7990 default:
Vladislav Zolotarovcdaa7cb2010-04-19 01:13:57 +00007991 BNX2X_ERROR("NVRAM config error. "
7992 "BAD link speed link_config 0x%x\n",
Yaniv Rosnera22f0782010-09-07 11:41:20 +00007993 link_config);
7994 bp->link_params.req_line_speed[idx] = SPEED_AUTO_NEG;
7995 bp->port.advertising[idx] = bp->port.supported[idx];
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02007996 break;
7997 }
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02007998
Yaniv Rosnera22f0782010-09-07 11:41:20 +00007999 bp->link_params.req_flow_ctrl[idx] = (link_config &
Eilon Greenstein34f80b02008-06-23 20:33:01 -07008000 PORT_FEATURE_FLOW_CONTROL_MASK);
Yaniv Rosnera22f0782010-09-07 11:41:20 +00008001 if ((bp->link_params.req_flow_ctrl[idx] ==
8002 BNX2X_FLOW_CTRL_AUTO) &&
8003 !(bp->port.supported[idx] & SUPPORTED_Autoneg)) {
8004 bp->link_params.req_flow_ctrl[idx] =
8005 BNX2X_FLOW_CTRL_NONE;
8006 }
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02008007
Yaniv Rosnera22f0782010-09-07 11:41:20 +00008008 BNX2X_DEV_INFO("req_line_speed %d req_duplex %d req_flow_ctrl"
8009 " 0x%x advertising 0x%x\n",
8010 bp->link_params.req_line_speed[idx],
8011 bp->link_params.req_duplex[idx],
8012 bp->link_params.req_flow_ctrl[idx],
8013 bp->port.advertising[idx]);
8014 }
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02008015}
8016
Michael Chane665bfd2009-10-10 13:46:54 +00008017static void __devinit bnx2x_set_mac_buf(u8 *mac_buf, u32 mac_lo, u16 mac_hi)
8018{
8019 mac_hi = cpu_to_be16(mac_hi);
8020 mac_lo = cpu_to_be32(mac_lo);
8021 memcpy(mac_buf, &mac_hi, sizeof(mac_hi));
8022 memcpy(mac_buf + sizeof(mac_hi), &mac_lo, sizeof(mac_lo));
8023}
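
/* Editor's worked example (hedged, made-up values): with mac_hi = 0x0010
 * and mac_lo = 0x18001020, bnx2x_set_mac_buf() stores both halves
 * big-endian and yields mac_buf = 00:10:18:00:10:20 - the two high bytes
 * come from mac_hi, the remaining four from mac_lo.
 */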
8024
Eilon Greenstein34f80b02008-06-23 20:33:01 -07008025static void __devinit bnx2x_get_port_hwinfo(struct bnx2x *bp)
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02008026{
Eilon Greenstein34f80b02008-06-23 20:33:01 -07008027 int port = BP_PORT(bp);
8028 u32 val, val2;
Eilon Greenstein589abe32009-02-12 08:36:55 +00008029 u32 config;
Yaniv Rosnerb7737c92010-09-07 11:40:54 +00008030	u32 ext_phy_type, ext_phy_config;
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02008031
Yaniv Rosnerc18487e2008-06-23 20:27:52 -07008032 bp->link_params.bp = bp;
Eilon Greenstein34f80b02008-06-23 20:33:01 -07008033 bp->link_params.port = port;
Yaniv Rosnerc18487e2008-06-23 20:27:52 -07008034
Yaniv Rosnerc18487e2008-06-23 20:27:52 -07008035 bp->link_params.lane_config =
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02008036 SHMEM_RD(bp, dev_info.port_hw_config[port].lane_config);
Eilon Greenstein4d295db2009-07-21 05:47:47 +00008037
Yaniv Rosnera22f0782010-09-07 11:41:20 +00008038 bp->link_params.speed_cap_mask[0] =
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02008039 SHMEM_RD(bp,
8040 dev_info.port_hw_config[port].speed_capability_mask);
Yaniv Rosnera22f0782010-09-07 11:41:20 +00008041 bp->link_params.speed_cap_mask[1] =
8042 SHMEM_RD(bp,
8043 dev_info.port_hw_config[port].speed_capability_mask2);
8044 bp->port.link_config[0] =
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02008045 SHMEM_RD(bp, dev_info.port_feature_config[port].link_config);
8046
Yaniv Rosnera22f0782010-09-07 11:41:20 +00008047 bp->port.link_config[1] =
8048 SHMEM_RD(bp, dev_info.port_feature_config[port].link_config2);
Eilon Greensteinc2c8b032009-02-12 08:37:14 +00008049
Yaniv Rosnera22f0782010-09-07 11:41:20 +00008050 bp->link_params.multi_phy_config =
8051 SHMEM_RD(bp, dev_info.port_hw_config[port].multi_phy_config);
Eilon Greenstein3ce2c3f2009-02-12 08:37:52 +00008052 /* If the device is capable of WoL, set the default state according
8053 * to the HW
8054 */
Eilon Greenstein4d295db2009-07-21 05:47:47 +00008055 config = SHMEM_RD(bp, dev_info.port_feature_config[port].config);
Eilon Greenstein3ce2c3f2009-02-12 08:37:52 +00008056 bp->wol = (!(bp->flags & NO_WOL_FLAG) &&
8057 (config & PORT_FEATURE_WOL_ENABLED));
8058
Yaniv Rosnerb7737c92010-09-07 11:40:54 +00008059 BNX2X_DEV_INFO("lane_config 0x%08x"
Yaniv Rosnera22f0782010-09-07 11:41:20 +00008060 "speed_cap_mask0 0x%08x link_config0 0x%08x\n",
Yaniv Rosnerc18487e2008-06-23 20:27:52 -07008061 bp->link_params.lane_config,
Yaniv Rosnera22f0782010-09-07 11:41:20 +00008062 bp->link_params.speed_cap_mask[0],
8063 bp->port.link_config[0]);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02008064
Yaniv Rosnera22f0782010-09-07 11:41:20 +00008065 bp->link_params.switch_cfg = (bp->port.link_config[0] &
Eilon Greenstein4d295db2009-07-21 05:47:47 +00008066 PORT_FEATURE_CONNECTED_SWITCH_MASK);
Yaniv Rosnerb7737c92010-09-07 11:40:54 +00008067 bnx2x_phy_probe(&bp->link_params);
Yaniv Rosnerc18487e2008-06-23 20:27:52 -07008068 bnx2x_link_settings_supported(bp, bp->link_params.switch_cfg);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02008069
8070 bnx2x_link_settings_requested(bp);
8071
Eilon Greenstein01cd4522009-08-12 08:23:08 +00008072 /*
8073 * If connected directly, work with the internal PHY, otherwise, work
8074 * with the external PHY
8075 */
Yaniv Rosnerb7737c92010-09-07 11:40:54 +00008076 ext_phy_config =
8077 SHMEM_RD(bp,
8078 dev_info.port_hw_config[port].external_phy_config);
8079 ext_phy_type = XGXS_EXT_PHY_TYPE(ext_phy_config);
Eilon Greenstein01cd4522009-08-12 08:23:08 +00008080 if (ext_phy_type == PORT_HW_CFG_XGXS_EXT_PHY_TYPE_DIRECT)
Yaniv Rosnerb7737c92010-09-07 11:40:54 +00008081 bp->mdio.prtad = bp->port.phy_addr;
Eilon Greenstein01cd4522009-08-12 08:23:08 +00008082
8083 else if ((ext_phy_type != PORT_HW_CFG_XGXS_EXT_PHY_TYPE_FAILURE) &&
8084 (ext_phy_type != PORT_HW_CFG_XGXS_EXT_PHY_TYPE_NOT_CONN))
8085 bp->mdio.prtad =
Yaniv Rosnerb7737c92010-09-07 11:40:54 +00008086 XGXS_EXT_PHY_ADDR(ext_phy_config);
Eilon Greenstein01cd4522009-08-12 08:23:08 +00008087
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02008088 val2 = SHMEM_RD(bp, dev_info.port_hw_config[port].mac_upper);
8089 val = SHMEM_RD(bp, dev_info.port_hw_config[port].mac_lower);
Michael Chane665bfd2009-10-10 13:46:54 +00008090 bnx2x_set_mac_buf(bp->dev->dev_addr, val, val2);
Yaniv Rosnerc18487e2008-06-23 20:27:52 -07008091 memcpy(bp->link_params.mac_addr, bp->dev->dev_addr, ETH_ALEN);
8092 memcpy(bp->dev->perm_addr, bp->dev->dev_addr, ETH_ALEN);
Michael Chan37b091b2009-10-10 13:46:55 +00008093
8094#ifdef BCM_CNIC
8095 val2 = SHMEM_RD(bp, dev_info.port_hw_config[port].iscsi_mac_upper);
8096 val = SHMEM_RD(bp, dev_info.port_hw_config[port].iscsi_mac_lower);
8097 bnx2x_set_mac_buf(bp->iscsi_mac, val, val2);
8098#endif
Eilon Greenstein34f80b02008-06-23 20:33:01 -07008099}
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02008100
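/*
 * Gather per-function HW info: HC vs. IGU interrupt-block geometry, the
 * multi-function (MF) configuration and outer-VLAN tag from shared
 * memory, the FW mailbox sequence number and the function MAC address.
 */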
static int __devinit bnx2x_get_hwinfo(struct bnx2x *bp)
{
	int func = BP_ABS_FUNC(bp);
	int vn;
	u32 val, val2;
	int rc = 0;

	bnx2x_get_common_hwinfo(bp);

	if (CHIP_IS_E1x(bp)) {
		bp->common.int_block = INT_BLOCK_HC;

		bp->igu_dsb_id = DEF_SB_IGU_ID;
		bp->igu_base_sb = 0;
		bp->igu_sb_cnt = min_t(u8, FP_SB_MAX_E1x, bp->l2_cid_count);
	} else {
		bp->common.int_block = INT_BLOCK_IGU;
		val = REG_RD(bp, IGU_REG_BLOCK_CONFIGURATION);
		if (val & IGU_BLOCK_CONFIGURATION_REG_BACKWARD_COMP_EN) {
			DP(NETIF_MSG_PROBE, "IGU Backward Compatible Mode\n");
			bp->common.int_block |= INT_BLOCK_MODE_BW_COMP;
		} else
			DP(NETIF_MSG_PROBE, "IGU Normal Mode\n");

		bnx2x_get_igu_cam_info(bp);
	}
	DP(NETIF_MSG_PROBE, "igu_dsb_id %d igu_base_sb %d igu_sb_cnt %d\n",
	   bp->igu_dsb_id, bp->igu_base_sb, bp->igu_sb_cnt);

	/*
	 * Initialize MF configuration
	 */

	bp->mf_ov = 0;
	bp->mf_mode = 0;
	vn = BP_E1HVN(bp);
	if (!CHIP_IS_E1(bp) && !BP_NOMCP(bp)) {
		if (SHMEM2_HAS(bp, mf_cfg_addr))
			bp->common.mf_cfg_base = SHMEM2_RD(bp, mf_cfg_addr);
		else
			bp->common.mf_cfg_base = bp->common.shmem_base +
				offsetof(struct shmem_region, func_mb) +
				E1H_FUNC_MAX * sizeof(struct drv_func_mb);
		bp->mf_config[vn] =
			MF_CFG_RD(bp, func_mf_config[func].config);

		val = (MF_CFG_RD(bp, func_mf_config[FUNC_0].e1hov_tag) &
		       FUNC_MF_CFG_E1HOV_TAG_MASK);
		if (val != FUNC_MF_CFG_E1HOV_TAG_DEFAULT)
			bp->mf_mode = 1;
		BNX2X_DEV_INFO("%s function mode\n",
			       IS_MF(bp) ? "multi" : "single");

		if (IS_MF(bp)) {
			val = (MF_CFG_RD(bp, func_mf_config[func].
					 e1hov_tag) &
			       FUNC_MF_CFG_E1HOV_TAG_MASK);
			if (val != FUNC_MF_CFG_E1HOV_TAG_DEFAULT) {
				bp->mf_ov = val;
				BNX2X_DEV_INFO("MF OV for func %d is %d "
					       "(0x%04x)\n",
					       func, bp->mf_ov, bp->mf_ov);
			} else {
				BNX2X_ERROR("No valid MF OV for func %d,"
					    " aborting\n", func);
				rc = -EPERM;
			}
		} else {
			if (BP_VN(bp)) {
				BNX2X_ERROR("VN %d in single function mode,"
					    " aborting\n", BP_E1HVN(bp));
				rc = -EPERM;
			}
		}
	}

	/* adjust igu_sb_cnt to MF for E1x */
	if (CHIP_IS_E1x(bp) && IS_MF(bp))
		bp->igu_sb_cnt /= E1HVN_MAX;

	/*
	 * adjust E2 sb count: to be removed when FW will support
	 * more than 16 L2 clients
	 */
#define MAX_L2_CLIENTS 16
	if (CHIP_IS_E2(bp))
		bp->igu_sb_cnt = min_t(u8, bp->igu_sb_cnt,
				       MAX_L2_CLIENTS / (IS_MF(bp) ? 4 : 1));

	if (!BP_NOMCP(bp)) {
		bnx2x_get_port_hwinfo(bp);

		bp->fw_seq =
		       (SHMEM_RD(bp, func_mb[BP_FW_MB_IDX(bp)].drv_mb_header) &
			DRV_MSG_SEQ_NUMBER_MASK);
		BNX2X_DEV_INFO("fw_seq 0x%08x\n", bp->fw_seq);
	}

	if (IS_MF(bp)) {
		val2 = MF_CFG_RD(bp, func_mf_config[func].mac_upper);
		val = MF_CFG_RD(bp, func_mf_config[func].mac_lower);
		if ((val2 != FUNC_MF_CFG_UPPERMAC_DEFAULT) &&
		    (val != FUNC_MF_CFG_LOWERMAC_DEFAULT)) {
			bp->dev->dev_addr[0] = (u8)(val2 >> 8 & 0xff);
			bp->dev->dev_addr[1] = (u8)(val2 & 0xff);
			bp->dev->dev_addr[2] = (u8)(val >> 24 & 0xff);
			bp->dev->dev_addr[3] = (u8)(val >> 16 & 0xff);
			bp->dev->dev_addr[4] = (u8)(val >> 8 & 0xff);
			bp->dev->dev_addr[5] = (u8)(val & 0xff);
			memcpy(bp->link_params.mac_addr, bp->dev->dev_addr,
			       ETH_ALEN);
			memcpy(bp->dev->perm_addr, bp->dev->dev_addr,
			       ETH_ALEN);
		}

		return rc;
	}

	if (BP_NOMCP(bp)) {
		/* only supposed to happen on emulation/FPGA */
		BNX2X_ERROR("warning: random MAC workaround active\n");
		random_ether_addr(bp->dev->dev_addr);
		memcpy(bp->dev->perm_addr, bp->dev->dev_addr, ETH_ALEN);
	}

	return rc;
}

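/*
 * Pull an OEM version string out of the PCI VPD read-only section into
 * bp->fw_ver. Only boards whose MFR_ID keyword matches the Dell vendor
 * ID carry the VENDOR0 keyword read here; anything else leaves
 * bp->fw_ver zeroed.
 */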
static void __devinit bnx2x_read_fwinfo(struct bnx2x *bp)
{
	int cnt, i, block_end, rodi;
	char vpd_data[BNX2X_VPD_LEN+1];
	char str_id_reg[VENDOR_ID_LEN+1];
	char str_id_cap[VENDOR_ID_LEN+1];
	u8 len;

	cnt = pci_read_vpd(bp->pdev, 0, BNX2X_VPD_LEN, vpd_data);
	memset(bp->fw_ver, 0, sizeof(bp->fw_ver));

	if (cnt < BNX2X_VPD_LEN)
		goto out_not_found;

	i = pci_vpd_find_tag(vpd_data, 0, BNX2X_VPD_LEN,
			     PCI_VPD_LRDT_RO_DATA);
	if (i < 0)
		goto out_not_found;

	block_end = i + PCI_VPD_LRDT_TAG_SIZE +
		    pci_vpd_lrdt_size(&vpd_data[i]);

	i += PCI_VPD_LRDT_TAG_SIZE;

	if (block_end > BNX2X_VPD_LEN)
		goto out_not_found;

	rodi = pci_vpd_find_info_keyword(vpd_data, i, block_end,
					 PCI_VPD_RO_KEYWORD_MFR_ID);
	if (rodi < 0)
		goto out_not_found;

	len = pci_vpd_info_field_size(&vpd_data[rodi]);

	if (len != VENDOR_ID_LEN)
		goto out_not_found;

	rodi += PCI_VPD_INFO_FLD_HDR_SIZE;

	/* vendor specific info */
	snprintf(str_id_reg, VENDOR_ID_LEN + 1, "%04x", PCI_VENDOR_ID_DELL);
	snprintf(str_id_cap, VENDOR_ID_LEN + 1, "%04X", PCI_VENDOR_ID_DELL);
	if (!strncmp(str_id_reg, &vpd_data[rodi], VENDOR_ID_LEN) ||
	    !strncmp(str_id_cap, &vpd_data[rodi], VENDOR_ID_LEN)) {

		rodi = pci_vpd_find_info_keyword(vpd_data, i, block_end,
						 PCI_VPD_RO_KEYWORD_VENDOR0);
		if (rodi >= 0) {
			len = pci_vpd_info_field_size(&vpd_data[rodi]);

			rodi += PCI_VPD_INFO_FLD_HDR_SIZE;

			if (len < 32 && (len + rodi) <= BNX2X_VPD_LEN) {
				memcpy(bp->fw_ver, &vpd_data[rodi], len);
				bp->fw_ver[len] = ' ';
			}
		}
		return;
	}
out_not_found:
	return;
}

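/*
 * Driver-private init done once at probe time: locks and deferred work,
 * HW info discovery, and settings derived from the module parameters
 * (queue/interrupt mode, TPA, MRRS, coalescing ticks and the periodic
 * timer).
 */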
static int __devinit bnx2x_init_bp(struct bnx2x *bp)
{
	int func;
	int timer_interval;
	int rc;

	/* Disable interrupt handling until HW is initialized */
	atomic_set(&bp->intr_sem, 1);
	smp_wmb(); /* Ensure that bp->intr_sem update is SMP-safe */

	mutex_init(&bp->port.phy_mutex);
	mutex_init(&bp->fw_mb_mutex);
	spin_lock_init(&bp->stats_lock);
#ifdef BCM_CNIC
	mutex_init(&bp->cnic_mutex);
#endif

	INIT_DELAYED_WORK(&bp->sp_task, bnx2x_sp_task);
	INIT_DELAYED_WORK(&bp->reset_task, bnx2x_reset_task);

	rc = bnx2x_get_hwinfo(bp);

	if (!rc)
		rc = bnx2x_alloc_mem_bp(bp);

	bnx2x_read_fwinfo(bp);

	func = BP_FUNC(bp);

	/* need to reset chip if undi was active */
	if (!BP_NOMCP(bp))
		bnx2x_undi_unload(bp);

	if (CHIP_REV_IS_FPGA(bp))
		dev_err(&bp->pdev->dev, "FPGA detected\n");

	if (BP_NOMCP(bp) && (func == 0))
		dev_err(&bp->pdev->dev, "MCP disabled, "
					"must load devices in order!\n");

	/* Set multi queue mode */
	if ((multi_mode != ETH_RSS_MODE_DISABLED) &&
	    ((int_mode == INT_MODE_INTx) || (int_mode == INT_MODE_MSI))) {
		dev_err(&bp->pdev->dev, "Multi disabled since int_mode "
					"requested is not MSI-X\n");
		multi_mode = ETH_RSS_MODE_DISABLED;
	}
	bp->multi_mode = multi_mode;
	bp->int_mode = int_mode;

	bp->dev->features |= NETIF_F_GRO;

	/* Set TPA flags */
	if (disable_tpa) {
		bp->flags &= ~TPA_ENABLE_FLAG;
		bp->dev->features &= ~NETIF_F_LRO;
	} else {
		bp->flags |= TPA_ENABLE_FLAG;
		bp->dev->features |= NETIF_F_LRO;
	}
	bp->disable_tpa = disable_tpa;

	if (CHIP_IS_E1(bp))
		bp->dropless_fc = 0;
	else
		bp->dropless_fc = dropless_fc;

	bp->mrrs = mrrs;

	bp->tx_ring_size = MAX_TX_AVAIL;

	bp->rx_csum = 1;

	/* make sure that the numbers are in the right granularity */
	bp->tx_ticks = (50 / BNX2X_BTR) * BNX2X_BTR;
	bp->rx_ticks = (25 / BNX2X_BTR) * BNX2X_BTR;

	timer_interval = (CHIP_REV_IS_SLOW(bp) ? 5*HZ : HZ);
	bp->current_interval = (poll ? poll : timer_interval);

	init_timer(&bp->timer);
	bp->timer.expires = jiffies + bp->current_interval;
	bp->timer.data = (unsigned long) bp;
	bp->timer.function = bnx2x_timer;

	return rc;
}


/****************************************************************************
* General service functions
****************************************************************************/

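/*
 * Note on the recovery handshake in bnx2x_open() below: if a previous
 * "process kill" recovery never completed (reset-done still clear), the
 * first function to load tries to take the leadership lock and run the
 * recovery itself before bringing the NIC up; otherwise the open is
 * failed with -EAGAIN.
 */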
/* called with rtnl_lock */
static int bnx2x_open(struct net_device *dev)
{
	struct bnx2x *bp = netdev_priv(dev);

	netif_carrier_off(dev);

	bnx2x_set_power_state(bp, PCI_D0);

	if (!bnx2x_reset_is_done(bp)) {
		do {
			/* Reset MCP mail box sequence if there is on going
			 * recovery
			 */
			bp->fw_seq = 0;

			/* If it's the first function to load and reset done
			 * is still not cleared it may mean that a previous
			 * recovery never completed. We don't check the
			 * attention state here because it may have already
			 * been cleared by a "common" reset but we shall
			 * proceed with "process kill" anyway.
			 */
			if ((bnx2x_get_load_cnt(bp) == 0) &&
			    bnx2x_trylock_hw_lock(bp,
			    HW_LOCK_RESOURCE_RESERVED_08) &&
			    (!bnx2x_leader_reset(bp))) {
				DP(NETIF_MSG_HW, "Recovered in open\n");
				break;
			}

			bnx2x_set_power_state(bp, PCI_D3hot);

			printk(KERN_ERR"%s: Recovery flow hasn't been properly"
			" completed yet. Try again later. If you still see this"
			" message after a few retries then power cycle is"
			" required.\n", bp->dev->name);

			return -EAGAIN;
		} while (0);
	}

	bp->recovery_state = BNX2X_RECOVERY_DONE;

	return bnx2x_nic_load(bp, LOAD_OPEN);
}

/* called with rtnl_lock */
static int bnx2x_close(struct net_device *dev)
{
	struct bnx2x *bp = netdev_priv(dev);

	/* Unload the driver, release IRQs */
	bnx2x_nic_unload(bp, UNLOAD_CLOSE);
	bnx2x_set_power_state(bp, PCI_D3hot);

	return 0;
}

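/*
 * RX filter programming. On E1 the multicast list is handed to
 * bnx2x_set_e1_mc_list(); on E1H each address is hashed with CRC32c and
 * the top 8 bits of the hash select one bit of a 256-bit filter spread
 * over the eight 32-bit MC_HASH registers:
 *
 *	bit = (crc32c_le(0, addr, ETH_ALEN) >> 24) & 0xff;
 *	mc_filter[bit >> 5] |= 1 << (bit & 0x1f);
 */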
/* called with netif_tx_lock from dev_mcast.c */
void bnx2x_set_rx_mode(struct net_device *dev)
{
	struct bnx2x *bp = netdev_priv(dev);
	u32 rx_mode = BNX2X_RX_MODE_NORMAL;
	int port = BP_PORT(bp);

	if (bp->state != BNX2X_STATE_OPEN) {
		DP(NETIF_MSG_IFUP, "state is %x, returning\n", bp->state);
		return;
	}

	DP(NETIF_MSG_IFUP, "dev->flags = %x\n", dev->flags);

	if (dev->flags & IFF_PROMISC)
		rx_mode = BNX2X_RX_MODE_PROMISC;

	else if ((dev->flags & IFF_ALLMULTI) ||
		 ((netdev_mc_count(dev) > BNX2X_MAX_MULTICAST) &&
		  CHIP_IS_E1(bp)))
		rx_mode = BNX2X_RX_MODE_ALLMULTI;

	else { /* some multicasts */
		if (CHIP_IS_E1(bp)) {
			/*
			 * set mc list, do not wait as wait implies sleep
			 * and set_rx_mode can be invoked from non-sleepable
			 * context
			 */
			u8 offset = (CHIP_REV_IS_SLOW(bp) ?
				     BNX2X_MAX_EMUL_MULTI*(1 + port) :
				     BNX2X_MAX_MULTICAST*(1 + port));

			bnx2x_set_e1_mc_list(bp, offset);
		} else { /* E1H */
			/* Accept one or more multicasts */
			struct netdev_hw_addr *ha;
			u32 mc_filter[MC_HASH_SIZE];
			u32 crc, bit, regidx;
			int i;

			memset(mc_filter, 0, 4 * MC_HASH_SIZE);

			netdev_for_each_mc_addr(ha, dev) {
				DP(NETIF_MSG_IFUP, "Adding mcast MAC: %pM\n",
				   bnx2x_mc_addr(ha));

				crc = crc32c_le(0, bnx2x_mc_addr(ha),
						ETH_ALEN);
				bit = (crc >> 24) & 0xff;
				regidx = bit >> 5;
				bit &= 0x1f;
				mc_filter[regidx] |= (1 << bit);
			}

			for (i = 0; i < MC_HASH_SIZE; i++)
				REG_WR(bp, MC_HASH_OFFSET(bp, i),
				       mc_filter[i]);
		}
	}

	bp->rx_mode = rx_mode;
	bnx2x_set_storm_rx_mode(bp);
}

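/*
 * MDIO accessors exported to the generic MII ioctl layer through
 * bp->mdio; both serialize against the link code with the PHY lock and
 * map MDIO_DEVAD_NONE onto the device address the HW expects for
 * clause-22 cycles.
 */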
/* called with rtnl_lock */
static int bnx2x_mdio_read(struct net_device *netdev, int prtad,
			   int devad, u16 addr)
{
	struct bnx2x *bp = netdev_priv(netdev);
	u16 value;
	int rc;

	DP(NETIF_MSG_LINK, "mdio_read: prtad 0x%x, devad 0x%x, addr 0x%x\n",
	   prtad, devad, addr);

	/* The HW expects different devad if CL22 is used */
	devad = (devad == MDIO_DEVAD_NONE) ? DEFAULT_PHY_DEV_ADDR : devad;

	bnx2x_acquire_phy_lock(bp);
	rc = bnx2x_phy_read(&bp->link_params, prtad, devad, addr, &value);
	bnx2x_release_phy_lock(bp);
	DP(NETIF_MSG_LINK, "mdio_read_val 0x%x rc = 0x%x\n", value, rc);

	if (!rc)
		rc = value;
	return rc;
}

/* called with rtnl_lock */
static int bnx2x_mdio_write(struct net_device *netdev, int prtad, int devad,
			    u16 addr, u16 value)
{
	struct bnx2x *bp = netdev_priv(netdev);
	int rc;

	DP(NETIF_MSG_LINK, "mdio_write: prtad 0x%x, devad 0x%x, addr 0x%x,"
	   " value 0x%x\n", prtad, devad, addr, value);

	/* The HW expects different devad if CL22 is used */
	devad = (devad == MDIO_DEVAD_NONE) ? DEFAULT_PHY_DEV_ADDR : devad;

	bnx2x_acquire_phy_lock(bp);
	rc = bnx2x_phy_write(&bp->link_params, prtad, devad, addr, value);
	bnx2x_release_phy_lock(bp);
	return rc;
}

/* called with rtnl_lock */
static int bnx2x_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
{
	struct bnx2x *bp = netdev_priv(dev);
	struct mii_ioctl_data *mdio = if_mii(ifr);

	DP(NETIF_MSG_LINK, "ioctl: phy id 0x%x, reg 0x%x, val_in 0x%x\n",
	   mdio->phy_id, mdio->reg_num, mdio->val_in);

	if (!netif_running(dev))
		return -EAGAIN;

	return mdio_mii_ioctl(&bp->mdio, mdio, cmd);
}

#ifdef CONFIG_NET_POLL_CONTROLLER
static void poll_bnx2x(struct net_device *dev)
{
	struct bnx2x *bp = netdev_priv(dev);

	disable_irq(bp->pdev->irq);
	bnx2x_interrupt(bp->pdev->irq, dev);
	enable_irq(bp->pdev->irq);
}
#endif

static const struct net_device_ops bnx2x_netdev_ops = {
	.ndo_open		= bnx2x_open,
	.ndo_stop		= bnx2x_close,
	.ndo_start_xmit		= bnx2x_start_xmit,
	.ndo_set_multicast_list	= bnx2x_set_rx_mode,
	.ndo_set_mac_address	= bnx2x_change_mac_addr,
	.ndo_validate_addr	= eth_validate_addr,
	.ndo_do_ioctl		= bnx2x_ioctl,
	.ndo_change_mtu		= bnx2x_change_mtu,
	.ndo_tx_timeout		= bnx2x_tx_timeout,
#ifdef BCM_VLAN
	.ndo_vlan_rx_register	= bnx2x_vlan_rx_register,
#endif
#ifdef CONFIG_NET_POLL_CONTROLLER
	.ndo_poll_controller	= poll_bnx2x,
#endif
};

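/*
 * Low-level PCI bring-up for one function: enable the device, claim and
 * map BAR0 (registers) and BAR2 (doorbells), set the DMA masks, clean
 * the PXP2 indirect-address registers and attach netdev ops, feature
 * flags and the MDIO interface.
 */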
static int __devinit bnx2x_init_dev(struct pci_dev *pdev,
				    struct net_device *dev)
{
	struct bnx2x *bp;
	int rc;

	SET_NETDEV_DEV(dev, &pdev->dev);
	bp = netdev_priv(dev);

	bp->dev = dev;
	bp->pdev = pdev;
	bp->flags = 0;
	bp->pf_num = PCI_FUNC(pdev->devfn);

	rc = pci_enable_device(pdev);
	if (rc) {
		dev_err(&bp->pdev->dev,
			"Cannot enable PCI device, aborting\n");
		goto err_out;
	}

	if (!(pci_resource_flags(pdev, 0) & IORESOURCE_MEM)) {
		dev_err(&bp->pdev->dev,
			"Cannot find PCI device base address, aborting\n");
		rc = -ENODEV;
		goto err_out_disable;
	}

	if (!(pci_resource_flags(pdev, 2) & IORESOURCE_MEM)) {
		dev_err(&bp->pdev->dev, "Cannot find second PCI device"
			" base address, aborting\n");
		rc = -ENODEV;
		goto err_out_disable;
	}

	if (atomic_read(&pdev->enable_cnt) == 1) {
		rc = pci_request_regions(pdev, DRV_MODULE_NAME);
		if (rc) {
			dev_err(&bp->pdev->dev,
				"Cannot obtain PCI resources, aborting\n");
			goto err_out_disable;
		}

		pci_set_master(pdev);
		pci_save_state(pdev);
	}

	bp->pm_cap = pci_find_capability(pdev, PCI_CAP_ID_PM);
	if (bp->pm_cap == 0) {
		dev_err(&bp->pdev->dev,
			"Cannot find power management capability, aborting\n");
		rc = -EIO;
		goto err_out_release;
	}

	bp->pcie_cap = pci_find_capability(pdev, PCI_CAP_ID_EXP);
	if (bp->pcie_cap == 0) {
		dev_err(&bp->pdev->dev,
			"Cannot find PCI Express capability, aborting\n");
		rc = -EIO;
		goto err_out_release;
	}

	if (dma_set_mask(&pdev->dev, DMA_BIT_MASK(64)) == 0) {
		bp->flags |= USING_DAC_FLAG;
		if (dma_set_coherent_mask(&pdev->dev, DMA_BIT_MASK(64)) != 0) {
			dev_err(&bp->pdev->dev, "dma_set_coherent_mask"
				" failed, aborting\n");
			rc = -EIO;
			goto err_out_release;
		}

	} else if (dma_set_mask(&pdev->dev, DMA_BIT_MASK(32)) != 0) {
		dev_err(&bp->pdev->dev,
			"System does not support DMA, aborting\n");
		rc = -EIO;
		goto err_out_release;
	}

	dev->mem_start = pci_resource_start(pdev, 0);
	dev->base_addr = dev->mem_start;
	dev->mem_end = pci_resource_end(pdev, 0);

	dev->irq = pdev->irq;

	bp->regview = pci_ioremap_bar(pdev, 0);
	if (!bp->regview) {
		dev_err(&bp->pdev->dev,
			"Cannot map register space, aborting\n");
		rc = -ENOMEM;
		goto err_out_release;
	}

	bp->doorbells = ioremap_nocache(pci_resource_start(pdev, 2),
					min_t(u64, BNX2X_DB_SIZE(bp),
					      pci_resource_len(pdev, 2)));
	if (!bp->doorbells) {
		dev_err(&bp->pdev->dev,
			"Cannot map doorbell space, aborting\n");
		rc = -ENOMEM;
		goto err_out_unmap;
	}

	bnx2x_set_power_state(bp, PCI_D0);

	/* clean indirect addresses */
	pci_write_config_dword(bp->pdev, PCICFG_GRC_ADDRESS,
			       PCICFG_VENDOR_ID_OFFSET);
	REG_WR(bp, PXP2_REG_PGL_ADDR_88_F0 + BP_PORT(bp)*16, 0);
	REG_WR(bp, PXP2_REG_PGL_ADDR_8C_F0 + BP_PORT(bp)*16, 0);
	REG_WR(bp, PXP2_REG_PGL_ADDR_90_F0 + BP_PORT(bp)*16, 0);
	REG_WR(bp, PXP2_REG_PGL_ADDR_94_F0 + BP_PORT(bp)*16, 0);

	/* Reset the load counter */
	bnx2x_clear_load_cnt(bp);

	dev->watchdog_timeo = TX_TIMEOUT;

	dev->netdev_ops = &bnx2x_netdev_ops;
	bnx2x_set_ethtool_ops(dev);
	dev->features |= NETIF_F_SG;
	dev->features |= NETIF_F_HW_CSUM;
	if (bp->flags & USING_DAC_FLAG)
		dev->features |= NETIF_F_HIGHDMA;
	dev->features |= (NETIF_F_TSO | NETIF_F_TSO_ECN);
	dev->features |= NETIF_F_TSO6;
#ifdef BCM_VLAN
	dev->features |= (NETIF_F_HW_VLAN_TX | NETIF_F_HW_VLAN_RX);
	bp->flags |= (HW_VLAN_RX_FLAG | HW_VLAN_TX_FLAG);

	dev->vlan_features |= NETIF_F_SG;
	dev->vlan_features |= NETIF_F_HW_CSUM;
	if (bp->flags & USING_DAC_FLAG)
		dev->vlan_features |= NETIF_F_HIGHDMA;
	dev->vlan_features |= (NETIF_F_TSO | NETIF_F_TSO_ECN);
	dev->vlan_features |= NETIF_F_TSO6;
#endif

	/* get_port_hwinfo() will set prtad and mmds properly */
	bp->mdio.prtad = MDIO_PRTAD_NONE;
	bp->mdio.mmds = 0;
	bp->mdio.mode_support = MDIO_SUPPORTS_C45 | MDIO_EMULATE_C22;
	bp->mdio.dev = dev;
	bp->mdio.mdio_read = bnx2x_mdio_read;
	bp->mdio.mdio_write = bnx2x_mdio_write;

	return 0;

err_out_unmap:
	if (bp->regview) {
		iounmap(bp->regview);
		bp->regview = NULL;
	}
	if (bp->doorbells) {
		iounmap(bp->doorbells);
		bp->doorbells = NULL;
	}

err_out_release:
	if (atomic_read(&pdev->enable_cnt) == 1)
		pci_release_regions(pdev);

err_out_disable:
	pci_disable_device(pdev);
	pci_set_drvdata(pdev, NULL);

err_out:
	return rc;
}

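/*
 * Decode the negotiated PCIe link width and speed from the PCICFG
 * link-control register (speed: 1 = 2.5GT/s, 2 = 5GT/s).
 */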
static void __devinit bnx2x_get_pcie_width_speed(struct bnx2x *bp,
						 int *width, int *speed)
{
	u32 val = REG_RD(bp, PCICFG_OFFSET + PCICFG_LINK_CONTROL);

	*width = (val & PCICFG_LINK_WIDTH) >> PCICFG_LINK_WIDTH_SHIFT;

	/* return value of 1=2.5GHz 2=5GHz */
	*speed = (val & PCICFG_LINK_SPEED) >> PCICFG_LINK_SPEED_SHIFT;
}

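/*
 * Validate a firmware image before using it: every section must lie
 * inside the file, every init_ops offset must reference a valid opcode,
 * and the embedded version must match the one this driver was built
 * for.
 */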
static int bnx2x_check_firmware(struct bnx2x *bp)
{
	const struct firmware *firmware = bp->firmware;
	struct bnx2x_fw_file_hdr *fw_hdr;
	struct bnx2x_fw_file_section *sections;
	u32 offset, len, num_ops;
	u16 *ops_offsets;
	int i;
	const u8 *fw_ver;

	if (firmware->size < sizeof(struct bnx2x_fw_file_hdr))
		return -EINVAL;

	fw_hdr = (struct bnx2x_fw_file_hdr *)firmware->data;
	sections = (struct bnx2x_fw_file_section *)fw_hdr;

	/* Make sure none of the offsets and sizes make us read beyond
	 * the end of the firmware data */
	for (i = 0; i < sizeof(*fw_hdr) / sizeof(*sections); i++) {
		offset = be32_to_cpu(sections[i].offset);
		len = be32_to_cpu(sections[i].len);
		if (offset + len > firmware->size) {
			dev_err(&bp->pdev->dev,
				"Section %d length is out of bounds\n", i);
			return -EINVAL;
		}
	}

	/* Likewise for the init_ops offsets */
	offset = be32_to_cpu(fw_hdr->init_ops_offsets.offset);
	ops_offsets = (u16 *)(firmware->data + offset);
	num_ops = be32_to_cpu(fw_hdr->init_ops.len) / sizeof(struct raw_op);

	for (i = 0; i < be32_to_cpu(fw_hdr->init_ops_offsets.len) / 2; i++) {
		if (be16_to_cpu(ops_offsets[i]) > num_ops) {
			dev_err(&bp->pdev->dev,
				"Section offset %d is out of bounds\n", i);
			return -EINVAL;
		}
	}

	/* Check FW version */
	offset = be32_to_cpu(fw_hdr->fw_version.offset);
	fw_ver = firmware->data + offset;
	if ((fw_ver[0] != BCM_5710_FW_MAJOR_VERSION) ||
	    (fw_ver[1] != BCM_5710_FW_MINOR_VERSION) ||
	    (fw_ver[2] != BCM_5710_FW_REVISION_VERSION) ||
	    (fw_ver[3] != BCM_5710_FW_ENGINEERING_VERSION)) {
		dev_err(&bp->pdev->dev,
			"Bad FW version:%d.%d.%d.%d. Should be %d.%d.%d.%d\n",
			fw_ver[0], fw_ver[1], fw_ver[2], fw_ver[3],
			BCM_5710_FW_MAJOR_VERSION,
			BCM_5710_FW_MINOR_VERSION,
			BCM_5710_FW_REVISION_VERSION,
			BCM_5710_FW_ENGINEERING_VERSION);
		return -EINVAL;
	}

	return 0;
}

static inline void be32_to_cpu_n(const u8 *_source, u8 *_target, u32 n)
{
	const __be32 *source = (const __be32 *)_source;
	u32 *target = (u32 *)_target;
	u32 i;

	for (i = 0; i < n/4; i++)
		target[i] = be32_to_cpu(source[i]);
}

/*
 * Ops array is stored in the following format:
 * {op(8bit), offset(24bit, big endian), data(32bit, big endian)}
 */
static inline void bnx2x_prep_ops(const u8 *_source, u8 *_target, u32 n)
{
	const __be32 *source = (const __be32 *)_source;
	struct raw_op *target = (struct raw_op *)_target;
	u32 i, j, tmp;

	for (i = 0, j = 0; i < n/8; i++, j += 2) {
		tmp = be32_to_cpu(source[j]);
		target[i].op = (tmp >> 24) & 0xff;
		target[i].offset = tmp & 0xffffff;
		target[i].raw_data = be32_to_cpu(source[j + 1]);
	}
}

/**
 * IRO array is stored in the following format:
 * {base(24bit), m1(16bit), m2(16bit), m3(16bit), size(16bit) }
 */
static inline void bnx2x_prep_iro(const u8 *_source, u8 *_target, u32 n)
{
	const __be32 *source = (const __be32 *)_source;
	struct iro *target = (struct iro *)_target;
	u32 i, j, tmp;

	for (i = 0, j = 0; i < n/sizeof(struct iro); i++) {
		target[i].base = be32_to_cpu(source[j]);
		j++;
		tmp = be32_to_cpu(source[j]);
		target[i].m1 = (tmp >> 16) & 0xffff;
		target[i].m2 = tmp & 0xffff;
		j++;
		tmp = be32_to_cpu(source[j]);
		target[i].m3 = (tmp >> 16) & 0xffff;
		target[i].size = tmp & 0xffff;
		j++;
	}
}

static inline void be16_to_cpu_n(const u8 *_source, u8 *_target, u32 n)
{
	const __be16 *source = (const __be16 *)_source;
	u16 *target = (u16 *)_target;
	u32 i;

	for (i = 0; i < n/2; i++)
		target[i] = be16_to_cpu(source[i]);
}

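/*
 * Allocate bp->arr for one firmware section and convert it out of the
 * file's big-endian layout with 'func'; jumps to 'lbl' on allocation
 * failure. Expects 'bp' and 'fw_hdr' in the calling scope.
 */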
#define BNX2X_ALLOC_AND_SET(arr, lbl, func)				\
do {									\
	u32 len = be32_to_cpu(fw_hdr->arr.len);				\
	bp->arr = kmalloc(len, GFP_KERNEL);				\
	if (!bp->arr) {							\
		pr_err("Failed to allocate %d bytes for "#arr"\n", len); \
		goto lbl;						\
	}								\
	func(bp->firmware->data + be32_to_cpu(fw_hdr->arr.offset),	\
	     (u8 *)bp->arr, len);					\
} while (0)

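/*
 * Request the chip-specific firmware file, validate it with
 * bnx2x_check_firmware() and unpack its sections (init data blob,
 * opcodes, offsets, per-STORM microcode and the IRO table).
 */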
int bnx2x_init_firmware(struct bnx2x *bp)
{
	const char *fw_file_name;
	struct bnx2x_fw_file_hdr *fw_hdr;
	int rc;

	if (CHIP_IS_E1(bp))
		fw_file_name = FW_FILE_NAME_E1;
	else if (CHIP_IS_E1H(bp))
		fw_file_name = FW_FILE_NAME_E1H;
	else if (CHIP_IS_E2(bp))
		fw_file_name = FW_FILE_NAME_E2;
	else {
		BNX2X_ERR("Unsupported chip revision\n");
		return -EINVAL;
	}

	BNX2X_DEV_INFO("Loading %s\n", fw_file_name);

	rc = request_firmware(&bp->firmware, fw_file_name, &bp->pdev->dev);
	if (rc) {
		BNX2X_ERR("Can't load firmware file %s\n", fw_file_name);
		goto request_firmware_exit;
	}

	rc = bnx2x_check_firmware(bp);
	if (rc) {
		BNX2X_ERR("Corrupt firmware file %s\n", fw_file_name);
		goto request_firmware_exit;
	}

	fw_hdr = (struct bnx2x_fw_file_hdr *)bp->firmware->data;

	/* Initialize the pointers to the init arrays */
	/* Blob */
	BNX2X_ALLOC_AND_SET(init_data, request_firmware_exit, be32_to_cpu_n);

	/* Opcodes */
	BNX2X_ALLOC_AND_SET(init_ops, init_ops_alloc_err, bnx2x_prep_ops);

	/* Offsets */
	BNX2X_ALLOC_AND_SET(init_ops_offsets, init_offsets_alloc_err,
			    be16_to_cpu_n);

	/* STORMs firmware */
	INIT_TSEM_INT_TABLE_DATA(bp) = bp->firmware->data +
		be32_to_cpu(fw_hdr->tsem_int_table_data.offset);
	INIT_TSEM_PRAM_DATA(bp) = bp->firmware->data +
		be32_to_cpu(fw_hdr->tsem_pram_data.offset);
	INIT_USEM_INT_TABLE_DATA(bp) = bp->firmware->data +
		be32_to_cpu(fw_hdr->usem_int_table_data.offset);
	INIT_USEM_PRAM_DATA(bp) = bp->firmware->data +
		be32_to_cpu(fw_hdr->usem_pram_data.offset);
	INIT_XSEM_INT_TABLE_DATA(bp) = bp->firmware->data +
		be32_to_cpu(fw_hdr->xsem_int_table_data.offset);
	INIT_XSEM_PRAM_DATA(bp) = bp->firmware->data +
		be32_to_cpu(fw_hdr->xsem_pram_data.offset);
	INIT_CSEM_INT_TABLE_DATA(bp) = bp->firmware->data +
		be32_to_cpu(fw_hdr->csem_int_table_data.offset);
	INIT_CSEM_PRAM_DATA(bp) = bp->firmware->data +
		be32_to_cpu(fw_hdr->csem_pram_data.offset);
	/* IRO */
	BNX2X_ALLOC_AND_SET(iro_arr, iro_alloc_err, bnx2x_prep_iro);

	return 0;

iro_alloc_err:
	kfree(bp->init_ops_offsets);
init_offsets_alloc_err:
	kfree(bp->init_ops);
init_ops_alloc_err:
	kfree(bp->init_data);
request_firmware_exit:
	release_firmware(bp->firmware);

	return rc;
}

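/*
 * Round the number of connection IDs handed to the QM block up to the
 * QM allocation granularity, including the extra CNIC CIDs when
 * BCM_CNIC is configured.
 */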
static inline int bnx2x_set_qm_cid_count(struct bnx2x *bp, int l2_cid_count)
{
	int cid_count = L2_FP_COUNT(l2_cid_count);

#ifdef BCM_CNIC
	cid_count += CNIC_CID_MAX;
#endif
	return roundup(cid_count, QM_CID_ROUND);
}

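/*
 * PCI probe entry point: size the L2 CID count for the board type,
 * allocate the multiqueue netdev and run the init helpers above, then
 * register the netdev and report the negotiated PCIe link.
 */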
static int __devinit bnx2x_init_one(struct pci_dev *pdev,
				    const struct pci_device_id *ent)
{
	struct net_device *dev = NULL;
	struct bnx2x *bp;
	int pcie_width, pcie_speed;
	int rc, cid_count;

	switch (ent->driver_data) {
	case BCM57710:
	case BCM57711:
	case BCM57711E:
		cid_count = FP_SB_MAX_E1x;
		break;

	case BCM57712:
	case BCM57712E:
		cid_count = FP_SB_MAX_E2;
		break;

	default:
		pr_err("Unknown board_type (%ld), aborting\n",
		       ent->driver_data);
		return -ENODEV;
	}

	cid_count += CNIC_CONTEXT_USE;

	/* dev zeroed in init_etherdev */
	dev = alloc_etherdev_mq(sizeof(*bp), cid_count);
	if (!dev) {
		dev_err(&pdev->dev, "Cannot allocate net device\n");
		return -ENOMEM;
	}

	bp = netdev_priv(dev);
	bp->msg_enable = debug;

	pci_set_drvdata(pdev, dev);

	bp->l2_cid_count = cid_count;

	rc = bnx2x_init_dev(pdev, dev);
	if (rc < 0) {
		free_netdev(dev);
		return rc;
	}

	rc = bnx2x_init_bp(bp);
	if (rc)
		goto init_one_exit;

	/* calc qm_cid_count */
	bp->qm_cid_count = bnx2x_set_qm_cid_count(bp, cid_count);

	rc = register_netdev(dev);
	if (rc) {
		dev_err(&pdev->dev, "Cannot register net device\n");
		goto init_one_exit;
	}

	/* Configure interrupt mode: try to enable MSI-X/MSI if
	 * needed, set bp->num_queues appropriately.
	 */
	bnx2x_set_int_mode(bp);

	/* Add all NAPI objects */
	bnx2x_add_all_napi(bp);

	bnx2x_get_pcie_width_speed(bp, &pcie_width, &pcie_speed);

	netdev_info(dev, "%s (%c%d) PCI-E x%d %s found at mem %lx,"
		    " IRQ %d, ", board_info[ent->driver_data].name,
		    (CHIP_REV(bp) >> 12) + 'A', (CHIP_METAL(bp) >> 4),
		    pcie_width,
		    ((!CHIP_IS_E2(bp) && pcie_speed == 2) ||
		     (CHIP_IS_E2(bp) && pcie_speed == 1)) ?
		    "5GHz (Gen2)" : "2.5GHz",
		    dev->base_addr, bp->pdev->irq);
	pr_cont("node addr %pM\n", dev->dev_addr);

	return 0;

init_one_exit:
	if (bp->regview)
		iounmap(bp->regview);

	if (bp->doorbells)
		iounmap(bp->doorbells);

	free_netdev(dev);

	if (atomic_read(&pdev->enable_cnt) == 1)
		pci_release_regions(pdev);

	pci_disable_device(pdev);
	pci_set_drvdata(pdev, NULL);

	return rc;
}

static void __devexit bnx2x_remove_one(struct pci_dev *pdev)
{
	struct net_device *dev = pci_get_drvdata(pdev);
	struct bnx2x *bp;

	if (!dev) {
		dev_err(&pdev->dev, "BAD net device from bnx2x_init_one\n");
		return;
	}
	bp = netdev_priv(dev);

	unregister_netdev(dev);

	/* Delete all NAPI objects */
	bnx2x_del_all_napi(bp);

	/* Disable MSI/MSI-X */
	bnx2x_disable_msi(bp);

	/* Make sure RESET task is not scheduled before continuing */
	cancel_delayed_work_sync(&bp->reset_task);

	if (bp->regview)
		iounmap(bp->regview);

	if (bp->doorbells)
		iounmap(bp->doorbells);

	bnx2x_free_mem_bp(bp);

	free_netdev(dev);

	if (atomic_read(&pdev->enable_cnt) == 1)
		pci_release_regions(pdev);

	pci_disable_device(pdev);
	pci_set_drvdata(pdev, NULL);
}

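/*
 * Minimal teardown for the PCI error (EEH) path: mark the device
 * errored, stop the datapath and timer, release IRQs and free rings and
 * driver memory.
 */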
static int bnx2x_eeh_nic_unload(struct bnx2x *bp)
{
	int i;

	bp->state = BNX2X_STATE_ERROR;

	bp->rx_mode = BNX2X_RX_MODE_NONE;

	bnx2x_netif_stop(bp, 0);
	netif_carrier_off(bp->dev);

	del_timer_sync(&bp->timer);
	bp->stats_state = STATS_STATE_DISABLED;
	DP(BNX2X_MSG_STATS, "stats_state - DISABLED\n");

	/* Release IRQs */
	bnx2x_free_irq(bp);

	/* Free SKBs, SGEs, TPA pool and driver internals */
	bnx2x_free_skbs(bp);

	for_each_queue(bp, i)
		bnx2x_free_rx_sge_range(bp, bp->fp + i, NUM_RX_SGE);

	bnx2x_free_mem(bp);

	bp->state = BNX2X_STATE_CLOSED;

	return 0;
}

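/*
 * Re-discover the MCP shared-memory base after a slot reset and resync
 * the FW mailbox sequence; falls back to NO_MCP mode when the
 * shared-memory address looks invalid.
 */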
static void bnx2x_eeh_recover(struct bnx2x *bp)
{
	u32 val;

	mutex_init(&bp->port.phy_mutex);

	bp->common.shmem_base = REG_RD(bp, MISC_REG_SHARED_MEM_ADDR);
	bp->link_params.shmem_base = bp->common.shmem_base;
	BNX2X_DEV_INFO("shmem offset is 0x%x\n", bp->common.shmem_base);

	if (!bp->common.shmem_base ||
	    (bp->common.shmem_base < 0xA0000) ||
	    (bp->common.shmem_base >= 0xC0000)) {
		BNX2X_DEV_INFO("MCP not active\n");
		bp->flags |= NO_MCP_FLAG;
		return;
	}

	val = SHMEM_RD(bp, validity_map[BP_PORT(bp)]);
	if ((val & (SHR_MEM_VALIDITY_DEV_INFO | SHR_MEM_VALIDITY_MB))
		!= (SHR_MEM_VALIDITY_DEV_INFO | SHR_MEM_VALIDITY_MB))
		BNX2X_ERR("BAD MCP validity signature\n");

	if (!BP_NOMCP(bp)) {
		bp->fw_seq =
		    (SHMEM_RD(bp, func_mb[BP_FW_MB_IDX(bp)].drv_mb_header) &
		     DRV_MSG_SEQ_NUMBER_MASK);
		BNX2X_DEV_INFO("fw_seq 0x%08x\n", bp->fw_seq);
	}
}

/**
 * bnx2x_io_error_detected - called when PCI error is detected
 * @pdev: Pointer to PCI device
 * @state: The current pci connection state
 *
 * This function is called after a PCI bus error affecting
 * this device has been detected.
 */
static pci_ers_result_t bnx2x_io_error_detected(struct pci_dev *pdev,
						pci_channel_state_t state)
{
	struct net_device *dev = pci_get_drvdata(pdev);
	struct bnx2x *bp = netdev_priv(dev);

	rtnl_lock();

	netif_device_detach(dev);

	if (state == pci_channel_io_perm_failure) {
		rtnl_unlock();
		return PCI_ERS_RESULT_DISCONNECT;
	}

	if (netif_running(dev))
		bnx2x_eeh_nic_unload(bp);

	pci_disable_device(pdev);

	rtnl_unlock();

	/* Request a slot reset */
	return PCI_ERS_RESULT_NEED_RESET;
}

/**
 * bnx2x_io_slot_reset - called after the PCI bus has been reset
 * @pdev: Pointer to PCI device
 *
 * Restart the card from scratch, as if from a cold-boot.
 */
static pci_ers_result_t bnx2x_io_slot_reset(struct pci_dev *pdev)
{
	struct net_device *dev = pci_get_drvdata(pdev);
	struct bnx2x *bp = netdev_priv(dev);

	rtnl_lock();

	if (pci_enable_device(pdev)) {
		dev_err(&pdev->dev,
			"Cannot re-enable PCI device after reset\n");
		rtnl_unlock();
		return PCI_ERS_RESULT_DISCONNECT;
	}

	pci_set_master(pdev);
	pci_restore_state(pdev);

	if (netif_running(dev))
		bnx2x_set_power_state(bp, PCI_D0);

	rtnl_unlock();

	return PCI_ERS_RESULT_RECOVERED;
}

/**
 * bnx2x_io_resume - called when traffic can start flowing again
 * @pdev: Pointer to PCI device
 *
 * This callback is called when the error recovery driver tells us that
 * it's OK to resume normal operation.
 */
static void bnx2x_io_resume(struct pci_dev *pdev)
{
	struct net_device *dev = pci_get_drvdata(pdev);
	struct bnx2x *bp = netdev_priv(dev);

	if (bp->recovery_state != BNX2X_RECOVERY_DONE) {
		printk(KERN_ERR "Handling parity error recovery. "
				"Try again later\n");
		return;
	}

	rtnl_lock();

	bnx2x_eeh_recover(bp);

	if (netif_running(dev))
		bnx2x_nic_load(bp, LOAD_NORMAL);

	netif_device_attach(dev);

	rtnl_unlock();
}

static struct pci_error_handlers bnx2x_err_handler = {
	.error_detected = bnx2x_io_error_detected,
	.slot_reset	= bnx2x_io_slot_reset,
	.resume		= bnx2x_io_resume,
};

static struct pci_driver bnx2x_pci_driver = {
	.name	     = DRV_MODULE_NAME,
	.id_table    = bnx2x_pci_tbl,
	.probe	     = bnx2x_init_one,
	.remove      = __devexit_p(bnx2x_remove_one),
	.suspend     = bnx2x_suspend,
	.resume      = bnx2x_resume,
	.err_handler = &bnx2x_err_handler,
};

static int __init bnx2x_init(void)
{
	int ret;

	pr_info("%s", version);

	bnx2x_wq = create_singlethread_workqueue("bnx2x");
	if (bnx2x_wq == NULL) {
		pr_err("Cannot create workqueue\n");
		return -ENOMEM;
	}

	ret = pci_register_driver(&bnx2x_pci_driver);
	if (ret) {
		pr_err("Cannot register driver\n");
		destroy_workqueue(bnx2x_wq);
	}
	return ret;
}

static void __exit bnx2x_cleanup(void)
{
	pci_unregister_driver(&bnx2x_pci_driver);

	destroy_workqueue(bnx2x_wq);
}

module_init(bnx2x_init);
module_exit(bnx2x_cleanup);

Michael Chan993ac7b2009-10-10 13:46:56 +00009343#ifdef BCM_CNIC
9344
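/* Everything below glues bnx2x to the cnic driver, which implements
 * iSCSI offload on top of the L2 device.
 */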
/* count denotes the number of new completions we have seen; account for
 * them, then move queued CNIC kwqes onto the slow-path queue for as long
 * as the per-type credits allow.
 */
static void bnx2x_cnic_sp_post(struct bnx2x *bp, int count)
{
	struct eth_spe *spe;

#ifdef BNX2X_STOP_ON_ERROR
	if (unlikely(bp->panic))
		return;
#endif

	spin_lock_bh(&bp->spq_lock);
	BUG_ON(bp->cnic_spq_pending < count);
	bp->cnic_spq_pending -= count;

	for (; bp->cnic_kwq_pending; bp->cnic_kwq_pending--) {
		u16 type = (le16_to_cpu(bp->cnic_kwq_cons->hdr.type)
				& SPE_HDR_CONN_TYPE) >>
				SPE_HDR_CONN_TYPE_SHIFT;

		/* Set validation for iSCSI L2 client before sending SETUP
		 * ramrod
		 */
		if (type == ETH_CONNECTION_TYPE) {
			u8 cmd = (le32_to_cpu(bp->cnic_kwq_cons->
					      hdr.conn_and_cmd_data) >>
				  SPE_HDR_CMD_ID_SHIFT) & 0xff;

			if (cmd == RAMROD_CMD_ID_ETH_CLIENT_SETUP)
				bnx2x_set_ctx_validation(&bp->context.
					vcxt[BNX2X_ISCSI_ETH_CID].eth,
					HW_CID(bp, BNX2X_ISCSI_ETH_CID));
		}

		/* There may be no more than 8 L2 (and COMMON) SPEs and no
		 * more than 8 L5 SPEs in flight.
		 */
		if ((type == NONE_CONNECTION_TYPE) ||
		    (type == ETH_CONNECTION_TYPE)) {
			if (!atomic_read(&bp->spq_left))
				break;
			else
				atomic_dec(&bp->spq_left);
		} else if (type == ISCSI_CONNECTION_TYPE) {
			if (bp->cnic_spq_pending >=
			    bp->cnic_eth_dev.max_kwqe_pending)
				break;
			else
				bp->cnic_spq_pending++;
		} else {
			BNX2X_ERR("Unknown SPE type: %d\n", type);
			bnx2x_panic();
			break;
		}

		spe = bnx2x_sp_get_next(bp);
		*spe = *bp->cnic_kwq_cons;

		DP(NETIF_MSG_TIMER, "pending on SPQ %d, on KWQ %d count %d\n",
		   bp->cnic_spq_pending, bp->cnic_kwq_pending, count);

		if (bp->cnic_kwq_cons == bp->cnic_kwq_last)
			bp->cnic_kwq_cons = bp->cnic_kwq;
		else
			bp->cnic_kwq_cons++;
	}
	bnx2x_sp_prod_update(bp);
	spin_unlock_bh(&bp->spq_lock);
}

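/* Entry point for the cnic driver to submit a batch of 16-byte kwqes;
 * returns how many were actually queued.
 */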
static int bnx2x_cnic_sp_queue(struct net_device *dev,
			       struct kwqe_16 *kwqes[], u32 count)
{
	struct bnx2x *bp = netdev_priv(dev);
	int i;

#ifdef BNX2X_STOP_ON_ERROR
	if (unlikely(bp->panic))
		return -EIO;
#endif

	spin_lock_bh(&bp->spq_lock);

	for (i = 0; i < count; i++) {
		struct eth_spe *spe = (struct eth_spe *)kwqes[i];

		if (bp->cnic_kwq_pending == MAX_SP_DESC_CNT)
			break;

		*bp->cnic_kwq_prod = *spe;

		bp->cnic_kwq_pending++;

		DP(NETIF_MSG_TIMER, "L5 SPQE %x %x %x:%x pos %d\n",
		   spe->hdr.conn_and_cmd_data, spe->hdr.type,
		   spe->data.update_data_addr.hi,
		   spe->data.update_data_addr.lo,
		   bp->cnic_kwq_pending);

		if (bp->cnic_kwq_prod == bp->cnic_kwq_last)
			bp->cnic_kwq_prod = bp->cnic_kwq;
		else
			bp->cnic_kwq_prod++;
	}

	spin_unlock_bh(&bp->spq_lock);

	if (bp->cnic_spq_pending < bp->cnic_eth_dev.max_kwqe_pending)
		bnx2x_cnic_sp_post(bp, 0);

	return i;
}

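/* Deliver a control event to the cnic driver.  This variant takes
 * cnic_mutex and may therefore sleep; use bnx2x_cnic_ctl_send_bh()
 * from bottom-half context.
 */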
static int bnx2x_cnic_ctl_send(struct bnx2x *bp, struct cnic_ctl_info *ctl)
{
	struct cnic_ops *c_ops;
	int rc = 0;

	mutex_lock(&bp->cnic_mutex);
	c_ops = bp->cnic_ops;
	if (c_ops)
		rc = c_ops->cnic_ctl(bp->cnic_data, ctl);
	mutex_unlock(&bp->cnic_mutex);

	return rc;
}

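/* Lockless variant: cnic_ops is read under RCU here instead of the
 * mutex, so this is safe to call from softirq context.
 */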
static int bnx2x_cnic_ctl_send_bh(struct bnx2x *bp, struct cnic_ctl_info *ctl)
{
	struct cnic_ops *c_ops;
	int rc = 0;

	rcu_read_lock();
	c_ops = rcu_dereference(bp->cnic_ops);
	if (c_ops)
		rc = c_ops->cnic_ctl(bp->cnic_data, ctl);
	rcu_read_unlock();

	return rc;
}

/*
 * for commands that have no data
 */
int bnx2x_cnic_notify(struct bnx2x *bp, int cmd)
{
	struct cnic_ctl_info ctl = {0};

	ctl.cmd = cmd;

	return bnx2x_cnic_ctl_send(bp, &ctl);
}

static void bnx2x_cnic_cfc_comp(struct bnx2x *bp, int cid)
{
	struct cnic_ctl_info ctl;

	/* first we tell CNIC and only then we count this as a completion */
	ctl.cmd = CNIC_CTL_COMPLETION_CMD;
	ctl.data.comp.cid = cid;

	bnx2x_cnic_ctl_send_bh(bp, &ctl);
	bnx2x_cnic_sp_post(bp, 0);
}

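/* Dispatch control commands issued by the cnic driver: context-table
 * writes, SPQ credit returns and iSCSI L2 ring start/stop.
 */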
static int bnx2x_drv_ctl(struct net_device *dev, struct drv_ctl_info *ctl)
{
	struct bnx2x *bp = netdev_priv(dev);
	int rc = 0;

	switch (ctl->cmd) {
	case DRV_CTL_CTXTBL_WR_CMD: {
		u32 index = ctl->data.io.offset;
		dma_addr_t addr = ctl->data.io.dma_addr;

		bnx2x_ilt_wr(bp, index, addr);
		break;
	}

	case DRV_CTL_RET_L5_SPQ_CREDIT_CMD: {
		int count = ctl->data.credit.credit_count;

		bnx2x_cnic_sp_post(bp, count);
		break;
	}

	/* rtnl_lock is held. */
	case DRV_CTL_START_L2_CMD: {
		u32 cli = ctl->data.ring.client_id;

		/* Set iSCSI MAC address */
		bnx2x_set_iscsi_eth_mac_addr(bp, 1);

		mmiowb();
		barrier();

		/* Start accepting on the iSCSI L2 ring.  Accept all
		 * multicasts, because that is the only way for the UIO
		 * client to receive them: in non-promiscuous mode only one
		 * client per function (the leading one, in our case) gets
		 * multicast packets.
		 */
		bnx2x_rxq_set_mac_filters(bp, cli,
			BNX2X_ACCEPT_UNICAST |
			BNX2X_ACCEPT_BROADCAST |
			BNX2X_ACCEPT_ALL_MULTICAST);
		storm_memset_mac_filters(bp, &bp->mac_filters, BP_FUNC(bp));

		break;
	}

	/* rtnl_lock is held. */
	case DRV_CTL_STOP_L2_CMD: {
		u32 cli = ctl->data.ring.client_id;

		/* Stop accepting on iSCSI L2 ring */
		bnx2x_rxq_set_mac_filters(bp, cli, BNX2X_ACCEPT_NONE);
		storm_memset_mac_filters(bp, &bp->mac_filters, BP_FUNC(bp));

		mmiowb();
		barrier();

		/* Unset iSCSI L2 MAC */
		bnx2x_set_iscsi_eth_mac_addr(bp, 0);
		break;
	}
	case DRV_CTL_RET_L2_SPQ_CREDIT_CMD: {
		int count = ctl->data.credit.credit_count;

		smp_mb__before_atomic_inc();
		atomic_add(count, &bp->spq_left);
		smp_mb__after_atomic_inc();
		break;
	}

	default:
		BNX2X_ERR("unknown command %x\n", ctl->cmd);
		rc = -EINVAL;
	}

	return rc;
}

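/* Describe our interrupt resources to cnic: when MSI-X is in use,
 * vector 1 of the MSI-X table is handed over to CNIC, together with
 * the matching status blocks.
 */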
void bnx2x_setup_cnic_irq_info(struct bnx2x *bp)
{
	struct cnic_eth_dev *cp = &bp->cnic_eth_dev;

	if (bp->flags & USING_MSIX_FLAG) {
		cp->drv_state |= CNIC_DRV_STATE_USING_MSIX;
		cp->irq_arr[0].irq_flags |= CNIC_IRQ_FL_MSIX;
		cp->irq_arr[0].vector = bp->msix_table[1].vector;
	} else {
		cp->drv_state &= ~CNIC_DRV_STATE_USING_MSIX;
		cp->irq_arr[0].irq_flags &= ~CNIC_IRQ_FL_MSIX;
	}
	if (CHIP_IS_E2(bp))
		cp->irq_arr[0].status_blk = (void *)bp->cnic_sb.e2_sb;
	else
		cp->irq_arr[0].status_blk = (void *)bp->cnic_sb.e1x_sb;

	cp->irq_arr[0].status_blk_num = CNIC_SB_ID(bp);
	cp->irq_arr[0].status_blk_num2 = CNIC_IGU_SB_ID(bp);
	cp->irq_arr[1].status_blk = bp->def_status_blk;
	cp->irq_arr[1].status_blk_num = DEF_SB_ID;
	cp->irq_arr[1].status_blk_num2 = DEF_SB_IGU_ID;

	cp->num_irq = 2;
}

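/* Called when cnic attaches: allocate one page for the kwqe ring,
 * publish the irq info and install the ops pointer under RCU.
 */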
static int bnx2x_register_cnic(struct net_device *dev, struct cnic_ops *ops,
			       void *data)
{
	struct bnx2x *bp = netdev_priv(dev);
	struct cnic_eth_dev *cp = &bp->cnic_eth_dev;

	if (ops == NULL)
		return -EINVAL;

	if (atomic_read(&bp->intr_sem) != 0)
		return -EBUSY;

	bp->cnic_kwq = kzalloc(PAGE_SIZE, GFP_KERNEL);
	if (!bp->cnic_kwq)
		return -ENOMEM;

	bp->cnic_kwq_cons = bp->cnic_kwq;
	bp->cnic_kwq_prod = bp->cnic_kwq;
	bp->cnic_kwq_last = bp->cnic_kwq + MAX_SP_DESC_CNT;

	bp->cnic_spq_pending = 0;
	bp->cnic_kwq_pending = 0;

	bp->cnic_data = data;

	cp->num_irq = 0;
	cp->drv_state = CNIC_DRV_STATE_REGD;
	cp->iro_arr = bp->iro_arr;

	bnx2x_setup_cnic_irq_info(bp);

	rcu_assign_pointer(bp->cnic_ops, ops);

	return 0;
}

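/* Tear-down mirror of bnx2x_register_cnic(): clear the ops pointer and
 * wait for RCU readers to finish before freeing the kwqe ring.
 */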
static int bnx2x_unregister_cnic(struct net_device *dev)
{
	struct bnx2x *bp = netdev_priv(dev);
	struct cnic_eth_dev *cp = &bp->cnic_eth_dev;

	mutex_lock(&bp->cnic_mutex);
	if (bp->cnic_flags & BNX2X_CNIC_FLAG_MAC_SET) {
		bp->cnic_flags &= ~BNX2X_CNIC_FLAG_MAC_SET;
		bnx2x_set_iscsi_eth_mac_addr(bp, 0);
	}
	cp->drv_state = 0;
	rcu_assign_pointer(bp->cnic_ops, NULL);
	mutex_unlock(&bp->cnic_mutex);
	synchronize_rcu();
	kfree(bp->cnic_kwq);
	bp->cnic_kwq = NULL;

	return 0;
}

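/* Exported so that the cnic module can discover the device and obtain
 * the callback table and resource layout set up above.
 */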
struct cnic_eth_dev *bnx2x_cnic_probe(struct net_device *dev)
{
	struct bnx2x *bp = netdev_priv(dev);
	struct cnic_eth_dev *cp = &bp->cnic_eth_dev;

	cp->drv_owner = THIS_MODULE;
	cp->chip_id = CHIP_ID(bp);
	cp->pdev = bp->pdev;
	cp->io_base = bp->regview;
	cp->io_base2 = bp->doorbells;
	cp->max_kwqe_pending = 8;
	cp->ctx_blk_size = CDU_ILT_PAGE_SZ;
	cp->ctx_tbl_offset = FUNC_ILT_BASE(BP_FUNC(bp)) +
			     bnx2x_cid_ilt_lines(bp);
	cp->ctx_tbl_len = CNIC_ILT_LINES;
	cp->starting_cid = bnx2x_cid_ilt_lines(bp) * ILT_PAGE_CIDS;
	cp->drv_submit_kwqes_16 = bnx2x_cnic_sp_queue;
	cp->drv_ctl = bnx2x_drv_ctl;
	cp->drv_register_cnic = bnx2x_register_cnic;
	cp->drv_unregister_cnic = bnx2x_unregister_cnic;
	cp->iscsi_l2_client_id = BNX2X_ISCSI_ETH_CL_ID;
	cp->iscsi_l2_cid = BNX2X_ISCSI_ETH_CID;

	DP(BNX2X_MSG_SP, "page_size %d, tbl_offset %d, tbl_lines %d, "
			 "starting cid %d\n",
	   cp->ctx_blk_size,
	   cp->ctx_tbl_offset,
	   cp->ctx_tbl_len,
	   cp->starting_cid);
	return cp;
}
EXPORT_SYMBOL(bnx2x_cnic_probe);

#endif /* BCM_CNIC */