blob: bc516bbcd9fd985a413cc6756dab6c02e4a264ec [file] [log] [blame]
Eilon Greenstein34f80b02008-06-23 20:33:01 -07001/* bnx2x_main.c: Broadcom Everest network driver.
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02002 *
Dmitry Kravkov5de92402011-05-04 23:51:13 +00003 * Copyright (c) 2007-2011 Broadcom Corporation
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02004 *
5 * This program is free software; you can redistribute it and/or modify
6 * it under the terms of the GNU General Public License as published by
7 * the Free Software Foundation.
8 *
Eilon Greenstein24e3fce2008-06-12 14:30:28 -07009 * Maintained by: Eilon Greenstein <eilong@broadcom.com>
10 * Written by: Eliezer Tamir
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +020011 * Based on code from Michael Chan's bnx2 driver
12 * UDP CSUM errata workaround by Arik Gendelman
Eilon Greensteinca003922009-08-12 22:53:28 -070013 * Slowpath and fastpath rework by Vladislav Zolotarov
Eliezer Tamirc14423f2008-02-28 11:49:42 -080014 * Statistics and Link management by Yitchak Gertner
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +020015 *
16 */
17
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +020018#include <linux/module.h>
19#include <linux/moduleparam.h>
20#include <linux/kernel.h>
21#include <linux/device.h> /* for dev_info() */
22#include <linux/timer.h>
23#include <linux/errno.h>
24#include <linux/ioport.h>
25#include <linux/slab.h>
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +020026#include <linux/interrupt.h>
27#include <linux/pci.h>
28#include <linux/init.h>
29#include <linux/netdevice.h>
30#include <linux/etherdevice.h>
31#include <linux/skbuff.h>
32#include <linux/dma-mapping.h>
33#include <linux/bitops.h>
34#include <linux/irq.h>
35#include <linux/delay.h>
36#include <asm/byteorder.h>
37#include <linux/time.h>
38#include <linux/ethtool.h>
39#include <linux/mii.h>
Eilon Greenstein0c6671b2009-01-14 21:26:51 -080040#include <linux/if_vlan.h>
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +020041#include <net/ip.h>
42#include <net/tcp.h>
43#include <net/checksum.h>
Eilon Greenstein34f80b02008-06-23 20:33:01 -070044#include <net/ip6_checksum.h>
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +020045#include <linux/workqueue.h>
46#include <linux/crc32.h>
Eilon Greenstein34f80b02008-06-23 20:33:01 -070047#include <linux/crc32c.h>
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +020048#include <linux/prefetch.h>
49#include <linux/zlib.h>
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +020050#include <linux/io.h>
Ben Hutchings45229b42009-11-07 11:53:39 +000051#include <linux/stringify.h>
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +020052
Dmitry Kravkovb0efbb92010-07-27 12:33:43 +000053#define BNX2X_MAIN
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +020054#include "bnx2x.h"
55#include "bnx2x_init.h"
Vladislav Zolotarov94a78b72009-04-27 03:27:43 -070056#include "bnx2x_init_ops.h"
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +000057#include "bnx2x_cmn.h"
Vladislav Zolotarove4901dd2010-12-13 05:44:18 +000058#include "bnx2x_dcb.h"
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +020059
Vladislav Zolotarov94a78b72009-04-27 03:27:43 -070060#include <linux/firmware.h>
61#include "bnx2x_fw_file_hdr.h"
62/* FW files */
Ben Hutchings45229b42009-11-07 11:53:39 +000063#define FW_FILE_VERSION \
64 __stringify(BCM_5710_FW_MAJOR_VERSION) "." \
65 __stringify(BCM_5710_FW_MINOR_VERSION) "." \
66 __stringify(BCM_5710_FW_REVISION_VERSION) "." \
67 __stringify(BCM_5710_FW_ENGINEERING_VERSION)
Dmitry Kravkov560131f2010-10-06 03:18:47 +000068#define FW_FILE_NAME_E1 "bnx2x/bnx2x-e1-" FW_FILE_VERSION ".fw"
69#define FW_FILE_NAME_E1H "bnx2x/bnx2x-e1h-" FW_FILE_VERSION ".fw"
Dmitry Kravkovf2e08992010-10-06 03:28:26 +000070#define FW_FILE_NAME_E2 "bnx2x/bnx2x-e2-" FW_FILE_VERSION ".fw"
Vladislav Zolotarov94a78b72009-04-27 03:27:43 -070071
Eilon Greenstein34f80b02008-06-23 20:33:01 -070072/* Time in jiffies before concluding the transmitter is hung */
73#define TX_TIMEOUT (5*HZ)
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +020074
Andrew Morton53a10562008-02-09 23:16:41 -080075static char version[] __devinitdata =
Eilon Greenstein34f80b02008-06-23 20:33:01 -070076 "Broadcom NetXtreme II 5771x 10Gigabit Ethernet Driver "
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +020077 DRV_MODULE_NAME " " DRV_MODULE_VERSION " (" DRV_MODULE_RELDATE ")\n";
78
Eilon Greenstein24e3fce2008-06-12 14:30:28 -070079MODULE_AUTHOR("Eliezer Tamir");
Dmitry Kravkovf2e08992010-10-06 03:28:26 +000080MODULE_DESCRIPTION("Broadcom NetXtreme II "
81 "BCM57710/57711/57711E/57712/57712E Driver");
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +020082MODULE_LICENSE("GPL");
83MODULE_VERSION(DRV_MODULE_VERSION);
Ben Hutchings45229b42009-11-07 11:53:39 +000084MODULE_FIRMWARE(FW_FILE_NAME_E1);
85MODULE_FIRMWARE(FW_FILE_NAME_E1H);
Dmitry Kravkovf2e08992010-10-06 03:28:26 +000086MODULE_FIRMWARE(FW_FILE_NAME_E2);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +020087
Eilon Greenstein555f6c72009-02-12 08:36:11 +000088static int multi_mode = 1;
89module_param(multi_mode, int, 0);
Eilon Greensteinca003922009-08-12 22:53:28 -070090MODULE_PARM_DESC(multi_mode, " Multi queue mode "
91 "(0 Disable; 1 Enable (default))");
92
Dmitry Kravkovd6214d72010-10-06 03:32:10 +000093int num_queues;
Vladislav Zolotarov54b9dda2009-11-16 06:05:58 +000094module_param(num_queues, int, 0);
95MODULE_PARM_DESC(num_queues, " Number of queues for multi_mode=1"
96 " (default is as a number of CPUs)");
Eilon Greenstein555f6c72009-02-12 08:36:11 +000097
Eilon Greenstein19680c42008-08-13 15:47:33 -070098static int disable_tpa;
Eilon Greenstein19680c42008-08-13 15:47:33 -070099module_param(disable_tpa, int, 0);
Eilon Greenstein9898f862009-02-12 08:38:27 +0000100MODULE_PARM_DESC(disable_tpa, " Disable the TPA (LRO) feature");
Eilon Greenstein8badd272009-02-12 08:36:15 +0000101
102static int int_mode;
103module_param(int_mode, int, 0);
Vladislav Zolotarovcdaa7cb2010-04-19 01:13:57 +0000104MODULE_PARM_DESC(int_mode, " Force interrupt mode other then MSI-X "
105 "(1 INT#x; 2 MSI)");
Eilon Greenstein8badd272009-02-12 08:36:15 +0000106
Eilon Greensteina18f5122009-08-12 08:23:26 +0000107static int dropless_fc;
108module_param(dropless_fc, int, 0);
109MODULE_PARM_DESC(dropless_fc, " Pause on exhausted host ring");
110
Eilon Greenstein9898f862009-02-12 08:38:27 +0000111static int poll;
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +0200112module_param(poll, int, 0);
Eilon Greenstein9898f862009-02-12 08:38:27 +0000113MODULE_PARM_DESC(poll, " Use polling (for debug)");
Eilon Greenstein8d5726c2009-02-12 08:37:19 +0000114
115static int mrrs = -1;
116module_param(mrrs, int, 0);
117MODULE_PARM_DESC(mrrs, " Force Max Read Req Size (0..3) (for debug)");
118
Eilon Greenstein9898f862009-02-12 08:38:27 +0000119static int debug;
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +0200120module_param(debug, int, 0);
Eilon Greenstein9898f862009-02-12 08:38:27 +0000121MODULE_PARM_DESC(debug, " Default debug msglevel");
122
Eilon Greenstein1cf167f2009-01-14 21:22:18 -0800123static struct workqueue_struct *bnx2x_wq;
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +0200124
Vladislav Zolotarovec6ba942010-12-13 05:44:01 +0000125#ifdef BCM_CNIC
126static u8 ALL_ENODE_MACS[] = {0x01, 0x10, 0x18, 0x01, 0x00, 0x01};
127#endif
128
/* Supported board types; values index board_info[] below and are stored
 * as the driver_data of the matching bnx2x_pci_tbl[] entry.
 */
enum bnx2x_board_type {
	BCM57710 = 0,
	BCM57711 = 1,
	BCM57711E = 2,
	BCM57712 = 3,
	BCM57712E = 4
};
136
/* Human-readable board names, indexed by board_type, above */
static struct {
	char *name;
} board_info[] __devinitdata = {
	{ "Broadcom NetXtreme II BCM57710 XGb" },
	{ "Broadcom NetXtreme II BCM57711 XGb" },
	{ "Broadcom NetXtreme II BCM57711E XGb" },
	{ "Broadcom NetXtreme II BCM57712 XGb" },
	{ "Broadcom NetXtreme II BCM57712E XGb" }
};
147
Alexey Dobriyana3aa1882010-01-07 11:58:11 +0000148static DEFINE_PCI_DEVICE_TABLE(bnx2x_pci_tbl) = {
Eilon Greensteine4ed7112009-08-12 08:24:10 +0000149 { PCI_VDEVICE(BROADCOM, PCI_DEVICE_ID_NX2_57710), BCM57710 },
150 { PCI_VDEVICE(BROADCOM, PCI_DEVICE_ID_NX2_57711), BCM57711 },
151 { PCI_VDEVICE(BROADCOM, PCI_DEVICE_ID_NX2_57711E), BCM57711E },
Dmitry Kravkovf2e08992010-10-06 03:28:26 +0000152 { PCI_VDEVICE(BROADCOM, PCI_DEVICE_ID_NX2_57712), BCM57712 },
153 { PCI_VDEVICE(BROADCOM, PCI_DEVICE_ID_NX2_57712E), BCM57712E },
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +0200154 { 0 }
155};
156
157MODULE_DEVICE_TABLE(pci, bnx2x_pci_tbl);
158
159/****************************************************************************
160* General service functions
161****************************************************************************/
162
/* Write a 64-bit DMA address into two consecutive 32-bit registers,
 * low dword first at @addr, high dword at @addr + 4.
 */
static inline void __storm_memset_dma_mapping(struct bnx2x *bp,
					      u32 addr, dma_addr_t mapping)
{
	REG_WR(bp, addr, U64_LO(mapping));
	REG_WR(bp, addr + 4, U64_HI(mapping));
}
169
170static inline void __storm_memset_fill(struct bnx2x *bp,
171 u32 addr, size_t size, u32 val)
172{
173 int i;
174 for (i = 0; i < size/4; i++)
175 REG_WR(bp, addr + (i * 4), val);
176}
177
178static inline void storm_memset_ustats_zero(struct bnx2x *bp,
179 u8 port, u16 stat_id)
180{
181 size_t size = sizeof(struct ustorm_per_client_stats);
182
183 u32 addr = BAR_USTRORM_INTMEM +
184 USTORM_PER_COUNTER_ID_STATS_OFFSET(port, stat_id);
185
186 __storm_memset_fill(bp, addr, size, 0);
187}
188
189static inline void storm_memset_tstats_zero(struct bnx2x *bp,
190 u8 port, u16 stat_id)
191{
192 size_t size = sizeof(struct tstorm_per_client_stats);
193
194 u32 addr = BAR_TSTRORM_INTMEM +
195 TSTORM_PER_COUNTER_ID_STATS_OFFSET(port, stat_id);
196
197 __storm_memset_fill(bp, addr, size, 0);
198}
199
200static inline void storm_memset_xstats_zero(struct bnx2x *bp,
201 u8 port, u16 stat_id)
202{
203 size_t size = sizeof(struct xstorm_per_client_stats);
204
205 u32 addr = BAR_XSTRORM_INTMEM +
206 XSTORM_PER_COUNTER_ID_STATS_OFFSET(port, stat_id);
207
208 __storm_memset_fill(bp, addr, size, 0);
209}
210
211
212static inline void storm_memset_spq_addr(struct bnx2x *bp,
213 dma_addr_t mapping, u16 abs_fid)
214{
215 u32 addr = XSEM_REG_FAST_MEMORY +
216 XSTORM_SPQ_PAGE_BASE_OFFSET(abs_fid);
217
218 __storm_memset_dma_mapping(bp, addr, mapping);
219}
220
/* Write @ov into the XSTORM E1HOV slot of function @abs_fid. */
static inline void storm_memset_ov(struct bnx2x *bp, u16 ov, u16 abs_fid)
{
	REG_WR16(bp, BAR_XSTRORM_INTMEM + XSTORM_E1HOV_OFFSET(abs_fid), ov);
}
225
226static inline void storm_memset_func_cfg(struct bnx2x *bp,
227 struct tstorm_eth_function_common_config *tcfg,
228 u16 abs_fid)
229{
230 size_t size = sizeof(struct tstorm_eth_function_common_config);
231
232 u32 addr = BAR_TSTRORM_INTMEM +
233 TSTORM_FUNCTION_COMMON_CONFIG_OFFSET(abs_fid);
234
235 __storm_memset_struct(bp, addr, size, (u32 *)tcfg);
236}
237
238static inline void storm_memset_xstats_flags(struct bnx2x *bp,
239 struct stats_indication_flags *flags,
240 u16 abs_fid)
241{
242 size_t size = sizeof(struct stats_indication_flags);
243
244 u32 addr = BAR_XSTRORM_INTMEM + XSTORM_STATS_FLAGS_OFFSET(abs_fid);
245
246 __storm_memset_struct(bp, addr, size, (u32 *)flags);
247}
248
249static inline void storm_memset_tstats_flags(struct bnx2x *bp,
250 struct stats_indication_flags *flags,
251 u16 abs_fid)
252{
253 size_t size = sizeof(struct stats_indication_flags);
254
255 u32 addr = BAR_TSTRORM_INTMEM + TSTORM_STATS_FLAGS_OFFSET(abs_fid);
256
257 __storm_memset_struct(bp, addr, size, (u32 *)flags);
258}
259
260static inline void storm_memset_ustats_flags(struct bnx2x *bp,
261 struct stats_indication_flags *flags,
262 u16 abs_fid)
263{
264 size_t size = sizeof(struct stats_indication_flags);
265
266 u32 addr = BAR_USTRORM_INTMEM + USTORM_STATS_FLAGS_OFFSET(abs_fid);
267
268 __storm_memset_struct(bp, addr, size, (u32 *)flags);
269}
270
271static inline void storm_memset_cstats_flags(struct bnx2x *bp,
272 struct stats_indication_flags *flags,
273 u16 abs_fid)
274{
275 size_t size = sizeof(struct stats_indication_flags);
276
277 u32 addr = BAR_CSTRORM_INTMEM + CSTORM_STATS_FLAGS_OFFSET(abs_fid);
278
279 __storm_memset_struct(bp, addr, size, (u32 *)flags);
280}
281
282static inline void storm_memset_xstats_addr(struct bnx2x *bp,
283 dma_addr_t mapping, u16 abs_fid)
284{
285 u32 addr = BAR_XSTRORM_INTMEM +
286 XSTORM_ETH_STATS_QUERY_ADDR_OFFSET(abs_fid);
287
288 __storm_memset_dma_mapping(bp, addr, mapping);
289}
290
291static inline void storm_memset_tstats_addr(struct bnx2x *bp,
292 dma_addr_t mapping, u16 abs_fid)
293{
294 u32 addr = BAR_TSTRORM_INTMEM +
295 TSTORM_ETH_STATS_QUERY_ADDR_OFFSET(abs_fid);
296
297 __storm_memset_dma_mapping(bp, addr, mapping);
298}
299
300static inline void storm_memset_ustats_addr(struct bnx2x *bp,
301 dma_addr_t mapping, u16 abs_fid)
302{
303 u32 addr = BAR_USTRORM_INTMEM +
304 USTORM_ETH_STATS_QUERY_ADDR_OFFSET(abs_fid);
305
306 __storm_memset_dma_mapping(bp, addr, mapping);
307}
308
309static inline void storm_memset_cstats_addr(struct bnx2x *bp,
310 dma_addr_t mapping, u16 abs_fid)
311{
312 u32 addr = BAR_CSTRORM_INTMEM +
313 CSTORM_ETH_STATS_QUERY_ADDR_OFFSET(abs_fid);
314
315 __storm_memset_dma_mapping(bp, addr, mapping);
316}
317
/* Record the VF->PF mapping (@abs_fid -> @pf_id) in each of the four
 * storm internal memories (X, C, T, U).
 */
static inline void storm_memset_vf_to_pf(struct bnx2x *bp, u16 abs_fid,
					 u16 pf_id)
{
	REG_WR8(bp, BAR_XSTRORM_INTMEM + XSTORM_VF_TO_PF_OFFSET(abs_fid),
		pf_id);
	REG_WR8(bp, BAR_CSTRORM_INTMEM + CSTORM_VF_TO_PF_OFFSET(abs_fid),
		pf_id);
	REG_WR8(bp, BAR_TSTRORM_INTMEM + TSTORM_VF_TO_PF_OFFSET(abs_fid),
		pf_id);
	REG_WR8(bp, BAR_USTRORM_INTMEM + USTORM_VF_TO_PF_OFFSET(abs_fid),
		pf_id);
}
330
/* Set the function-enable byte of @abs_fid in each of the four storm
 * internal memories (X, C, T, U).
 */
static inline void storm_memset_func_en(struct bnx2x *bp, u16 abs_fid,
					u8 enable)
{
	REG_WR8(bp, BAR_XSTRORM_INTMEM + XSTORM_FUNC_EN_OFFSET(abs_fid),
		enable);
	REG_WR8(bp, BAR_CSTRORM_INTMEM + CSTORM_FUNC_EN_OFFSET(abs_fid),
		enable);
	REG_WR8(bp, BAR_TSTRORM_INTMEM + TSTORM_FUNC_EN_OFFSET(abs_fid),
		enable);
	REG_WR8(bp, BAR_USTRORM_INTMEM + USTORM_FUNC_EN_OFFSET(abs_fid),
		enable);
}
343
344static inline void storm_memset_eq_data(struct bnx2x *bp,
345 struct event_ring_data *eq_data,
346 u16 pfid)
347{
348 size_t size = sizeof(struct event_ring_data);
349
350 u32 addr = BAR_CSTRORM_INTMEM + CSTORM_EVENT_RING_DATA_OFFSET(pfid);
351
352 __storm_memset_struct(bp, addr, size, (u32 *)eq_data);
353}
354
/* Publish the event-ring producer index @eq_prod of PF @pfid. */
static inline void storm_memset_eq_prod(struct bnx2x *bp, u16 eq_prod,
					u16 pfid)
{
	u32 addr = BAR_CSTRORM_INTMEM + CSTORM_EVENT_RING_PROD_OFFSET(pfid);
	REG_WR16(bp, addr, eq_prod);
}
361
/* Set the host-coalescing timeout (in @ticks) of entry @sb_index within
 * status block @fw_sb_id.  The index_data array sits at a different
 * offset in the E2 vs. E1x status-block data layout, so the address is
 * computed from the chip-appropriate structure.
 */
static inline void storm_memset_hc_timeout(struct bnx2x *bp, u8 port,
					   u16 fw_sb_id, u8 sb_index,
					   u8 ticks)
{

	int index_offset = CHIP_IS_E2(bp) ?
		offsetof(struct hc_status_block_data_e2, index_data) :
		offsetof(struct hc_status_block_data_e1x, index_data);
	u32 addr = BAR_CSTRORM_INTMEM +
			CSTORM_STATUS_BLOCK_DATA_OFFSET(fw_sb_id) +
			index_offset +
			sizeof(struct hc_index_data)*sb_index +
			offsetof(struct hc_index_data, timeout);
	REG_WR8(bp, addr, ticks);
	DP(NETIF_MSG_HW, "port %x fw_sb_id %d sb_index %d ticks %d\n",
	   port, fw_sb_id, sb_index, ticks);
}
/* Enable or disable host coalescing for entry @sb_index of status block
 * @fw_sb_id via a read-modify-write of the entry's flags word; only the
 * HC_ENABLED bit is touched.  The index_data offset differs between the
 * E2 and E1x status-block data layouts.
 */
static inline void storm_memset_hc_disable(struct bnx2x *bp, u8 port,
					   u16 fw_sb_id, u8 sb_index,
					   u8 disable)
{
	u32 enable_flag = disable ? 0 : (1 << HC_INDEX_DATA_HC_ENABLED_SHIFT);
	int index_offset = CHIP_IS_E2(bp) ?
		offsetof(struct hc_status_block_data_e2, index_data) :
		offsetof(struct hc_status_block_data_e1x, index_data);
	u32 addr = BAR_CSTRORM_INTMEM +
			CSTORM_STATUS_BLOCK_DATA_OFFSET(fw_sb_id) +
			index_offset +
			sizeof(struct hc_index_data)*sb_index +
			offsetof(struct hc_index_data, flags);
	u16 flags = REG_RD16(bp, addr);
	/* clear and set */
	flags &= ~HC_INDEX_DATA_HC_ENABLED;
	flags |= enable_flag;
	REG_WR16(bp, addr, flags);
	DP(NETIF_MSG_HW, "port %x fw_sb_id %d sb_index %d disable %d\n",
	   port, fw_sb_id, sb_index, disable);
}
400
/* used only at init
 * locking is done by mcp
 */
static void bnx2x_reg_wr_ind(struct bnx2x *bp, u32 addr, u32 val)
{
	/* indirect GRC write through PCI config space: address, then data */
	pci_write_config_dword(bp->pdev, PCICFG_GRC_ADDRESS, addr);
	pci_write_config_dword(bp->pdev, PCICFG_GRC_DATA, val);
	/* park the address register back on the vendor-id offset */
	pci_write_config_dword(bp->pdev, PCICFG_GRC_ADDRESS,
			       PCICFG_VENDOR_ID_OFFSET);
}
411
/* Indirect GRC read through PCI config space: write the address,
 * read the data, then park the address register back on the
 * vendor-id offset.  Counterpart of bnx2x_reg_wr_ind().
 */
static u32 bnx2x_reg_rd_ind(struct bnx2x *bp, u32 addr)
{
	u32 val;

	pci_write_config_dword(bp->pdev, PCICFG_GRC_ADDRESS, addr);
	pci_read_config_dword(bp->pdev, PCICFG_GRC_DATA, &val);
	pci_write_config_dword(bp->pdev, PCICFG_GRC_ADDRESS,
			       PCICFG_VENDOR_ID_OFFSET);

	return val;
}
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +0200423
Dmitry Kravkovf2e08992010-10-06 03:28:26 +0000424#define DMAE_DP_SRC_GRC "grc src_addr [%08x]"
425#define DMAE_DP_SRC_PCI "pci src_addr [%x:%08x]"
426#define DMAE_DP_DST_GRC "grc dst_addr [%08x]"
427#define DMAE_DP_DST_PCI "pci dst_addr [%x:%08x]"
428#define DMAE_DP_DST_NONE "dst_addr [none]"
429
/* Dump a DMAE command at debug level @msglvl.  The printed layout
 * depends on the source/destination types encoded in the opcode:
 * PCI addresses are shown as hi:lo pairs, GRC addresses as dword
 * offsets (hence the >> 2).
 */
static void bnx2x_dp_dmae(struct bnx2x *bp, struct dmae_command *dmae,
			  int msglvl)
{
	u32 src_type = dmae->opcode & DMAE_COMMAND_SRC;

	switch (dmae->opcode & DMAE_COMMAND_DST) {
	case DMAE_CMD_DST_PCI:
		if (src_type == DMAE_CMD_SRC_PCI)
			DP(msglvl, "DMAE: opcode 0x%08x\n"
			   "src [%x:%08x], len [%d*4], dst [%x:%08x]\n"
			   "comp_addr [%x:%08x], comp_val 0x%08x\n",
			   dmae->opcode, dmae->src_addr_hi, dmae->src_addr_lo,
			   dmae->len, dmae->dst_addr_hi, dmae->dst_addr_lo,
			   dmae->comp_addr_hi, dmae->comp_addr_lo,
			   dmae->comp_val);
		else
			DP(msglvl, "DMAE: opcode 0x%08x\n"
			   "src [%08x], len [%d*4], dst [%x:%08x]\n"
			   "comp_addr [%x:%08x], comp_val 0x%08x\n",
			   dmae->opcode, dmae->src_addr_lo >> 2,
			   dmae->len, dmae->dst_addr_hi, dmae->dst_addr_lo,
			   dmae->comp_addr_hi, dmae->comp_addr_lo,
			   dmae->comp_val);
		break;
	case DMAE_CMD_DST_GRC:
		if (src_type == DMAE_CMD_SRC_PCI)
			DP(msglvl, "DMAE: opcode 0x%08x\n"
			   "src [%x:%08x], len [%d*4], dst_addr [%08x]\n"
			   "comp_addr [%x:%08x], comp_val 0x%08x\n",
			   dmae->opcode, dmae->src_addr_hi, dmae->src_addr_lo,
			   dmae->len, dmae->dst_addr_lo >> 2,
			   dmae->comp_addr_hi, dmae->comp_addr_lo,
			   dmae->comp_val);
		else
			DP(msglvl, "DMAE: opcode 0x%08x\n"
			   "src [%08x], len [%d*4], dst [%08x]\n"
			   "comp_addr [%x:%08x], comp_val 0x%08x\n",
			   dmae->opcode, dmae->src_addr_lo >> 2,
			   dmae->len, dmae->dst_addr_lo >> 2,
			   dmae->comp_addr_hi, dmae->comp_addr_lo,
			   dmae->comp_val);
		break;
	default:
		/* no destination: only source and completion are printed */
		if (src_type == DMAE_CMD_SRC_PCI)
			DP(msglvl, "DMAE: opcode 0x%08x\n"
			   DP_LEVEL "src_addr [%x:%08x] len [%d * 4] "
				    "dst_addr [none]\n"
			   DP_LEVEL "comp_addr [%x:%08x] comp_val 0x%08x\n",
			   dmae->opcode, dmae->src_addr_hi, dmae->src_addr_lo,
			   dmae->len, dmae->comp_addr_hi, dmae->comp_addr_lo,
			   dmae->comp_val);
		else
			DP(msglvl, "DMAE: opcode 0x%08x\n"
			   DP_LEVEL "src_addr [%08x] len [%d * 4] "
				    "dst_addr [none]\n"
			   DP_LEVEL "comp_addr [%x:%08x] comp_val 0x%08x\n",
			   dmae->opcode, dmae->src_addr_lo >> 2,
			   dmae->len, dmae->comp_addr_hi, dmae->comp_addr_lo,
			   dmae->comp_val);
		break;
	}

}
493
/* DMAE "go" registers, indexed by DMAE command number (0..15) */
const u32 dmae_reg_go_c[] = {
	DMAE_REG_GO_C0, DMAE_REG_GO_C1, DMAE_REG_GO_C2, DMAE_REG_GO_C3,
	DMAE_REG_GO_C4, DMAE_REG_GO_C5, DMAE_REG_GO_C6, DMAE_REG_GO_C7,
	DMAE_REG_GO_C8, DMAE_REG_GO_C9, DMAE_REG_GO_C10, DMAE_REG_GO_C11,
	DMAE_REG_GO_C12, DMAE_REG_GO_C13, DMAE_REG_GO_C14, DMAE_REG_GO_C15
};
500
/* copy command into DMAE command memory and set DMAE command go */
void bnx2x_post_dmae(struct bnx2x *bp, struct dmae_command *dmae, int idx)
{
	u32 cmd_offset;
	int i;

	/* copy the command, one dword at a time, into slot @idx of the
	 * DMAE command memory
	 */
	cmd_offset = (DMAE_REG_CMD_MEM + sizeof(struct dmae_command) * idx);
	for (i = 0; i < (sizeof(struct dmae_command)/4); i++) {
		REG_WR(bp, cmd_offset + i*4, *(((u32 *)dmae) + i));

		DP(BNX2X_MSG_OFF, "DMAE cmd[%d].%d (0x%08x) : 0x%08x\n",
		   idx, i, cmd_offset + i*4, *(((u32 *)dmae) + i));
	}
	/* kick the engine - must happen after the full command is written */
	REG_WR(bp, dmae_reg_go_c[idx], 1);
}
516
Dmitry Kravkovf2e08992010-10-06 03:28:26 +0000517u32 bnx2x_dmae_opcode_add_comp(u32 opcode, u8 comp_type)
518{
519 return opcode | ((comp_type << DMAE_COMMAND_C_DST_SHIFT) |
520 DMAE_CMD_C_ENABLE);
521}
522
523u32 bnx2x_dmae_opcode_clr_src_reset(u32 opcode)
524{
525 return opcode & ~DMAE_CMD_SRC_RESET;
526}
527
528u32 bnx2x_dmae_opcode(struct bnx2x *bp, u8 src_type, u8 dst_type,
529 bool with_comp, u8 comp_type)
530{
531 u32 opcode = 0;
532
533 opcode |= ((src_type << DMAE_COMMAND_SRC_SHIFT) |
534 (dst_type << DMAE_COMMAND_DST_SHIFT));
535
536 opcode |= (DMAE_CMD_SRC_RESET | DMAE_CMD_DST_RESET);
537
538 opcode |= (BP_PORT(bp) ? DMAE_CMD_PORT_1 : DMAE_CMD_PORT_0);
539 opcode |= ((BP_E1HVN(bp) << DMAE_CMD_E1HVN_SHIFT) |
540 (BP_E1HVN(bp) << DMAE_COMMAND_DST_VN_SHIFT));
541 opcode |= (DMAE_COM_SET_ERR << DMAE_COMMAND_ERR_POLICY_SHIFT);
542
543#ifdef __BIG_ENDIAN
544 opcode |= DMAE_CMD_ENDIANITY_B_DW_SWAP;
545#else
546 opcode |= DMAE_CMD_ENDIANITY_DW_SWAP;
547#endif
548 if (with_comp)
549 opcode = bnx2x_dmae_opcode_add_comp(opcode, comp_type);
550 return opcode;
551}
552
stephen hemminger8d962862010-10-21 07:50:56 +0000553static void bnx2x_prep_dmae_with_comp(struct bnx2x *bp,
554 struct dmae_command *dmae,
555 u8 src_type, u8 dst_type)
Dmitry Kravkovf2e08992010-10-06 03:28:26 +0000556{
557 memset(dmae, 0, sizeof(struct dmae_command));
558
559 /* set the opcode */
560 dmae->opcode = bnx2x_dmae_opcode(bp, src_type, dst_type,
561 true, DMAE_COMP_PCI);
562
563 /* fill in the completion parameters */
564 dmae->comp_addr_lo = U64_LO(bnx2x_sp_mapping(bp, wb_comp));
565 dmae->comp_addr_hi = U64_HI(bnx2x_sp_mapping(bp, wb_comp));
566 dmae->comp_val = DMAE_COMP_VAL;
567}
568
/* issue a dmae command over the init-channel and wait for completion */
static int bnx2x_issue_dmae_with_comp(struct bnx2x *bp,
				      struct dmae_command *dmae)
{
	u32 *wb_comp = bnx2x_sp(bp, wb_comp);
	/* slow chip revisions get a much longer poll budget */
	int cnt = CHIP_REV_IS_SLOW(bp) ? (400000) : 4000;
	int rc = 0;

	DP(BNX2X_MSG_OFF, "data before [0x%08x 0x%08x 0x%08x 0x%08x]\n",
	   bp->slowpath->wb_data[0], bp->slowpath->wb_data[1],
	   bp->slowpath->wb_data[2], bp->slowpath->wb_data[3]);

	/* lock the dmae channel */
	spin_lock_bh(&bp->dmae_lock);

	/* reset completion */
	*wb_comp = 0;

	/* post the command on the channel used for initializations */
	bnx2x_post_dmae(bp, dmae, INIT_DMAE_C(bp));

	/* wait for completion; the PCI-error flag is masked out so an
	 * errored completion still terminates the loop
	 */
	udelay(5);
	while ((*wb_comp & ~DMAE_PCI_ERR_FLAG) != DMAE_COMP_VAL) {
		DP(BNX2X_MSG_OFF, "wb_comp 0x%08x\n", *wb_comp);

		if (!cnt) {
			BNX2X_ERR("DMAE timeout!\n");
			rc = DMAE_TIMEOUT;
			goto unlock;
		}
		cnt--;
		udelay(50);
	}
	/* completion arrived - check whether it reported a PCI error */
	if (*wb_comp & DMAE_PCI_ERR_FLAG) {
		BNX2X_ERR("DMAE PCI error!\n");
		rc = DMAE_PCI_ERROR;
	}

	DP(BNX2X_MSG_OFF, "data after [0x%08x 0x%08x 0x%08x 0x%08x]\n",
	   bp->slowpath->wb_data[0], bp->slowpath->wb_data[1],
	   bp->slowpath->wb_data[2], bp->slowpath->wb_data[3]);

unlock:
	spin_unlock_bh(&bp->dmae_lock);
	return rc;
}
616
/* DMA @len32 dwords from host memory at @dma_addr to GRC address
 * @dst_addr.  Falls back to indirect register writes of the prepared
 * wb_data buffer while the DMAE engine is not yet ready.
 */
void bnx2x_write_dmae(struct bnx2x *bp, dma_addr_t dma_addr, u32 dst_addr,
		      u32 len32)
{
	struct dmae_command dmae;

	if (!bp->dmae_ready) {
		u32 *data = bnx2x_sp(bp, wb_data[0]);

		DP(BNX2X_MSG_OFF, "DMAE is not ready (dst_addr %08x len32 %d)"
		   " using indirect\n", dst_addr, len32);
		bnx2x_init_ind_wr(bp, dst_addr, data, len32);
		return;
	}

	/* set opcode and fixed command fields */
	bnx2x_prep_dmae_with_comp(bp, &dmae, DMAE_SRC_PCI, DMAE_DST_GRC);

	/* fill in addresses and len */
	dmae.src_addr_lo = U64_LO(dma_addr);
	dmae.src_addr_hi = U64_HI(dma_addr);
	dmae.dst_addr_lo = dst_addr >> 2;	/* byte -> dword address */
	dmae.dst_addr_hi = 0;
	dmae.len = len32;

	bnx2x_dp_dmae(bp, &dmae, BNX2X_MSG_OFF);

	/* issue the command and wait for completion */
	bnx2x_issue_dmae_with_comp(bp, &dmae);
}
646
/* DMA @len32 dwords from GRC address @src_addr into the slowpath
 * wb_data buffer.  Falls back to indirect register reads while the
 * DMAE engine is not yet ready.
 */
void bnx2x_read_dmae(struct bnx2x *bp, u32 src_addr, u32 len32)
{
	struct dmae_command dmae;

	if (!bp->dmae_ready) {
		u32 *data = bnx2x_sp(bp, wb_data[0]);
		int i;

		DP(BNX2X_MSG_OFF, "DMAE is not ready (src_addr %08x len32 %d)"
		   " using indirect\n", src_addr, len32);
		for (i = 0; i < len32; i++)
			data[i] = bnx2x_reg_rd_ind(bp, src_addr + i*4);
		return;
	}

	/* set opcode and fixed command fields */
	bnx2x_prep_dmae_with_comp(bp, &dmae, DMAE_SRC_GRC, DMAE_DST_PCI);

	/* fill in addresses and len */
	dmae.src_addr_lo = src_addr >> 2;	/* byte -> dword address */
	dmae.src_addr_hi = 0;
	dmae.dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, wb_data));
	dmae.dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, wb_data));
	dmae.len = len32;

	bnx2x_dp_dmae(bp, &dmae, BNX2X_MSG_OFF);

	/* issue the command and wait for completion */
	bnx2x_issue_dmae_with_comp(bp, &dmae);
}
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +0200677
stephen hemminger8d962862010-10-21 07:50:56 +0000678static void bnx2x_write_dmae_phys_len(struct bnx2x *bp, dma_addr_t phys_addr,
679 u32 addr, u32 len)
Eilon Greenstein573f2032009-08-12 08:24:14 +0000680{
Vladislav Zolotarov02e3c6c2010-04-19 01:13:33 +0000681 int dmae_wr_max = DMAE_LEN32_WR_MAX(bp);
Eilon Greenstein573f2032009-08-12 08:24:14 +0000682 int offset = 0;
683
Vladislav Zolotarov02e3c6c2010-04-19 01:13:33 +0000684 while (len > dmae_wr_max) {
Eilon Greenstein573f2032009-08-12 08:24:14 +0000685 bnx2x_write_dmae(bp, phys_addr + offset,
Vladislav Zolotarov02e3c6c2010-04-19 01:13:33 +0000686 addr + offset, dmae_wr_max);
687 offset += dmae_wr_max * 4;
688 len -= dmae_wr_max;
Eilon Greenstein573f2032009-08-12 08:24:14 +0000689 }
690
691 bnx2x_write_dmae(bp, phys_addr + offset, addr + offset, len);
692}
693
/* used only for slowpath so not inlined */
/* Write a 64-bit value (hi:lo) to wide-bus register @reg via DMAE. */
static void bnx2x_wb_wr(struct bnx2x *bp, int reg, u32 val_hi, u32 val_lo)
{
	u32 wb_write[2];

	wb_write[0] = val_hi;
	wb_write[1] = val_lo;
	REG_WR_DMAE(bp, reg, wb_write, 2);
}
703
#ifdef USE_WB_RD
/* Read a 64-bit wide-bus register via DMAE; compiled only when
 * USE_WB_RD is defined.
 */
static u64 bnx2x_wb_rd(struct bnx2x *bp, int reg)
{
	u32 wb_data[2];

	REG_RD_DMAE(bp, reg, wb_data, 2);

	return HILO_U64(wb_data[0], wb_data[1]);
}
#endif
714
/* Scan the assert lists of the four storm processors (X, T, C, U) and
 * print every assert recorded by the microcode.  Scanning of a list
 * stops at the first entry whose first word holds the "invalid assert"
 * opcode marker.  Returns the total number of asserts found.
 */
static int bnx2x_mc_assert(struct bnx2x *bp)
{
	char last_idx;
	int i, rc = 0;
	u32 row0, row1, row2, row3;

	/* XSTORM */
	last_idx = REG_RD8(bp, BAR_XSTRORM_INTMEM +
			   XSTORM_ASSERT_LIST_INDEX_OFFSET);
	if (last_idx)
		BNX2X_ERR("XSTORM_ASSERT_LIST_INDEX 0x%x\n", last_idx);

	/* print the asserts */
	for (i = 0; i < STROM_ASSERT_ARRAY_SIZE; i++) {

		/* each assert entry is four consecutive dwords */
		row0 = REG_RD(bp, BAR_XSTRORM_INTMEM +
			      XSTORM_ASSERT_LIST_OFFSET(i));
		row1 = REG_RD(bp, BAR_XSTRORM_INTMEM +
			      XSTORM_ASSERT_LIST_OFFSET(i) + 4);
		row2 = REG_RD(bp, BAR_XSTRORM_INTMEM +
			      XSTORM_ASSERT_LIST_OFFSET(i) + 8);
		row3 = REG_RD(bp, BAR_XSTRORM_INTMEM +
			      XSTORM_ASSERT_LIST_OFFSET(i) + 12);

		if (row0 != COMMON_ASM_INVALID_ASSERT_OPCODE) {
			BNX2X_ERR("XSTORM_ASSERT_INDEX 0x%x = 0x%08x"
				  " 0x%08x 0x%08x 0x%08x\n",
				  i, row3, row2, row1, row0);
			rc++;
		} else {
			break;
		}
	}

	/* TSTORM */
	last_idx = REG_RD8(bp, BAR_TSTRORM_INTMEM +
			   TSTORM_ASSERT_LIST_INDEX_OFFSET);
	if (last_idx)
		BNX2X_ERR("TSTORM_ASSERT_LIST_INDEX 0x%x\n", last_idx);

	/* print the asserts */
	for (i = 0; i < STROM_ASSERT_ARRAY_SIZE; i++) {

		row0 = REG_RD(bp, BAR_TSTRORM_INTMEM +
			      TSTORM_ASSERT_LIST_OFFSET(i));
		row1 = REG_RD(bp, BAR_TSTRORM_INTMEM +
			      TSTORM_ASSERT_LIST_OFFSET(i) + 4);
		row2 = REG_RD(bp, BAR_TSTRORM_INTMEM +
			      TSTORM_ASSERT_LIST_OFFSET(i) + 8);
		row3 = REG_RD(bp, BAR_TSTRORM_INTMEM +
			      TSTORM_ASSERT_LIST_OFFSET(i) + 12);

		if (row0 != COMMON_ASM_INVALID_ASSERT_OPCODE) {
			BNX2X_ERR("TSTORM_ASSERT_INDEX 0x%x = 0x%08x"
				  " 0x%08x 0x%08x 0x%08x\n",
				  i, row3, row2, row1, row0);
			rc++;
		} else {
			break;
		}
	}

	/* CSTORM */
	last_idx = REG_RD8(bp, BAR_CSTRORM_INTMEM +
			   CSTORM_ASSERT_LIST_INDEX_OFFSET);
	if (last_idx)
		BNX2X_ERR("CSTORM_ASSERT_LIST_INDEX 0x%x\n", last_idx);

	/* print the asserts */
	for (i = 0; i < STROM_ASSERT_ARRAY_SIZE; i++) {

		row0 = REG_RD(bp, BAR_CSTRORM_INTMEM +
			      CSTORM_ASSERT_LIST_OFFSET(i));
		row1 = REG_RD(bp, BAR_CSTRORM_INTMEM +
			      CSTORM_ASSERT_LIST_OFFSET(i) + 4);
		row2 = REG_RD(bp, BAR_CSTRORM_INTMEM +
			      CSTORM_ASSERT_LIST_OFFSET(i) + 8);
		row3 = REG_RD(bp, BAR_CSTRORM_INTMEM +
			      CSTORM_ASSERT_LIST_OFFSET(i) + 12);

		if (row0 != COMMON_ASM_INVALID_ASSERT_OPCODE) {
			BNX2X_ERR("CSTORM_ASSERT_INDEX 0x%x = 0x%08x"
				  " 0x%08x 0x%08x 0x%08x\n",
				  i, row3, row2, row1, row0);
			rc++;
		} else {
			break;
		}
	}

	/* USTORM */
	last_idx = REG_RD8(bp, BAR_USTRORM_INTMEM +
			   USTORM_ASSERT_LIST_INDEX_OFFSET);
	if (last_idx)
		BNX2X_ERR("USTORM_ASSERT_LIST_INDEX 0x%x\n", last_idx);

	/* print the asserts */
	for (i = 0; i < STROM_ASSERT_ARRAY_SIZE; i++) {

		row0 = REG_RD(bp, BAR_USTRORM_INTMEM +
			      USTORM_ASSERT_LIST_OFFSET(i));
		row1 = REG_RD(bp, BAR_USTRORM_INTMEM +
			      USTORM_ASSERT_LIST_OFFSET(i) + 4);
		row2 = REG_RD(bp, BAR_USTRORM_INTMEM +
			      USTORM_ASSERT_LIST_OFFSET(i) + 8);
		row3 = REG_RD(bp, BAR_USTRORM_INTMEM +
			      USTORM_ASSERT_LIST_OFFSET(i) + 12);

		if (row0 != COMMON_ASM_INVALID_ASSERT_OPCODE) {
			BNX2X_ERR("USTORM_ASSERT_INDEX 0x%x = 0x%08x"
				  " 0x%08x 0x%08x 0x%08x\n",
				  i, row3, row2, row1, row0);
			rc++;
		} else {
			break;
		}
	}

	return rc;
}
Eliezer Tamirc14423f2008-02-28 11:49:42 -0800835
/*
 * bnx2x_fw_dump - print the MCP firmware trace buffer to the kernel log.
 *
 * Locates the firmware trace area relative to the shmem base of this
 * path, then prints the trace in two passes (from the current mark to
 * the end of the buffer, then from the buffer start up to the mark) so
 * the output comes out in chronological order.  No-op when the MCP is
 * not present.
 */
static void bnx2x_fw_dump(struct bnx2x *bp)
{
	u32 addr;
	u32 mark, offset;
	__be32 data[9];
	int word;
	u32 trace_shmem_base;
	if (BP_NOMCP(bp)) {
		BNX2X_ERR("NO MCP - can not dump\n");
		return;
	}

	/* Each path has its own shmem; path 1 reads the other path's base
	 * from shmem2.
	 */
	if (BP_PATH(bp) == 0)
		trace_shmem_base = bp->common.shmem_base;
	else
		trace_shmem_base = SHMEM2_RD(bp, other_shmem_base_addr);
	addr = trace_shmem_base - 0x0800 + 4;
	mark = REG_RD(bp, addr);
	/* Translate the mark into a scratchpad address; the scratchpad
	 * register block differs between E1x and later chips.
	 */
	mark = (CHIP_IS_E1x(bp) ? MCP_REG_MCPR_SCRATCH : MCP_A_REG_MCPR_SCRATCH)
		+ ((mark + 0x3) & ~0x3) - 0x08000000;
	pr_err("begin fw dump (mark 0x%x)\n", mark);

	pr_err("");
	/* First half: from the mark to the end of the trace buffer.
	 * Reads are big-endian converted so data[] is a printable string;
	 * data[8] = 0 guarantees NUL termination for the %s below.
	 */
	for (offset = mark; offset <= trace_shmem_base; offset += 0x8*4) {
		for (word = 0; word < 8; word++)
			data[word] = htonl(REG_RD(bp, offset + 4*word));
		data[8] = 0x0;
		pr_cont("%s", (char *)data);
	}
	/* Second half: from the buffer start up to the mark */
	for (offset = addr + 4; offset <= mark; offset += 0x8*4) {
		for (word = 0; word < 8; word++)
			data[word] = htonl(REG_RD(bp, offset + 4*word));
		data[8] = 0x0;
		pr_cont("%s", (char *)data);
	}
	pr_err("end of fw dump\n");
}
873
/*
 * bnx2x_panic_dump - dump driver and firmware state to the log on a
 * fatal error.
 *
 * Disables statistics collection, prints the common/default status
 * block state, per-queue indices and firmware status-block data, and
 * (when BNX2X_STOP_ON_ERROR is set) the raw Rx/Tx descriptor rings.
 * Finishes with the firmware trace (bnx2x_fw_dump) and the STORM
 * assert lists (bnx2x_mc_assert).
 */
void bnx2x_panic_dump(struct bnx2x *bp)
{
	int i;
	u16 j;
	struct hc_sp_status_block_data sp_sb_data;
	int func = BP_FUNC(bp);
#ifdef BNX2X_STOP_ON_ERROR
	u16 start = 0, end = 0;
#endif

	/* Stop the statistics state machine so it doesn't race the dump */
	bp->stats_state = STATS_STATE_DISABLED;
	DP(BNX2X_MSG_STATS, "stats_state - DISABLED\n");

	BNX2X_ERR("begin crash dump -----------------\n");

	/* Indices */
	/* Common */
	BNX2X_ERR("def_idx(0x%x) def_att_idx(0x%x) attn_state(0x%x)"
		  " spq_prod_idx(0x%x)\n",
		  bp->def_idx, bp->def_att_idx,
		  bp->attn_state, bp->spq_prod_idx);
	BNX2X_ERR("DSB: attn bits(0x%x) ack(0x%x) id(0x%x) idx(0x%x)\n",
		  bp->def_status_blk->atten_status_block.attn_bits,
		  bp->def_status_blk->atten_status_block.attn_bits_ack,
		  bp->def_status_blk->atten_status_block.status_block_id,
		  bp->def_status_blk->atten_status_block.attn_bits_index);
	BNX2X_ERR(" def (");
	for (i = 0; i < HC_SP_SB_MAX_INDICES; i++)
		pr_cont("0x%x%s",
			bp->def_status_blk->sp_sb.index_values[i],
			(i == HC_SP_SB_MAX_INDICES - 1) ? ") " : " ");

	/* Read the slow-path status block data word-by-word from CSTORM */
	for (i = 0; i < sizeof(struct hc_sp_status_block_data)/sizeof(u32); i++)
		*((u32 *)&sp_sb_data + i) = REG_RD(bp, BAR_CSTRORM_INTMEM +
			CSTORM_SP_STATUS_BLOCK_DATA_OFFSET(func) +
			i*sizeof(u32));

	pr_cont("igu_sb_id(0x%x) igu_seg_id (0x%x) "
		"pf_id(0x%x) vnic_id(0x%x) "
		"vf_id(0x%x) vf_valid (0x%x)\n",
		sp_sb_data.igu_sb_id,
		sp_sb_data.igu_seg_id,
		sp_sb_data.p_func.pf_id,
		sp_sb_data.p_func.vnic_id,
		sp_sb_data.p_func.vf_id,
		sp_sb_data.p_func.vf_valid);


	for_each_eth_queue(bp, i) {
		struct bnx2x_fastpath *fp = &bp->fp[i];
		int loop;
		struct hc_status_block_data_e2 sb_data_e2;
		struct hc_status_block_data_e1x sb_data_e1x;
		/* Point at the E2 or E1x layout; the actual bytes are
		 * filled in by the REG_RD loop below.
		 */
		struct hc_status_block_sm *hc_sm_p =
			CHIP_IS_E2(bp) ?
			sb_data_e2.common.state_machine :
			sb_data_e1x.common.state_machine;
		struct hc_index_data *hc_index_p =
			CHIP_IS_E2(bp) ?
			sb_data_e2.index_data :
			sb_data_e1x.index_data;
		int data_size;
		u32 *sb_data_p;

		/* Rx */
		BNX2X_ERR("fp%d: rx_bd_prod(0x%x) rx_bd_cons(0x%x)"
			  " rx_comp_prod(0x%x)"
			  " rx_comp_cons(0x%x) *rx_cons_sb(0x%x)\n",
			  i, fp->rx_bd_prod, fp->rx_bd_cons,
			  fp->rx_comp_prod,
			  fp->rx_comp_cons, le16_to_cpu(*fp->rx_cons_sb));
		BNX2X_ERR(" rx_sge_prod(0x%x) last_max_sge(0x%x)"
			  " fp_hc_idx(0x%x)\n",
			  fp->rx_sge_prod, fp->last_max_sge,
			  le16_to_cpu(fp->fp_hc_idx));

		/* Tx */
		BNX2X_ERR("fp%d: tx_pkt_prod(0x%x) tx_pkt_cons(0x%x)"
			  " tx_bd_prod(0x%x) tx_bd_cons(0x%x)"
			  " *tx_cons_sb(0x%x)\n",
			  i, fp->tx_pkt_prod, fp->tx_pkt_cons, fp->tx_bd_prod,
			  fp->tx_bd_cons, le16_to_cpu(*fp->tx_cons_sb));

		loop = CHIP_IS_E2(bp) ?
			HC_SB_MAX_INDICES_E2 : HC_SB_MAX_INDICES_E1X;

		/* host sb data */

#ifdef BCM_CNIC
		/* FCoE queue has no fw status block of its own */
		if (IS_FCOE_FP(fp))
			continue;
#endif
		BNX2X_ERR(" run indexes (");
		for (j = 0; j < HC_SB_MAX_SM; j++)
			pr_cont("0x%x%s",
				fp->sb_running_index[j],
				(j == HC_SB_MAX_SM - 1) ? ")" : " ");

		BNX2X_ERR(" indexes (");
		for (j = 0; j < loop; j++)
			pr_cont("0x%x%s",
				fp->sb_index_values[j],
				(j == loop - 1) ? ")" : " ");
		/* fw sb data */
		data_size = CHIP_IS_E2(bp) ?
			sizeof(struct hc_status_block_data_e2) :
			sizeof(struct hc_status_block_data_e1x);
		data_size /= sizeof(u32);
		sb_data_p = CHIP_IS_E2(bp) ?
			(u32 *)&sb_data_e2 :
			(u32 *)&sb_data_e1x;
		/* copy sb data in here */
		for (j = 0; j < data_size; j++)
			*(sb_data_p + j) = REG_RD(bp, BAR_CSTRORM_INTMEM +
				CSTORM_STATUS_BLOCK_DATA_OFFSET(fp->fw_sb_id) +
				j * sizeof(u32));

		if (CHIP_IS_E2(bp)) {
			pr_cont("pf_id(0x%x) vf_id (0x%x) vf_valid(0x%x) "
				"vnic_id(0x%x) same_igu_sb_1b(0x%x)\n",
				sb_data_e2.common.p_func.pf_id,
				sb_data_e2.common.p_func.vf_id,
				sb_data_e2.common.p_func.vf_valid,
				sb_data_e2.common.p_func.vnic_id,
				sb_data_e2.common.same_igu_sb_1b);
		} else {
			pr_cont("pf_id(0x%x) vf_id (0x%x) vf_valid(0x%x) "
				"vnic_id(0x%x) same_igu_sb_1b(0x%x)\n",
				sb_data_e1x.common.p_func.pf_id,
				sb_data_e1x.common.p_func.vf_id,
				sb_data_e1x.common.p_func.vf_valid,
				sb_data_e1x.common.p_func.vnic_id,
				sb_data_e1x.common.same_igu_sb_1b);
		}

		/* SB_SMs data */
		for (j = 0; j < HC_SB_MAX_SM; j++) {
			pr_cont("SM[%d] __flags (0x%x) "
				"igu_sb_id (0x%x) igu_seg_id(0x%x) "
				"time_to_expire (0x%x) "
				"timer_value(0x%x)\n", j,
				hc_sm_p[j].__flags,
				hc_sm_p[j].igu_sb_id,
				hc_sm_p[j].igu_seg_id,
				hc_sm_p[j].time_to_expire,
				hc_sm_p[j].timer_value);
		}

		/* Indices data */
		for (j = 0; j < loop; j++) {
			pr_cont("INDEX[%d] flags (0x%x) "
				"timeout (0x%x)\n", j,
				hc_index_p[j].flags,
				hc_index_p[j].timeout);
		}
	}

#ifdef BNX2X_STOP_ON_ERROR
	/* Rings */
	/* Rx */
	for_each_rx_queue(bp, i) {
		struct bnx2x_fastpath *fp = &bp->fp[i];

		/* Dump a window around the current consumer indices */
		start = RX_BD(le16_to_cpu(*fp->rx_cons_sb) - 10);
		end = RX_BD(le16_to_cpu(*fp->rx_cons_sb) + 503);
		for (j = start; j != end; j = RX_BD(j + 1)) {
			u32 *rx_bd = (u32 *)&fp->rx_desc_ring[j];
			struct sw_rx_bd *sw_bd = &fp->rx_buf_ring[j];

			BNX2X_ERR("fp%d: rx_bd[%x]=[%x:%x] sw_bd=[%p]\n",
				  i, j, rx_bd[1], rx_bd[0], sw_bd->skb);
		}

		start = RX_SGE(fp->rx_sge_prod);
		end = RX_SGE(fp->last_max_sge);
		for (j = start; j != end; j = RX_SGE(j + 1)) {
			u32 *rx_sge = (u32 *)&fp->rx_sge_ring[j];
			struct sw_rx_page *sw_page = &fp->rx_page_ring[j];

			BNX2X_ERR("fp%d: rx_sge[%x]=[%x:%x] sw_page=[%p]\n",
				  i, j, rx_sge[1], rx_sge[0], sw_page->page);
		}

		start = RCQ_BD(fp->rx_comp_cons - 10);
		end = RCQ_BD(fp->rx_comp_cons + 503);
		for (j = start; j != end; j = RCQ_BD(j + 1)) {
			u32 *cqe = (u32 *)&fp->rx_comp_ring[j];

			BNX2X_ERR("fp%d: cqe[%x]=[%x:%x:%x:%x]\n",
				  i, j, cqe[0], cqe[1], cqe[2], cqe[3]);
		}
	}

	/* Tx */
	for_each_tx_queue(bp, i) {
		struct bnx2x_fastpath *fp = &bp->fp[i];

		start = TX_BD(le16_to_cpu(*fp->tx_cons_sb) - 10);
		end = TX_BD(le16_to_cpu(*fp->tx_cons_sb) + 245);
		for (j = start; j != end; j = TX_BD(j + 1)) {
			struct sw_tx_bd *sw_bd = &fp->tx_buf_ring[j];

			BNX2X_ERR("fp%d: packet[%x]=[%p,%x]\n",
				  i, j, sw_bd->skb, sw_bd->first_bd);
		}

		start = TX_BD(fp->tx_bd_cons - 10);
		end = TX_BD(fp->tx_bd_cons + 254);
		for (j = start; j != end; j = TX_BD(j + 1)) {
			u32 *tx_bd = (u32 *)&fp->tx_desc_ring[j];

			BNX2X_ERR("fp%d: tx_bd[%x]=[%x:%x:%x:%x]\n",
				  i, j, tx_bd[0], tx_bd[1], tx_bd[2], tx_bd[3]);
		}
	}
#endif
	bnx2x_fw_dump(bp);
	bnx2x_mc_assert(bp);
	BNX2X_ERR("end crash dump -----------------\n");
}
1094
/*
 * bnx2x_hc_int_enable - enable interrupts through the HC (Host
 * Coalescing) block for this port.
 *
 * Programs HC_REG_CONFIG_{0,1} according to the interrupt mode in use
 * (MSI-X, MSI, or INTx), unmasks the E1 interrupt mask register, and
 * configures the leading/trailing attention edges.  Barrier placement
 * is deliberate — see the inline comments.
 */
static void bnx2x_hc_int_enable(struct bnx2x *bp)
{
	int port = BP_PORT(bp);
	u32 addr = port ? HC_REG_CONFIG_1 : HC_REG_CONFIG_0;
	u32 val = REG_RD(bp, addr);
	int msix = (bp->flags & USING_MSIX_FLAG) ? 1 : 0;
	int msi = (bp->flags & USING_MSI_FLAG) ? 1 : 0;

	if (msix) {
		val &= ~(HC_CONFIG_0_REG_SINGLE_ISR_EN_0 |
			 HC_CONFIG_0_REG_INT_LINE_EN_0);
		val |= (HC_CONFIG_0_REG_MSI_MSIX_INT_EN_0 |
			HC_CONFIG_0_REG_ATTN_BIT_EN_0);
	} else if (msi) {
		val &= ~HC_CONFIG_0_REG_INT_LINE_EN_0;
		val |= (HC_CONFIG_0_REG_SINGLE_ISR_EN_0 |
			HC_CONFIG_0_REG_MSI_MSIX_INT_EN_0 |
			HC_CONFIG_0_REG_ATTN_BIT_EN_0);
	} else {
		/* INTx: enable everything first ... */
		val |= (HC_CONFIG_0_REG_SINGLE_ISR_EN_0 |
			HC_CONFIG_0_REG_MSI_MSIX_INT_EN_0 |
			HC_CONFIG_0_REG_INT_LINE_EN_0 |
			HC_CONFIG_0_REG_ATTN_BIT_EN_0);

		/* ... then, on non-E1 chips, write that value and clear the
		 * MSI/MSI-X enable so the final write below leaves only the
		 * INTx path active.
		 */
		if (!CHIP_IS_E1(bp)) {
			DP(NETIF_MSG_INTR, "write %x to HC %d (addr 0x%x)\n",
			   val, port, addr);

			REG_WR(bp, addr, val);

			val &= ~HC_CONFIG_0_REG_MSI_MSIX_INT_EN_0;
		}
	}

	/* E1 gates interrupts through a separate per-port mask register */
	if (CHIP_IS_E1(bp))
		REG_WR(bp, HC_REG_INT_MASK + port*4, 0x1FFFF);

	DP(NETIF_MSG_INTR, "write %x to HC %d (addr 0x%x) mode %s\n",
	   val, port, addr, (msix ? "MSI-X" : (msi ? "MSI" : "INTx")));

	REG_WR(bp, addr, val);
	/*
	 * Ensure that HC_CONFIG is written before leading/trailing edge config
	 */
	mmiowb();
	barrier();

	if (!CHIP_IS_E1(bp)) {
		/* init leading/trailing edge */
		if (IS_MF(bp)) {
			val = (0xee0f | (1 << (BP_E1HVN(bp) + 4)));
			if (bp->port.pmf)
				/* enable nig and gpio3 attention */
				val |= 0x1100;
		} else
			val = 0xffff;

		REG_WR(bp, HC_REG_TRAILING_EDGE_0 + port*8, val);
		REG_WR(bp, HC_REG_LEADING_EDGE_0 + port*8, val);
	}

	/* Make sure that interrupts are indeed enabled from here on */
	mmiowb();
}
1159
/*
 * bnx2x_igu_int_enable - enable interrupts through the IGU block.
 *
 * Counterpart of bnx2x_hc_int_enable() for chips whose interrupt path
 * goes through the IGU: programs IGU_REG_PF_CONFIGURATION for the
 * active interrupt mode (MSI-X, MSI, or INTx) and then sets the
 * leading/trailing attention edge latches.
 */
static void bnx2x_igu_int_enable(struct bnx2x *bp)
{
	u32 val;
	int msix = (bp->flags & USING_MSIX_FLAG) ? 1 : 0;
	int msi = (bp->flags & USING_MSI_FLAG) ? 1 : 0;

	val = REG_RD(bp, IGU_REG_PF_CONFIGURATION);

	if (msix) {
		val &= ~(IGU_PF_CONF_INT_LINE_EN |
			 IGU_PF_CONF_SINGLE_ISR_EN);
		val |= (IGU_PF_CONF_FUNC_EN |
			IGU_PF_CONF_MSI_MSIX_EN |
			IGU_PF_CONF_ATTN_BIT_EN);
	} else if (msi) {
		val &= ~IGU_PF_CONF_INT_LINE_EN;
		val |= (IGU_PF_CONF_FUNC_EN |
			IGU_PF_CONF_MSI_MSIX_EN |
			IGU_PF_CONF_ATTN_BIT_EN |
			IGU_PF_CONF_SINGLE_ISR_EN);
	} else {
		val &= ~IGU_PF_CONF_MSI_MSIX_EN;
		val |= (IGU_PF_CONF_FUNC_EN |
			IGU_PF_CONF_INT_LINE_EN |
			IGU_PF_CONF_ATTN_BIT_EN |
			IGU_PF_CONF_SINGLE_ISR_EN);
	}

	DP(NETIF_MSG_INTR, "write 0x%x to IGU mode %s\n",
	   val, (msix ? "MSI-X" : (msi ? "MSI" : "INTx")));

	REG_WR(bp, IGU_REG_PF_CONFIGURATION, val);

	barrier();

	/* init leading/trailing edge */
	if (IS_MF(bp)) {
		val = (0xee0f | (1 << (BP_E1HVN(bp) + 4)));
		if (bp->port.pmf)
			/* enable nig and gpio3 attention */
			val |= 0x1100;
	} else
		val = 0xffff;

	REG_WR(bp, IGU_REG_TRAILING_EDGE_LATCH, val);
	REG_WR(bp, IGU_REG_LEADING_EDGE_LATCH, val);

	/* Make sure that interrupts are indeed enabled from here on */
	mmiowb();
}
1210
1211void bnx2x_int_enable(struct bnx2x *bp)
1212{
1213 if (bp->common.int_block == INT_BLOCK_HC)
1214 bnx2x_hc_int_enable(bp);
1215 else
1216 bnx2x_igu_int_enable(bp);
1217}
1218
/*
 * bnx2x_hc_int_disable - disable interrupts through the HC block for
 * this port.
 *
 * On E1 the MSI/MSI-X enable bit must stay set in the HC config (it may
 * only be cleared via PCI config space), so the per-port mask register
 * is used to silence the block instead.
 */
static void bnx2x_hc_int_disable(struct bnx2x *bp)
{
	int port = BP_PORT(bp);
	u32 addr = port ? HC_REG_CONFIG_1 : HC_REG_CONFIG_0;
	u32 val = REG_RD(bp, addr);

	/*
	 * in E1 we must use only PCI configuration space to disable
	 * MSI/MSI-X capability
	 * It's forbidden to disable IGU_PF_CONF_MSI_MSIX_EN in HC block
	 */
	if (CHIP_IS_E1(bp)) {
		/* Since IGU_PF_CONF_MSI_MSIX_EN still always on
		 * Use mask register to prevent from HC sending interrupts
		 * after we exit the function
		 */
		REG_WR(bp, HC_REG_INT_MASK + port*4, 0);

		val &= ~(HC_CONFIG_0_REG_SINGLE_ISR_EN_0 |
			 HC_CONFIG_0_REG_INT_LINE_EN_0 |
			 HC_CONFIG_0_REG_ATTN_BIT_EN_0);
	} else
		val &= ~(HC_CONFIG_0_REG_SINGLE_ISR_EN_0 |
			 HC_CONFIG_0_REG_MSI_MSIX_INT_EN_0 |
			 HC_CONFIG_0_REG_INT_LINE_EN_0 |
			 HC_CONFIG_0_REG_ATTN_BIT_EN_0);

	DP(NETIF_MSG_INTR, "write %x to HC %d (addr 0x%x)\n",
	   val, port, addr);

	/* flush all outstanding writes */
	mmiowb();

	REG_WR(bp, addr, val);
	/* Read back to verify the disable actually took effect */
	if (REG_RD(bp, addr) != val)
		BNX2X_ERR("BUG! proper val not read from IGU!\n");
}
1256
/*
 * bnx2x_igu_int_disable - disable interrupts through the IGU block.
 *
 * Clears all interrupt enable bits in IGU_REG_PF_CONFIGURATION and
 * verifies the write by reading the register back.
 */
static void bnx2x_igu_int_disable(struct bnx2x *bp)
{
	u32 val = REG_RD(bp, IGU_REG_PF_CONFIGURATION);

	val &= ~(IGU_PF_CONF_MSI_MSIX_EN |
		 IGU_PF_CONF_INT_LINE_EN |
		 IGU_PF_CONF_ATTN_BIT_EN);

	DP(NETIF_MSG_INTR, "write %x to IGU\n", val);

	/* flush all outstanding writes */
	mmiowb();

	REG_WR(bp, IGU_REG_PF_CONFIGURATION, val);
	/* Read back to verify the disable actually took effect */
	if (REG_RD(bp, IGU_REG_PF_CONFIGURATION) != val)
		BNX2X_ERR("BUG! proper val not read from IGU!\n");
}
1274
stephen hemminger8d962862010-10-21 07:50:56 +00001275static void bnx2x_int_disable(struct bnx2x *bp)
Dmitry Kravkovf2e08992010-10-06 03:28:26 +00001276{
1277 if (bp->common.int_block == INT_BLOCK_HC)
1278 bnx2x_hc_int_disable(bp);
1279 else
1280 bnx2x_igu_int_disable(bp);
1281}
1282
/*
 * bnx2x_int_disable_sync - disable interrupts and wait until no ISR or
 * slow-path work is still running.
 *
 * @disable_hw: when non-zero, also mask interrupts at the hardware
 *              level before synchronizing.
 *
 * With MSI-X, waits on the slow-path vector plus one vector per
 * ethernet queue (skipping the extra CNIC vector when BCM_CNIC is
 * built in); otherwise waits on the single PCI IRQ.  Finally cancels
 * any pending sp_task and drains the driver workqueue.
 */
void bnx2x_int_disable_sync(struct bnx2x *bp, int disable_hw)
{
	int msix = (bp->flags & USING_MSIX_FLAG) ? 1 : 0;
	int i, offset;

	if (disable_hw)
		/* prevent the HW from sending interrupts */
		bnx2x_int_disable(bp);

	/* make sure all ISRs are done */
	if (msix) {
		synchronize_irq(bp->msix_table[0].vector);
		offset = 1;
#ifdef BCM_CNIC
		/* CNIC owns the next vector after the slow-path one */
		offset++;
#endif
		for_each_eth_queue(bp, i)
			synchronize_irq(bp->msix_table[i + offset].vector);
	} else
		synchronize_irq(bp->pdev->irq);

	/* make sure sp_task is not running */
	cancel_delayed_work(&bp->sp_task);
	flush_workqueue(bnx2x_wq);
}
1308
Eilon Greenstein34f80b02008-06-23 20:33:01 -07001309/* fast path */
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02001310
1311/*
Eilon Greenstein34f80b02008-06-23 20:33:01 -07001312 * General service functions
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02001313 */
1314
Vladislav Zolotarov72fd0712010-04-19 01:13:12 +00001315/* Return true if succeeded to acquire the lock */
1316static bool bnx2x_trylock_hw_lock(struct bnx2x *bp, u32 resource)
1317{
1318 u32 lock_status;
1319 u32 resource_bit = (1 << resource);
1320 int func = BP_FUNC(bp);
1321 u32 hw_lock_control_reg;
1322
1323 DP(NETIF_MSG_HW, "Trying to take a lock on resource %d\n", resource);
1324
1325 /* Validating that the resource is within range */
1326 if (resource > HW_LOCK_MAX_RESOURCE_VALUE) {
1327 DP(NETIF_MSG_HW,
1328 "resource(0x%x) > HW_LOCK_MAX_RESOURCE_VALUE(0x%x)\n",
1329 resource, HW_LOCK_MAX_RESOURCE_VALUE);
Eric Dumazet0fdf4d02010-08-26 22:03:53 -07001330 return false;
Vladislav Zolotarov72fd0712010-04-19 01:13:12 +00001331 }
1332
1333 if (func <= 5)
1334 hw_lock_control_reg = (MISC_REG_DRIVER_CONTROL_1 + func*8);
1335 else
1336 hw_lock_control_reg =
1337 (MISC_REG_DRIVER_CONTROL_7 + (func - 6)*8);
1338
1339 /* Try to acquire the lock */
1340 REG_WR(bp, hw_lock_control_reg + 4, resource_bit);
1341 lock_status = REG_RD(bp, hw_lock_control_reg);
1342 if (lock_status & resource_bit)
1343 return true;
1344
1345 DP(NETIF_MSG_HW, "Failed to get a lock on resource %d\n", resource);
1346 return false;
1347}
1348
Michael Chan993ac7b2009-10-10 13:46:56 +00001349#ifdef BCM_CNIC
1350static void bnx2x_cnic_cfc_comp(struct bnx2x *bp, int cid);
1351#endif
Eilon Greenstein3196a882008-08-13 15:58:49 -07001352
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00001353void bnx2x_sp_event(struct bnx2x_fastpath *fp,
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02001354 union eth_rx_cqe *rr_cqe)
1355{
1356 struct bnx2x *bp = fp->bp;
1357 int cid = SW_CID(rr_cqe->ramrod_cqe.conn_and_cmd_data);
1358 int command = CQE_CMD(rr_cqe->ramrod_cqe.conn_and_cmd_data);
1359
Eilon Greenstein34f80b02008-06-23 20:33:01 -07001360 DP(BNX2X_MSG_SP,
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02001361 "fp %d cid %d got ramrod #%d state is %x type is %d\n",
Eilon Greenstein0626b892009-02-12 08:38:14 +00001362 fp->index, cid, command, bp->state,
Eilon Greenstein34f80b02008-06-23 20:33:01 -07001363 rr_cqe->ramrod_cqe.ramrod_type);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02001364
Dmitry Kravkov523224a2010-10-06 03:23:26 +00001365 switch (command | fp->state) {
1366 case (RAMROD_CMD_ID_ETH_CLIENT_SETUP | BNX2X_FP_STATE_OPENING):
1367 DP(NETIF_MSG_IFUP, "got MULTI[%d] setup ramrod\n", cid);
1368 fp->state = BNX2X_FP_STATE_OPEN;
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02001369 break;
1370
Dmitry Kravkov523224a2010-10-06 03:23:26 +00001371 case (RAMROD_CMD_ID_ETH_HALT | BNX2X_FP_STATE_HALTING):
1372 DP(NETIF_MSG_IFDOWN, "got MULTI[%d] halt ramrod\n", cid);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02001373 fp->state = BNX2X_FP_STATE_HALTED;
1374 break;
1375
Dmitry Kravkov523224a2010-10-06 03:23:26 +00001376 case (RAMROD_CMD_ID_ETH_TERMINATE | BNX2X_FP_STATE_TERMINATING):
1377 DP(NETIF_MSG_IFDOWN, "got MULTI[%d] teminate ramrod\n", cid);
1378 fp->state = BNX2X_FP_STATE_TERMINATED;
Eliezer Tamir49d66772008-02-28 11:53:13 -08001379 break;
1380
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02001381 default:
Dmitry Kravkov523224a2010-10-06 03:23:26 +00001382 BNX2X_ERR("unexpected MC reply (%d) "
1383 "fp[%d] state is %x\n",
1384 command, fp->index, fp->state);
Eilon Greenstein34f80b02008-06-23 20:33:01 -07001385 break;
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02001386 }
Dmitry Kravkov523224a2010-10-06 03:23:26 +00001387
Dmitry Kravkov8fe23fb2010-10-06 03:27:41 +00001388 smp_mb__before_atomic_inc();
Vladislav Zolotarov6e30dd42011-02-06 11:25:41 -08001389 atomic_inc(&bp->cq_spq_left);
Dmitry Kravkov523224a2010-10-06 03:23:26 +00001390 /* push the change in fp->state and towards the memory */
1391 smp_wmb();
1392
1393 return;
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02001394}
1395
/*
 * bnx2x_interrupt - INTx/MSI hard-IRQ handler.
 *
 * Acknowledges the interrupt, then for each ethernet queue whose
 * status bit is set schedules that queue's NAPI context; hands CNIC
 * bits to the registered CNIC handler (when built in) and queues the
 * slow-path task for the slow-path bit.  Returns IRQ_NONE when the
 * (possibly shared) interrupt was not ours.
 */
irqreturn_t bnx2x_interrupt(int irq, void *dev_instance)
{
	struct bnx2x *bp = netdev_priv(dev_instance);
	u16 status = bnx2x_ack_int(bp);
	u16 mask;
	int i;

	/* Return here if interrupt is shared and it's not for us */
	if (unlikely(status == 0)) {
		DP(NETIF_MSG_INTR, "not our interrupt!\n");
		return IRQ_NONE;
	}
	DP(NETIF_MSG_INTR, "got an interrupt status 0x%x\n", status);

#ifdef BNX2X_STOP_ON_ERROR
	if (unlikely(bp->panic))
		return IRQ_HANDLED;
#endif

	for_each_eth_queue(bp, i) {
		struct bnx2x_fastpath *fp = &bp->fp[i];

		/* Each queue's status bit; bit 1 onward, after CNIC's */
		mask = 0x2 << (fp->index + CNIC_CONTEXT_USE);
		if (status & mask) {
			/* Handle Rx and Tx according to SB id */
			prefetch(fp->rx_cons_sb);
			prefetch(fp->tx_cons_sb);
			prefetch(&fp->sb_running_index[SM_RX_ID]);
			napi_schedule(&bnx2x_fp(bp, fp->index, napi));
			status &= ~mask;
		}
	}

#ifdef BCM_CNIC
	mask = 0x2;
	if (status & (mask | 0x1)) {
		struct cnic_ops *c_ops = NULL;

		rcu_read_lock();
		c_ops = rcu_dereference(bp->cnic_ops);
		if (c_ops)
			c_ops->cnic_handler(bp->cnic_data, NULL);
		rcu_read_unlock();

		status &= ~mask;
	}
#endif

	/* Bit 0 is the slow-path interrupt; defer it to the workqueue */
	if (unlikely(status & 0x1)) {
		queue_delayed_work(bnx2x_wq, &bp->sp_task, 0);

		status &= ~0x1;
		if (!status)
			return IRQ_HANDLED;
	}

	if (unlikely(status))
		DP(NETIF_MSG_INTR, "got an unknown interrupt! (status 0x%x)\n",
		   status);

	return IRQ_HANDLED;
}
1458
1459/* end of fast path */
1460
Yaniv Rosnerc18487e2008-06-23 20:27:52 -07001461
1462/* Link */
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02001463
1464/*
1465 * General service functions
1466 */
1467
/*
 * bnx2x_acquire_hw_lock - acquire a HW resource lock, polling until it
 * is granted.
 *
 * Returns 0 on success, -EINVAL for an out-of-range resource, -EEXIST
 * when the lock is already held, or -EAGAIN after ~5 seconds of
 * retries (1000 attempts, 5 ms apart).
 */
int bnx2x_acquire_hw_lock(struct bnx2x *bp, u32 resource)
{
	u32 lock_status;
	u32 resource_bit = (1 << resource);
	int func = BP_FUNC(bp);
	u32 hw_lock_control_reg;
	int cnt;

	/* Validating that the resource is within range */
	if (resource > HW_LOCK_MAX_RESOURCE_VALUE) {
		DP(NETIF_MSG_HW,
		   "resource(0x%x) > HW_LOCK_MAX_RESOURCE_VALUE(0x%x)\n",
		   resource, HW_LOCK_MAX_RESOURCE_VALUE);
		return -EINVAL;
	}

	/* Functions 0-5 and 6-7 use different driver-control registers */
	if (func <= 5) {
		hw_lock_control_reg = (MISC_REG_DRIVER_CONTROL_1 + func*8);
	} else {
		hw_lock_control_reg =
				(MISC_REG_DRIVER_CONTROL_7 + (func - 6)*8);
	}

	/* Validating that the resource is not already taken */
	lock_status = REG_RD(bp, hw_lock_control_reg);
	if (lock_status & resource_bit) {
		DP(NETIF_MSG_HW, "lock_status 0x%x resource_bit 0x%x\n",
		   lock_status, resource_bit);
		return -EEXIST;
	}

	/* Try for 5 second every 5ms */
	for (cnt = 0; cnt < 1000; cnt++) {
		/* Try to acquire the lock */
		REG_WR(bp, hw_lock_control_reg + 4, resource_bit);
		lock_status = REG_RD(bp, hw_lock_control_reg);
		if (lock_status & resource_bit)
			return 0;

		msleep(5);
	}
	DP(NETIF_MSG_HW, "Timeout\n");
	return -EAGAIN;
}
1512
/*
 * bnx2x_release_hw_lock - release a HW resource lock taken with
 * bnx2x_acquire_hw_lock().
 * @bp:       driver instance
 * @resource: lock index, 0..HW_LOCK_MAX_RESOURCE_VALUE
 *
 * Returns 0 on success, -EINVAL for an out-of-range resource, -EFAULT if
 * the lock is not currently held by this function.
 */
int bnx2x_release_hw_lock(struct bnx2x *bp, u32 resource)
{
	u32 lock_status;
	u32 resource_bit = (1 << resource);
	int func = BP_FUNC(bp);
	u32 hw_lock_control_reg;

	DP(NETIF_MSG_HW, "Releasing a lock on resource %d\n", resource);

	/* Validating that the resource is within range */
	if (resource > HW_LOCK_MAX_RESOURCE_VALUE) {
		DP(NETIF_MSG_HW,
		   "resource(0x%x) > HW_LOCK_MAX_RESOURCE_VALUE(0x%x)\n",
		   resource, HW_LOCK_MAX_RESOURCE_VALUE);
		return -EINVAL;
	}

	/* Same per-function register split as in the acquire path */
	if (func <= 5) {
		hw_lock_control_reg = (MISC_REG_DRIVER_CONTROL_1 + func*8);
	} else {
		hw_lock_control_reg =
				(MISC_REG_DRIVER_CONTROL_7 + (func - 6)*8);
	}

	/* Validating that the resource is currently taken */
	lock_status = REG_RD(bp, hw_lock_control_reg);
	if (!(lock_status & resource_bit)) {
		DP(NETIF_MSG_HW, "lock_status 0x%x resource_bit 0x%x\n",
		   lock_status, resource_bit);
		return -EFAULT;
	}

	/* Writing the bit to the base (clear) register drops the lock */
	REG_WR(bp, hw_lock_control_reg, resource_bit);
	return 0;
}
1548
Yaniv Rosnerc18487e2008-06-23 20:27:52 -07001549
Eilon Greenstein4acac6a2009-02-12 08:36:52 +00001550int bnx2x_get_gpio(struct bnx2x *bp, int gpio_num, u8 port)
1551{
1552 /* The GPIO should be swapped if swap register is set and active */
1553 int gpio_port = (REG_RD(bp, NIG_REG_PORT_SWAP) &&
1554 REG_RD(bp, NIG_REG_STRAP_OVERRIDE)) ^ port;
1555 int gpio_shift = gpio_num +
1556 (gpio_port ? MISC_REGISTERS_GPIO_PORT_SHIFT : 0);
1557 u32 gpio_mask = (1 << gpio_shift);
1558 u32 gpio_reg;
1559 int value;
1560
1561 if (gpio_num > MISC_REGISTERS_GPIO_3) {
1562 BNX2X_ERR("Invalid GPIO %d\n", gpio_num);
1563 return -EINVAL;
1564 }
1565
1566 /* read GPIO value */
1567 gpio_reg = REG_RD(bp, MISC_REG_GPIO);
1568
1569 /* get the requested pin value */
1570 if ((gpio_reg & gpio_mask) == gpio_mask)
1571 value = 1;
1572 else
1573 value = 0;
1574
1575 DP(NETIF_MSG_LINK, "pin %d value 0x%x\n", gpio_num, value);
1576
1577 return value;
1578}
1579
/*
 * bnx2x_set_gpio - drive a GPIO pin low/high or float it (hi-Z input).
 * @bp:       driver instance
 * @gpio_num: pin number, 0..MISC_REGISTERS_GPIO_3
 * @mode:     MISC_REGISTERS_GPIO_OUTPUT_LOW / _OUTPUT_HIGH / _INPUT_HI_Z
 * @port:     port the pin logically belongs to
 *
 * The GPIO register is shared by both ports, so access is serialized with
 * the HW_LOCK_RESOURCE_GPIO hardware lock.
 *
 * Returns 0 on success, -EINVAL for a bad pin number.
 */
int bnx2x_set_gpio(struct bnx2x *bp, int gpio_num, u32 mode, u8 port)
{
	/* The GPIO should be swapped if swap register is set and active */
	int gpio_port = (REG_RD(bp, NIG_REG_PORT_SWAP) &&
			 REG_RD(bp, NIG_REG_STRAP_OVERRIDE)) ^ port;
	int gpio_shift = gpio_num +
			(gpio_port ? MISC_REGISTERS_GPIO_PORT_SHIFT : 0);
	u32 gpio_mask = (1 << gpio_shift);
	u32 gpio_reg;

	if (gpio_num > MISC_REGISTERS_GPIO_3) {
		BNX2X_ERR("Invalid GPIO %d\n", gpio_num);
		return -EINVAL;
	}

	bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_GPIO);
	/* read GPIO and mask except the float bits */
	gpio_reg = (REG_RD(bp, MISC_REG_GPIO) & MISC_REGISTERS_GPIO_FLOAT);

	switch (mode) {
	case MISC_REGISTERS_GPIO_OUTPUT_LOW:
		DP(NETIF_MSG_LINK, "Set GPIO %d (shift %d) -> output low\n",
		   gpio_num, gpio_shift);
		/* clear FLOAT and set CLR */
		gpio_reg &= ~(gpio_mask << MISC_REGISTERS_GPIO_FLOAT_POS);
		gpio_reg |= (gpio_mask << MISC_REGISTERS_GPIO_CLR_POS);
		break;

	case MISC_REGISTERS_GPIO_OUTPUT_HIGH:
		DP(NETIF_MSG_LINK, "Set GPIO %d (shift %d) -> output high\n",
		   gpio_num, gpio_shift);
		/* clear FLOAT and set SET */
		gpio_reg &= ~(gpio_mask << MISC_REGISTERS_GPIO_FLOAT_POS);
		gpio_reg |= (gpio_mask << MISC_REGISTERS_GPIO_SET_POS);
		break;

	case MISC_REGISTERS_GPIO_INPUT_HI_Z:
		DP(NETIF_MSG_LINK, "Set GPIO %d (shift %d) -> input\n",
		   gpio_num, gpio_shift);
		/* set FLOAT */
		gpio_reg |= (gpio_mask << MISC_REGISTERS_GPIO_FLOAT_POS);
		break;

	default:
		/* unknown mode: leave the register unchanged */
		break;
	}

	REG_WR(bp, MISC_REG_GPIO, gpio_reg);
	bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_GPIO);

	return 0;
}
1632
/*
 * bnx2x_set_gpio_int - set or clear the interrupt output of a GPIO pin.
 * @bp:       driver instance
 * @gpio_num: pin number, 0..MISC_REGISTERS_GPIO_3
 * @mode:     MISC_REGISTERS_GPIO_INT_OUTPUT_CLR / _OUTPUT_SET
 * @port:     port the pin logically belongs to
 *
 * Serialized with the HW_LOCK_RESOURCE_GPIO hardware lock, like
 * bnx2x_set_gpio().
 *
 * Returns 0 on success, -EINVAL for a bad pin number.
 */
int bnx2x_set_gpio_int(struct bnx2x *bp, int gpio_num, u32 mode, u8 port)
{
	/* The GPIO should be swapped if swap register is set and active */
	int gpio_port = (REG_RD(bp, NIG_REG_PORT_SWAP) &&
			 REG_RD(bp, NIG_REG_STRAP_OVERRIDE)) ^ port;
	int gpio_shift = gpio_num +
			(gpio_port ? MISC_REGISTERS_GPIO_PORT_SHIFT : 0);
	u32 gpio_mask = (1 << gpio_shift);
	u32 gpio_reg;

	if (gpio_num > MISC_REGISTERS_GPIO_3) {
		BNX2X_ERR("Invalid GPIO %d\n", gpio_num);
		return -EINVAL;
	}

	bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_GPIO);
	/* read GPIO int */
	gpio_reg = REG_RD(bp, MISC_REG_GPIO_INT);

	switch (mode) {
	case MISC_REGISTERS_GPIO_INT_OUTPUT_CLR:
		DP(NETIF_MSG_LINK, "Clear GPIO INT %d (shift %d) -> "
				   "output low\n", gpio_num, gpio_shift);
		/* clear SET and set CLR */
		gpio_reg &= ~(gpio_mask << MISC_REGISTERS_GPIO_INT_SET_POS);
		gpio_reg |= (gpio_mask << MISC_REGISTERS_GPIO_INT_CLR_POS);
		break;

	case MISC_REGISTERS_GPIO_INT_OUTPUT_SET:
		DP(NETIF_MSG_LINK, "Set GPIO INT %d (shift %d) -> "
				   "output high\n", gpio_num, gpio_shift);
		/* clear CLR and set SET */
		gpio_reg &= ~(gpio_mask << MISC_REGISTERS_GPIO_INT_CLR_POS);
		gpio_reg |= (gpio_mask << MISC_REGISTERS_GPIO_INT_SET_POS);
		break;

	default:
		/* unknown mode: leave the register unchanged */
		break;
	}

	REG_WR(bp, MISC_REG_GPIO_INT, gpio_reg);
	bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_GPIO);

	return 0;
}
1678
/*
 * bnx2x_set_spio - drive a shared (SPIO) pin low/high or float it.
 * @bp:       driver instance
 * @spio_num: pin number; only MISC_REGISTERS_SPIO_4..SPIO_7 are valid here
 * @mode:     MISC_REGISTERS_SPIO_OUTPUT_LOW / _OUTPUT_HIGH / _INPUT_HI_Z
 *
 * SPIO pins are shared between all functions, so access is serialized with
 * the HW_LOCK_RESOURCE_SPIO hardware lock.
 *
 * Returns 0 on success, -EINVAL for a pin outside the 4..7 range.
 */
static int bnx2x_set_spio(struct bnx2x *bp, int spio_num, u32 mode)
{
	u32 spio_mask = (1 << spio_num);
	u32 spio_reg;

	if ((spio_num < MISC_REGISTERS_SPIO_4) ||
	    (spio_num > MISC_REGISTERS_SPIO_7)) {
		BNX2X_ERR("Invalid SPIO %d\n", spio_num);
		return -EINVAL;
	}

	bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_SPIO);
	/* read SPIO and mask except the float bits */
	spio_reg = (REG_RD(bp, MISC_REG_SPIO) & MISC_REGISTERS_SPIO_FLOAT);

	switch (mode) {
	case MISC_REGISTERS_SPIO_OUTPUT_LOW:
		DP(NETIF_MSG_LINK, "Set SPIO %d -> output low\n", spio_num);
		/* clear FLOAT and set CLR */
		spio_reg &= ~(spio_mask << MISC_REGISTERS_SPIO_FLOAT_POS);
		spio_reg |= (spio_mask << MISC_REGISTERS_SPIO_CLR_POS);
		break;

	case MISC_REGISTERS_SPIO_OUTPUT_HIGH:
		DP(NETIF_MSG_LINK, "Set SPIO %d -> output high\n", spio_num);
		/* clear FLOAT and set SET */
		spio_reg &= ~(spio_mask << MISC_REGISTERS_SPIO_FLOAT_POS);
		spio_reg |= (spio_mask << MISC_REGISTERS_SPIO_SET_POS);
		break;

	case MISC_REGISTERS_SPIO_INPUT_HI_Z:
		DP(NETIF_MSG_LINK, "Set SPIO %d -> input\n", spio_num);
		/* set FLOAT */
		spio_reg |= (spio_mask << MISC_REGISTERS_SPIO_FLOAT_POS);
		break;

	default:
		/* unknown mode: leave the register unchanged */
		break;
	}

	REG_WR(bp, MISC_REG_SPIO, spio_reg);
	bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_SPIO);

	return 0;
}
1724
/*
 * bnx2x_calc_fc_adv - translate the negotiated IEEE pause resolution into
 * the ethtool ADVERTISED_Pause/ADVERTISED_Asym_Pause advertising bits for
 * the current link configuration index.
 */
void bnx2x_calc_fc_adv(struct bnx2x *bp)
{
	u8 cfg_idx = bnx2x_get_link_cfg_idx(bp);
	switch (bp->link_vars.ieee_fc &
		MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_MASK) {
	case MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_NONE:
		/* no pause advertised at all */
		bp->port.advertising[cfg_idx] &= ~(ADVERTISED_Asym_Pause |
						   ADVERTISED_Pause);
		break;

	case MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_BOTH:
		/* symmetric + asymmetric pause */
		bp->port.advertising[cfg_idx] |= (ADVERTISED_Asym_Pause |
						  ADVERTISED_Pause);
		break;

	case MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_ASYMMETRIC:
		/* asymmetric pause only */
		bp->port.advertising[cfg_idx] |= ADVERTISED_Asym_Pause;
		break;

	default:
		/* unrecognized encoding - advertise no pause */
		bp->port.advertising[cfg_idx] &= ~(ADVERTISED_Asym_Pause |
						   ADVERTISED_Pause);
		break;
	}
}
1750
/*
 * bnx2x_initial_phy_init - bring up the PHY/link for the first time.
 * @bp:        driver instance
 * @load_mode: LOAD_* mode; LOAD_DIAG selects XGXS loopback at 10G
 *
 * Configures requested flow control, runs bnx2x_phy_init() under the PHY
 * lock and recomputes the pause advertising bits. The requested line speed
 * is saved and restored around the call so a diagnostic load does not leave
 * a sticky 10G request behind.
 *
 * Returns the bnx2x_phy_init() result, or -EINVAL when no bootcode (MCP)
 * is present.
 */
u8 bnx2x_initial_phy_init(struct bnx2x *bp, int load_mode)
{
	if (!BP_NOMCP(bp)) {
		u8 rc;
		int cfx_idx = bnx2x_get_link_cfg_idx(bp);
		u16 req_line_speed = bp->link_params.req_line_speed[cfx_idx];
		/* Initialize link parameters structure variables */
		/* It is recommended to turn off RX FC for jumbo frames
		   for better performance */
		if ((CHIP_IS_E1x(bp)) && (bp->dev->mtu > 5000))
			bp->link_params.req_fc_auto_adv = BNX2X_FLOW_CTRL_TX;
		else
			bp->link_params.req_fc_auto_adv = BNX2X_FLOW_CTRL_BOTH;

		bnx2x_acquire_phy_lock(bp);

		if (load_mode == LOAD_DIAG) {
			/* diagnostics run in internal loopback at 10G */
			bp->link_params.loopback_mode = LOOPBACK_XGXS;
			bp->link_params.req_line_speed[cfx_idx] = SPEED_10000;
		}

		rc = bnx2x_phy_init(&bp->link_params, &bp->link_vars);

		bnx2x_release_phy_lock(bp);

		bnx2x_calc_fc_adv(bp);

		/* on slow (emulation) chips report link-up immediately */
		if (CHIP_REV_IS_SLOW(bp) && bp->link_vars.link_up) {
			bnx2x_stats_handle(bp, STATS_EVENT_LINK_UP);
			bnx2x_link_report(bp);
		}
		/* restore the caller-visible requested speed */
		bp->link_params.req_line_speed[cfx_idx] = req_line_speed;
		return rc;
	}
	BNX2X_ERR("Bootcode is missing - can not initialize link\n");
	return -EINVAL;
}
1788
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00001789void bnx2x_link_set(struct bnx2x *bp)
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02001790{
Eilon Greenstein19680c42008-08-13 15:47:33 -07001791 if (!BP_NOMCP(bp)) {
Yitchak Gertner4a37fb62008-08-13 15:50:23 -07001792 bnx2x_acquire_phy_lock(bp);
Yaniv Rosner54c2fb72010-09-01 09:51:23 +00001793 bnx2x_link_reset(&bp->link_params, &bp->link_vars, 1);
Eilon Greenstein19680c42008-08-13 15:47:33 -07001794 bnx2x_phy_init(&bp->link_params, &bp->link_vars);
Yitchak Gertner4a37fb62008-08-13 15:50:23 -07001795 bnx2x_release_phy_lock(bp);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02001796
Eilon Greenstein19680c42008-08-13 15:47:33 -07001797 bnx2x_calc_fc_adv(bp);
1798 } else
Eilon Greensteinf5372252009-02-12 08:38:30 +00001799 BNX2X_ERR("Bootcode is missing - can not set link\n");
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02001800}
1801
Yaniv Rosnerc18487e2008-06-23 20:27:52 -07001802static void bnx2x__link_reset(struct bnx2x *bp)
1803{
Eilon Greenstein19680c42008-08-13 15:47:33 -07001804 if (!BP_NOMCP(bp)) {
Yitchak Gertner4a37fb62008-08-13 15:50:23 -07001805 bnx2x_acquire_phy_lock(bp);
Eilon Greenstein589abe32009-02-12 08:36:55 +00001806 bnx2x_link_reset(&bp->link_params, &bp->link_vars, 1);
Yitchak Gertner4a37fb62008-08-13 15:50:23 -07001807 bnx2x_release_phy_lock(bp);
Eilon Greenstein19680c42008-08-13 15:47:33 -07001808 } else
Eilon Greensteinf5372252009-02-12 08:38:30 +00001809 BNX2X_ERR("Bootcode is missing - can not reset link\n");
Yaniv Rosnerc18487e2008-06-23 20:27:52 -07001810}
1811
Yaniv Rosnera22f0782010-09-07 11:41:20 +00001812u8 bnx2x_link_test(struct bnx2x *bp, u8 is_serdes)
Yaniv Rosnerc18487e2008-06-23 20:27:52 -07001813{
Vladislav Zolotarov2145a922010-04-19 01:13:49 +00001814 u8 rc = 0;
Yaniv Rosnerc18487e2008-06-23 20:27:52 -07001815
Vladislav Zolotarov2145a922010-04-19 01:13:49 +00001816 if (!BP_NOMCP(bp)) {
1817 bnx2x_acquire_phy_lock(bp);
Yaniv Rosnera22f0782010-09-07 11:41:20 +00001818 rc = bnx2x_test_link(&bp->link_params, &bp->link_vars,
1819 is_serdes);
Vladislav Zolotarov2145a922010-04-19 01:13:49 +00001820 bnx2x_release_phy_lock(bp);
1821 } else
1822 BNX2X_ERR("Bootcode is missing - can not test link\n");
Yaniv Rosnerc18487e2008-06-23 20:27:52 -07001823
1824 return rc;
1825}
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02001826
/*
 * bnx2x_init_port_minmax - initialize the per-port rate-shaping and
 * fairness contexts from the current line speed.
 *
 * All timeouts are converted to SDM ticks (4 usec each) before being
 * stored in bp->cmng.
 */
static void bnx2x_init_port_minmax(struct bnx2x *bp)
{
	u32 r_param = bp->link_vars.line_speed / 8;	/* bytes per usec */
	u32 fair_periodic_timeout_usec;
	u32 t_fair;

	memset(&(bp->cmng.rs_vars), 0,
	       sizeof(struct rate_shaping_vars_per_port));
	memset(&(bp->cmng.fair_vars), 0, sizeof(struct fairness_vars_per_port));

	/* 100 usec in SDM ticks = 25 since each tick is 4 usec */
	bp->cmng.rs_vars.rs_periodic_timeout = RS_PERIODIC_TIMEOUT_USEC / 4;

	/* this is the threshold below which no timer arming will occur
	   1.25 coefficient is for the threshold to be a little bigger
	   than the real time, to compensate for timer in-accuracy */
	bp->cmng.rs_vars.rs_threshold =
				(RS_PERIODIC_TIMEOUT_USEC * r_param * 5) / 4;

	/* resolution of fairness timer */
	fair_periodic_timeout_usec = QM_ARB_BYTES / r_param;
	/* for 10G it is 1000usec. for 1G it is 10000usec. */
	t_fair = T_FAIR_COEF / bp->link_vars.line_speed;

	/* this is the threshold below which we won't arm the timer anymore */
	bp->cmng.fair_vars.fair_threshold = QM_ARB_BYTES;

	/* we multiply by 1e3/8 to get bytes/msec.
	   We don't want the credits to pass a credit
	   of the t_fair*FAIR_MEM (algorithm resolution) */
	bp->cmng.fair_vars.upper_bound = r_param * t_fair * FAIR_MEM;
	/* since each tick is 4 usec */
	bp->cmng.fair_vars.fairness_timeout = fair_periodic_timeout_usec / 4;
}
1861
/* Calculates the sum of vn_min_rates into bp->vn_weight_sum.
   It's needed for further normalizing of the min_rates.
   Result:
     sum of vn_min_rates.
       or
     0 - if all the min_rates are 0.
   In the latter case the fairness algorithm should be deactivated.
   If not all min_rates are zero then those that are zeroes will be set to 1.
 */
static void bnx2x_calc_vn_weight_sum(struct bnx2x *bp)
{
	int all_zero = 1;
	int vn;

	bp->vn_weight_sum = 0;
	for (vn = VN_0; vn < E1HVN_MAX; vn++) {
		u32 vn_cfg = bp->mf_config[vn];
		/* min bandwidth is configured in 100Mb units */
		u32 vn_min_rate = ((vn_cfg & FUNC_MF_CFG_MIN_BW_MASK) >>
				   FUNC_MF_CFG_MIN_BW_SHIFT) * 100;

		/* Skip hidden vns */
		if (vn_cfg & FUNC_MF_CFG_FUNC_HIDE)
			continue;

		/* If min rate is zero - set it to 1 */
		if (!vn_min_rate)
			vn_min_rate = DEF_MIN_RATE;
		else
			all_zero = 0;

		bp->vn_weight_sum += vn_min_rate;
	}

	/* if ETS or all min rates are zeros - disable fairness */
	if (BNX2X_IS_ETS_ENABLED(bp)) {
		bp->cmng.flags.cmng_enables &=
					~CMNG_FLAGS_PER_PORT_FAIRNESS_VN;
		DP(NETIF_MSG_IFUP, "Fairness will be disabled due to ETS\n");
	} else if (all_zero) {
		bp->cmng.flags.cmng_enables &=
					~CMNG_FLAGS_PER_PORT_FAIRNESS_VN;
		DP(NETIF_MSG_IFUP, "All MIN values are zeroes"
		   " fairness will be disabled\n");
	} else
		bp->cmng.flags.cmng_enables |=
					CMNG_FLAGS_PER_PORT_FAIRNESS_VN;
}
1909
/*
 * bnx2x_init_vn_minmax - program per-VN rate-shaping and fairness
 * parameters into XSTORM internal memory.
 * @bp: driver instance
 * @vn: virtual-network index (VN_0..E1HVN_MAX-1)
 *
 * Min/max rates come from the VN's MF configuration; hidden functions get
 * zero for both.
 */
static void bnx2x_init_vn_minmax(struct bnx2x *bp, int vn)
{
	struct rate_shaping_vars_per_vn m_rs_vn;
	struct fairness_vars_per_vn m_fair_vn;
	u32 vn_cfg = bp->mf_config[vn];
	int func = 2*vn + BP_PORT(bp);
	u16 vn_min_rate, vn_max_rate;
	int i;

	/* If function is hidden - set min and max to zeroes */
	if (vn_cfg & FUNC_MF_CFG_FUNC_HIDE) {
		vn_min_rate = 0;
		vn_max_rate = 0;

	} else {
		u32 maxCfg = bnx2x_extract_max_cfg(bp, vn_cfg);

		/* min bandwidth is configured in 100Mb units */
		vn_min_rate = ((vn_cfg & FUNC_MF_CFG_MIN_BW_MASK) >>
				FUNC_MF_CFG_MIN_BW_SHIFT) * 100;
		/* If fairness is enabled (not all min rates are zeroes) and
		   if current min rate is zero - set it to 1.
		   This is a requirement of the algorithm. */
		if (bp->vn_weight_sum && (vn_min_rate == 0))
			vn_min_rate = DEF_MIN_RATE;

		if (IS_MF_SI(bp))
			/* maxCfg in percents of linkspeed */
			vn_max_rate = (bp->link_vars.line_speed * maxCfg) / 100;
		else
			/* maxCfg is absolute in 100Mb units */
			vn_max_rate = maxCfg * 100;
	}

	DP(NETIF_MSG_IFUP,
	   "func %d: vn_min_rate %d vn_max_rate %d vn_weight_sum %d\n",
	   func, vn_min_rate, vn_max_rate, bp->vn_weight_sum);

	memset(&m_rs_vn, 0, sizeof(struct rate_shaping_vars_per_vn));
	memset(&m_fair_vn, 0, sizeof(struct fairness_vars_per_vn));

	/* global vn counter - maximal Mbps for this vn */
	m_rs_vn.vn_counter.rate = vn_max_rate;

	/* quota - number of bytes transmitted in this period */
	m_rs_vn.vn_counter.quota =
				(vn_max_rate * RS_PERIODIC_TIMEOUT_USEC) / 8;

	if (bp->vn_weight_sum) {
		/* credit for each period of the fairness algorithm:
		   number of bytes in T_FAIR (the vn share the port rate).
		   vn_weight_sum should not be larger than 10000, thus
		   T_FAIR_COEF / (8 * vn_weight_sum) will always be greater
		   than zero */
		m_fair_vn.vn_credit_delta =
			max_t(u32, (vn_min_rate * (T_FAIR_COEF /
						   (8 * bp->vn_weight_sum))),
			      (bp->cmng.fair_vars.fair_threshold +
							MIN_ABOVE_THRESH));
		DP(NETIF_MSG_IFUP, "m_fair_vn.vn_credit_delta %d\n",
		   m_fair_vn.vn_credit_delta);
	}

	/* Store it to internal memory - word by word */
	for (i = 0; i < sizeof(struct rate_shaping_vars_per_vn)/4; i++)
		REG_WR(bp, BAR_XSTRORM_INTMEM +
		       XSTORM_RATE_SHAPING_PER_VN_VARS_OFFSET(func) + i * 4,
		       ((u32 *)(&m_rs_vn))[i]);

	for (i = 0; i < sizeof(struct fairness_vars_per_vn)/4; i++)
		REG_WR(bp, BAR_XSTRORM_INTMEM +
		       XSTORM_FAIRNESS_PER_VN_VARS_OFFSET(func) + i * 4,
		       ((u32 *)(&m_fair_vn))[i]);
}
Dmitry Kravkovf85582f2010-10-06 03:34:21 +00001983
Dmitry Kravkov523224a2010-10-06 03:23:26 +00001984static int bnx2x_get_cmng_fns_mode(struct bnx2x *bp)
1985{
1986 if (CHIP_REV_IS_SLOW(bp))
1987 return CMNG_FNS_NONE;
Dmitry Kravkovfb3bff12010-10-06 03:26:40 +00001988 if (IS_MF(bp))
Dmitry Kravkov523224a2010-10-06 03:23:26 +00001989 return CMNG_FNS_MINMAX;
Eilon Greenstein34f80b02008-06-23 20:33:01 -07001990
Dmitry Kravkov523224a2010-10-06 03:23:26 +00001991 return CMNG_FNS_NONE;
1992}
1993
/*
 * bnx2x_read_mf_cfg - refresh the per-VN multi-function configuration
 * (bp->mf_config[]) from shared memory.
 *
 * Without bootcode there is no MF config to read, so the cached values are
 * left untouched.
 */
void bnx2x_read_mf_cfg(struct bnx2x *bp)
{
	int vn, n = (CHIP_MODE_IS_4_PORT(bp) ? 2 : 1);

	if (BP_NOMCP(bp))
		return; /* what should be the default value in this case */

	/* For 2 port configuration the absolute function number formula
	 * is:
	 *      abs_func = 2 * vn + BP_PORT + BP_PATH
	 *
	 * and there are 4 functions per port
	 *
	 * For 4 port configuration it is
	 *      abs_func = 4 * vn + 2 * BP_PORT + BP_PATH
	 *
	 * and there are 2 functions per port
	 */
	for (vn = VN_0; vn < E1HVN_MAX; vn++) {
		int /*abs*/func = n * (2 * vn + BP_PORT(bp)) + BP_PATH(bp);

		if (func >= E1H_FUNC_MAX)
			break;

		bp->mf_config[vn] =
			MF_CFG_RD(bp, func_mf_config[func].config);
	}
}
2022
/*
 * bnx2x_cmng_fns_init - (re)initialize congestion management.
 * @bp:        driver instance
 * @read_cfg:  re-read the MF configuration from shared memory first
 * @cmng_type: CMNG_FNS_MINMAX to enable min/max shaping, anything else
 *             leaves congestion management disabled
 */
static void bnx2x_cmng_fns_init(struct bnx2x *bp, u8 read_cfg, u8 cmng_type)
{

	if (cmng_type == CMNG_FNS_MINMAX) {
		int vn;

		/* clear cmng_enables */
		bp->cmng.flags.cmng_enables = 0;

		/* read mf conf from shmem */
		if (read_cfg)
			bnx2x_read_mf_cfg(bp);

		/* Init rate shaping and fairness contexts */
		bnx2x_init_port_minmax(bp);

		/* vn_weight_sum and enable fairness if not 0 */
		bnx2x_calc_vn_weight_sum(bp);

		/* calculate and set min-max rate for each vn;
		 * only the PMF programs the per-VN storm memory
		 */
		if (bp->port.pmf)
			for (vn = VN_0; vn < E1HVN_MAX; vn++)
				bnx2x_init_vn_minmax(bp, vn);

		/* always enable rate shaping and fairness */
		bp->cmng.flags.cmng_enables |=
					CMNG_FLAGS_PER_PORT_RATE_SHAPING_VN;
		if (!bp->vn_weight_sum)
			DP(NETIF_MSG_IFUP, "All MIN values are zeroes"
				   " fairness will be disabled\n");
		return;
	}

	/* rate shaping and fairness are disabled */
	DP(NETIF_MSG_IFUP,
	   "rate shaping and fairness are disabled\n");
}
2060
/*
 * bnx2x_link_sync_notify - raise a general attention towards every other
 * VN on this port so the other drivers re-sync their link state.
 */
static inline void bnx2x_link_sync_notify(struct bnx2x *bp)
{
	int port = BP_PORT(bp);
	int func;
	int vn;

	/* Set the attention towards other drivers on the same port */
	for (vn = VN_0; vn < E1HVN_MAX; vn++) {
		/* skip ourselves */
		if (vn == BP_E1HVN(bp))
			continue;

		func = ((vn << 1) | port);
		REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_0 +
		       (LINK_SYNC_ATTENTION_BIT_FUNC_0 + func)*4, 1);
	}
}
Eilon Greenstein8a1c38d2009-02-12 08:36:40 +00002077
/* This function is called upon link interrupt: it re-reads the link state,
 * updates dropless flow control and BMAC statistics, re-programs congestion
 * management for the new line speed, reports the link and notifies the
 * other VNs on the port (in MF mode).
 */
static void bnx2x_link_attn(struct bnx2x *bp)
{
	/* Make sure that we are synced with the current statistics */
	bnx2x_stats_handle(bp, STATS_EVENT_STOP);

	bnx2x_link_update(&bp->link_params, &bp->link_vars);

	if (bp->link_vars.link_up) {

		/* dropless flow control */
		if (!CHIP_IS_E1(bp) && bp->dropless_fc) {
			int port = BP_PORT(bp);
			u32 pause_enabled = 0;

			/* tell the USTORM whether TX pause was negotiated */
			if (bp->link_vars.flow_ctrl & BNX2X_FLOW_CTRL_TX)
				pause_enabled = 1;

			REG_WR(bp, BAR_USTRORM_INTMEM +
			       USTORM_ETH_PAUSE_ENABLED_OFFSET(port),
			       pause_enabled);
		}

		if (bp->link_vars.mac_type == MAC_TYPE_BMAC) {
			struct host_port_stats *pstats;

			pstats = bnx2x_sp(bp, port_stats);
			/* reset old bmac stats */
			memset(&(pstats->mac_stx[0]), 0,
			       sizeof(struct mac_stx));
		}
		if (bp->state == BNX2X_STATE_OPEN)
			bnx2x_stats_handle(bp, STATS_EVENT_LINK_UP);
	}

	if (bp->link_vars.link_up && bp->link_vars.line_speed) {
		int cmng_fns = bnx2x_get_cmng_fns_mode(bp);

		if (cmng_fns != CMNG_FNS_NONE) {
			bnx2x_cmng_fns_init(bp, false, cmng_fns);
			storm_memset_cmng(bp, &bp->cmng, BP_PORT(bp));
		} else
			/* rate shaping and fairness are disabled */
			DP(NETIF_MSG_IFUP,
			   "single function mode without fairness\n");
	}

	__bnx2x_link_report(bp);

	/* in MF mode let the other functions on the port know */
	if (IS_MF(bp))
		bnx2x_link_sync_notify(bp);
}
2130
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00002131void bnx2x__link_status_update(struct bnx2x *bp)
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02002132{
Vladislav Zolotarov2ae17f62011-05-04 23:48:23 +00002133 if (bp->state != BNX2X_STATE_OPEN)
Yaniv Rosnerc18487e2008-06-23 20:27:52 -07002134 return;
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02002135
Yaniv Rosnerc18487e2008-06-23 20:27:52 -07002136 bnx2x_link_status_update(&bp->link_params, &bp->link_vars);
2137
Yitchak Gertnerbb2a0f72008-06-23 20:33:36 -07002138 if (bp->link_vars.link_up)
2139 bnx2x_stats_handle(bp, STATS_EVENT_LINK_UP);
2140 else
2141 bnx2x_stats_handle(bp, STATS_EVENT_STOP);
2142
Yaniv Rosnerc18487e2008-06-23 20:27:52 -07002143 /* indicate link status */
2144 bnx2x_link_report(bp);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02002145}
2146
/*
 * bnx2x_pmf_update - take over the Port Management Function (PMF) role:
 * mark this function as PMF, enable NIG attention towards it (HC or IGU
 * registers depending on the interrupt block) and kick the statistics
 * state machine.
 */
static void bnx2x_pmf_update(struct bnx2x *bp)
{
	int port = BP_PORT(bp);
	u32 val;

	bp->port.pmf = 1;
	DP(NETIF_MSG_LINK, "pmf %d\n", bp->port.pmf);

	/* enable nig attention */
	val = (0xff0f | (1 << (BP_E1HVN(bp) + 4)));
	if (bp->common.int_block == INT_BLOCK_HC) {
		REG_WR(bp, HC_REG_TRAILING_EDGE_0 + port*8, val);
		REG_WR(bp, HC_REG_LEADING_EDGE_0 + port*8, val);
	} else if (CHIP_IS_E2(bp)) {
		REG_WR(bp, IGU_REG_TRAILING_EDGE_LATCH, val);
		REG_WR(bp, IGU_REG_LEADING_EDGE_LATCH, val);
	}

	bnx2x_stats_handle(bp, STATS_EVENT_PMF);
}
2167
Yaniv Rosnerc18487e2008-06-23 20:27:52 -07002168/* end of Link */
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02002169
2170/* slow path */
2171
2172/*
2173 * General service functions
2174 */
2175
/* send the MCP a request, block until there is a reply */
/*
 * The request/reply handshake is matched by a sequence number embedded in
 * the mailbox header; bp->fw_mb_mutex serializes mailbox users. The FW is
 * given up to ~5 seconds to reply.
 *
 * Returns the FW response code (FW_MSG_CODE_MASK bits), or 0 if the FW
 * failed to respond.
 */
u32 bnx2x_fw_command(struct bnx2x *bp, u32 command, u32 param)
{
	int mb_idx = BP_FW_MB_IDX(bp);
	u32 seq;
	u32 rc = 0;
	u32 cnt = 1;
	/* poll slower on emulation chips */
	u8 delay = CHIP_REV_IS_SLOW(bp) ? 100 : 10;

	mutex_lock(&bp->fw_mb_mutex);
	seq = ++bp->fw_seq;
	SHMEM_WR(bp, func_mb[mb_idx].drv_mb_param, param);
	SHMEM_WR(bp, func_mb[mb_idx].drv_mb_header, (command | seq));

	DP(BNX2X_MSG_MCP, "wrote command (%x) to FW MB\n", (command | seq));

	do {
		/* let the FW do its magic ... */
		msleep(delay);

		rc = SHMEM_RD(bp, func_mb[mb_idx].fw_mb_header);

		/* Give the FW up to 5 second (500*10ms) */
	} while ((seq != (rc & FW_MSG_SEQ_NUMBER_MASK)) && (cnt++ < 500));

	DP(BNX2X_MSG_MCP, "[after %d ms] read (%x) seq is (%x) from FW MB\n",
	   cnt*delay, rc, seq);

	/* is this a reply to our command? */
	if (seq == (rc & FW_MSG_SEQ_NUMBER_MASK))
		rc &= FW_MSG_CODE_MASK;
	else {
		/* FW BUG! */
		BNX2X_ERR("FW failed to respond!\n");
		bnx2x_fw_dump(bp);
		rc = 0;
	}
	mutex_unlock(&bp->fw_mb_mutex);

	return rc;
}
2217
Vladislav Zolotarovec6ba942010-12-13 05:44:01 +00002218static u8 stat_counter_valid(struct bnx2x *bp, struct bnx2x_fastpath *fp)
2219{
2220#ifdef BCM_CNIC
2221 if (IS_FCOE_FP(fp) && IS_MF(bp))
2222 return false;
2223#endif
2224 return true;
2225}
2226
/* must be called under rtnl_lock */
/*
 * Translate the BNX2X_ACCEPT_* filter flags for client 'cl_id' into the
 * per-client drop-all / accept-all bitmasks kept in bp->mac_filters.
 * Only this client's bit ('mask') in each field is modified.
 */
static void bnx2x_rxq_set_mac_filters(struct bnx2x *bp, u16 cl_id, u32 filters)
{
	u32 mask = (1 << cl_id);

	/* initial setting is BNX2X_ACCEPT_NONE */
	u8 drop_all_ucast = 1, drop_all_bcast = 1, drop_all_mcast = 1;
	u8 accp_all_ucast = 0, accp_all_bcast = 0, accp_all_mcast = 0;
	u8 unmatched_unicast = 0;

	if (filters & BNX2X_ACCEPT_UNMATCHED_UCAST)
		unmatched_unicast = 1;

	if (filters & BNX2X_PROMISCUOUS_MODE) {
		/* promiscuous - accept all, drop none */
		drop_all_ucast = drop_all_bcast = drop_all_mcast = 0;
		accp_all_ucast = accp_all_bcast = accp_all_mcast = 1;
		if (IS_MF_SI(bp)) {
			/*
			 * SI mode defines to accept in promiscuous mode
			 * only unmatched packets
			 */
			unmatched_unicast = 1;
			accp_all_ucast = 0;
		}
	}
	if (filters & BNX2X_ACCEPT_UNICAST) {
		/* accept matched ucast */
		drop_all_ucast = 0;
	}
	if (filters & BNX2X_ACCEPT_MULTICAST)
		/* accept matched mcast */
		drop_all_mcast = 0;

	if (filters & BNX2X_ACCEPT_ALL_UNICAST) {
		/* accept all ucast */
		drop_all_ucast = 0;
		accp_all_ucast = 1;
	}
	if (filters & BNX2X_ACCEPT_ALL_MULTICAST) {
		/* accept all mcast */
		drop_all_mcast = 0;
		accp_all_mcast = 1;
	}
	if (filters & BNX2X_ACCEPT_BROADCAST) {
		/* accept (all) bcast */
		drop_all_bcast = 0;
		accp_all_bcast = 1;
	}

	/* fold this client's decision into the shared per-client bitmasks:
	 * set the client's bit when the policy applies, clear it otherwise
	 */
	bp->mac_filters.ucast_drop_all = drop_all_ucast ?
		bp->mac_filters.ucast_drop_all | mask :
		bp->mac_filters.ucast_drop_all & ~mask;

	bp->mac_filters.mcast_drop_all = drop_all_mcast ?
		bp->mac_filters.mcast_drop_all | mask :
		bp->mac_filters.mcast_drop_all & ~mask;

	bp->mac_filters.bcast_drop_all = drop_all_bcast ?
		bp->mac_filters.bcast_drop_all | mask :
		bp->mac_filters.bcast_drop_all & ~mask;

	bp->mac_filters.ucast_accept_all = accp_all_ucast ?
		bp->mac_filters.ucast_accept_all | mask :
		bp->mac_filters.ucast_accept_all & ~mask;

	bp->mac_filters.mcast_accept_all = accp_all_mcast ?
		bp->mac_filters.mcast_accept_all | mask :
		bp->mac_filters.mcast_accept_all & ~mask;

	bp->mac_filters.bcast_accept_all = accp_all_bcast ?
		bp->mac_filters.bcast_accept_all | mask :
		bp->mac_filters.bcast_accept_all & ~mask;

	bp->mac_filters.unmatched_unicast = unmatched_unicast ?
		bp->mac_filters.unmatched_unicast | mask :
		bp->mac_filters.unmatched_unicast & ~mask;
}
2305
/*
 * Program the per-function configuration into the FW storm memories:
 * TPA/RSS config, function enable, optional statistics collection and
 * optional slow-path queue (SPQ) setup, as selected by p->func_flgs.
 */
static void bnx2x_func_init(struct bnx2x *bp, struct bnx2x_func_init_params *p)
{
	struct tstorm_eth_function_common_config tcfg = {0};
	u16 rss_flgs;

	/* tpa */
	if (p->func_flgs & FUNC_FLG_TPA)
		tcfg.config_flags |=
			TSTORM_ETH_FUNCTION_COMMON_CONFIG_ENABLE_TPA;

	/* set rss flags */
	rss_flgs = (p->rss->mode <<
		TSTORM_ETH_FUNCTION_COMMON_CONFIG_RSS_MODE_SHIFT);

	if (p->rss->cap & RSS_IPV4_CAP)
		rss_flgs |= RSS_IPV4_CAP_MASK;
	if (p->rss->cap & RSS_IPV4_TCP_CAP)
		rss_flgs |= RSS_IPV4_TCP_CAP_MASK;
	if (p->rss->cap & RSS_IPV6_CAP)
		rss_flgs |= RSS_IPV6_CAP_MASK;
	if (p->rss->cap & RSS_IPV6_TCP_CAP)
		rss_flgs |= RSS_IPV6_TCP_CAP_MASK;

	tcfg.config_flags |= rss_flgs;
	tcfg.rss_result_mask = p->rss->result_mask;

	storm_memset_func_cfg(bp, &tcfg, p->func_id);

	/* Enable the function in the FW */
	storm_memset_vf_to_pf(bp, p->func_id, p->pf_id);
	storm_memset_func_en(bp, p->func_id, 1);

	/* statistics: program flags and DMA address for each storm */
	if (p->func_flgs & FUNC_FLG_STATS) {
		struct stats_indication_flags stats_flags = {0};
		stats_flags.collect_eth = 1;

		storm_memset_xstats_flags(bp, &stats_flags, p->func_id);
		storm_memset_xstats_addr(bp, p->fw_stat_map, p->func_id);

		storm_memset_tstats_flags(bp, &stats_flags, p->func_id);
		storm_memset_tstats_addr(bp, p->fw_stat_map, p->func_id);

		storm_memset_ustats_flags(bp, &stats_flags, p->func_id);
		storm_memset_ustats_addr(bp, p->fw_stat_map, p->func_id);

		storm_memset_cstats_flags(bp, &stats_flags, p->func_id);
		storm_memset_cstats_addr(bp, p->fw_stat_map, p->func_id);
	}

	/* spq: program base address and initial producer */
	if (p->func_flgs & FUNC_FLG_SPQ) {
		storm_memset_spq_addr(bp, p->spq_map, p->func_id);
		REG_WR(bp, XSEM_REG_FAST_MEMORY +
		       XSTORM_SPQ_PROD_OFFSET(p->func_id), p->spq_prod);
	}
}
2363
2364static inline u16 bnx2x_get_cl_flags(struct bnx2x *bp,
2365 struct bnx2x_fastpath *fp)
2366{
2367 u16 flags = 0;
2368
2369 /* calculate queue flags */
2370 flags |= QUEUE_FLG_CACHE_ALIGN;
2371 flags |= QUEUE_FLG_HC;
Dmitry Kravkov0793f83f2010-12-01 12:39:28 -08002372 flags |= IS_MF_SD(bp) ? QUEUE_FLG_OV : 0;
Dmitry Kravkov523224a2010-10-06 03:23:26 +00002373
Dmitry Kravkov523224a2010-10-06 03:23:26 +00002374 flags |= QUEUE_FLG_VLAN;
2375 DP(NETIF_MSG_IFUP, "vlan removal enabled\n");
Dmitry Kravkov523224a2010-10-06 03:23:26 +00002376
2377 if (!fp->disable_tpa)
2378 flags |= QUEUE_FLG_TPA;
2379
Vladislav Zolotarovec6ba942010-12-13 05:44:01 +00002380 flags = stat_counter_valid(bp, fp) ?
2381 (flags | QUEUE_FLG_STATS) : (flags & ~QUEUE_FLG_STATS);
Dmitry Kravkov523224a2010-10-06 03:23:26 +00002382
2383 return flags;
2384}
2385
/*
 * Fill the Rx-queue init parameters and pause (flow-control) thresholds
 * for one fastpath client, ahead of the FW client-setup ramrod.
 */
static void bnx2x_pf_rx_cl_prep(struct bnx2x *bp,
	struct bnx2x_fastpath *fp, struct rxq_pause_params *pause,
	struct bnx2x_rxq_init_params *rxq_init)
{
	u16 max_sge = 0;
	u16 sge_sz = 0;
	u16 tpa_agg_size = 0;

	/* calculate queue flags */
	u16 flags = bnx2x_get_cl_flags(bp, fp);

	if (!fp->disable_tpa) {
		pause->sge_th_hi = 250;
		pause->sge_th_lo = 150;
		/* aggregation size is capped at 64KB-1 (0xffff) */
		tpa_agg_size = min_t(u32,
			(min_t(u32, 8, MAX_SKB_FRAGS) *
			SGE_PAGE_SIZE * PAGES_PER_SGE), 0xffff);
		/* max SGEs per packet: MTU rounded up to whole SGE pages */
		max_sge = SGE_PAGE_ALIGN(bp->dev->mtu) >>
			SGE_PAGE_SHIFT;
		max_sge = ((max_sge + PAGES_PER_SGE - 1) &
			  (~(PAGES_PER_SGE-1))) >> PAGES_PER_SGE_SHIFT;
		sge_sz = (u16)min_t(u32, SGE_PAGE_SIZE * PAGES_PER_SGE,
			0xffff);
	}

	/* pause - not for e1 */
	if (!CHIP_IS_E1(bp)) {
		pause->bd_th_hi = 350;
		pause->bd_th_lo = 250;
		pause->rcq_th_hi = 350;
		pause->rcq_th_lo = 250;
		/* NOTE(review): on non-E1 chips this clears the SGE
		 * thresholds set in the TPA branch above - confirm that
		 * this override is intentional.
		 */
		pause->sge_th_hi = 0;
		pause->sge_th_lo = 0;
		pause->pri_map = 1;
	}

	/* rxq setup */
	rxq_init->flags = flags;
	rxq_init->cxt = &bp->context.vcxt[fp->cid].eth;
	rxq_init->dscr_map = fp->rx_desc_mapping;
	rxq_init->sge_map = fp->rx_sge_mapping;
	rxq_init->rcq_map = fp->rx_comp_mapping;
	rxq_init->rcq_np_map = fp->rx_comp_mapping + BCM_PAGE_SIZE;

	/* Always use mini-jumbo MTU for FCoE L2 ring */
	if (IS_FCOE_FP(fp))
		rxq_init->mtu = BNX2X_FCOE_MINI_JUMBO_MTU;
	else
		rxq_init->mtu = bp->dev->mtu;

	rxq_init->buf_sz = fp->rx_buf_size;
	rxq_init->cl_qzone_id = fp->cl_qzone_id;
	rxq_init->cl_id = fp->cl_id;
	rxq_init->spcl_id = fp->cl_id;
	rxq_init->stat_id = fp->cl_id;
	rxq_init->tpa_agg_sz = tpa_agg_size;
	rxq_init->sge_buf_sz = sge_sz;
	rxq_init->max_sges_pkt = max_sge;
	rxq_init->cache_line_log = BNX2X_RX_ALIGN_SHIFT;
	rxq_init->fw_sb_id = fp->fw_sb_id;

	/* FCoE L2 ring completes on its own slow-path CQ index */
	if (IS_FCOE_FP(fp))
		rxq_init->sb_cq_index = HC_SP_INDEX_ETH_FCOE_RX_CQ_CONS;
	else
		rxq_init->sb_cq_index = U_SB_ETH_RX_CQ_INDEX;

	rxq_init->cid = HW_CID(bp, fp->cid);

	/* interrupt coalescing rate; 0 disables it */
	rxq_init->hc_rate = bp->rx_ticks ? (1000000 / bp->rx_ticks) : 0;
}
2456
2457static void bnx2x_pf_tx_cl_prep(struct bnx2x *bp,
2458 struct bnx2x_fastpath *fp, struct bnx2x_txq_init_params *txq_init)
2459{
2460 u16 flags = bnx2x_get_cl_flags(bp, fp);
2461
2462 txq_init->flags = flags;
2463 txq_init->cxt = &bp->context.vcxt[fp->cid].eth;
2464 txq_init->dscr_map = fp->tx_desc_mapping;
2465 txq_init->stat_id = fp->cl_id;
2466 txq_init->cid = HW_CID(bp, fp->cid);
2467 txq_init->sb_cq_index = C_SB_ETH_TX_CQ_INDEX;
2468 txq_init->traffic_type = LLFC_TRAFFIC_TYPE_NW;
2469 txq_init->fw_sb_id = fp->fw_sb_id;
Vladislav Zolotarovec6ba942010-12-13 05:44:01 +00002470
2471 if (IS_FCOE_FP(fp)) {
2472 txq_init->sb_cq_index = HC_SP_INDEX_ETH_FCOE_TX_CQ_CONS;
2473 txq_init->traffic_type = LLFC_TRAFFIC_TYPE_FCOE;
2474 }
2475
Dmitry Kravkov523224a2010-10-06 03:23:26 +00002476 txq_init->hc_rate = bp->tx_ticks ? (1000000 / bp->tx_ticks) : 0;
2477}
2478
/*
 * PF-level FW initialization: program function configuration (RSS, TPA,
 * stats, SPQ), reset congestion management, set the initial Rx mode and
 * initialize the event queue.
 */
static void bnx2x_pf_init(struct bnx2x *bp)
{
	struct bnx2x_func_init_params func_init = {0};
	struct bnx2x_rss_params rss = {0};
	struct event_ring_data eq_data = { {0} };
	u16 flags;

	/* pf specific setups */
	if (!CHIP_IS_E1(bp))
		storm_memset_ov(bp, bp->mf_ov, BP_FUNC(bp));

	if (CHIP_IS_E2(bp)) {
		/* reset IGU PF statistics: MSIX + ATTN */
		/* PF */
		REG_WR(bp, IGU_REG_STATISTIC_NUM_MESSAGE_SENT +
			   BNX2X_IGU_STAS_MSG_VF_CNT*4 +
			   (CHIP_MODE_IS_4_PORT(bp) ?
				BP_FUNC(bp) : BP_VN(bp))*4, 0);
		/* ATTN */
		REG_WR(bp, IGU_REG_STATISTIC_NUM_MESSAGE_SENT +
			   BNX2X_IGU_STAS_MSG_VF_CNT*4 +
			   BNX2X_IGU_STAS_MSG_PF_CNT*4 +
			   (CHIP_MODE_IS_4_PORT(bp) ?
				BP_FUNC(bp) : BP_VN(bp))*4, 0);
	}

	/* function setup flags */
	flags = (FUNC_FLG_STATS | FUNC_FLG_LEADING | FUNC_FLG_SPQ);

	/* on E1x TPA follows the driver flag; E2 always enables it */
	if (CHIP_IS_E1x(bp))
		flags |= (bp->flags & TPA_ENABLE_FLAG) ? FUNC_FLG_TPA : 0;
	else
		flags |= FUNC_FLG_TPA;

	/* function setup */

	/**
	 * Although RSS is meaningless when there is a single HW queue we
	 * still need it enabled in order to have HW Rx hash generated.
	 */
	rss.cap = (RSS_IPV4_CAP | RSS_IPV4_TCP_CAP |
		   RSS_IPV6_CAP | RSS_IPV6_TCP_CAP);
	rss.mode = bp->multi_mode;
	rss.result_mask = MULTI_MASK;
	func_init.rss = &rss;

	func_init.func_flgs = flags;
	func_init.pf_id = BP_FUNC(bp);
	func_init.func_id = BP_FUNC(bp);
	func_init.fw_stat_map = bnx2x_sp_mapping(bp, fw_stats);
	func_init.spq_map = bp->spq_mapping;
	func_init.spq_prod = bp->spq_prod_idx;

	bnx2x_func_init(bp, &func_init);

	memset(&(bp->cmng), 0, sizeof(struct cmng_struct_per_port));

	/*
	 * Congestion management values depend on the link rate.
	 * There is no active link yet, so the initial link rate is set to
	 * 10 Gbps; when the link comes up the congestion management values
	 * are re-calculated according to the actual link rate.
	 */
	bp->link_vars.line_speed = SPEED_10000;
	bnx2x_cmng_fns_init(bp, true, bnx2x_get_cmng_fns_mode(bp));

	/* Only the PMF sets the HW */
	if (bp->port.pmf)
		storm_memset_cmng(bp, &bp->cmng, BP_PORT(bp));

	/* no rx until link is up */
	bp->rx_mode = BNX2X_RX_MODE_NONE;
	bnx2x_set_storm_rx_mode(bp);

	/* init Event Queue */
	eq_data.base_addr.hi = U64_HI(bp->eq_mapping);
	eq_data.base_addr.lo = U64_LO(bp->eq_mapping);
	eq_data.producer = bp->eq_prod;
	eq_data.index_id = HC_SP_INDEX_EQ_CONS;
	eq_data.sb_id = DEF_SB_ID;
	storm_memset_eq_data(bp, &eq_data, BP_FUNC(bp));
}
2561
2562
Eilon Greenstein2691d512009-08-12 08:22:08 +00002563static void bnx2x_e1h_disable(struct bnx2x *bp)
2564{
2565 int port = BP_PORT(bp);
Eilon Greenstein2691d512009-08-12 08:22:08 +00002566
2567 netif_tx_disable(bp->dev);
Eilon Greenstein2691d512009-08-12 08:22:08 +00002568
2569 REG_WR(bp, NIG_REG_LLH0_FUNC_EN + port*8, 0);
2570
Eilon Greenstein2691d512009-08-12 08:22:08 +00002571 netif_carrier_off(bp->dev);
2572}
2573
2574static void bnx2x_e1h_enable(struct bnx2x *bp)
2575{
2576 int port = BP_PORT(bp);
2577
2578 REG_WR(bp, NIG_REG_LLH0_FUNC_EN + port*8, 1);
2579
Eilon Greenstein2691d512009-08-12 08:22:08 +00002580 /* Tx queue should be only reenabled */
2581 netif_tx_wake_all_queues(bp->dev);
2582
Eilon Greenstein061bc702009-10-15 00:18:47 -07002583 /*
2584 * Should not call netif_carrier_on since it will be called if the link
2585 * is up when checking for link state
2586 */
Eilon Greenstein2691d512009-08-12 08:22:08 +00002587}
2588
/* called due to MCP event (on pmf):
 *	reread new bandwidth configuration
 *	configure FW
 *	notify others function about the change
 */
static inline void bnx2x_config_mf_bw(struct bnx2x *bp)
{
	/* min/max rates depend on the link speed, so recalculate and
	 * notify the other functions only while the link is up
	 */
	if (bp->link_vars.link_up) {
		bnx2x_cmng_fns_init(bp, true, CMNG_FNS_MINMAX);
		bnx2x_link_sync_notify(bp);
	}
	/* push the (re)calculated CMNG configuration to the FW */
	storm_memset_cmng(bp, &bp->cmng, BP_PORT(bp));
}
2602
/* apply a new MF bandwidth configuration and acknowledge the MCP request */
static inline void bnx2x_set_mf_bw(struct bnx2x *bp)
{
	bnx2x_config_mf_bw(bp);
	bnx2x_fw_command(bp, DRV_MSG_CODE_SET_MF_BW_ACK, 0);
}
2608
/*
 * Handle a DCC (Dynamic Configuration Change) event from the MCP:
 * enable/disable this PF and/or re-read bandwidth allocation, then
 * report success or failure (any unhandled event bits) back to the MCP.
 */
static void bnx2x_dcc_event(struct bnx2x *bp, u32 dcc_event)
{
	DP(BNX2X_MSG_MCP, "dcc_event 0x%x\n", dcc_event);

	if (dcc_event & DRV_STATUS_DCC_DISABLE_ENABLE_PF) {

		/*
		 * This is the only place besides the function initialization
		 * where the bp->flags can change so it is done without any
		 * locks
		 */
		if (bp->mf_config[BP_VN(bp)] & FUNC_MF_CFG_FUNC_DISABLED) {
			DP(NETIF_MSG_IFDOWN, "mf_cfg function disabled\n");
			bp->flags |= MF_FUNC_DIS;

			bnx2x_e1h_disable(bp);
		} else {
			DP(NETIF_MSG_IFUP, "mf_cfg function enabled\n");
			bp->flags &= ~MF_FUNC_DIS;

			bnx2x_e1h_enable(bp);
		}
		/* clear the handled bit; anything left signals a failure */
		dcc_event &= ~DRV_STATUS_DCC_DISABLE_ENABLE_PF;
	}
	if (dcc_event & DRV_STATUS_DCC_BANDWIDTH_ALLOCATION) {
		bnx2x_config_mf_bw(bp);
		dcc_event &= ~DRV_STATUS_DCC_BANDWIDTH_ALLOCATION;
	}

	/* Report results to MCP */
	if (dcc_event)
		bnx2x_fw_command(bp, DRV_MSG_CODE_DCC_FAILURE, 0);
	else
		bnx2x_fw_command(bp, DRV_MSG_CODE_DCC_OK, 0);
}
2644
Michael Chan28912902009-10-10 13:46:53 +00002645/* must be called under the spq lock */
2646static inline struct eth_spe *bnx2x_sp_get_next(struct bnx2x *bp)
2647{
2648 struct eth_spe *next_spe = bp->spq_prod_bd;
2649
2650 if (bp->spq_prod_bd == bp->spq_last_bd) {
2651 bp->spq_prod_bd = bp->spq;
2652 bp->spq_prod_idx = 0;
2653 DP(NETIF_MSG_TIMER, "end of spq\n");
2654 } else {
2655 bp->spq_prod_bd++;
2656 bp->spq_prod_idx++;
2657 }
2658 return next_spe;
2659}
2660
/* must be called under the spq lock */
/* Publish the new SPQ producer index to the XSTORM */
static inline void bnx2x_sp_prod_update(struct bnx2x *bp)
{
	int func = BP_FUNC(bp);

	/* Make sure that BD data is updated before writing the producer */
	wmb();

	REG_WR16(bp, BAR_XSTRORM_INTMEM + XSTORM_SPQ_PROD_OFFSET(func),
		 bp->spq_prod_idx);
	/* flush the posted write before the spq lock is released */
	mmiowb();
}
2673
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02002674/* the slow path queue is odd since completions arrive on the fastpath ring */
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00002675int bnx2x_sp_post(struct bnx2x *bp, int command, int cid,
Dmitry Kravkovf85582f2010-10-06 03:34:21 +00002676 u32 data_hi, u32 data_lo, int common)
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02002677{
Michael Chan28912902009-10-10 13:46:53 +00002678 struct eth_spe *spe;
Dmitry Kravkov523224a2010-10-06 03:23:26 +00002679 u16 type;
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02002680
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02002681#ifdef BNX2X_STOP_ON_ERROR
2682 if (unlikely(bp->panic))
2683 return -EIO;
2684#endif
2685
Eilon Greenstein34f80b02008-06-23 20:33:01 -07002686 spin_lock_bh(&bp->spq_lock);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02002687
Vladislav Zolotarov6e30dd42011-02-06 11:25:41 -08002688 if (common) {
2689 if (!atomic_read(&bp->eq_spq_left)) {
2690 BNX2X_ERR("BUG! EQ ring full!\n");
2691 spin_unlock_bh(&bp->spq_lock);
2692 bnx2x_panic();
2693 return -EBUSY;
2694 }
2695 } else if (!atomic_read(&bp->cq_spq_left)) {
2696 BNX2X_ERR("BUG! SPQ ring full!\n");
2697 spin_unlock_bh(&bp->spq_lock);
2698 bnx2x_panic();
2699 return -EBUSY;
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02002700 }
Eliezer Tamirf1410642008-02-28 11:51:50 -08002701
Michael Chan28912902009-10-10 13:46:53 +00002702 spe = bnx2x_sp_get_next(bp);
2703
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02002704 /* CID needs port number to be encoded int it */
Michael Chan28912902009-10-10 13:46:53 +00002705 spe->hdr.conn_and_cmd_data =
Vladislav Zolotarovcdaa7cb2010-04-19 01:13:57 +00002706 cpu_to_le32((command << SPE_HDR_CMD_ID_SHIFT) |
2707 HW_CID(bp, cid));
Dmitry Kravkov523224a2010-10-06 03:23:26 +00002708
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02002709 if (common)
Dmitry Kravkov523224a2010-10-06 03:23:26 +00002710 /* Common ramrods:
2711 * FUNC_START, FUNC_STOP, CFC_DEL, STATS, SET_MAC
2712 * TRAFFIC_STOP, TRAFFIC_START
2713 */
2714 type = (NONE_CONNECTION_TYPE << SPE_HDR_CONN_TYPE_SHIFT)
2715 & SPE_HDR_CONN_TYPE;
2716 else
2717 /* ETH ramrods: SETUP, HALT */
2718 type = (ETH_CONNECTION_TYPE << SPE_HDR_CONN_TYPE_SHIFT)
2719 & SPE_HDR_CONN_TYPE;
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02002720
Dmitry Kravkov523224a2010-10-06 03:23:26 +00002721 type |= ((BP_FUNC(bp) << SPE_HDR_FUNCTION_ID_SHIFT) &
2722 SPE_HDR_FUNCTION_ID);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02002723
Dmitry Kravkov523224a2010-10-06 03:23:26 +00002724 spe->hdr.type = cpu_to_le16(type);
2725
2726 spe->data.update_data_addr.hi = cpu_to_le32(data_hi);
2727 spe->data.update_data_addr.lo = cpu_to_le32(data_lo);
2728
2729 /* stats ramrod has it's own slot on the spq */
Vladislav Zolotarov6e30dd42011-02-06 11:25:41 -08002730 if (command != RAMROD_CMD_ID_COMMON_STAT_QUERY) {
Dmitry Kravkov523224a2010-10-06 03:23:26 +00002731 /* It's ok if the actual decrement is issued towards the memory
2732 * somewhere between the spin_lock and spin_unlock. Thus no
2733 * more explict memory barrier is needed.
2734 */
Vladislav Zolotarov6e30dd42011-02-06 11:25:41 -08002735 if (common)
2736 atomic_dec(&bp->eq_spq_left);
2737 else
2738 atomic_dec(&bp->cq_spq_left);
2739 }
2740
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02002741
Vladislav Zolotarovcdaa7cb2010-04-19 01:13:57 +00002742 DP(BNX2X_MSG_SP/*NETIF_MSG_TIMER*/,
Dmitry Kravkov523224a2010-10-06 03:23:26 +00002743 "SPQE[%x] (%x:%x) command %d hw_cid %x data (%x:%x) "
Vladislav Zolotarov6e30dd42011-02-06 11:25:41 -08002744 "type(0x%x) left (ETH, COMMON) (%x,%x)\n",
Vladislav Zolotarovcdaa7cb2010-04-19 01:13:57 +00002745 bp->spq_prod_idx, (u32)U64_HI(bp->spq_mapping),
2746 (u32)(U64_LO(bp->spq_mapping) +
2747 (void *)bp->spq_prod_bd - (void *)bp->spq), command,
Vladislav Zolotarov6e30dd42011-02-06 11:25:41 -08002748 HW_CID(bp, cid), data_hi, data_lo, type,
2749 atomic_read(&bp->cq_spq_left), atomic_read(&bp->eq_spq_left));
Vladislav Zolotarovcdaa7cb2010-04-19 01:13:57 +00002750
Michael Chan28912902009-10-10 13:46:53 +00002751 bnx2x_sp_prod_update(bp);
Eilon Greenstein34f80b02008-06-23 20:33:01 -07002752 spin_unlock_bh(&bp->spq_lock);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02002753 return 0;
2754}
2755
2756/* acquire split MCP access lock register */
Yitchak Gertner4a37fb62008-08-13 15:50:23 -07002757static int bnx2x_acquire_alr(struct bnx2x *bp)
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02002758{
Vladislav Zolotarov72fd0712010-04-19 01:13:12 +00002759 u32 j, val;
Eilon Greenstein34f80b02008-06-23 20:33:01 -07002760 int rc = 0;
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02002761
2762 might_sleep();
Vladislav Zolotarov72fd0712010-04-19 01:13:12 +00002763 for (j = 0; j < 1000; j++) {
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02002764 val = (1UL << 31);
2765 REG_WR(bp, GRCBASE_MCP + 0x9c, val);
2766 val = REG_RD(bp, GRCBASE_MCP + 0x9c);
2767 if (val & (1L << 31))
2768 break;
2769
2770 msleep(5);
2771 }
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02002772 if (!(val & (1L << 31))) {
Eilon Greenstein19680c42008-08-13 15:47:33 -07002773 BNX2X_ERR("Cannot acquire MCP access lock register\n");
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02002774 rc = -EBUSY;
2775 }
2776
2777 return rc;
2778}
2779
/* release split MCP access lock register */
static void bnx2x_release_alr(struct bnx2x *bp)
{
	/* clearing the register (including the lock bit) frees the lock */
	REG_WR(bp, GRCBASE_MCP + 0x9c, 0);
}
2785
Dmitry Kravkov523224a2010-10-06 03:23:26 +00002786#define BNX2X_DEF_SB_ATT_IDX 0x0001
2787#define BNX2X_DEF_SB_IDX 0x0002
2788
/*
 * Sample the default status block indices written by the chip and record
 * which of them advanced since the last call.  Returns a bitmask of
 * BNX2X_DEF_SB_ATT_IDX (attention index changed) and BNX2X_DEF_SB_IDX
 * (slow-path running index changed).
 */
static inline u16 bnx2x_update_dsb_idx(struct bnx2x *bp)
{
	struct host_sp_status_block *def_sb = bp->def_status_blk;
	u16 rc = 0;

	barrier(); /* status block is written to by the chip */
	if (bp->def_att_idx != def_sb->atten_status_block.attn_bits_index) {
		bp->def_att_idx = def_sb->atten_status_block.attn_bits_index;
		rc |= BNX2X_DEF_SB_ATT_IDX;
	}

	if (bp->def_idx != def_sb->sp_sb.running_index) {
		bp->def_idx = def_sb->sp_sb.running_index;
		rc |= BNX2X_DEF_SB_IDX;
	}

	/* Do not reorder: indices reading should complete before handling */
	barrier();
	return rc;
}
2809
2810/*
2811 * slow path service functions
2812 */
2813
/*
 * Handle newly-asserted attention bits: mask them in the AEU (under the
 * per-port HW lock), record them in attn_state, service the hard-wired
 * attentions (link/NIG, GPIOs, general attentions) and finally forward
 * the asserted bits to the interrupt controller (HC or IGU).
 */
static void bnx2x_attn_int_asserted(struct bnx2x *bp, u32 asserted)
{
	int port = BP_PORT(bp);
	u32 aeu_addr = port ? MISC_REG_AEU_MASK_ATTN_FUNC_1 :
			      MISC_REG_AEU_MASK_ATTN_FUNC_0;
	u32 nig_int_mask_addr = port ? NIG_REG_MASK_INTERRUPT_PORT1 :
				       NIG_REG_MASK_INTERRUPT_PORT0;
	u32 aeu_mask;
	u32 nig_mask = 0;
	u32 reg_addr;

	/* a bit cannot be asserted twice without being deasserted first */
	if (bp->attn_state & asserted)
		BNX2X_ERR("IGU ERROR\n");

	/* the AEU mask register is shared - take the HW lock */
	bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_PORT0_ATT_MASK + port);
	aeu_mask = REG_RD(bp, aeu_addr);

	DP(NETIF_MSG_HW, "aeu_mask %x newly asserted %x\n",
	   aeu_mask, asserted);
	aeu_mask &= ~(asserted & 0x3ff);
	DP(NETIF_MSG_HW, "new mask %x\n", aeu_mask);

	REG_WR(bp, aeu_addr, aeu_mask);
	bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_PORT0_ATT_MASK + port);

	DP(NETIF_MSG_HW, "attn_state %x\n", bp->attn_state);
	bp->attn_state |= asserted;
	DP(NETIF_MSG_HW, "new state %x\n", bp->attn_state);

	if (asserted & ATTN_HARD_WIRED_MASK) {
		if (asserted & ATTN_NIG_FOR_FUNC) {

			bnx2x_acquire_phy_lock(bp);

			/* save nig interrupt mask */
			nig_mask = REG_RD(bp, nig_int_mask_addr);
			REG_WR(bp, nig_int_mask_addr, 0);

			bnx2x_link_attn(bp);

			/* handle unicore attn? */
		}
		if (asserted & ATTN_SW_TIMER_4_FUNC)
			DP(NETIF_MSG_HW, "ATTN_SW_TIMER_4_FUNC!\n");

		if (asserted & GPIO_2_FUNC)
			DP(NETIF_MSG_HW, "GPIO_2_FUNC!\n");

		if (asserted & GPIO_3_FUNC)
			DP(NETIF_MSG_HW, "GPIO_3_FUNC!\n");

		if (asserted & GPIO_4_FUNC)
			DP(NETIF_MSG_HW, "GPIO_4_FUNC!\n");

		if (port == 0) {
			if (asserted & ATTN_GENERAL_ATTN_1) {
				DP(NETIF_MSG_HW, "ATTN_GENERAL_ATTN_1!\n");
				REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_1, 0x0);
			}
			if (asserted & ATTN_GENERAL_ATTN_2) {
				DP(NETIF_MSG_HW, "ATTN_GENERAL_ATTN_2!\n");
				REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_2, 0x0);
			}
			if (asserted & ATTN_GENERAL_ATTN_3) {
				DP(NETIF_MSG_HW, "ATTN_GENERAL_ATTN_3!\n");
				REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_3, 0x0);
			}
		} else {
			if (asserted & ATTN_GENERAL_ATTN_4) {
				DP(NETIF_MSG_HW, "ATTN_GENERAL_ATTN_4!\n");
				REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_4, 0x0);
			}
			if (asserted & ATTN_GENERAL_ATTN_5) {
				DP(NETIF_MSG_HW, "ATTN_GENERAL_ATTN_5!\n");
				REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_5, 0x0);
			}
			if (asserted & ATTN_GENERAL_ATTN_6) {
				DP(NETIF_MSG_HW, "ATTN_GENERAL_ATTN_6!\n");
				REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_6, 0x0);
			}
		}

	} /* if hardwired */

	/* forward the asserted bits to the HC or IGU, as applicable */
	if (bp->common.int_block == INT_BLOCK_HC)
		reg_addr = (HC_REG_COMMAND_REG + port*32 +
			    COMMAND_REG_ATTN_BITS_SET);
	else
		reg_addr = (BAR_IGU_INTMEM + IGU_CMD_ATTN_BIT_SET_UPPER*8);

	DP(NETIF_MSG_HW, "about to mask 0x%08x at %s addr 0x%x\n", asserted,
	   (bp->common.int_block == INT_BLOCK_HC) ? "HC" : "IGU", reg_addr);
	REG_WR(bp, reg_addr, asserted);

	/* now set back the mask */
	if (asserted & ATTN_NIG_FOR_FUNC) {
		REG_WR(bp, nig_int_mask_addr, nig_mask);
		bnx2x_release_phy_lock(bp);
	}
}
2914
/*
 * Record a fan failure in shared memory (so other drivers/FW see the
 * failed-PHY type) and log a shutdown warning to the user.
 */
static inline void bnx2x_fan_failure(struct bnx2x *bp)
{
	int port = BP_PORT(bp);
	u32 ext_phy_config;
	/* mark the failure */
	ext_phy_config =
		SHMEM_RD(bp,
			 dev_info.port_hw_config[port].external_phy_config);

	ext_phy_config &= ~PORT_HW_CFG_XGXS_EXT_PHY_TYPE_MASK;
	ext_phy_config |= PORT_HW_CFG_XGXS_EXT_PHY_TYPE_FAILURE;
	SHMEM_WR(bp, dev_info.port_hw_config[port].external_phy_config,
		 ext_phy_config);

	/* log the failure */
	netdev_err(bp->dev, "Fan Failure on Network Controller has caused"
		   " the driver to shutdown the card to prevent permanent"
		   " damage. Please contact OEM Support for assistance\n");
}
Eilon Greensteinab6ad5a2009-08-12 08:24:29 +00002934
/*
 * Service group-0 attention bits: SPIO5 (fan failure), GPIO3 (SFP module
 * detect) and the fatal HW-block attentions, which trigger a panic.
 */
static inline void bnx2x_attn_int_deasserted0(struct bnx2x *bp, u32 attn)
{
	int port = BP_PORT(bp);
	int reg_offset;
	u32 val;

	reg_offset = (port ? MISC_REG_AEU_ENABLE1_FUNC_1_OUT_0 :
			     MISC_REG_AEU_ENABLE1_FUNC_0_OUT_0);

	if (attn & AEU_INPUTS_ATTN_BITS_SPIO5) {

		/* mask further SPIO5 attentions */
		val = REG_RD(bp, reg_offset);
		val &= ~AEU_INPUTS_ATTN_BITS_SPIO5;
		REG_WR(bp, reg_offset, val);

		BNX2X_ERR("SPIO5 hw attention\n");

		/* Fan failure attention */
		bnx2x_hw_reset_phy(&bp->link_params);
		bnx2x_fan_failure(bp);
	}

	if (attn & (AEU_INPUTS_ATTN_BITS_GPIO3_FUNCTION_0 |
		    AEU_INPUTS_ATTN_BITS_GPIO3_FUNCTION_1)) {
		bnx2x_acquire_phy_lock(bp);
		bnx2x_handle_module_detect_int(&bp->link_params);
		bnx2x_release_phy_lock(bp);
	}

	if (attn & HW_INTERRUT_ASSERT_SET_0) {

		/* mask the fatal bits before panicking */
		val = REG_RD(bp, reg_offset);
		val &= ~(attn & HW_INTERRUT_ASSERT_SET_0);
		REG_WR(bp, reg_offset, val);

		BNX2X_ERR("FATAL HW block attention set0 0x%x\n",
			  (u32)(attn & HW_INTERRUT_ASSERT_SET_0));
		bnx2x_panic();
	}
}
2975
/* Handle deasserted attention bits from AEU group 1.
 *
 * Decodes doorbell-queue (DORQ) interrupts and the fatal HW-block
 * attention set 1.
 */
static inline void bnx2x_attn_int_deasserted1(struct bnx2x *bp, u32 attn)
{
	u32 val;

	if (attn & AEU_INPUTS_ATTN_BITS_DOORBELLQ_HW_INTERRUPT) {

		/* read-to-clear the DORQ interrupt status */
		val = REG_RD(bp, DORQ_REG_DORQ_INT_STS_CLR);
		BNX2X_ERR("DB hw attention 0x%x\n", val);
		/* DORQ discard attention */
		if (val & 0x2)
			BNX2X_ERR("FATAL error from DORQ\n");
	}

	if (attn & HW_INTERRUT_ASSERT_SET_1) {

		int port = BP_PORT(bp);
		int reg_offset;

		reg_offset = (port ? MISC_REG_AEU_ENABLE1_FUNC_1_OUT_1 :
				     MISC_REG_AEU_ENABLE1_FUNC_0_OUT_1);

		/* mask the fatal bits before panicking */
		val = REG_RD(bp, reg_offset);
		val &= ~(attn & HW_INTERRUT_ASSERT_SET_1);
		REG_WR(bp, reg_offset, val);

		BNX2X_ERR("FATAL HW block attention set1 0x%x\n",
			  (u32)(attn & HW_INTERRUT_ASSERT_SET_1));
		bnx2x_panic();
	}
}
3006
/* Handle deasserted attention bits from AEU group 2.
 *
 * Decodes CFC and PXP block interrupts (read-to-clear status registers)
 * and the fatal HW-block attention set 2.
 */
static inline void bnx2x_attn_int_deasserted2(struct bnx2x *bp, u32 attn)
{
	u32 val;

	if (attn & AEU_INPUTS_ATTN_BITS_CFC_HW_INTERRUPT) {

		val = REG_RD(bp, CFC_REG_CFC_INT_STS_CLR);
		BNX2X_ERR("CFC hw attention 0x%x\n", val);
		/* CFC error attention */
		if (val & 0x2)
			BNX2X_ERR("FATAL error from CFC\n");
	}

	if (attn & AEU_INPUTS_ATTN_BITS_PXP_HW_INTERRUPT) {

		val = REG_RD(bp, PXP_REG_PXP_INT_STS_CLR_0);
		BNX2X_ERR("PXP hw attention 0x%x\n", val);
		/* RQ_USDMDP_FIFO_OVERFLOW */
		if (val & 0x18000)
			BNX2X_ERR("FATAL error from PXP\n");
		if (CHIP_IS_E2(bp)) {
			/* E2 has a second PXP interrupt status register */
			val = REG_RD(bp, PXP_REG_PXP_INT_STS_CLR_1);
			BNX2X_ERR("PXP hw attention-1 0x%x\n", val);
		}
	}

	if (attn & HW_INTERRUT_ASSERT_SET_2) {

		int port = BP_PORT(bp);
		int reg_offset;

		reg_offset = (port ? MISC_REG_AEU_ENABLE1_FUNC_1_OUT_2 :
				     MISC_REG_AEU_ENABLE1_FUNC_0_OUT_2);

		/* mask the fatal bits before panicking */
		val = REG_RD(bp, reg_offset);
		val &= ~(attn & HW_INTERRUT_ASSERT_SET_2);
		REG_WR(bp, reg_offset, val);

		BNX2X_ERR("FATAL HW block attention set2 0x%x\n",
			  (u32)(attn & HW_INTERRUT_ASSERT_SET_2));
		bnx2x_panic();
	}
}
3050
/* Handle deasserted attention bits from AEU group 3.
 *
 * This group carries general attentions: the PMF link-assert mailbox
 * (DCC events, MF bandwidth updates, PMF migration, link status, DCBX
 * negotiation), MC/MCP firmware asserts, and latched GRC attentions.
 */
static inline void bnx2x_attn_int_deasserted3(struct bnx2x *bp, u32 attn)
{
	u32 val;

	if (attn & EVEREST_GEN_ATTN_IN_USE_MASK) {

		if (attn & BNX2X_PMF_LINK_ASSERT) {
			int func = BP_FUNC(bp);

			/* ack the per-function general attention */
			REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_12 + func*4, 0);
			/* refresh the MF config and firmware status word */
			bp->mf_config[BP_VN(bp)] = MF_CFG_RD(bp,
					func_mf_config[BP_ABS_FUNC(bp)].config);
			val = SHMEM_RD(bp,
				       func_mb[BP_FW_MB_IDX(bp)].drv_status);
			if (val & DRV_STATUS_DCC_EVENT_MASK)
				bnx2x_dcc_event(bp,
					    (val & DRV_STATUS_DCC_EVENT_MASK));

			if (val & DRV_STATUS_SET_MF_BW)
				bnx2x_set_mf_bw(bp);

			/* became PMF: take over port management duties */
			if ((bp->port.pmf == 0) && (val & DRV_STATUS_PMF))
				bnx2x_pmf_update(bp);

			/* Always call it here: bnx2x_link_report() will
			 * prevent the link indication duplication.
			 */
			bnx2x__link_status_update(bp);

			if (bp->port.pmf &&
			    (val & DRV_STATUS_DCBX_NEGOTIATION_RESULTS) &&
				bp->dcbx_enabled > 0)
				/* start dcbx state machine */
				bnx2x_dcbx_set_params(bp,
					BNX2X_DCBX_STATE_NEG_RECEIVED);
		} else if (attn & BNX2X_MC_ASSERT_BITS) {

			/* firmware storms asserted: clear and panic */
			BNX2X_ERR("MC assert!\n");
			REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_10, 0);
			REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_9, 0);
			REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_8, 0);
			REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_7, 0);
			bnx2x_panic();

		} else if (attn & BNX2X_MCP_ASSERT) {

			/* management CPU asserted: dump its trace buffer */
			BNX2X_ERR("MCP assert!\n");
			REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_11, 0);
			bnx2x_fw_dump(bp);

		} else
			BNX2X_ERR("Unknown HW assert! (attn 0x%x)\n", attn);
	}

	if (attn & EVEREST_LATCHED_ATTN_IN_USE_MASK) {
		BNX2X_ERR("LATCHED attention 0x%08x (masked)\n", attn);
		if (attn & BNX2X_GRC_TIMEOUT) {
			/* GRC timeout attention register exists only on E1H+ */
			val = CHIP_IS_E1(bp) ? 0 :
					REG_RD(bp, MISC_REG_GRC_TIMEOUT_ATTN);
			BNX2X_ERR("GRC time-out 0x%08x\n", val);
		}
		if (attn & BNX2X_GRC_RSV) {
			val = CHIP_IS_E1(bp) ? 0 :
					REG_RD(bp, MISC_REG_GRC_RSV_ATTN);
			BNX2X_ERR("GRC reserved 0x%08x\n", val);
		}
		/* clear all latched attention signals */
		REG_WR(bp, MISC_REG_AEU_CLR_LATCH_SIGNAL, 0x7ff);
	}
}
3120
/* The generic POR register is shared between functions: its low 16 bits
 * hold a load counter (number of functions with the driver loaded) and
 * the upper bits hold the "reset in progress" flag used by the recovery
 * flow.  All accessors below must run under rtnl lock.
 */
#define BNX2X_MISC_GEN_REG	MISC_REG_GENERIC_POR_1
#define LOAD_COUNTER_BITS	16 /* Number of bits for load counter */
#define LOAD_COUNTER_MASK	(((u32)0x1 << LOAD_COUNTER_BITS) - 1)
#define RESET_DONE_FLAG_MASK	(~LOAD_COUNTER_MASK)
#define RESET_DONE_FLAG_SHIFT	LOAD_COUNTER_BITS
Dmitry Kravkovf85582f2010-10-06 03:34:21 +00003126
/*
 * should be run under rtnl lock
 *
 * Clears the "reset in progress" flag in the shared generic register,
 * signalling other functions that the recovery flow has completed.
 */
static inline void bnx2x_set_reset_done(struct bnx2x *bp)
{
	u32 val = REG_RD(bp, BNX2X_MISC_GEN_REG);
	val &= ~(1 << RESET_DONE_FLAG_SHIFT);
	REG_WR(bp, BNX2X_MISC_GEN_REG, val);
	/* make sure the write reaches the chip before proceeding */
	barrier();
	mmiowb();
}
3138
3139/*
3140 * should be run under rtnl lock
3141 */
3142static inline void bnx2x_set_reset_in_progress(struct bnx2x *bp)
3143{
3144 u32 val = REG_RD(bp, BNX2X_MISC_GEN_REG);
3145 val |= (1 << 16);
3146 REG_WR(bp, BNX2X_MISC_GEN_REG, val);
3147 barrier();
3148 mmiowb();
3149}
3150
3151/*
3152 * should be run under rtnl lock
3153 */
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00003154bool bnx2x_reset_is_done(struct bnx2x *bp)
Vladislav Zolotarov72fd0712010-04-19 01:13:12 +00003155{
3156 u32 val = REG_RD(bp, BNX2X_MISC_GEN_REG);
3157 DP(NETIF_MSG_HW, "GEN_REG_VAL=0x%08x\n", val);
3158 return (val & RESET_DONE_FLAG_MASK) ? false : true;
3159}
3160
/*
 * should be run under rtnl lock
 *
 * Increments the shared load counter (modulo 2^LOAD_COUNTER_BITS) while
 * preserving the reset-flag bits in the upper half of the register.
 */
inline void bnx2x_inc_load_cnt(struct bnx2x *bp)
{
	u32 val1, val = REG_RD(bp, BNX2X_MISC_GEN_REG);

	DP(NETIF_MSG_HW, "Old GEN_REG_VAL=0x%08x\n", val);

	/* bump only the counter field; keep the flag bits untouched */
	val1 = ((val & LOAD_COUNTER_MASK) + 1) & LOAD_COUNTER_MASK;
	REG_WR(bp, BNX2X_MISC_GEN_REG, (val & RESET_DONE_FLAG_MASK) | val1);
	/* make sure the write reaches the chip before proceeding */
	barrier();
	mmiowb();
}
3175
/*
 * should be run under rtnl lock
 *
 * Decrements the shared load counter (modulo 2^LOAD_COUNTER_BITS) while
 * preserving the reset-flag bits, and returns the new counter value so
 * the caller can tell whether it was the last loaded function.
 */
u32 bnx2x_dec_load_cnt(struct bnx2x *bp)
{
	u32 val1, val = REG_RD(bp, BNX2X_MISC_GEN_REG);

	DP(NETIF_MSG_HW, "Old GEN_REG_VAL=0x%08x\n", val);

	/* decrement only the counter field; keep the flag bits untouched */
	val1 = ((val & LOAD_COUNTER_MASK) - 1) & LOAD_COUNTER_MASK;
	REG_WR(bp, BNX2X_MISC_GEN_REG, (val & RESET_DONE_FLAG_MASK) | val1);
	/* make sure the write reaches the chip before proceeding */
	barrier();
	mmiowb();

	return val1;
}
3192
3193/*
3194 * should be run under rtnl lock
3195 */
3196static inline u32 bnx2x_get_load_cnt(struct bnx2x *bp)
3197{
3198 return REG_RD(bp, BNX2X_MISC_GEN_REG) & LOAD_COUNTER_MASK;
3199}
3200
3201static inline void bnx2x_clear_load_cnt(struct bnx2x *bp)
3202{
3203 u32 val = REG_RD(bp, BNX2X_MISC_GEN_REG);
3204 REG_WR(bp, BNX2X_MISC_GEN_REG, val & (~LOAD_COUNTER_MASK));
3205}
3206
/* Append one block name to the parity-error line being built with
 * pr_cont(); names after the first are separated by ", ".
 */
static inline void _print_next_block(int idx, const char *blk)
{
	if (idx != 0)
		pr_cont(", ");
	pr_cont("%s", blk);
}
3213
3214static inline int bnx2x_print_blocks_with_parity0(u32 sig, int par_num)
3215{
3216 int i = 0;
3217 u32 cur_bit = 0;
3218 for (i = 0; sig; i++) {
3219 cur_bit = ((u32)0x1 << i);
3220 if (sig & cur_bit) {
3221 switch (cur_bit) {
3222 case AEU_INPUTS_ATTN_BITS_BRB_PARITY_ERROR:
3223 _print_next_block(par_num++, "BRB");
3224 break;
3225 case AEU_INPUTS_ATTN_BITS_PARSER_PARITY_ERROR:
3226 _print_next_block(par_num++, "PARSER");
3227 break;
3228 case AEU_INPUTS_ATTN_BITS_TSDM_PARITY_ERROR:
3229 _print_next_block(par_num++, "TSDM");
3230 break;
3231 case AEU_INPUTS_ATTN_BITS_SEARCHER_PARITY_ERROR:
3232 _print_next_block(par_num++, "SEARCHER");
3233 break;
3234 case AEU_INPUTS_ATTN_BITS_TSEMI_PARITY_ERROR:
3235 _print_next_block(par_num++, "TSEMI");
3236 break;
3237 }
3238
3239 /* Clear the bit */
3240 sig &= ~cur_bit;
3241 }
3242 }
3243
3244 return par_num;
3245}
3246
3247static inline int bnx2x_print_blocks_with_parity1(u32 sig, int par_num)
3248{
3249 int i = 0;
3250 u32 cur_bit = 0;
3251 for (i = 0; sig; i++) {
3252 cur_bit = ((u32)0x1 << i);
3253 if (sig & cur_bit) {
3254 switch (cur_bit) {
3255 case AEU_INPUTS_ATTN_BITS_PBCLIENT_PARITY_ERROR:
3256 _print_next_block(par_num++, "PBCLIENT");
3257 break;
3258 case AEU_INPUTS_ATTN_BITS_QM_PARITY_ERROR:
3259 _print_next_block(par_num++, "QM");
3260 break;
3261 case AEU_INPUTS_ATTN_BITS_XSDM_PARITY_ERROR:
3262 _print_next_block(par_num++, "XSDM");
3263 break;
3264 case AEU_INPUTS_ATTN_BITS_XSEMI_PARITY_ERROR:
3265 _print_next_block(par_num++, "XSEMI");
3266 break;
3267 case AEU_INPUTS_ATTN_BITS_DOORBELLQ_PARITY_ERROR:
3268 _print_next_block(par_num++, "DOORBELLQ");
3269 break;
3270 case AEU_INPUTS_ATTN_BITS_VAUX_PCI_CORE_PARITY_ERROR:
3271 _print_next_block(par_num++, "VAUX PCI CORE");
3272 break;
3273 case AEU_INPUTS_ATTN_BITS_DEBUG_PARITY_ERROR:
3274 _print_next_block(par_num++, "DEBUG");
3275 break;
3276 case AEU_INPUTS_ATTN_BITS_USDM_PARITY_ERROR:
3277 _print_next_block(par_num++, "USDM");
3278 break;
3279 case AEU_INPUTS_ATTN_BITS_USEMI_PARITY_ERROR:
3280 _print_next_block(par_num++, "USEMI");
3281 break;
3282 case AEU_INPUTS_ATTN_BITS_UPB_PARITY_ERROR:
3283 _print_next_block(par_num++, "UPB");
3284 break;
3285 case AEU_INPUTS_ATTN_BITS_CSDM_PARITY_ERROR:
3286 _print_next_block(par_num++, "CSDM");
3287 break;
3288 }
3289
3290 /* Clear the bit */
3291 sig &= ~cur_bit;
3292 }
3293 }
3294
3295 return par_num;
3296}
3297
3298static inline int bnx2x_print_blocks_with_parity2(u32 sig, int par_num)
3299{
3300 int i = 0;
3301 u32 cur_bit = 0;
3302 for (i = 0; sig; i++) {
3303 cur_bit = ((u32)0x1 << i);
3304 if (sig & cur_bit) {
3305 switch (cur_bit) {
3306 case AEU_INPUTS_ATTN_BITS_CSEMI_PARITY_ERROR:
3307 _print_next_block(par_num++, "CSEMI");
3308 break;
3309 case AEU_INPUTS_ATTN_BITS_PXP_PARITY_ERROR:
3310 _print_next_block(par_num++, "PXP");
3311 break;
3312 case AEU_IN_ATTN_BITS_PXPPCICLOCKCLIENT_PARITY_ERROR:
3313 _print_next_block(par_num++,
3314 "PXPPCICLOCKCLIENT");
3315 break;
3316 case AEU_INPUTS_ATTN_BITS_CFC_PARITY_ERROR:
3317 _print_next_block(par_num++, "CFC");
3318 break;
3319 case AEU_INPUTS_ATTN_BITS_CDU_PARITY_ERROR:
3320 _print_next_block(par_num++, "CDU");
3321 break;
3322 case AEU_INPUTS_ATTN_BITS_IGU_PARITY_ERROR:
3323 _print_next_block(par_num++, "IGU");
3324 break;
3325 case AEU_INPUTS_ATTN_BITS_MISC_PARITY_ERROR:
3326 _print_next_block(par_num++, "MISC");
3327 break;
3328 }
3329
3330 /* Clear the bit */
3331 sig &= ~cur_bit;
3332 }
3333 }
3334
3335 return par_num;
3336}
3337
3338static inline int bnx2x_print_blocks_with_parity3(u32 sig, int par_num)
3339{
3340 int i = 0;
3341 u32 cur_bit = 0;
3342 for (i = 0; sig; i++) {
3343 cur_bit = ((u32)0x1 << i);
3344 if (sig & cur_bit) {
3345 switch (cur_bit) {
3346 case AEU_INPUTS_ATTN_BITS_MCP_LATCHED_ROM_PARITY:
3347 _print_next_block(par_num++, "MCP ROM");
3348 break;
3349 case AEU_INPUTS_ATTN_BITS_MCP_LATCHED_UMP_RX_PARITY:
3350 _print_next_block(par_num++, "MCP UMP RX");
3351 break;
3352 case AEU_INPUTS_ATTN_BITS_MCP_LATCHED_UMP_TX_PARITY:
3353 _print_next_block(par_num++, "MCP UMP TX");
3354 break;
3355 case AEU_INPUTS_ATTN_BITS_MCP_LATCHED_SCPAD_PARITY:
3356 _print_next_block(par_num++, "MCP SCPAD");
3357 break;
3358 }
3359
3360 /* Clear the bit */
3361 sig &= ~cur_bit;
3362 }
3363 }
3364
3365 return par_num;
3366}
3367
3368static inline bool bnx2x_parity_attn(struct bnx2x *bp, u32 sig0, u32 sig1,
3369 u32 sig2, u32 sig3)
3370{
3371 if ((sig0 & HW_PRTY_ASSERT_SET_0) || (sig1 & HW_PRTY_ASSERT_SET_1) ||
3372 (sig2 & HW_PRTY_ASSERT_SET_2) || (sig3 & HW_PRTY_ASSERT_SET_3)) {
3373 int par_num = 0;
3374 DP(NETIF_MSG_HW, "Was parity error: HW block parity attention: "
3375 "[0]:0x%08x [1]:0x%08x "
3376 "[2]:0x%08x [3]:0x%08x\n",
3377 sig0 & HW_PRTY_ASSERT_SET_0,
3378 sig1 & HW_PRTY_ASSERT_SET_1,
3379 sig2 & HW_PRTY_ASSERT_SET_2,
3380 sig3 & HW_PRTY_ASSERT_SET_3);
3381 printk(KERN_ERR"%s: Parity errors detected in blocks: ",
3382 bp->dev->name);
3383 par_num = bnx2x_print_blocks_with_parity0(
3384 sig0 & HW_PRTY_ASSERT_SET_0, par_num);
3385 par_num = bnx2x_print_blocks_with_parity1(
3386 sig1 & HW_PRTY_ASSERT_SET_1, par_num);
3387 par_num = bnx2x_print_blocks_with_parity2(
3388 sig2 & HW_PRTY_ASSERT_SET_2, par_num);
3389 par_num = bnx2x_print_blocks_with_parity3(
3390 sig3 & HW_PRTY_ASSERT_SET_3, par_num);
3391 printk("\n");
3392 return true;
3393 } else
3394 return false;
3395}
3396
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00003397bool bnx2x_chk_parity_attn(struct bnx2x *bp)
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02003398{
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02003399 struct attn_route attn;
Vladislav Zolotarov72fd0712010-04-19 01:13:12 +00003400 int port = BP_PORT(bp);
3401
3402 attn.sig[0] = REG_RD(bp,
3403 MISC_REG_AEU_AFTER_INVERT_1_FUNC_0 +
3404 port*4);
3405 attn.sig[1] = REG_RD(bp,
3406 MISC_REG_AEU_AFTER_INVERT_2_FUNC_0 +
3407 port*4);
3408 attn.sig[2] = REG_RD(bp,
3409 MISC_REG_AEU_AFTER_INVERT_3_FUNC_0 +
3410 port*4);
3411 attn.sig[3] = REG_RD(bp,
3412 MISC_REG_AEU_AFTER_INVERT_4_FUNC_0 +
3413 port*4);
3414
3415 return bnx2x_parity_attn(bp, attn.sig[0], attn.sig[1], attn.sig[2],
3416 attn.sig[3]);
3417}
3418
Dmitry Kravkovf2e08992010-10-06 03:28:26 +00003419
/* Handle deasserted attention bits from AEU group 4 (E2 only).
 *
 * Decodes PGLUE_B and ATC interrupt status bits (read-to-clear) into
 * human-readable error messages, and reports fatal PGLUE/ATC parity
 * attentions.  Unlike sets 0-2, this handler does not panic.
 */
static inline void bnx2x_attn_int_deasserted4(struct bnx2x *bp, u32 attn)
{
	u32 val;
	if (attn & AEU_INPUTS_ATTN_BITS_PGLUE_HW_INTERRUPT) {

		val = REG_RD(bp, PGLUE_B_REG_PGLUE_B_INT_STS_CLR);
		BNX2X_ERR("PGLUE hw attention 0x%x\n", val);
		if (val & PGLUE_B_PGLUE_B_INT_STS_REG_ADDRESS_ERROR)
			BNX2X_ERR("PGLUE_B_PGLUE_B_INT_STS_REG_"
				  "ADDRESS_ERROR\n");
		if (val & PGLUE_B_PGLUE_B_INT_STS_REG_INCORRECT_RCV_BEHAVIOR)
			BNX2X_ERR("PGLUE_B_PGLUE_B_INT_STS_REG_"
				  "INCORRECT_RCV_BEHAVIOR\n");
		if (val & PGLUE_B_PGLUE_B_INT_STS_REG_WAS_ERROR_ATTN)
			BNX2X_ERR("PGLUE_B_PGLUE_B_INT_STS_REG_"
				  "WAS_ERROR_ATTN\n");
		if (val & PGLUE_B_PGLUE_B_INT_STS_REG_VF_LENGTH_VIOLATION_ATTN)
			BNX2X_ERR("PGLUE_B_PGLUE_B_INT_STS_REG_"
				  "VF_LENGTH_VIOLATION_ATTN\n");
		if (val &
		    PGLUE_B_PGLUE_B_INT_STS_REG_VF_GRC_SPACE_VIOLATION_ATTN)
			BNX2X_ERR("PGLUE_B_PGLUE_B_INT_STS_REG_"
				  "VF_GRC_SPACE_VIOLATION_ATTN\n");
		if (val &
		    PGLUE_B_PGLUE_B_INT_STS_REG_VF_MSIX_BAR_VIOLATION_ATTN)
			BNX2X_ERR("PGLUE_B_PGLUE_B_INT_STS_REG_"
				  "VF_MSIX_BAR_VIOLATION_ATTN\n");
		if (val & PGLUE_B_PGLUE_B_INT_STS_REG_TCPL_ERROR_ATTN)
			BNX2X_ERR("PGLUE_B_PGLUE_B_INT_STS_REG_"
				  "TCPL_ERROR_ATTN\n");
		if (val & PGLUE_B_PGLUE_B_INT_STS_REG_TCPL_IN_TWO_RCBS_ATTN)
			BNX2X_ERR("PGLUE_B_PGLUE_B_INT_STS_REG_"
				  "TCPL_IN_TWO_RCBS_ATTN\n");
		if (val & PGLUE_B_PGLUE_B_INT_STS_REG_CSSNOOP_FIFO_OVERFLOW)
			BNX2X_ERR("PGLUE_B_PGLUE_B_INT_STS_REG_"
				  "CSSNOOP_FIFO_OVERFLOW\n");
	}
	if (attn & AEU_INPUTS_ATTN_BITS_ATC_HW_INTERRUPT) {
		val = REG_RD(bp, ATC_REG_ATC_INT_STS_CLR);
		BNX2X_ERR("ATC hw attention 0x%x\n", val);
		if (val & ATC_ATC_INT_STS_REG_ADDRESS_ERROR)
			BNX2X_ERR("ATC_ATC_INT_STS_REG_ADDRESS_ERROR\n");
		if (val & ATC_ATC_INT_STS_REG_ATC_TCPL_TO_NOT_PEND)
			BNX2X_ERR("ATC_ATC_INT_STS_REG"
				  "_ATC_TCPL_TO_NOT_PEND\n");
		if (val & ATC_ATC_INT_STS_REG_ATC_GPA_MULTIPLE_HITS)
			BNX2X_ERR("ATC_ATC_INT_STS_REG_"
				  "ATC_GPA_MULTIPLE_HITS\n");
		if (val & ATC_ATC_INT_STS_REG_ATC_RCPL_TO_EMPTY_CNT)
			BNX2X_ERR("ATC_ATC_INT_STS_REG_"
				  "ATC_RCPL_TO_EMPTY_CNT\n");
		if (val & ATC_ATC_INT_STS_REG_ATC_TCPL_ERROR)
			BNX2X_ERR("ATC_ATC_INT_STS_REG_ATC_TCPL_ERROR\n");
		if (val & ATC_ATC_INT_STS_REG_ATC_IREQ_LESS_THAN_STU)
			BNX2X_ERR("ATC_ATC_INT_STS_REG_"
				  "ATC_IREQ_LESS_THAN_STU\n");
	}

	if (attn & (AEU_INPUTS_ATTN_BITS_PGLUE_PARITY_ERROR |
		    AEU_INPUTS_ATTN_BITS_ATC_PARITY_ERROR)) {
		BNX2X_ERR("FATAL parity attention set4 0x%x\n",
		    (u32)(attn & (AEU_INPUTS_ATTN_BITS_PGLUE_PARITY_ERROR |
				  AEU_INPUTS_ATTN_BITS_ATC_PARITY_ERROR)));
	}

}
3486
/* Process newly deasserted attention bits.
 *
 * Under the shared ALR lock: first check for parity errors (which start
 * the recovery flow and suppress normal handling), then read the five
 * per-port attention signal words and dispatch each deasserted dynamic
 * group to the per-set handlers.  Finally acknowledge the bits towards
 * the HC/IGU and re-enable them in the AEU mask under the HW lock.
 */
static void bnx2x_attn_int_deasserted(struct bnx2x *bp, u32 deasserted)
{
	struct attn_route attn, *group_mask;
	int port = BP_PORT(bp);
	int index;
	u32 reg_addr;
	u32 val;
	u32 aeu_mask;

	/* need to take HW lock because MCP or other port might also
	   try to handle this event */
	bnx2x_acquire_alr(bp);

	if (CHIP_PARITY_ENABLED(bp) && bnx2x_chk_parity_attn(bp)) {
		/* parity error: kick off the recovery state machine */
		bp->recovery_state = BNX2X_RECOVERY_INIT;
		bnx2x_set_reset_in_progress(bp);
		schedule_delayed_work(&bp->reset_task, 0);
		/* Disable HW interrupts */
		bnx2x_int_disable(bp);
		bnx2x_release_alr(bp);
		/* In case of parity errors don't handle attentions so that
		 * other function would "see" parity errors.
		 */
		return;
	}

	attn.sig[0] = REG_RD(bp, MISC_REG_AEU_AFTER_INVERT_1_FUNC_0 + port*4);
	attn.sig[1] = REG_RD(bp, MISC_REG_AEU_AFTER_INVERT_2_FUNC_0 + port*4);
	attn.sig[2] = REG_RD(bp, MISC_REG_AEU_AFTER_INVERT_3_FUNC_0 + port*4);
	attn.sig[3] = REG_RD(bp, MISC_REG_AEU_AFTER_INVERT_4_FUNC_0 + port*4);
	if (CHIP_IS_E2(bp))
		/* the fifth signal word exists only on E2 */
		attn.sig[4] =
		      REG_RD(bp, MISC_REG_AEU_AFTER_INVERT_5_FUNC_0 + port*4);
	else
		attn.sig[4] = 0;

	DP(NETIF_MSG_HW, "attn: %08x %08x %08x %08x %08x\n",
	   attn.sig[0], attn.sig[1], attn.sig[2], attn.sig[3], attn.sig[4]);

	for (index = 0; index < MAX_DYNAMIC_ATTN_GRPS; index++) {
		if (deasserted & (1 << index)) {
			group_mask = &bp->attn_group[index];

			DP(NETIF_MSG_HW, "group[%d]: %08x %08x "
					 "%08x %08x %08x\n",
			   index,
			   group_mask->sig[0], group_mask->sig[1],
			   group_mask->sig[2], group_mask->sig[3],
			   group_mask->sig[4]);

			/* dispatch only the bits routed to this group */
			bnx2x_attn_int_deasserted4(bp,
					attn.sig[4] & group_mask->sig[4]);
			bnx2x_attn_int_deasserted3(bp,
					attn.sig[3] & group_mask->sig[3]);
			bnx2x_attn_int_deasserted1(bp,
					attn.sig[1] & group_mask->sig[1]);
			bnx2x_attn_int_deasserted2(bp,
					attn.sig[2] & group_mask->sig[2]);
			bnx2x_attn_int_deasserted0(bp,
					attn.sig[0] & group_mask->sig[0]);
		}
	}

	bnx2x_release_alr(bp);

	/* ack the deasserted bits towards HC or IGU as appropriate */
	if (bp->common.int_block == INT_BLOCK_HC)
		reg_addr = (HC_REG_COMMAND_REG + port*32 +
			    COMMAND_REG_ATTN_BITS_CLR);
	else
		reg_addr = (BAR_IGU_INTMEM + IGU_CMD_ATTN_BIT_CLR_UPPER*8);

	val = ~deasserted;
	DP(NETIF_MSG_HW, "about to mask 0x%08x at %s addr 0x%x\n", val,
	   (bp->common.int_block == INT_BLOCK_HC) ? "HC" : "IGU", reg_addr);
	REG_WR(bp, reg_addr, val);

	if (~bp->attn_state & deasserted)
		BNX2X_ERR("IGU ERROR\n");

	reg_addr = port ? MISC_REG_AEU_MASK_ATTN_FUNC_1 :
			  MISC_REG_AEU_MASK_ATTN_FUNC_0;

	/* re-enable the handled attention lines in the AEU mask */
	bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_PORT0_ATT_MASK + port);
	aeu_mask = REG_RD(bp, reg_addr);

	DP(NETIF_MSG_HW, "aeu_mask %x newly deasserted %x\n",
	   aeu_mask, deasserted);
	aeu_mask |= (deasserted & 0x3ff);
	DP(NETIF_MSG_HW, "new mask %x\n", aeu_mask);

	REG_WR(bp, reg_addr, aeu_mask);
	bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_PORT0_ATT_MASK + port);

	DP(NETIF_MSG_HW, "attn_state %x\n", bp->attn_state);
	bp->attn_state &= ~deasserted;
	DP(NETIF_MSG_HW, "new state %x\n", bp->attn_state);
}
3584
3585static void bnx2x_attn_int(struct bnx2x *bp)
3586{
3587 /* read local copy of bits */
Eilon Greenstein68d59482009-01-14 21:27:36 -08003588 u32 attn_bits = le32_to_cpu(bp->def_status_blk->atten_status_block.
3589 attn_bits);
3590 u32 attn_ack = le32_to_cpu(bp->def_status_blk->atten_status_block.
3591 attn_bits_ack);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02003592 u32 attn_state = bp->attn_state;
3593
3594 /* look for changed bits */
3595 u32 asserted = attn_bits & ~attn_ack & ~attn_state;
3596 u32 deasserted = ~attn_bits & attn_ack & attn_state;
3597
3598 DP(NETIF_MSG_HW,
3599 "attn_bits %x attn_ack %x asserted %x deasserted %x\n",
3600 attn_bits, attn_ack, asserted, deasserted);
3601
3602 if (~(attn_bits ^ attn_ack) & (attn_bits ^ attn_state))
Eilon Greenstein34f80b02008-06-23 20:33:01 -07003603 BNX2X_ERR("BAD attention state\n");
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02003604
3605 /* handle bits that were raised */
3606 if (asserted)
3607 bnx2x_attn_int_asserted(bp, asserted);
3608
3609 if (deasserted)
3610 bnx2x_attn_int_deasserted(bp, deasserted);
3611}
3612
/* Publish a new event-queue producer value to the storm memory. */
static inline void bnx2x_update_eq_prod(struct bnx2x *bp, u16 prod)
{
	/* No memory barriers */
	storm_memset_eq_prod(bp, prod, BP_FUNC(bp));
	mmiowb(); /* keep prod updates ordered */
}
3619
#ifdef BCM_CNIC
/* Complete a CFC-delete event for a CNIC-owned connection.
 *
 * Returns 1 when the CID does not belong to CNIC (no CNIC CIDs allocated,
 * or the CID is below the CNIC range and is not the iSCSI L2 CID) so the
 * caller handles it as a regular L2 connection; returns 0 after
 * completing the CNIC ramrod.
 */
static int bnx2x_cnic_handle_cfc_del(struct bnx2x *bp, u32 cid,
				     union event_ring_elem *elem)
{
	if (!bp->cnic_eth_dev.starting_cid ||
	    (cid < bp->cnic_eth_dev.starting_cid &&
	    cid != bp->cnic_eth_dev.iscsi_l2_cid))
		return 1;

	DP(BNX2X_MSG_SP, "got delete ramrod for CNIC CID %d\n", cid);

	/* a delete ramrod reporting an error is a firmware-level fault */
	if (unlikely(elem->message.data.cfc_del_event.error)) {
		BNX2X_ERR("got delete ramrod for CNIC CID %d with error!\n",
			  cid);
		bnx2x_panic_dump(bp);
	}
	bnx2x_cnic_cfc_comp(bp, cid);
	return 0;
}
#endif
3640
3641static void bnx2x_eq_int(struct bnx2x *bp)
3642{
3643 u16 hw_cons, sw_cons, sw_prod;
3644 union event_ring_elem *elem;
3645 u32 cid;
3646 u8 opcode;
3647 int spqe_cnt = 0;
3648
3649 hw_cons = le16_to_cpu(*bp->eq_cons_sb);
3650
3651 /* The hw_cos range is 1-255, 257 - the sw_cons range is 0-254, 256.
3652 * when we get the the next-page we nned to adjust so the loop
3653 * condition below will be met. The next element is the size of a
3654 * regular element and hence incrementing by 1
3655 */
3656 if ((hw_cons & EQ_DESC_MAX_PAGE) == EQ_DESC_MAX_PAGE)
3657 hw_cons++;
3658
Lucas De Marchi25985ed2011-03-30 22:57:33 -03003659 /* This function may never run in parallel with itself for a
Dmitry Kravkov523224a2010-10-06 03:23:26 +00003660 * specific bp, thus there is no need in "paired" read memory
3661 * barrier here.
3662 */
3663 sw_cons = bp->eq_cons;
3664 sw_prod = bp->eq_prod;
3665
Vladislav Zolotarov6e30dd42011-02-06 11:25:41 -08003666 DP(BNX2X_MSG_SP, "EQ: hw_cons %u sw_cons %u bp->cq_spq_left %u\n",
3667 hw_cons, sw_cons, atomic_read(&bp->eq_spq_left));
Dmitry Kravkov523224a2010-10-06 03:23:26 +00003668
3669 for (; sw_cons != hw_cons;
3670 sw_prod = NEXT_EQ_IDX(sw_prod), sw_cons = NEXT_EQ_IDX(sw_cons)) {
3671
3672
3673 elem = &bp->eq_ring[EQ_DESC(sw_cons)];
3674
3675 cid = SW_CID(elem->message.data.cfc_del_event.cid);
3676 opcode = elem->message.opcode;
3677
3678
3679 /* handle eq element */
3680 switch (opcode) {
3681 case EVENT_RING_OPCODE_STAT_QUERY:
3682 DP(NETIF_MSG_TIMER, "got statistics comp event\n");
3683 /* nothing to do with stats comp */
3684 continue;
3685
3686 case EVENT_RING_OPCODE_CFC_DEL:
3687 /* handle according to cid range */
3688 /*
3689 * we may want to verify here that the bp state is
3690 * HALTING
3691 */
3692 DP(NETIF_MSG_IFDOWN,
3693 "got delete ramrod for MULTI[%d]\n", cid);
3694#ifdef BCM_CNIC
3695 if (!bnx2x_cnic_handle_cfc_del(bp, cid, elem))
3696 goto next_spqe;
Vladislav Zolotarovec6ba942010-12-13 05:44:01 +00003697 if (cid == BNX2X_FCOE_ETH_CID)
3698 bnx2x_fcoe(bp, state) = BNX2X_FP_STATE_CLOSED;
3699 else
Dmitry Kravkov523224a2010-10-06 03:23:26 +00003700#endif
Vladislav Zolotarovec6ba942010-12-13 05:44:01 +00003701 bnx2x_fp(bp, cid, state) =
Dmitry Kravkov523224a2010-10-06 03:23:26 +00003702 BNX2X_FP_STATE_CLOSED;
3703
3704 goto next_spqe;
Vladislav Zolotarove4901dd2010-12-13 05:44:18 +00003705
3706 case EVENT_RING_OPCODE_STOP_TRAFFIC:
3707 DP(NETIF_MSG_IFUP, "got STOP TRAFFIC\n");
3708 bnx2x_dcbx_set_params(bp, BNX2X_DCBX_STATE_TX_PAUSED);
3709 goto next_spqe;
3710 case EVENT_RING_OPCODE_START_TRAFFIC:
3711 DP(NETIF_MSG_IFUP, "got START TRAFFIC\n");
3712 bnx2x_dcbx_set_params(bp, BNX2X_DCBX_STATE_TX_RELEASED);
3713 goto next_spqe;
Dmitry Kravkov523224a2010-10-06 03:23:26 +00003714 }
3715
3716 switch (opcode | bp->state) {
3717 case (EVENT_RING_OPCODE_FUNCTION_START |
3718 BNX2X_STATE_OPENING_WAIT4_PORT):
3719 DP(NETIF_MSG_IFUP, "got setup ramrod\n");
3720 bp->state = BNX2X_STATE_FUNC_STARTED;
3721 break;
3722
3723 case (EVENT_RING_OPCODE_FUNCTION_STOP |
3724 BNX2X_STATE_CLOSING_WAIT4_HALT):
3725 DP(NETIF_MSG_IFDOWN, "got halt ramrod\n");
3726 bp->state = BNX2X_STATE_CLOSING_WAIT4_UNLOAD;
3727 break;
3728
3729 case (EVENT_RING_OPCODE_SET_MAC | BNX2X_STATE_OPEN):
3730 case (EVENT_RING_OPCODE_SET_MAC | BNX2X_STATE_DIAG):
3731 DP(NETIF_MSG_IFUP, "got set mac ramrod\n");
Vladislav Zolotarov6e30dd42011-02-06 11:25:41 -08003732 if (elem->message.data.set_mac_event.echo)
3733 bp->set_mac_pending = 0;
Dmitry Kravkov523224a2010-10-06 03:23:26 +00003734 break;
3735
3736 case (EVENT_RING_OPCODE_SET_MAC |
3737 BNX2X_STATE_CLOSING_WAIT4_HALT):
3738 DP(NETIF_MSG_IFDOWN, "got (un)set mac ramrod\n");
Vladislav Zolotarov6e30dd42011-02-06 11:25:41 -08003739 if (elem->message.data.set_mac_event.echo)
3740 bp->set_mac_pending = 0;
Dmitry Kravkov523224a2010-10-06 03:23:26 +00003741 break;
3742 default:
3743 /* unknown event log error and continue */
3744 BNX2X_ERR("Unknown EQ event %d\n",
3745 elem->message.opcode);
3746 }
3747next_spqe:
3748 spqe_cnt++;
3749 } /* for */
3750
Dmitry Kravkov8fe23fb2010-10-06 03:27:41 +00003751 smp_mb__before_atomic_inc();
Vladislav Zolotarov6e30dd42011-02-06 11:25:41 -08003752 atomic_add(spqe_cnt, &bp->eq_spq_left);
Dmitry Kravkov523224a2010-10-06 03:23:26 +00003753
3754 bp->eq_cons = sw_cons;
3755 bp->eq_prod = sw_prod;
3756 /* Make sure that above mem writes were issued towards the memory */
3757 smp_wmb();
3758
3759 /* update producer */
3760 bnx2x_update_eq_prod(bp, bp->eq_prod);
3761}
3762
/* Slow-path worker: handle pending slow-path events.
 *
 * Queued from the slow-path interrupt.  Reads the default status block
 * index, processes HW attentions and slow-path (EQ) completions, kicks
 * the FCoE NAPI context if it has work, and finally re-enables the
 * attention interrupt by acking the default status block.
 */
static void bnx2x_sp_task(struct work_struct *work)
{
	struct bnx2x *bp = container_of(work, struct bnx2x, sp_task.work);
	u16 status;

	status = bnx2x_update_dsb_idx(bp);
/*	if (status == 0)				     */
/*		BNX2X_ERR("spurious slowpath interrupt!\n"); */

	DP(NETIF_MSG_INTR, "got a slowpath interrupt (status 0x%x)\n", status);

	/* HW attentions */
	if (status & BNX2X_DEF_SB_ATT_IDX) {
		bnx2x_attn_int(bp);
		status &= ~BNX2X_DEF_SB_ATT_IDX;
	}

	/* SP events: STAT_QUERY and others */
	if (status & BNX2X_DEF_SB_IDX) {
#ifdef BCM_CNIC
		struct bnx2x_fastpath *fp = bnx2x_fcoe_fp(bp);

		/* FCoE rides on the slow-path status block; poll it here */
		if ((!NO_FCOE(bp)) &&
			(bnx2x_has_rx_work(fp) || bnx2x_has_tx_work(fp)))
			napi_schedule(&bnx2x_fcoe(bp, napi));
#endif
		/* Handle EQ completions */
		bnx2x_eq_int(bp);

		bnx2x_ack_sb(bp, bp->igu_dsb_id, USTORM_ID,
			le16_to_cpu(bp->def_idx), IGU_INT_NOP, 1);

		status &= ~BNX2X_DEF_SB_IDX;
	}

	if (unlikely(status))
		DP(NETIF_MSG_INTR, "got an unknown interrupt! (status 0x%x)\n",
		   status);

	/* re-enable the attention line last, after all work is done */
	bnx2x_ack_sb(bp, bp->igu_dsb_id, ATTENTION_ID,
	     le16_to_cpu(bp->def_att_idx), IGU_INT_ENABLE, 1);
}
3805
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00003806irqreturn_t bnx2x_msix_sp_int(int irq, void *dev_instance)
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02003807{
3808 struct net_device *dev = dev_instance;
3809 struct bnx2x *bp = netdev_priv(dev);
3810
Dmitry Kravkov523224a2010-10-06 03:23:26 +00003811 bnx2x_ack_sb(bp, bp->igu_dsb_id, USTORM_ID, 0,
3812 IGU_INT_DISABLE, 0);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02003813
3814#ifdef BNX2X_STOP_ON_ERROR
3815 if (unlikely(bp->panic))
3816 return IRQ_HANDLED;
3817#endif
3818
Michael Chan993ac7b2009-10-10 13:46:56 +00003819#ifdef BCM_CNIC
3820 {
3821 struct cnic_ops *c_ops;
3822
3823 rcu_read_lock();
3824 c_ops = rcu_dereference(bp->cnic_ops);
3825 if (c_ops)
3826 c_ops->cnic_handler(bp->cnic_data, NULL);
3827 rcu_read_unlock();
3828 }
3829#endif
Eilon Greenstein1cf167f2009-01-14 21:22:18 -08003830 queue_delayed_work(bnx2x_wq, &bp->sp_task, 0);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02003831
3832 return IRQ_HANDLED;
3833}
3834
3835/* end of slow path */
3836
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02003837static void bnx2x_timer(unsigned long data)
3838{
3839 struct bnx2x *bp = (struct bnx2x *) data;
3840
3841 if (!netif_running(bp->dev))
3842 return;
3843
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02003844 if (poll) {
3845 struct bnx2x_fastpath *fp = &bp->fp[0];
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02003846
Eilon Greenstein7961f792009-03-02 07:59:31 +00003847 bnx2x_tx_int(fp);
David S. Millerb8ee8322011-04-17 16:56:12 -07003848 bnx2x_rx_int(fp, 1000);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02003849 }
3850
Eilon Greenstein34f80b02008-06-23 20:33:01 -07003851 if (!BP_NOMCP(bp)) {
Dmitry Kravkovf2e08992010-10-06 03:28:26 +00003852 int mb_idx = BP_FW_MB_IDX(bp);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02003853 u32 drv_pulse;
3854 u32 mcp_pulse;
3855
3856 ++bp->fw_drv_pulse_wr_seq;
3857 bp->fw_drv_pulse_wr_seq &= DRV_PULSE_SEQ_MASK;
3858 /* TBD - add SYSTEM_TIME */
3859 drv_pulse = bp->fw_drv_pulse_wr_seq;
Dmitry Kravkovf2e08992010-10-06 03:28:26 +00003860 SHMEM_WR(bp, func_mb[mb_idx].drv_pulse_mb, drv_pulse);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02003861
Dmitry Kravkovf2e08992010-10-06 03:28:26 +00003862 mcp_pulse = (SHMEM_RD(bp, func_mb[mb_idx].mcp_pulse_mb) &
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02003863 MCP_PULSE_SEQ_MASK);
3864 /* The delta between driver pulse and mcp response
3865 * should be 1 (before mcp response) or 0 (after mcp response)
3866 */
3867 if ((drv_pulse != mcp_pulse) &&
3868 (drv_pulse != ((mcp_pulse + 1) & MCP_PULSE_SEQ_MASK))) {
3869 /* someone lost a heartbeat... */
3870 BNX2X_ERR("drv_pulse (0x%x) != mcp_pulse (0x%x)\n",
3871 drv_pulse, mcp_pulse);
3872 }
3873 }
3874
Eilon Greensteinf34d28e2009-10-15 00:18:08 -07003875 if (bp->state == BNX2X_STATE_OPEN)
Yitchak Gertnerbb2a0f72008-06-23 20:33:36 -07003876 bnx2x_stats_handle(bp, STATS_EVENT_UPDATE);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02003877
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02003878 mod_timer(&bp->timer, jiffies + bp->current_interval);
3879}
3880
3881/* end of Statistics */
3882
3883/* nic init */
3884
3885/*
3886 * nic init service functions
3887 */
3888
Dmitry Kravkov523224a2010-10-06 03:23:26 +00003889static inline void bnx2x_fill(struct bnx2x *bp, u32 addr, int fill, u32 len)
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02003890{
Dmitry Kravkov523224a2010-10-06 03:23:26 +00003891 u32 i;
3892 if (!(len%4) && !(addr%4))
3893 for (i = 0; i < len; i += 4)
3894 REG_WR(bp, addr + i, fill);
3895 else
3896 for (i = 0; i < len; i++)
3897 REG_WR8(bp, addr + i, fill);
Eilon Greenstein34f80b02008-06-23 20:33:01 -07003898
Eilon Greenstein34f80b02008-06-23 20:33:01 -07003899}
3900
Dmitry Kravkov523224a2010-10-06 03:23:26 +00003901/* helper: writes FP SP data to FW - data_size in dwords */
3902static inline void bnx2x_wr_fp_sb_data(struct bnx2x *bp,
3903 int fw_sb_id,
3904 u32 *sb_data_p,
3905 u32 data_size)
Eilon Greenstein34f80b02008-06-23 20:33:01 -07003906{
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02003907 int index;
Dmitry Kravkov523224a2010-10-06 03:23:26 +00003908 for (index = 0; index < data_size; index++)
3909 REG_WR(bp, BAR_CSTRORM_INTMEM +
3910 CSTORM_STATUS_BLOCK_DATA_OFFSET(fw_sb_id) +
3911 sizeof(u32)*index,
3912 *(sb_data_p + index));
Eilon Greenstein34f80b02008-06-23 20:33:01 -07003913}
3914
Dmitry Kravkov523224a2010-10-06 03:23:26 +00003915static inline void bnx2x_zero_fp_sb(struct bnx2x *bp, int fw_sb_id)
3916{
3917 u32 *sb_data_p;
3918 u32 data_size = 0;
Dmitry Kravkovf2e08992010-10-06 03:28:26 +00003919 struct hc_status_block_data_e2 sb_data_e2;
Dmitry Kravkov523224a2010-10-06 03:23:26 +00003920 struct hc_status_block_data_e1x sb_data_e1x;
3921
3922 /* disable the function first */
Dmitry Kravkovf2e08992010-10-06 03:28:26 +00003923 if (CHIP_IS_E2(bp)) {
3924 memset(&sb_data_e2, 0, sizeof(struct hc_status_block_data_e2));
3925 sb_data_e2.common.p_func.pf_id = HC_FUNCTION_DISABLED;
3926 sb_data_e2.common.p_func.vf_id = HC_FUNCTION_DISABLED;
3927 sb_data_e2.common.p_func.vf_valid = false;
3928 sb_data_p = (u32 *)&sb_data_e2;
3929 data_size = sizeof(struct hc_status_block_data_e2)/sizeof(u32);
3930 } else {
3931 memset(&sb_data_e1x, 0,
3932 sizeof(struct hc_status_block_data_e1x));
3933 sb_data_e1x.common.p_func.pf_id = HC_FUNCTION_DISABLED;
3934 sb_data_e1x.common.p_func.vf_id = HC_FUNCTION_DISABLED;
3935 sb_data_e1x.common.p_func.vf_valid = false;
3936 sb_data_p = (u32 *)&sb_data_e1x;
3937 data_size = sizeof(struct hc_status_block_data_e1x)/sizeof(u32);
3938 }
Dmitry Kravkov523224a2010-10-06 03:23:26 +00003939 bnx2x_wr_fp_sb_data(bp, fw_sb_id, sb_data_p, data_size);
3940
3941 bnx2x_fill(bp, BAR_CSTRORM_INTMEM +
3942 CSTORM_STATUS_BLOCK_OFFSET(fw_sb_id), 0,
3943 CSTORM_STATUS_BLOCK_SIZE);
3944 bnx2x_fill(bp, BAR_CSTRORM_INTMEM +
3945 CSTORM_SYNC_BLOCK_OFFSET(fw_sb_id), 0,
3946 CSTORM_SYNC_BLOCK_SIZE);
3947}
3948
3949/* helper: writes SP SB data to FW */
3950static inline void bnx2x_wr_sp_sb_data(struct bnx2x *bp,
3951 struct hc_sp_status_block_data *sp_sb_data)
Eilon Greenstein34f80b02008-06-23 20:33:01 -07003952{
3953 int func = BP_FUNC(bp);
Dmitry Kravkov523224a2010-10-06 03:23:26 +00003954 int i;
3955 for (i = 0; i < sizeof(struct hc_sp_status_block_data)/sizeof(u32); i++)
3956 REG_WR(bp, BAR_CSTRORM_INTMEM +
3957 CSTORM_SP_STATUS_BLOCK_DATA_OFFSET(func) +
3958 i*sizeof(u32),
3959 *((u32 *)sp_sb_data + i));
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02003960}
3961
Dmitry Kravkov523224a2010-10-06 03:23:26 +00003962static inline void bnx2x_zero_sp_sb(struct bnx2x *bp)
3963{
3964 int func = BP_FUNC(bp);
3965 struct hc_sp_status_block_data sp_sb_data;
3966 memset(&sp_sb_data, 0, sizeof(struct hc_sp_status_block_data));
3967
3968 sp_sb_data.p_func.pf_id = HC_FUNCTION_DISABLED;
3969 sp_sb_data.p_func.vf_id = HC_FUNCTION_DISABLED;
3970 sp_sb_data.p_func.vf_valid = false;
3971
3972 bnx2x_wr_sp_sb_data(bp, &sp_sb_data);
3973
3974 bnx2x_fill(bp, BAR_CSTRORM_INTMEM +
3975 CSTORM_SP_STATUS_BLOCK_OFFSET(func), 0,
3976 CSTORM_SP_STATUS_BLOCK_SIZE);
3977 bnx2x_fill(bp, BAR_CSTRORM_INTMEM +
3978 CSTORM_SP_SYNC_BLOCK_OFFSET(func), 0,
3979 CSTORM_SP_SYNC_BLOCK_SIZE);
3980
3981}
3982
3983
3984static inline
3985void bnx2x_setup_ndsb_state_machine(struct hc_status_block_sm *hc_sm,
3986 int igu_sb_id, int igu_seg_id)
3987{
3988 hc_sm->igu_sb_id = igu_sb_id;
3989 hc_sm->igu_seg_id = igu_seg_id;
3990 hc_sm->timer_value = 0xFF;
3991 hc_sm->time_to_expire = 0xFFFFFFFF;
3992}
3993
stephen hemminger8d962862010-10-21 07:50:56 +00003994static void bnx2x_init_sb(struct bnx2x *bp, dma_addr_t mapping, int vfid,
Dmitry Kravkov523224a2010-10-06 03:23:26 +00003995 u8 vf_valid, int fw_sb_id, int igu_sb_id)
3996{
3997 int igu_seg_id;
3998
Dmitry Kravkovf2e08992010-10-06 03:28:26 +00003999 struct hc_status_block_data_e2 sb_data_e2;
Dmitry Kravkov523224a2010-10-06 03:23:26 +00004000 struct hc_status_block_data_e1x sb_data_e1x;
4001 struct hc_status_block_sm *hc_sm_p;
Dmitry Kravkov523224a2010-10-06 03:23:26 +00004002 int data_size;
4003 u32 *sb_data_p;
4004
Dmitry Kravkovf2e08992010-10-06 03:28:26 +00004005 if (CHIP_INT_MODE_IS_BC(bp))
4006 igu_seg_id = HC_SEG_ACCESS_NORM;
4007 else
4008 igu_seg_id = IGU_SEG_ACCESS_NORM;
Dmitry Kravkov523224a2010-10-06 03:23:26 +00004009
4010 bnx2x_zero_fp_sb(bp, fw_sb_id);
4011
Dmitry Kravkovf2e08992010-10-06 03:28:26 +00004012 if (CHIP_IS_E2(bp)) {
4013 memset(&sb_data_e2, 0, sizeof(struct hc_status_block_data_e2));
4014 sb_data_e2.common.p_func.pf_id = BP_FUNC(bp);
4015 sb_data_e2.common.p_func.vf_id = vfid;
4016 sb_data_e2.common.p_func.vf_valid = vf_valid;
4017 sb_data_e2.common.p_func.vnic_id = BP_VN(bp);
4018 sb_data_e2.common.same_igu_sb_1b = true;
4019 sb_data_e2.common.host_sb_addr.hi = U64_HI(mapping);
4020 sb_data_e2.common.host_sb_addr.lo = U64_LO(mapping);
4021 hc_sm_p = sb_data_e2.common.state_machine;
Dmitry Kravkovf2e08992010-10-06 03:28:26 +00004022 sb_data_p = (u32 *)&sb_data_e2;
4023 data_size = sizeof(struct hc_status_block_data_e2)/sizeof(u32);
4024 } else {
4025 memset(&sb_data_e1x, 0,
4026 sizeof(struct hc_status_block_data_e1x));
4027 sb_data_e1x.common.p_func.pf_id = BP_FUNC(bp);
4028 sb_data_e1x.common.p_func.vf_id = 0xff;
4029 sb_data_e1x.common.p_func.vf_valid = false;
4030 sb_data_e1x.common.p_func.vnic_id = BP_VN(bp);
4031 sb_data_e1x.common.same_igu_sb_1b = true;
4032 sb_data_e1x.common.host_sb_addr.hi = U64_HI(mapping);
4033 sb_data_e1x.common.host_sb_addr.lo = U64_LO(mapping);
4034 hc_sm_p = sb_data_e1x.common.state_machine;
Dmitry Kravkovf2e08992010-10-06 03:28:26 +00004035 sb_data_p = (u32 *)&sb_data_e1x;
4036 data_size = sizeof(struct hc_status_block_data_e1x)/sizeof(u32);
4037 }
Dmitry Kravkov523224a2010-10-06 03:23:26 +00004038
4039 bnx2x_setup_ndsb_state_machine(&hc_sm_p[SM_RX_ID],
4040 igu_sb_id, igu_seg_id);
4041 bnx2x_setup_ndsb_state_machine(&hc_sm_p[SM_TX_ID],
4042 igu_sb_id, igu_seg_id);
4043
4044 DP(NETIF_MSG_HW, "Init FW SB %d\n", fw_sb_id);
4045
4046 /* write indecies to HW */
4047 bnx2x_wr_fp_sb_data(bp, fw_sb_id, sb_data_p, data_size);
4048}
4049
4050static void bnx2x_update_coalesce_sb_index(struct bnx2x *bp, u16 fw_sb_id,
4051 u8 sb_index, u8 disable, u16 usec)
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02004052{
Eilon Greenstein34f80b02008-06-23 20:33:01 -07004053 int port = BP_PORT(bp);
Dmitry Kravkov523224a2010-10-06 03:23:26 +00004054 u8 ticks = usec / BNX2X_BTR;
4055
4056 storm_memset_hc_timeout(bp, port, fw_sb_id, sb_index, ticks);
4057
4058 disable = disable ? 1 : (usec ? 0 : 1);
4059 storm_memset_hc_disable(bp, port, fw_sb_id, sb_index, disable);
4060}
4061
4062static void bnx2x_update_coalesce_sb(struct bnx2x *bp, u16 fw_sb_id,
4063 u16 tx_usec, u16 rx_usec)
4064{
4065 bnx2x_update_coalesce_sb_index(bp, fw_sb_id, U_SB_ETH_RX_CQ_INDEX,
4066 false, rx_usec);
4067 bnx2x_update_coalesce_sb_index(bp, fw_sb_id, C_SB_ETH_TX_CQ_INDEX,
4068 false, tx_usec);
4069}
Dmitry Kravkovf2e08992010-10-06 03:28:26 +00004070
Dmitry Kravkov523224a2010-10-06 03:23:26 +00004071static void bnx2x_init_def_sb(struct bnx2x *bp)
4072{
4073 struct host_sp_status_block *def_sb = bp->def_status_blk;
4074 dma_addr_t mapping = bp->def_status_blk_mapping;
4075 int igu_sp_sb_index;
4076 int igu_seg_id;
4077 int port = BP_PORT(bp);
Eilon Greenstein34f80b02008-06-23 20:33:01 -07004078 int func = BP_FUNC(bp);
Dmitry Kravkov523224a2010-10-06 03:23:26 +00004079 int reg_offset;
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02004080 u64 section;
Dmitry Kravkov523224a2010-10-06 03:23:26 +00004081 int index;
4082 struct hc_sp_status_block_data sp_sb_data;
4083 memset(&sp_sb_data, 0, sizeof(struct hc_sp_status_block_data));
4084
Dmitry Kravkovf2e08992010-10-06 03:28:26 +00004085 if (CHIP_INT_MODE_IS_BC(bp)) {
4086 igu_sp_sb_index = DEF_SB_IGU_ID;
4087 igu_seg_id = HC_SEG_ACCESS_DEF;
4088 } else {
4089 igu_sp_sb_index = bp->igu_dsb_id;
4090 igu_seg_id = IGU_SEG_ACCESS_DEF;
4091 }
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02004092
4093 /* ATTN */
Dmitry Kravkov523224a2010-10-06 03:23:26 +00004094 section = ((u64)mapping) + offsetof(struct host_sp_status_block,
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02004095 atten_status_block);
Dmitry Kravkov523224a2010-10-06 03:23:26 +00004096 def_sb->atten_status_block.status_block_id = igu_sp_sb_index;
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02004097
Eliezer Tamir49d66772008-02-28 11:53:13 -08004098 bp->attn_state = 0;
4099
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02004100 reg_offset = (port ? MISC_REG_AEU_ENABLE1_FUNC_1_OUT_0 :
4101 MISC_REG_AEU_ENABLE1_FUNC_0_OUT_0);
Eilon Greenstein34f80b02008-06-23 20:33:01 -07004102 for (index = 0; index < MAX_DYNAMIC_ATTN_GRPS; index++) {
Dmitry Kravkov523224a2010-10-06 03:23:26 +00004103 int sindex;
4104 /* take care of sig[0]..sig[4] */
4105 for (sindex = 0; sindex < 4; sindex++)
4106 bp->attn_group[index].sig[sindex] =
4107 REG_RD(bp, reg_offset + sindex*0x4 + 0x10*index);
Dmitry Kravkovf2e08992010-10-06 03:28:26 +00004108
4109 if (CHIP_IS_E2(bp))
4110 /*
4111 * enable5 is separate from the rest of the registers,
4112 * and therefore the address skip is 4
4113 * and not 16 between the different groups
4114 */
4115 bp->attn_group[index].sig[4] = REG_RD(bp,
4116 reg_offset + 0x10 + 0x4*index);
4117 else
4118 bp->attn_group[index].sig[4] = 0;
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02004119 }
4120
Dmitry Kravkovf2e08992010-10-06 03:28:26 +00004121 if (bp->common.int_block == INT_BLOCK_HC) {
4122 reg_offset = (port ? HC_REG_ATTN_MSG1_ADDR_L :
4123 HC_REG_ATTN_MSG0_ADDR_L);
4124
4125 REG_WR(bp, reg_offset, U64_LO(section));
4126 REG_WR(bp, reg_offset + 4, U64_HI(section));
4127 } else if (CHIP_IS_E2(bp)) {
4128 REG_WR(bp, IGU_REG_ATTN_MSG_ADDR_L, U64_LO(section));
4129 REG_WR(bp, IGU_REG_ATTN_MSG_ADDR_H, U64_HI(section));
4130 }
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02004131
Dmitry Kravkov523224a2010-10-06 03:23:26 +00004132 section = ((u64)mapping) + offsetof(struct host_sp_status_block,
4133 sp_sb);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02004134
Dmitry Kravkov523224a2010-10-06 03:23:26 +00004135 bnx2x_zero_sp_sb(bp);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02004136
Dmitry Kravkov523224a2010-10-06 03:23:26 +00004137 sp_sb_data.host_sb_addr.lo = U64_LO(section);
4138 sp_sb_data.host_sb_addr.hi = U64_HI(section);
4139 sp_sb_data.igu_sb_id = igu_sp_sb_index;
4140 sp_sb_data.igu_seg_id = igu_seg_id;
4141 sp_sb_data.p_func.pf_id = func;
Dmitry Kravkovf2e08992010-10-06 03:28:26 +00004142 sp_sb_data.p_func.vnic_id = BP_VN(bp);
Dmitry Kravkov523224a2010-10-06 03:23:26 +00004143 sp_sb_data.p_func.vf_id = 0xff;
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02004144
Dmitry Kravkov523224a2010-10-06 03:23:26 +00004145 bnx2x_wr_sp_sb_data(bp, &sp_sb_data);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02004146
Yitchak Gertnerbb2a0f72008-06-23 20:33:36 -07004147 bp->stats_pending = 0;
Yitchak Gertner66e855f2008-08-13 15:49:05 -07004148 bp->set_mac_pending = 0;
Yitchak Gertnerbb2a0f72008-06-23 20:33:36 -07004149
Dmitry Kravkov523224a2010-10-06 03:23:26 +00004150 bnx2x_ack_sb(bp, bp->igu_dsb_id, USTORM_ID, 0, IGU_INT_ENABLE, 0);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02004151}
4152
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00004153void bnx2x_update_coalesce(struct bnx2x *bp)
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02004154{
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02004155 int i;
4156
Vladislav Zolotarovec6ba942010-12-13 05:44:01 +00004157 for_each_eth_queue(bp, i)
Dmitry Kravkov523224a2010-10-06 03:23:26 +00004158 bnx2x_update_coalesce_sb(bp, bp->fp[i].fw_sb_id,
Ariel Elior423cfa7e2011-03-14 13:43:22 -07004159 bp->tx_ticks, bp->rx_ticks);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02004160}
4161
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02004162static void bnx2x_init_sp_ring(struct bnx2x *bp)
4163{
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02004164 spin_lock_init(&bp->spq_lock);
Vladislav Zolotarov6e30dd42011-02-06 11:25:41 -08004165 atomic_set(&bp->cq_spq_left, MAX_SPQ_PENDING);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02004166
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02004167 bp->spq_prod_idx = 0;
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02004168 bp->dsb_sp_prod = BNX2X_SP_DSB_INDEX;
4169 bp->spq_prod_bd = bp->spq;
4170 bp->spq_last_bd = bp->spq_prod_bd + MAX_SP_DESC_CNT;
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02004171}
4172
Dmitry Kravkov523224a2010-10-06 03:23:26 +00004173static void bnx2x_init_eq_ring(struct bnx2x *bp)
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02004174{
4175 int i;
Dmitry Kravkov523224a2010-10-06 03:23:26 +00004176 for (i = 1; i <= NUM_EQ_PAGES; i++) {
4177 union event_ring_elem *elem =
4178 &bp->eq_ring[EQ_DESC_CNT_PAGE * i - 1];
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02004179
Dmitry Kravkov523224a2010-10-06 03:23:26 +00004180 elem->next_page.addr.hi =
4181 cpu_to_le32(U64_HI(bp->eq_mapping +
4182 BCM_PAGE_SIZE * (i % NUM_EQ_PAGES)));
4183 elem->next_page.addr.lo =
4184 cpu_to_le32(U64_LO(bp->eq_mapping +
4185 BCM_PAGE_SIZE*(i % NUM_EQ_PAGES)));
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02004186 }
Dmitry Kravkov523224a2010-10-06 03:23:26 +00004187 bp->eq_cons = 0;
4188 bp->eq_prod = NUM_EQ_DESC;
4189 bp->eq_cons_sb = BNX2X_EQ_INDEX;
Vladislav Zolotarov6e30dd42011-02-06 11:25:41 -08004190 /* we want a warning message before it gets rought... */
4191 atomic_set(&bp->eq_spq_left,
4192 min_t(int, MAX_SP_DESC_CNT - MAX_SPQ_PENDING, NUM_EQ_DESC) - 1);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02004193}
4194
Tom Herbertab532cf2011-02-16 10:27:02 +00004195void bnx2x_push_indir_table(struct bnx2x *bp)
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02004196{
Eilon Greenstein26c8fa42009-01-14 21:29:55 -08004197 int func = BP_FUNC(bp);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02004198 int i;
4199
Eilon Greenstein555f6c72009-02-12 08:36:11 +00004200 if (bp->multi_mode == ETH_RSS_MODE_DISABLED)
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02004201 return;
4202
4203 for (i = 0; i < TSTORM_INDIRECTION_TABLE_SIZE; i++)
Eilon Greenstein34f80b02008-06-23 20:33:01 -07004204 REG_WR8(bp, BAR_TSTRORM_INTMEM +
Eilon Greenstein26c8fa42009-01-14 21:29:55 -08004205 TSTORM_INDIRECTION_TABLE_OFFSET(func) + i,
Tom Herbertab532cf2011-02-16 10:27:02 +00004206 bp->fp->cl_id + bp->rx_indir_table[i]);
4207}
4208
4209static void bnx2x_init_ind_table(struct bnx2x *bp)
4210{
4211 int i;
4212
4213 for (i = 0; i < TSTORM_INDIRECTION_TABLE_SIZE; i++)
4214 bp->rx_indir_table[i] = i % BNX2X_NUM_ETH_QUEUES(bp);
4215
4216 bnx2x_push_indir_table(bp);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02004217}
4218
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00004219void bnx2x_set_storm_rx_mode(struct bnx2x *bp)
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02004220{
Eilon Greenstein34f80b02008-06-23 20:33:01 -07004221 int mode = bp->rx_mode;
Vladislav Zolotarovec6ba942010-12-13 05:44:01 +00004222 int port = BP_PORT(bp);
Dmitry Kravkov523224a2010-10-06 03:23:26 +00004223 u16 cl_id;
Vladislav Zolotarovec6ba942010-12-13 05:44:01 +00004224 u32 def_q_filters = 0;
Dmitry Kravkov523224a2010-10-06 03:23:26 +00004225
Eilon Greenstein581ce432009-07-29 00:20:04 +00004226 /* All but management unicast packets should pass to the host as well */
4227 u32 llh_mask =
4228 NIG_LLH0_BRB1_DRV_MASK_REG_LLH0_BRB1_DRV_MASK_BRCST |
4229 NIG_LLH0_BRB1_DRV_MASK_REG_LLH0_BRB1_DRV_MASK_MLCST |
4230 NIG_LLH0_BRB1_DRV_MASK_REG_LLH0_BRB1_DRV_MASK_VLAN |
4231 NIG_LLH0_BRB1_DRV_MASK_REG_LLH0_BRB1_DRV_MASK_NO_VLAN;
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02004232
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02004233 switch (mode) {
4234 case BNX2X_RX_MODE_NONE: /* no Rx */
Vladislav Zolotarovec6ba942010-12-13 05:44:01 +00004235 def_q_filters = BNX2X_ACCEPT_NONE;
4236#ifdef BCM_CNIC
4237 if (!NO_FCOE(bp)) {
4238 cl_id = bnx2x_fcoe(bp, cl_id);
4239 bnx2x_rxq_set_mac_filters(bp, cl_id, BNX2X_ACCEPT_NONE);
4240 }
4241#endif
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02004242 break;
Eilon Greenstein356e2382009-02-12 08:38:32 +00004243
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02004244 case BNX2X_RX_MODE_NORMAL:
Vladislav Zolotarovec6ba942010-12-13 05:44:01 +00004245 def_q_filters |= BNX2X_ACCEPT_UNICAST | BNX2X_ACCEPT_BROADCAST |
4246 BNX2X_ACCEPT_MULTICAST;
4247#ifdef BCM_CNIC
Vladislav Zolotarov711c9142011-02-06 11:21:49 -08004248 if (!NO_FCOE(bp)) {
4249 cl_id = bnx2x_fcoe(bp, cl_id);
4250 bnx2x_rxq_set_mac_filters(bp, cl_id,
4251 BNX2X_ACCEPT_UNICAST |
4252 BNX2X_ACCEPT_MULTICAST);
4253 }
Vladislav Zolotarovec6ba942010-12-13 05:44:01 +00004254#endif
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02004255 break;
Eilon Greenstein356e2382009-02-12 08:38:32 +00004256
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02004257 case BNX2X_RX_MODE_ALLMULTI:
Vladislav Zolotarovec6ba942010-12-13 05:44:01 +00004258 def_q_filters |= BNX2X_ACCEPT_UNICAST | BNX2X_ACCEPT_BROADCAST |
4259 BNX2X_ACCEPT_ALL_MULTICAST;
4260#ifdef BCM_CNIC
Vladislav Zolotarov711c9142011-02-06 11:21:49 -08004261 /*
4262 * Prevent duplication of multicast packets by configuring FCoE
4263 * L2 Client to receive only matched unicast frames.
4264 */
4265 if (!NO_FCOE(bp)) {
4266 cl_id = bnx2x_fcoe(bp, cl_id);
4267 bnx2x_rxq_set_mac_filters(bp, cl_id,
4268 BNX2X_ACCEPT_UNICAST);
4269 }
Vladislav Zolotarovec6ba942010-12-13 05:44:01 +00004270#endif
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02004271 break;
Eilon Greenstein356e2382009-02-12 08:38:32 +00004272
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02004273 case BNX2X_RX_MODE_PROMISC:
Vladislav Zolotarovec6ba942010-12-13 05:44:01 +00004274 def_q_filters |= BNX2X_PROMISCUOUS_MODE;
4275#ifdef BCM_CNIC
Vladislav Zolotarov711c9142011-02-06 11:21:49 -08004276 /*
4277 * Prevent packets duplication by configuring DROP_ALL for FCoE
4278 * L2 Client.
4279 */
4280 if (!NO_FCOE(bp)) {
4281 cl_id = bnx2x_fcoe(bp, cl_id);
4282 bnx2x_rxq_set_mac_filters(bp, cl_id, BNX2X_ACCEPT_NONE);
4283 }
Vladislav Zolotarovec6ba942010-12-13 05:44:01 +00004284#endif
Eilon Greenstein581ce432009-07-29 00:20:04 +00004285 /* pass management unicast packets as well */
4286 llh_mask |= NIG_LLH0_BRB1_DRV_MASK_REG_LLH0_BRB1_DRV_MASK_UNCST;
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02004287 break;
Eilon Greenstein356e2382009-02-12 08:38:32 +00004288
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02004289 default:
Eilon Greenstein34f80b02008-06-23 20:33:01 -07004290 BNX2X_ERR("BAD rx mode (%d)\n", mode);
4291 break;
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02004292 }
4293
Vladislav Zolotarovec6ba942010-12-13 05:44:01 +00004294 cl_id = BP_L_ID(bp);
4295 bnx2x_rxq_set_mac_filters(bp, cl_id, def_q_filters);
4296
Eilon Greenstein581ce432009-07-29 00:20:04 +00004297 REG_WR(bp,
Vladislav Zolotarovec6ba942010-12-13 05:44:01 +00004298 (port ? NIG_REG_LLH1_BRB1_DRV_MASK :
4299 NIG_REG_LLH0_BRB1_DRV_MASK), llh_mask);
Eilon Greenstein581ce432009-07-29 00:20:04 +00004300
Dmitry Kravkov523224a2010-10-06 03:23:26 +00004301 DP(NETIF_MSG_IFUP, "rx mode %d\n"
4302 "drop_ucast 0x%x\ndrop_mcast 0x%x\ndrop_bcast 0x%x\n"
Vladislav Zolotarovec6ba942010-12-13 05:44:01 +00004303 "accp_ucast 0x%x\naccp_mcast 0x%x\naccp_bcast 0x%x\n"
4304 "unmatched_ucast 0x%x\n", mode,
Dmitry Kravkov523224a2010-10-06 03:23:26 +00004305 bp->mac_filters.ucast_drop_all,
4306 bp->mac_filters.mcast_drop_all,
4307 bp->mac_filters.bcast_drop_all,
4308 bp->mac_filters.ucast_accept_all,
4309 bp->mac_filters.mcast_accept_all,
Vladislav Zolotarovec6ba942010-12-13 05:44:01 +00004310 bp->mac_filters.bcast_accept_all,
4311 bp->mac_filters.unmatched_unicast
Dmitry Kravkov523224a2010-10-06 03:23:26 +00004312 );
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02004313
Dmitry Kravkov523224a2010-10-06 03:23:26 +00004314 storm_memset_mac_filters(bp, &bp->mac_filters, BP_FUNC(bp));
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02004315}
4316
Eilon Greenstein471de712008-08-13 15:49:35 -07004317static void bnx2x_init_internal_common(struct bnx2x *bp)
4318{
4319 int i;
4320
Dmitry Kravkov523224a2010-10-06 03:23:26 +00004321 if (!CHIP_IS_E1(bp)) {
4322
4323 /* xstorm needs to know whether to add ovlan to packets or not,
4324 * in switch-independent we'll write 0 to here... */
4325 REG_WR8(bp, BAR_XSTRORM_INTMEM + XSTORM_FUNCTION_MODE_OFFSET,
Dmitry Kravkovfb3bff12010-10-06 03:26:40 +00004326 bp->mf_mode);
Dmitry Kravkov523224a2010-10-06 03:23:26 +00004327 REG_WR8(bp, BAR_TSTRORM_INTMEM + TSTORM_FUNCTION_MODE_OFFSET,
Dmitry Kravkovfb3bff12010-10-06 03:26:40 +00004328 bp->mf_mode);
Dmitry Kravkov523224a2010-10-06 03:23:26 +00004329 REG_WR8(bp, BAR_CSTRORM_INTMEM + CSTORM_FUNCTION_MODE_OFFSET,
Dmitry Kravkovfb3bff12010-10-06 03:26:40 +00004330 bp->mf_mode);
Dmitry Kravkov523224a2010-10-06 03:23:26 +00004331 REG_WR8(bp, BAR_USTRORM_INTMEM + USTORM_FUNCTION_MODE_OFFSET,
Dmitry Kravkovfb3bff12010-10-06 03:26:40 +00004332 bp->mf_mode);
Dmitry Kravkov523224a2010-10-06 03:23:26 +00004333 }
4334
Dmitry Kravkov0793f83f2010-12-01 12:39:28 -08004335 if (IS_MF_SI(bp))
4336 /*
4337 * In switch independent mode, the TSTORM needs to accept
4338 * packets that failed classification, since approximate match
4339 * mac addresses aren't written to NIG LLH
4340 */
4341 REG_WR8(bp, BAR_TSTRORM_INTMEM +
4342 TSTORM_ACCEPT_CLASSIFY_FAILED_OFFSET, 2);
4343
Eilon Greenstein471de712008-08-13 15:49:35 -07004344 /* Zero this manually as its initialization is
4345 currently missing in the initTool */
4346 for (i = 0; i < (USTORM_AGG_DATA_SIZE >> 2); i++)
4347 REG_WR(bp, BAR_USTRORM_INTMEM +
4348 USTORM_AGG_DATA_OFFSET + i * 4, 0);
Dmitry Kravkovf2e08992010-10-06 03:28:26 +00004349 if (CHIP_IS_E2(bp)) {
4350 REG_WR8(bp, BAR_CSTRORM_INTMEM + CSTORM_IGU_MODE_OFFSET,
4351 CHIP_INT_MODE_IS_BC(bp) ?
4352 HC_IGU_BC_MODE : HC_IGU_NBC_MODE);
4353 }
Eilon Greenstein471de712008-08-13 15:49:35 -07004354}
4355
/* Per-port internal memory init; currently only the DCB/PFC state */
static void bnx2x_init_internal_port(struct bnx2x *bp)
{
	/* port */
	bnx2x_dcb_init_intmem_pfc(bp);
}
4361
Eilon Greenstein471de712008-08-13 15:49:35 -07004362static void bnx2x_init_internal(struct bnx2x *bp, u32 load_code)
4363{
4364 switch (load_code) {
4365 case FW_MSG_CODE_DRV_LOAD_COMMON:
Dmitry Kravkovf2e08992010-10-06 03:28:26 +00004366 case FW_MSG_CODE_DRV_LOAD_COMMON_CHIP:
Eilon Greenstein471de712008-08-13 15:49:35 -07004367 bnx2x_init_internal_common(bp);
4368 /* no break */
4369
4370 case FW_MSG_CODE_DRV_LOAD_PORT:
4371 bnx2x_init_internal_port(bp);
4372 /* no break */
4373
4374 case FW_MSG_CODE_DRV_LOAD_FUNCTION:
Dmitry Kravkov523224a2010-10-06 03:23:26 +00004375 /* internal memory per function is
4376 initialized inside bnx2x_pf_init */
Eilon Greenstein471de712008-08-13 15:49:35 -07004377 break;
4378
4379 default:
4380 BNX2X_ERR("Unknown load_code (0x%x) from MCP\n", load_code);
4381 break;
4382 }
4383}
4384
Dmitry Kravkov523224a2010-10-06 03:23:26 +00004385static void bnx2x_init_fp_sb(struct bnx2x *bp, int fp_idx)
4386{
4387 struct bnx2x_fastpath *fp = &bp->fp[fp_idx];
4388
4389 fp->state = BNX2X_FP_STATE_CLOSED;
4390
Dmitry Kravkovb3b83c32011-05-04 23:50:33 +00004391 fp->cid = fp_idx;
Dmitry Kravkov523224a2010-10-06 03:23:26 +00004392 fp->cl_id = BP_L_ID(bp) + fp_idx;
4393 fp->fw_sb_id = bp->base_fw_ndsb + fp->cl_id + CNIC_CONTEXT_USE;
4394 fp->igu_sb_id = bp->igu_base_sb + fp_idx + CNIC_CONTEXT_USE;
4395 /* qZone id equals to FW (per path) client id */
4396 fp->cl_qzone_id = fp->cl_id +
Dmitry Kravkovf2e08992010-10-06 03:28:26 +00004397 BP_PORT(bp)*(CHIP_IS_E2(bp) ? ETH_MAX_RX_CLIENTS_E2 :
4398 ETH_MAX_RX_CLIENTS_E1H);
Dmitry Kravkov523224a2010-10-06 03:23:26 +00004399 /* init shortcut */
Dmitry Kravkovf2e08992010-10-06 03:28:26 +00004400 fp->ustorm_rx_prods_offset = CHIP_IS_E2(bp) ?
4401 USTORM_RX_PRODS_E2_OFFSET(fp->cl_qzone_id) :
Dmitry Kravkov523224a2010-10-06 03:23:26 +00004402 USTORM_RX_PRODS_E1X_OFFSET(BP_PORT(bp), fp->cl_id);
4403 /* Setup SB indicies */
4404 fp->rx_cons_sb = BNX2X_RX_SB_INDEX;
4405 fp->tx_cons_sb = BNX2X_TX_SB_INDEX;
4406
4407 DP(NETIF_MSG_IFUP, "queue[%d]: bnx2x_init_sb(%p,%p) "
4408 "cl_id %d fw_sb %d igu_sb %d\n",
4409 fp_idx, bp, fp->status_blk.e1x_sb, fp->cl_id, fp->fw_sb_id,
4410 fp->igu_sb_id);
4411 bnx2x_init_sb(bp, fp->status_blk_mapping, BNX2X_VF_ID_INVALID, false,
4412 fp->fw_sb_id, fp->igu_sb_id);
4413
4414 bnx2x_update_fpsb_idx(fp);
4415}
4416
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00004417void bnx2x_nic_init(struct bnx2x *bp, u32 load_code)
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02004418{
4419 int i;
4420
Vladislav Zolotarovec6ba942010-12-13 05:44:01 +00004421 for_each_eth_queue(bp, i)
Dmitry Kravkov523224a2010-10-06 03:23:26 +00004422 bnx2x_init_fp_sb(bp, i);
Michael Chan37b091b2009-10-10 13:46:55 +00004423#ifdef BCM_CNIC
Vladislav Zolotarovec6ba942010-12-13 05:44:01 +00004424 if (!NO_FCOE(bp))
4425 bnx2x_init_fcoe_fp(bp);
Dmitry Kravkov523224a2010-10-06 03:23:26 +00004426
4427 bnx2x_init_sb(bp, bp->cnic_sb_mapping,
4428 BNX2X_VF_ID_INVALID, false,
4429 CNIC_SB_ID(bp), CNIC_IGU_SB_ID(bp));
4430
Michael Chan37b091b2009-10-10 13:46:55 +00004431#endif
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02004432
Yaniv Rosner020c7e32011-05-31 21:28:43 +00004433 /* Initialize MOD_ABS interrupts */
4434 bnx2x_init_mod_abs_int(bp, &bp->link_vars, bp->common.chip_id,
4435 bp->common.shmem_base, bp->common.shmem2_base,
4436 BP_PORT(bp));
Eilon Greenstein16119782009-03-02 07:59:27 +00004437 /* ensure status block indices were read */
4438 rmb();
4439
Dmitry Kravkov523224a2010-10-06 03:23:26 +00004440 bnx2x_init_def_sb(bp);
Eilon Greenstein5c862842008-08-13 15:51:48 -07004441 bnx2x_update_dsb_idx(bp);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02004442 bnx2x_init_rx_rings(bp);
Dmitry Kravkov523224a2010-10-06 03:23:26 +00004443 bnx2x_init_tx_rings(bp);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02004444 bnx2x_init_sp_ring(bp);
Dmitry Kravkov523224a2010-10-06 03:23:26 +00004445 bnx2x_init_eq_ring(bp);
Eilon Greenstein471de712008-08-13 15:49:35 -07004446 bnx2x_init_internal(bp, load_code);
Dmitry Kravkov523224a2010-10-06 03:23:26 +00004447 bnx2x_pf_init(bp);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02004448 bnx2x_init_ind_table(bp);
Eilon Greenstein0ef00452009-01-14 21:31:08 -08004449 bnx2x_stats_init(bp);
4450
Eilon Greenstein0ef00452009-01-14 21:31:08 -08004451 /* flush all before enabling interrupts */
4452 mb();
4453 mmiowb();
4454
Eliezer Tamir615f8fd2008-02-28 11:54:54 -08004455 bnx2x_int_enable(bp);
Eilon Greensteineb8da202009-07-21 05:47:30 +00004456
4457 /* Check for SPIO5 */
4458 bnx2x_attn_int_deasserted0(bp,
4459 REG_RD(bp, MISC_REG_AEU_AFTER_INVERT_1_FUNC_0 + BP_PORT(bp)*4) &
4460 AEU_INPUTS_ATTN_BITS_SPIO5);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02004461}
4462
4463/* end of nic init */
4464
4465/*
4466 * gzip service functions
4467 */
4468
4469static int bnx2x_gunzip_init(struct bnx2x *bp)
4470{
FUJITA Tomonori1a983142010-04-04 01:51:03 +00004471 bp->gunzip_buf = dma_alloc_coherent(&bp->pdev->dev, FW_BUF_SIZE,
4472 &bp->gunzip_mapping, GFP_KERNEL);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02004473 if (bp->gunzip_buf == NULL)
4474 goto gunzip_nomem1;
4475
4476 bp->strm = kmalloc(sizeof(*bp->strm), GFP_KERNEL);
4477 if (bp->strm == NULL)
4478 goto gunzip_nomem2;
4479
4480 bp->strm->workspace = kmalloc(zlib_inflate_workspacesize(),
4481 GFP_KERNEL);
4482 if (bp->strm->workspace == NULL)
4483 goto gunzip_nomem3;
4484
4485 return 0;
4486
4487gunzip_nomem3:
4488 kfree(bp->strm);
4489 bp->strm = NULL;
4490
4491gunzip_nomem2:
FUJITA Tomonori1a983142010-04-04 01:51:03 +00004492 dma_free_coherent(&bp->pdev->dev, FW_BUF_SIZE, bp->gunzip_buf,
4493 bp->gunzip_mapping);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02004494 bp->gunzip_buf = NULL;
4495
4496gunzip_nomem1:
Vladislav Zolotarovcdaa7cb2010-04-19 01:13:57 +00004497 netdev_err(bp->dev, "Cannot allocate firmware buffer for"
4498 " un-compression\n");
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02004499 return -ENOMEM;
4500}
4501
4502static void bnx2x_gunzip_end(struct bnx2x *bp)
4503{
Dmitry Kravkovb3b83c32011-05-04 23:50:33 +00004504 if (bp->strm) {
4505 kfree(bp->strm->workspace);
4506 kfree(bp->strm);
4507 bp->strm = NULL;
4508 }
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02004509
4510 if (bp->gunzip_buf) {
FUJITA Tomonori1a983142010-04-04 01:51:03 +00004511 dma_free_coherent(&bp->pdev->dev, FW_BUF_SIZE, bp->gunzip_buf,
4512 bp->gunzip_mapping);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02004513 bp->gunzip_buf = NULL;
4514 }
4515}
4516
/* Decompress a gzip-wrapped firmware blob of @len bytes at @zbuf into
 * bp->gunzip_buf (capacity FW_BUF_SIZE, allocated by bnx2x_gunzip_init).
 * On success returns 0 and leaves the output length, in 32-bit words,
 * in bp->gunzip_outlen.  Returns -EINVAL on a bad gzip header or a
 * zlib error code on decompression failure.
 */
static int bnx2x_gunzip(struct bnx2x *bp, const u8 *zbuf, int len)
{
	int n, rc;

	/* check gzip header: magic bytes 0x1f 0x8b and the deflate
	 * compression-method byte (per the gzip file format) */
	if ((zbuf[0] != 0x1f) || (zbuf[1] != 0x8b) || (zbuf[2] != Z_DEFLATED)) {
		BNX2X_ERR("Bad gzip header\n");
		return -EINVAL;
	}

	/* fixed gzip header is 10 bytes */
	n = 10;

/* FLG bit 3: an original-file-name field follows the fixed header */
#define FNAME				0x8

	/* skip the NUL-terminated file name, if present */
	if (zbuf[3] & FNAME)
		while ((zbuf[n++] != 0) && (n < len));

	/* the cast drops const to match zlib's non-const next_in */
	bp->strm->next_in = (typeof(bp->strm->next_in))zbuf + n;
	bp->strm->avail_in = len - n;
	bp->strm->next_out = bp->gunzip_buf;
	bp->strm->avail_out = FW_BUF_SIZE;

	/* negative window bits: raw deflate, no zlib header expected */
	rc = zlib_inflateInit2(bp->strm, -MAX_WBITS);
	if (rc != Z_OK)
		return rc;

	rc = zlib_inflate(bp->strm, Z_FINISH);
	if ((rc != Z_OK) && (rc != Z_STREAM_END))
		netdev_err(bp->dev, "Firmware decompression error: %s\n",
			   bp->strm->msg);

	bp->gunzip_outlen = (FW_BUF_SIZE - bp->strm->avail_out);
	/* output is consumed as 32-bit words, so it must be 4-aligned */
	if (bp->gunzip_outlen & 0x3)
		netdev_err(bp->dev, "Firmware decompression error:"
				    " gunzip_outlen (%d) not aligned\n",
			   bp->gunzip_outlen);
	/* convert byte count to dword count */
	bp->gunzip_outlen >>= 2;

	zlib_inflateEnd(bp->strm);

	if (rc == Z_STREAM_END)
		return 0;

	return rc;
}
4562
4563/* nic load/unload */
4564
4565/*
Eilon Greenstein34f80b02008-06-23 20:33:01 -07004566 * General service functions
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02004567 */
4568
4569/* send a NIG loopback debug packet */
4570static void bnx2x_lb_pckt(struct bnx2x *bp)
4571{
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02004572 u32 wb_write[3];
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02004573
4574 /* Ethernet source and destination addresses */
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02004575 wb_write[0] = 0x55555555;
4576 wb_write[1] = 0x55555555;
Eilon Greenstein34f80b02008-06-23 20:33:01 -07004577 wb_write[2] = 0x20; /* SOP */
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02004578 REG_WR_DMAE(bp, NIG_REG_DEBUG_PACKET_LB, wb_write, 3);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02004579
4580 /* NON-IP protocol */
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02004581 wb_write[0] = 0x09000000;
4582 wb_write[1] = 0x55555555;
Eilon Greenstein34f80b02008-06-23 20:33:01 -07004583 wb_write[2] = 0x10; /* EOP, eop_bvalid = 0 */
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02004584 REG_WR_DMAE(bp, NIG_REG_DEBUG_PACKET_LB, wb_write, 3);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02004585}
4586
4587/* some of the internal memories
4588 * are not directly readable from the driver
4589 * to test them we send debug packets
4590 */
static int bnx2x_int_mem_test(struct bnx2x *bp)
{
	int factor;
	int count, i;
	u32 val = 0;

	/* FPGA/emulation platforms run far slower than real silicon, so
	 * scale up every polling budget below accordingly. */
	if (CHIP_REV_IS_FPGA(bp))
		factor = 120;
	else if (CHIP_REV_IS_EMUL(bp))
		factor = 200;
	else
		factor = 1;

	/* Disable inputs of parser neighbor blocks */
	REG_WR(bp, TSDM_REG_ENABLE_IN1, 0x0);
	REG_WR(bp, TCM_REG_PRS_IFEN, 0x0);
	REG_WR(bp, CFC_REG_DEBUG0, 0x1);
	REG_WR(bp, NIG_REG_PRS_REQ_IN_EN, 0x0);

	/* Write 0 to parser credits for CFC search request */
	REG_WR(bp, PRS_REG_CFC_SEARCH_INITIAL_CREDIT, 0x0);

	/* send Ethernet packet */
	bnx2x_lb_pckt(bp);

	/* TODO do i reset NIG statistic? */
	/* Wait until NIG register shows 1 packet of size 0x10 */
	count = 1000 * factor;
	while (count) {

		/* NIG byte counter is read via DMAE into the slowpath
		 * scratch area, then picked up from wb_data[0] */
		bnx2x_read_dmae(bp, NIG_REG_STAT2_BRB_OCTET, 2);
		val = *bnx2x_sp(bp, wb_data[0]);
		if (val == 0x10)
			break;

		msleep(10);
		count--;
	}
	if (val != 0x10) {
		BNX2X_ERR("NIG timeout val = 0x%x\n", val);
		return -1;
	}

	/* Wait until PRS register shows 1 packet */
	count = 1000 * factor;
	while (count) {
		val = REG_RD(bp, PRS_REG_NUM_OF_PACKETS);
		if (val == 1)
			break;

		msleep(10);
		count--;
	}
	if (val != 0x1) {
		BNX2X_ERR("PRS timeout val = 0x%x\n", val);
		return -2;
	}

	/* Reset and init BRB, PRS */
	REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_CLEAR, 0x03);
	msleep(50);
	REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_SET, 0x03);
	msleep(50);
	bnx2x_init_block(bp, BRB1_BLOCK, COMMON_STAGE);
	bnx2x_init_block(bp, PRS_BLOCK, COMMON_STAGE);

	DP(NETIF_MSG_HW, "part2\n");

	/* Second pass: same test, but with a burst of packets and a
	 * check that parser credits release queued packets correctly */

	/* Disable inputs of parser neighbor blocks */
	REG_WR(bp, TSDM_REG_ENABLE_IN1, 0x0);
	REG_WR(bp, TCM_REG_PRS_IFEN, 0x0);
	REG_WR(bp, CFC_REG_DEBUG0, 0x1);
	REG_WR(bp, NIG_REG_PRS_REQ_IN_EN, 0x0);

	/* Write 0 to parser credits for CFC search request */
	REG_WR(bp, PRS_REG_CFC_SEARCH_INITIAL_CREDIT, 0x0);

	/* send 10 Ethernet packets */
	for (i = 0; i < 10; i++)
		bnx2x_lb_pckt(bp);

	/* Wait until NIG register shows 10 + 1
	   packets of size 11*0x10 = 0xb0 */
	count = 1000 * factor;
	while (count) {

		bnx2x_read_dmae(bp, NIG_REG_STAT2_BRB_OCTET, 2);
		val = *bnx2x_sp(bp, wb_data[0]);
		if (val == 0xb0)
			break;

		msleep(10);
		count--;
	}
	if (val != 0xb0) {
		BNX2X_ERR("NIG timeout val = 0x%x\n", val);
		return -3;
	}

	/* Wait until PRS register shows 2 packets */
	val = REG_RD(bp, PRS_REG_NUM_OF_PACKETS);
	if (val != 2)
		BNX2X_ERR("PRS timeout val = 0x%x\n", val);

	/* Write 1 to parser credits for CFC search request */
	REG_WR(bp, PRS_REG_CFC_SEARCH_INITIAL_CREDIT, 0x1);

	/* Wait until PRS register shows 3 packets */
	msleep(10 * factor);
	/* Wait until NIG register shows 1 packet of size 0x10 */
	val = REG_RD(bp, PRS_REG_NUM_OF_PACKETS);
	if (val != 3)
		BNX2X_ERR("PRS timeout val = 0x%x\n", val);

	/* clear NIG EOP FIFO by draining the 11 queued entries, then
	 * verify the FIFO reports empty */
	for (i = 0; i < 11; i++)
		REG_RD(bp, NIG_REG_INGRESS_EOP_LB_FIFO);
	val = REG_RD(bp, NIG_REG_INGRESS_EOP_LB_EMPTY);
	if (val != 1) {
		BNX2X_ERR("clear of NIG failed\n");
		return -4;
	}

	/* Reset and init BRB, PRS, NIG */
	REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_CLEAR, 0x03);
	msleep(50);
	REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_SET, 0x03);
	msleep(50);
	bnx2x_init_block(bp, BRB1_BLOCK, COMMON_STAGE);
	bnx2x_init_block(bp, PRS_BLOCK, COMMON_STAGE);
#ifndef BCM_CNIC
	/* set NIC mode */
	REG_WR(bp, PRS_REG_NIC_MODE, 1);
#endif

	/* Enable inputs of parser neighbor blocks */
	REG_WR(bp, TSDM_REG_ENABLE_IN1, 0x7fffffff);
	REG_WR(bp, TCM_REG_PRS_IFEN, 0x1);
	REG_WR(bp, CFC_REG_DEBUG0, 0x0);
	REG_WR(bp, NIG_REG_PRS_REQ_IN_EN, 0x1);

	DP(NETIF_MSG_HW, "done\n");

	return 0; /* OK */
}
4736
/* Configure the per-block attention (interrupt) masks.  A set bit in an
 * INT_MASK register keeps that attention masked, so writing 0 enables
 * all attentions for the block; the few non-zero writes below
 * deliberately keep specific known-benign bits masked.  Commented-out
 * writes are preserved from the original code as a record of blocks
 * intentionally left at their reset mask.
 */
static void bnx2x_enable_blocks_attention(struct bnx2x *bp)
{
	REG_WR(bp, PXP_REG_PXP_INT_MASK_0, 0);
	/* E2 keeps bit 6 (0x40) of PXP mask 1 masked */
	if (CHIP_IS_E2(bp))
		REG_WR(bp, PXP_REG_PXP_INT_MASK_1, 0x40);
	else
		REG_WR(bp, PXP_REG_PXP_INT_MASK_1, 0);
	REG_WR(bp, DORQ_REG_DORQ_INT_MASK, 0);
	REG_WR(bp, CFC_REG_CFC_INT_MASK, 0);
	/*
	 * mask read length error interrupts in brb for parser
	 * (parsing unit and 'checksum and crc' unit)
	 * these errors are legal (PU reads fixed length and CAC can cause
	 * read length error on truncated packets)
	 */
	REG_WR(bp, BRB1_REG_BRB1_INT_MASK, 0xFC00);
	REG_WR(bp, QM_REG_QM_INT_MASK, 0);
	REG_WR(bp, TM_REG_TM_INT_MASK, 0);
	REG_WR(bp, XSDM_REG_XSDM_INT_MASK_0, 0);
	REG_WR(bp, XSDM_REG_XSDM_INT_MASK_1, 0);
	REG_WR(bp, XCM_REG_XCM_INT_MASK, 0);
/*	REG_WR(bp, XSEM_REG_XSEM_INT_MASK_0, 0); */
/*	REG_WR(bp, XSEM_REG_XSEM_INT_MASK_1, 0); */
	REG_WR(bp, USDM_REG_USDM_INT_MASK_0, 0);
	REG_WR(bp, USDM_REG_USDM_INT_MASK_1, 0);
	REG_WR(bp, UCM_REG_UCM_INT_MASK, 0);
/*	REG_WR(bp, USEM_REG_USEM_INT_MASK_0, 0); */
/*	REG_WR(bp, USEM_REG_USEM_INT_MASK_1, 0); */
	REG_WR(bp, GRCBASE_UPB + PB_REG_PB_INT_MASK, 0);
	REG_WR(bp, CSDM_REG_CSDM_INT_MASK_0, 0);
	REG_WR(bp, CSDM_REG_CSDM_INT_MASK_1, 0);
	REG_WR(bp, CCM_REG_CCM_INT_MASK, 0);
/*	REG_WR(bp, CSEM_REG_CSEM_INT_MASK_0, 0); */
/*	REG_WR(bp, CSEM_REG_CSEM_INT_MASK_1, 0); */

	/* PXP2 mask is chip/platform specific; the FPGA and non-E2 values
	 * are magic constants carried over from earlier revisions */
	if (CHIP_REV_IS_FPGA(bp))
		REG_WR(bp, PXP2_REG_PXP2_INT_MASK_0, 0x580000);
	else if (CHIP_IS_E2(bp))
		REG_WR(bp, PXP2_REG_PXP2_INT_MASK_0,
			(PXP2_PXP2_INT_MASK_0_REG_PGL_CPL_OF
				| PXP2_PXP2_INT_MASK_0_REG_PGL_CPL_AFT
				| PXP2_PXP2_INT_MASK_0_REG_PGL_PCIE_ATTN
				| PXP2_PXP2_INT_MASK_0_REG_PGL_READ_BLOCKED
				| PXP2_PXP2_INT_MASK_0_REG_PGL_WRITE_BLOCKED));
	else
		REG_WR(bp, PXP2_REG_PXP2_INT_MASK_0, 0x480000);
	REG_WR(bp, TSDM_REG_TSDM_INT_MASK_0, 0);
	REG_WR(bp, TSDM_REG_TSDM_INT_MASK_1, 0);
	REG_WR(bp, TCM_REG_TCM_INT_MASK, 0);
/*	REG_WR(bp, TSEM_REG_TSEM_INT_MASK_0, 0); */
/*	REG_WR(bp, TSEM_REG_TSEM_INT_MASK_1, 0); */
	REG_WR(bp, CDU_REG_CDU_INT_MASK, 0);
	REG_WR(bp, DMAE_REG_DMAE_INT_MASK, 0);
/*	REG_WR(bp, MISC_REG_MISC_INT_MASK, 0); */
	REG_WR(bp, PBF_REG_PBF_INT_MASK, 0x18);		/* bit 3,4 masked */
}
4793
/* Put the chip's common (non per-port) blocks into reset by clearing
 * bits in the two MISC reset registers.  The masks are magic values
 * selecting which blocks to reset -- NOTE(review): exact bit meanings
 * are defined by the hardware reference, not visible here.
 */
static void bnx2x_reset_common(struct bnx2x *bp)
{
	/* reset_common */
	REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_CLEAR,
	       0xd3ffff7f);
	REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_2_CLEAR, 0x1403);
}
4801
Eilon Greenstein573f2032009-08-12 08:24:14 +00004802static void bnx2x_init_pxp(struct bnx2x *bp)
4803{
4804 u16 devctl;
4805 int r_order, w_order;
4806
4807 pci_read_config_word(bp->pdev,
4808 bp->pcie_cap + PCI_EXP_DEVCTL, &devctl);
4809 DP(NETIF_MSG_HW, "read 0x%x from devctl\n", devctl);
4810 w_order = ((devctl & PCI_EXP_DEVCTL_PAYLOAD) >> 5);
4811 if (bp->mrrs == -1)
4812 r_order = ((devctl & PCI_EXP_DEVCTL_READRQ) >> 12);
4813 else {
4814 DP(NETIF_MSG_HW, "force read order to %d\n", bp->mrrs);
4815 r_order = bp->mrrs;
4816 }
4817
4818 bnx2x_init_pxp_arb(bp, r_order, w_order);
4819}
Eilon Greensteinfd4ef402009-07-21 05:47:27 +00004820
/* Enable fan-failure detection when the board configuration (read from
 * shared memory) requires it.  Fan failure is reported through SPIO 5,
 * which is configured as an input, set to active-low interrupt mode and
 * enabled as an event toward the IGU.  No-op without management FW or
 * when the shared-memory config does not request detection.
 */
static void bnx2x_setup_fan_failure_detection(struct bnx2x *bp)
{
	int is_required;
	u32 val;
	int port;

	/* the decision depends on shared memory owned by the MCP */
	if (BP_NOMCP(bp))
		return;

	is_required = 0;
	val = SHMEM_RD(bp, dev_info.shared_hw_config.config2) &
	      SHARED_HW_CFG_FAN_FAILURE_MASK;

	if (val == SHARED_HW_CFG_FAN_FAILURE_ENABLED)
		is_required = 1;

	/*
	 * The fan failure mechanism is usually related to the PHY type since
	 * the power consumption of the board is affected by the PHY. Currently,
	 * fan is required for most designs with SFX7101, BCM8727 and BCM8481.
	 */
	else if (val == SHARED_HW_CFG_FAN_FAILURE_PHY_TYPE)
		for (port = PORT_0; port < PORT_MAX; port++) {
			/* ask the link code whether the PHY on this
			 * port needs fan-failure detection */
			is_required |=
				bnx2x_fan_failure_det_req(
					bp,
					bp->common.shmem_base,
					bp->common.shmem2_base,
					port);
		}

	DP(NETIF_MSG_HW, "fan detection setting: %d\n", is_required);

	if (is_required == 0)
		return;

	/* Fan failure is indicated by SPIO 5 */
	bnx2x_set_spio(bp, MISC_REGISTERS_SPIO_5,
		       MISC_REGISTERS_SPIO_INPUT_HI_Z);

	/* set to active low mode */
	val = REG_RD(bp, MISC_REG_SPIO_INT);
	val |= ((1 << MISC_REGISTERS_SPIO_5) <<
		MISC_REGISTERS_SPIO_INT_OLD_SET_POS);
	REG_WR(bp, MISC_REG_SPIO_INT, val);

	/* enable interrupt to signal the IGU */
	val = REG_RD(bp, MISC_REG_SPIO_EVENT_EN);
	val |= (1 << MISC_REGISTERS_SPIO_5);
	REG_WR(bp, MISC_REG_SPIO_EVENT_EN, val);
}
4872
Dmitry Kravkovf2e08992010-10-06 03:28:26 +00004873static void bnx2x_pretend_func(struct bnx2x *bp, u8 pretend_func_num)
4874{
4875 u32 offset = 0;
4876
4877 if (CHIP_IS_E1(bp))
4878 return;
4879 if (CHIP_IS_E1H(bp) && (pretend_func_num >= E1H_FUNC_MAX))
4880 return;
4881
4882 switch (BP_ABS_FUNC(bp)) {
4883 case 0:
4884 offset = PXP2_REG_PGL_PRETEND_FUNC_F0;
4885 break;
4886 case 1:
4887 offset = PXP2_REG_PGL_PRETEND_FUNC_F1;
4888 break;
4889 case 2:
4890 offset = PXP2_REG_PGL_PRETEND_FUNC_F2;
4891 break;
4892 case 3:
4893 offset = PXP2_REG_PGL_PRETEND_FUNC_F3;
4894 break;
4895 case 4:
4896 offset = PXP2_REG_PGL_PRETEND_FUNC_F4;
4897 break;
4898 case 5:
4899 offset = PXP2_REG_PGL_PRETEND_FUNC_F5;
4900 break;
4901 case 6:
4902 offset = PXP2_REG_PGL_PRETEND_FUNC_F6;
4903 break;
4904 case 7:
4905 offset = PXP2_REG_PGL_PRETEND_FUNC_F7;
4906 break;
4907 default:
4908 return;
4909 }
4910
4911 REG_WR(bp, offset, pretend_func_num);
4912 REG_RD(bp, offset);
4913 DP(NETIF_MSG_HW, "Pretending to func %d\n", pretend_func_num);
4914}
4915
4916static void bnx2x_pf_disable(struct bnx2x *bp)
4917{
4918 u32 val = REG_RD(bp, IGU_REG_PF_CONFIGURATION);
4919 val &= ~IGU_PF_CONF_FUNC_EN;
4920
4921 REG_WR(bp, IGU_REG_PF_CONFIGURATION, val);
4922 REG_WR(bp, PGLUE_B_REG_INTERNAL_PFID_ENABLE_MASTER, 0);
4923 REG_WR(bp, CFC_REG_WEAK_ENABLE_PF, 0);
4924}
4925
Dmitry Kravkov523224a2010-10-06 03:23:26 +00004926static int bnx2x_init_hw_common(struct bnx2x *bp, u32 load_code)
Eilon Greenstein34f80b02008-06-23 20:33:01 -07004927{
4928 u32 val, i;
4929
Dmitry Kravkovf2e08992010-10-06 03:28:26 +00004930 DP(BNX2X_MSG_MCP, "starting common init func %d\n", BP_ABS_FUNC(bp));
Eilon Greenstein34f80b02008-06-23 20:33:01 -07004931
Eilon Greenstein81f75bb2009-01-22 03:37:31 +00004932 bnx2x_reset_common(bp);
Eilon Greenstein34f80b02008-06-23 20:33:01 -07004933 REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_SET, 0xffffffff);
4934 REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_2_SET, 0xfffc);
4935
Vladislav Zolotarov94a78b72009-04-27 03:27:43 -07004936 bnx2x_init_block(bp, MISC_BLOCK, COMMON_STAGE);
Dmitry Kravkovf2e08992010-10-06 03:28:26 +00004937 if (!CHIP_IS_E1(bp))
Dmitry Kravkovfb3bff12010-10-06 03:26:40 +00004938 REG_WR(bp, MISC_REG_E1HMF_MODE, IS_MF(bp));
Eilon Greenstein34f80b02008-06-23 20:33:01 -07004939
Dmitry Kravkovf2e08992010-10-06 03:28:26 +00004940 if (CHIP_IS_E2(bp)) {
4941 u8 fid;
4942
4943 /**
4944 * 4-port mode or 2-port mode we need to turn of master-enable
4945 * for everyone, after that, turn it back on for self.
4946 * so, we disregard multi-function or not, and always disable
4947 * for all functions on the given path, this means 0,2,4,6 for
4948 * path 0 and 1,3,5,7 for path 1
4949 */
4950 for (fid = BP_PATH(bp); fid < E2_FUNC_MAX*2; fid += 2) {
4951 if (fid == BP_ABS_FUNC(bp)) {
4952 REG_WR(bp,
4953 PGLUE_B_REG_INTERNAL_PFID_ENABLE_MASTER,
4954 1);
4955 continue;
4956 }
4957
4958 bnx2x_pretend_func(bp, fid);
4959 /* clear pf enable */
4960 bnx2x_pf_disable(bp);
4961 bnx2x_pretend_func(bp, BP_ABS_FUNC(bp));
4962 }
4963 }
Eilon Greenstein34f80b02008-06-23 20:33:01 -07004964
Vladislav Zolotarov94a78b72009-04-27 03:27:43 -07004965 bnx2x_init_block(bp, PXP_BLOCK, COMMON_STAGE);
Eilon Greenstein34f80b02008-06-23 20:33:01 -07004966 if (CHIP_IS_E1(bp)) {
4967 /* enable HW interrupt from PXP on USDM overflow
4968 bit 16 on INT_MASK_0 */
4969 REG_WR(bp, PXP_REG_PXP_INT_MASK_0, 0);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02004970 }
4971
Vladislav Zolotarov94a78b72009-04-27 03:27:43 -07004972 bnx2x_init_block(bp, PXP2_BLOCK, COMMON_STAGE);
Eilon Greenstein34f80b02008-06-23 20:33:01 -07004973 bnx2x_init_pxp(bp);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02004974
4975#ifdef __BIG_ENDIAN
Eilon Greenstein34f80b02008-06-23 20:33:01 -07004976 REG_WR(bp, PXP2_REG_RQ_QM_ENDIAN_M, 1);
4977 REG_WR(bp, PXP2_REG_RQ_TM_ENDIAN_M, 1);
4978 REG_WR(bp, PXP2_REG_RQ_SRC_ENDIAN_M, 1);
4979 REG_WR(bp, PXP2_REG_RQ_CDU_ENDIAN_M, 1);
4980 REG_WR(bp, PXP2_REG_RQ_DBG_ENDIAN_M, 1);
Eilon Greenstein8badd272009-02-12 08:36:15 +00004981 /* make sure this value is 0 */
4982 REG_WR(bp, PXP2_REG_RQ_HC_ENDIAN_M, 0);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02004983
Eilon Greenstein34f80b02008-06-23 20:33:01 -07004984/* REG_WR(bp, PXP2_REG_RD_PBF_SWAP_MODE, 1); */
4985 REG_WR(bp, PXP2_REG_RD_QM_SWAP_MODE, 1);
4986 REG_WR(bp, PXP2_REG_RD_TM_SWAP_MODE, 1);
4987 REG_WR(bp, PXP2_REG_RD_SRC_SWAP_MODE, 1);
4988 REG_WR(bp, PXP2_REG_RD_CDURD_SWAP_MODE, 1);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02004989#endif
4990
Dmitry Kravkov523224a2010-10-06 03:23:26 +00004991 bnx2x_ilt_init_page_size(bp, INITOP_SET);
4992
Eilon Greenstein34f80b02008-06-23 20:33:01 -07004993 if (CHIP_REV_IS_FPGA(bp) && CHIP_IS_E1H(bp))
4994 REG_WR(bp, PXP2_REG_PGL_TAGS_LIMIT, 0x1);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02004995
Eilon Greenstein34f80b02008-06-23 20:33:01 -07004996 /* let the HW do it's magic ... */
4997 msleep(100);
4998 /* finish PXP init */
4999 val = REG_RD(bp, PXP2_REG_RQ_CFG_DONE);
5000 if (val != 1) {
5001 BNX2X_ERR("PXP2 CFG failed\n");
5002 return -EBUSY;
5003 }
5004 val = REG_RD(bp, PXP2_REG_RD_INIT_DONE);
5005 if (val != 1) {
5006 BNX2X_ERR("PXP2 RD_INIT failed\n");
5007 return -EBUSY;
5008 }
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02005009
Dmitry Kravkovf2e08992010-10-06 03:28:26 +00005010 /* Timers bug workaround E2 only. We need to set the entire ILT to
5011 * have entries with value "0" and valid bit on.
5012 * This needs to be done by the first PF that is loaded in a path
5013 * (i.e. common phase)
5014 */
5015 if (CHIP_IS_E2(bp)) {
5016 struct ilt_client_info ilt_cli;
5017 struct bnx2x_ilt ilt;
5018 memset(&ilt_cli, 0, sizeof(struct ilt_client_info));
5019 memset(&ilt, 0, sizeof(struct bnx2x_ilt));
5020
Uwe Kleine-Königb5950762010-11-01 15:38:34 -04005021 /* initialize dummy TM client */
Dmitry Kravkovf2e08992010-10-06 03:28:26 +00005022 ilt_cli.start = 0;
5023 ilt_cli.end = ILT_NUM_PAGE_ENTRIES - 1;
5024 ilt_cli.client_num = ILT_CLIENT_TM;
5025
5026 /* Step 1: set zeroes to all ilt page entries with valid bit on
5027 * Step 2: set the timers first/last ilt entry to point
5028 * to the entire range to prevent ILT range error for 3rd/4th
Lucas De Marchi25985ed2011-03-30 22:57:33 -03005029 * vnic (this code assumes existence of the vnic)
Dmitry Kravkovf2e08992010-10-06 03:28:26 +00005030 *
5031 * both steps performed by call to bnx2x_ilt_client_init_op()
5032 * with dummy TM client
5033 *
5034 * we must use pretend since PXP2_REG_RQ_##blk##_FIRST_ILT
5035 * and his brother are split registers
5036 */
5037 bnx2x_pretend_func(bp, (BP_PATH(bp) + 6));
5038 bnx2x_ilt_client_init_op_ilt(bp, &ilt, &ilt_cli, INITOP_CLEAR);
5039 bnx2x_pretend_func(bp, BP_ABS_FUNC(bp));
5040
5041 REG_WR(bp, PXP2_REG_RQ_DRAM_ALIGN, BNX2X_PXP_DRAM_ALIGN);
5042 REG_WR(bp, PXP2_REG_RQ_DRAM_ALIGN_RD, BNX2X_PXP_DRAM_ALIGN);
5043 REG_WR(bp, PXP2_REG_RQ_DRAM_ALIGN_SEL, 1);
5044 }
5045
5046
Eilon Greenstein34f80b02008-06-23 20:33:01 -07005047 REG_WR(bp, PXP2_REG_RQ_DISABLE_INPUTS, 0);
5048 REG_WR(bp, PXP2_REG_RD_DISABLE_INPUTS, 0);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02005049
Dmitry Kravkovf2e08992010-10-06 03:28:26 +00005050 if (CHIP_IS_E2(bp)) {
5051 int factor = CHIP_REV_IS_EMUL(bp) ? 1000 :
5052 (CHIP_REV_IS_FPGA(bp) ? 400 : 0);
5053 bnx2x_init_block(bp, PGLUE_B_BLOCK, COMMON_STAGE);
5054
5055 bnx2x_init_block(bp, ATC_BLOCK, COMMON_STAGE);
5056
5057 /* let the HW do it's magic ... */
5058 do {
5059 msleep(200);
5060 val = REG_RD(bp, ATC_REG_ATC_INIT_DONE);
5061 } while (factor-- && (val != 1));
5062
5063 if (val != 1) {
5064 BNX2X_ERR("ATC_INIT failed\n");
5065 return -EBUSY;
5066 }
5067 }
5068
Vladislav Zolotarov94a78b72009-04-27 03:27:43 -07005069 bnx2x_init_block(bp, DMAE_BLOCK, COMMON_STAGE);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02005070
Eilon Greenstein34f80b02008-06-23 20:33:01 -07005071 /* clean the DMAE memory */
5072 bp->dmae_ready = 1;
5073 bnx2x_init_fill(bp, TSEM_REG_PRAM, 0, 8);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02005074
Vladislav Zolotarov94a78b72009-04-27 03:27:43 -07005075 bnx2x_init_block(bp, TCM_BLOCK, COMMON_STAGE);
5076 bnx2x_init_block(bp, UCM_BLOCK, COMMON_STAGE);
5077 bnx2x_init_block(bp, CCM_BLOCK, COMMON_STAGE);
5078 bnx2x_init_block(bp, XCM_BLOCK, COMMON_STAGE);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02005079
Eilon Greenstein34f80b02008-06-23 20:33:01 -07005080 bnx2x_read_dmae(bp, XSEM_REG_PASSIVE_BUFFER, 3);
5081 bnx2x_read_dmae(bp, CSEM_REG_PASSIVE_BUFFER, 3);
5082 bnx2x_read_dmae(bp, TSEM_REG_PASSIVE_BUFFER, 3);
5083 bnx2x_read_dmae(bp, USEM_REG_PASSIVE_BUFFER, 3);
5084
Vladislav Zolotarov94a78b72009-04-27 03:27:43 -07005085 bnx2x_init_block(bp, QM_BLOCK, COMMON_STAGE);
Michael Chan37b091b2009-10-10 13:46:55 +00005086
Dmitry Kravkovf2e08992010-10-06 03:28:26 +00005087 if (CHIP_MODE_IS_4_PORT(bp))
5088 bnx2x_init_block(bp, QM_4PORT_BLOCK, COMMON_STAGE);
Dmitry Kravkovf85582f2010-10-06 03:34:21 +00005089
Dmitry Kravkov523224a2010-10-06 03:23:26 +00005090 /* QM queues pointers table */
5091 bnx2x_qm_init_ptr_table(bp, bp->qm_cid_count, INITOP_SET);
Michael Chan37b091b2009-10-10 13:46:55 +00005092
Eilon Greenstein34f80b02008-06-23 20:33:01 -07005093 /* soft reset pulse */
5094 REG_WR(bp, QM_REG_SOFT_RESET, 1);
5095 REG_WR(bp, QM_REG_SOFT_RESET, 0);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02005096
Michael Chan37b091b2009-10-10 13:46:55 +00005097#ifdef BCM_CNIC
Vladislav Zolotarov94a78b72009-04-27 03:27:43 -07005098 bnx2x_init_block(bp, TIMERS_BLOCK, COMMON_STAGE);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02005099#endif
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02005100
Vladislav Zolotarov94a78b72009-04-27 03:27:43 -07005101 bnx2x_init_block(bp, DQ_BLOCK, COMMON_STAGE);
Dmitry Kravkov523224a2010-10-06 03:23:26 +00005102 REG_WR(bp, DORQ_REG_DPM_CID_OFST, BNX2X_DB_SHIFT);
5103
Eilon Greenstein34f80b02008-06-23 20:33:01 -07005104 if (!CHIP_REV_IS_SLOW(bp)) {
5105 /* enable hw interrupt from doorbell Q */
5106 REG_WR(bp, DORQ_REG_DORQ_INT_MASK, 0);
5107 }
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02005108
Vladislav Zolotarov94a78b72009-04-27 03:27:43 -07005109 bnx2x_init_block(bp, BRB1_BLOCK, COMMON_STAGE);
Dmitry Kravkovf2e08992010-10-06 03:28:26 +00005110 if (CHIP_MODE_IS_4_PORT(bp)) {
5111 REG_WR(bp, BRB1_REG_FULL_LB_XOFF_THRESHOLD, 248);
5112 REG_WR(bp, BRB1_REG_FULL_LB_XON_THRESHOLD, 328);
5113 }
5114
Vladislav Zolotarov94a78b72009-04-27 03:27:43 -07005115 bnx2x_init_block(bp, PRS_BLOCK, COMMON_STAGE);
Eilon Greenstein26c8fa42009-01-14 21:29:55 -08005116 REG_WR(bp, PRS_REG_A_PRSU_20, 0xf);
Michael Chan37b091b2009-10-10 13:46:55 +00005117#ifndef BCM_CNIC
Eilon Greenstein3196a882008-08-13 15:58:49 -07005118 /* set NIC mode */
5119 REG_WR(bp, PRS_REG_NIC_MODE, 1);
Michael Chan37b091b2009-10-10 13:46:55 +00005120#endif
Dmitry Kravkovf2e08992010-10-06 03:28:26 +00005121 if (!CHIP_IS_E1(bp))
Dmitry Kravkov0793f83f2010-12-01 12:39:28 -08005122 REG_WR(bp, PRS_REG_E1HOV_MODE, IS_MF_SD(bp));
Dmitry Kravkovf85582f2010-10-06 03:34:21 +00005123
Dmitry Kravkovf2e08992010-10-06 03:28:26 +00005124 if (CHIP_IS_E2(bp)) {
5125 /* Bit-map indicating which L2 hdrs may appear after the
5126 basic Ethernet header */
Dmitry Kravkov0793f83f2010-12-01 12:39:28 -08005127 int has_ovlan = IS_MF_SD(bp);
Dmitry Kravkovf2e08992010-10-06 03:28:26 +00005128 REG_WR(bp, PRS_REG_HDRS_AFTER_BASIC, (has_ovlan ? 7 : 6));
5129 REG_WR(bp, PRS_REG_MUST_HAVE_HDRS, (has_ovlan ? 1 : 0));
5130 }
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02005131
Vladislav Zolotarov94a78b72009-04-27 03:27:43 -07005132 bnx2x_init_block(bp, TSDM_BLOCK, COMMON_STAGE);
5133 bnx2x_init_block(bp, CSDM_BLOCK, COMMON_STAGE);
5134 bnx2x_init_block(bp, USDM_BLOCK, COMMON_STAGE);
5135 bnx2x_init_block(bp, XSDM_BLOCK, COMMON_STAGE);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02005136
Eilon Greensteinca003922009-08-12 22:53:28 -07005137 bnx2x_init_fill(bp, TSEM_REG_FAST_MEMORY, 0, STORM_INTMEM_SIZE(bp));
5138 bnx2x_init_fill(bp, USEM_REG_FAST_MEMORY, 0, STORM_INTMEM_SIZE(bp));
5139 bnx2x_init_fill(bp, CSEM_REG_FAST_MEMORY, 0, STORM_INTMEM_SIZE(bp));
5140 bnx2x_init_fill(bp, XSEM_REG_FAST_MEMORY, 0, STORM_INTMEM_SIZE(bp));
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02005141
Vladislav Zolotarov94a78b72009-04-27 03:27:43 -07005142 bnx2x_init_block(bp, TSEM_BLOCK, COMMON_STAGE);
5143 bnx2x_init_block(bp, USEM_BLOCK, COMMON_STAGE);
5144 bnx2x_init_block(bp, CSEM_BLOCK, COMMON_STAGE);
5145 bnx2x_init_block(bp, XSEM_BLOCK, COMMON_STAGE);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02005146
Dmitry Kravkovf2e08992010-10-06 03:28:26 +00005147 if (CHIP_MODE_IS_4_PORT(bp))
5148 bnx2x_init_block(bp, XSEM_4PORT_BLOCK, COMMON_STAGE);
5149
Eilon Greenstein34f80b02008-06-23 20:33:01 -07005150 /* sync semi rtc */
5151 REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_CLEAR,
5152 0x80000000);
5153 REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_SET,
5154 0x80000000);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02005155
Vladislav Zolotarov94a78b72009-04-27 03:27:43 -07005156 bnx2x_init_block(bp, UPB_BLOCK, COMMON_STAGE);
5157 bnx2x_init_block(bp, XPB_BLOCK, COMMON_STAGE);
5158 bnx2x_init_block(bp, PBF_BLOCK, COMMON_STAGE);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02005159
Dmitry Kravkovf2e08992010-10-06 03:28:26 +00005160 if (CHIP_IS_E2(bp)) {
Dmitry Kravkov0793f83f2010-12-01 12:39:28 -08005161 int has_ovlan = IS_MF_SD(bp);
Dmitry Kravkovf2e08992010-10-06 03:28:26 +00005162 REG_WR(bp, PBF_REG_HDRS_AFTER_BASIC, (has_ovlan ? 7 : 6));
5163 REG_WR(bp, PBF_REG_MUST_HAVE_HDRS, (has_ovlan ? 1 : 0));
5164 }
5165
Eilon Greenstein34f80b02008-06-23 20:33:01 -07005166 REG_WR(bp, SRC_REG_SOFT_RST, 1);
Tom Herbertc68ed252010-04-23 00:10:52 -07005167 for (i = SRC_REG_KEYRSS0_0; i <= SRC_REG_KEYRSS1_9; i += 4)
5168 REG_WR(bp, i, random32());
Dmitry Kravkovf85582f2010-10-06 03:34:21 +00005169
Vladislav Zolotarov94a78b72009-04-27 03:27:43 -07005170 bnx2x_init_block(bp, SRCH_BLOCK, COMMON_STAGE);
Michael Chan37b091b2009-10-10 13:46:55 +00005171#ifdef BCM_CNIC
5172 REG_WR(bp, SRC_REG_KEYSEARCH_0, 0x63285672);
5173 REG_WR(bp, SRC_REG_KEYSEARCH_1, 0x24b8f2cc);
5174 REG_WR(bp, SRC_REG_KEYSEARCH_2, 0x223aef9b);
5175 REG_WR(bp, SRC_REG_KEYSEARCH_3, 0x26001e3a);
5176 REG_WR(bp, SRC_REG_KEYSEARCH_4, 0x7ae91116);
5177 REG_WR(bp, SRC_REG_KEYSEARCH_5, 0x5ce5230b);
5178 REG_WR(bp, SRC_REG_KEYSEARCH_6, 0x298d8adf);
5179 REG_WR(bp, SRC_REG_KEYSEARCH_7, 0x6eb0ff09);
5180 REG_WR(bp, SRC_REG_KEYSEARCH_8, 0x1830f82f);
5181 REG_WR(bp, SRC_REG_KEYSEARCH_9, 0x01e46be7);
5182#endif
Eilon Greenstein34f80b02008-06-23 20:33:01 -07005183 REG_WR(bp, SRC_REG_SOFT_RST, 0);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02005184
Eilon Greenstein34f80b02008-06-23 20:33:01 -07005185 if (sizeof(union cdu_context) != 1024)
5186 /* we currently assume that a context is 1024 bytes */
Vladislav Zolotarovcdaa7cb2010-04-19 01:13:57 +00005187 dev_alert(&bp->pdev->dev, "please adjust the size "
5188 "of cdu_context(%ld)\n",
Joe Perches7995c642010-02-17 15:01:52 +00005189 (long)sizeof(union cdu_context));
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02005190
Vladislav Zolotarov94a78b72009-04-27 03:27:43 -07005191 bnx2x_init_block(bp, CDU_BLOCK, COMMON_STAGE);
Eilon Greenstein34f80b02008-06-23 20:33:01 -07005192 val = (4 << 24) + (0 << 12) + 1024;
5193 REG_WR(bp, CDU_REG_CDU_GLOBAL_PARAMS, val);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02005194
Vladislav Zolotarov94a78b72009-04-27 03:27:43 -07005195 bnx2x_init_block(bp, CFC_BLOCK, COMMON_STAGE);
Eilon Greenstein34f80b02008-06-23 20:33:01 -07005196 REG_WR(bp, CFC_REG_INIT_REG, 0x7FF);
Eilon Greenstein8d9c5f32009-02-15 23:24:08 -08005197 /* enable context validation interrupt from CFC */
5198 REG_WR(bp, CFC_REG_CFC_INT_MASK, 0);
5199
5200 /* set the thresholds to prevent CFC/CDU race */
5201 REG_WR(bp, CFC_REG_DEBUG0, 0x20020000);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02005202
Vladislav Zolotarov94a78b72009-04-27 03:27:43 -07005203 bnx2x_init_block(bp, HC_BLOCK, COMMON_STAGE);
Dmitry Kravkovf2e08992010-10-06 03:28:26 +00005204
5205 if (CHIP_IS_E2(bp) && BP_NOMCP(bp))
5206 REG_WR(bp, IGU_REG_RESET_MEMORIES, 0x36);
5207
5208 bnx2x_init_block(bp, IGU_BLOCK, COMMON_STAGE);
Vladislav Zolotarov94a78b72009-04-27 03:27:43 -07005209 bnx2x_init_block(bp, MISC_AEU_BLOCK, COMMON_STAGE);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02005210
Vladislav Zolotarov94a78b72009-04-27 03:27:43 -07005211 bnx2x_init_block(bp, PXPCS_BLOCK, COMMON_STAGE);
Eilon Greenstein34f80b02008-06-23 20:33:01 -07005212 /* Reset PCIE errors for debug */
5213 REG_WR(bp, 0x2814, 0xffffffff);
5214 REG_WR(bp, 0x3820, 0xffffffff);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02005215
Dmitry Kravkovf2e08992010-10-06 03:28:26 +00005216 if (CHIP_IS_E2(bp)) {
5217 REG_WR(bp, PCICFG_OFFSET + PXPCS_TL_CONTROL_5,
5218 (PXPCS_TL_CONTROL_5_ERR_UNSPPORT1 |
5219 PXPCS_TL_CONTROL_5_ERR_UNSPPORT));
5220 REG_WR(bp, PCICFG_OFFSET + PXPCS_TL_FUNC345_STAT,
5221 (PXPCS_TL_FUNC345_STAT_ERR_UNSPPORT4 |
5222 PXPCS_TL_FUNC345_STAT_ERR_UNSPPORT3 |
5223 PXPCS_TL_FUNC345_STAT_ERR_UNSPPORT2));
5224 REG_WR(bp, PCICFG_OFFSET + PXPCS_TL_FUNC678_STAT,
5225 (PXPCS_TL_FUNC678_STAT_ERR_UNSPPORT7 |
5226 PXPCS_TL_FUNC678_STAT_ERR_UNSPPORT6 |
5227 PXPCS_TL_FUNC678_STAT_ERR_UNSPPORT5));
5228 }
5229
Vladislav Zolotarov94a78b72009-04-27 03:27:43 -07005230 bnx2x_init_block(bp, EMAC0_BLOCK, COMMON_STAGE);
Vladislav Zolotarov94a78b72009-04-27 03:27:43 -07005231 bnx2x_init_block(bp, EMAC1_BLOCK, COMMON_STAGE);
Vladislav Zolotarov94a78b72009-04-27 03:27:43 -07005232 bnx2x_init_block(bp, DBU_BLOCK, COMMON_STAGE);
Vladislav Zolotarov94a78b72009-04-27 03:27:43 -07005233 bnx2x_init_block(bp, DBG_BLOCK, COMMON_STAGE);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02005234
Vladislav Zolotarov94a78b72009-04-27 03:27:43 -07005235 bnx2x_init_block(bp, NIG_BLOCK, COMMON_STAGE);
Dmitry Kravkovf2e08992010-10-06 03:28:26 +00005236 if (!CHIP_IS_E1(bp)) {
Dmitry Kravkovfb3bff12010-10-06 03:26:40 +00005237 REG_WR(bp, NIG_REG_LLH_MF_MODE, IS_MF(bp));
Dmitry Kravkov0793f83f2010-12-01 12:39:28 -08005238 REG_WR(bp, NIG_REG_LLH_E1HOV_MODE, IS_MF_SD(bp));
Eilon Greenstein34f80b02008-06-23 20:33:01 -07005239 }
Dmitry Kravkovf2e08992010-10-06 03:28:26 +00005240 if (CHIP_IS_E2(bp)) {
5241 /* Bit-map indicating which L2 hdrs may appear after the
5242 basic Ethernet header */
Dmitry Kravkov0793f83f2010-12-01 12:39:28 -08005243 REG_WR(bp, NIG_REG_P0_HDRS_AFTER_BASIC, (IS_MF_SD(bp) ? 7 : 6));
Dmitry Kravkovf2e08992010-10-06 03:28:26 +00005244 }
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02005245
Eilon Greenstein34f80b02008-06-23 20:33:01 -07005246 if (CHIP_REV_IS_SLOW(bp))
5247 msleep(200);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02005248
Eilon Greenstein34f80b02008-06-23 20:33:01 -07005249 /* finish CFC init */
5250 val = reg_poll(bp, CFC_REG_LL_INIT_DONE, 1, 100, 10);
5251 if (val != 1) {
5252 BNX2X_ERR("CFC LL_INIT failed\n");
5253 return -EBUSY;
5254 }
5255 val = reg_poll(bp, CFC_REG_AC_INIT_DONE, 1, 100, 10);
5256 if (val != 1) {
5257 BNX2X_ERR("CFC AC_INIT failed\n");
5258 return -EBUSY;
5259 }
5260 val = reg_poll(bp, CFC_REG_CAM_INIT_DONE, 1, 100, 10);
5261 if (val != 1) {
5262 BNX2X_ERR("CFC CAM_INIT failed\n");
5263 return -EBUSY;
5264 }
5265 REG_WR(bp, CFC_REG_DEBUG0, 0);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02005266
Dmitry Kravkovf2e08992010-10-06 03:28:26 +00005267 if (CHIP_IS_E1(bp)) {
5268 /* read NIG statistic
5269 to see if this is our first up since powerup */
5270 bnx2x_read_dmae(bp, NIG_REG_STAT2_BRB_OCTET, 2);
5271 val = *bnx2x_sp(bp, wb_data[0]);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02005272
Dmitry Kravkovf2e08992010-10-06 03:28:26 +00005273 /* do internal memory self test */
5274 if ((val == 0) && bnx2x_int_mem_test(bp)) {
5275 BNX2X_ERR("internal mem self test failed\n");
5276 return -EBUSY;
5277 }
Eilon Greenstein34f80b02008-06-23 20:33:01 -07005278 }
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02005279
Eilon Greensteinfd4ef402009-07-21 05:47:27 +00005280 bnx2x_setup_fan_failure_detection(bp);
5281
Eilon Greenstein34f80b02008-06-23 20:33:01 -07005282 /* clear PXP2 attentions */
5283 REG_RD(bp, PXP2_REG_PXP2_INT_STS_CLR_0);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02005284
Vladislav Zolotarov4a33bc02011-01-09 02:20:04 +00005285 bnx2x_enable_blocks_attention(bp);
5286 if (CHIP_PARITY_ENABLED(bp))
5287 bnx2x_enable_blocks_parity(bp);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02005288
Yaniv Rosner6bbca912008-08-13 15:57:28 -07005289 if (!BP_NOMCP(bp)) {
Dmitry Kravkovf2e08992010-10-06 03:28:26 +00005290 /* In E2 2-PORT mode, same ext phy is used for the two paths */
5291 if ((load_code == FW_MSG_CODE_DRV_LOAD_COMMON_CHIP) ||
5292 CHIP_IS_E1x(bp)) {
5293 u32 shmem_base[2], shmem2_base[2];
5294 shmem_base[0] = bp->common.shmem_base;
5295 shmem2_base[0] = bp->common.shmem2_base;
5296 if (CHIP_IS_E2(bp)) {
5297 shmem_base[1] =
5298 SHMEM2_RD(bp, other_shmem_base_addr);
5299 shmem2_base[1] =
5300 SHMEM2_RD(bp, other_shmem2_base_addr);
5301 }
5302 bnx2x_acquire_phy_lock(bp);
5303 bnx2x_common_init_phy(bp, shmem_base, shmem2_base,
5304 bp->common.chip_id);
5305 bnx2x_release_phy_lock(bp);
5306 }
Yaniv Rosner6bbca912008-08-13 15:57:28 -07005307 } else
5308 BNX2X_ERR("Bootcode is missing - can not initialize link\n");
5309
Eilon Greenstein34f80b02008-06-23 20:33:01 -07005310 return 0;
5311}
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02005312
/* Per-port hardware init stage.
 *
 * Runs the PORT0/PORT1 init stage for every HW block and applies per-port
 * register configuration: BRB pause thresholds, PBF credits, AEU attention
 * masks, NIG classification/flow-control setup and fan-failure detection.
 * The order of the block inits below follows the HW init sequence and must
 * not be rearranged.
 *
 * Returns 0 (no failure paths in this stage).
 */
static int bnx2x_init_hw_port(struct bnx2x *bp)
{
	int port = BP_PORT(bp);
	/* select the init-values stage matching this port */
	int init_stage = port ? PORT1_STAGE : PORT0_STAGE;
	u32 low, high;
	u32 val;

	DP(BNX2X_MSG_MCP, "starting port init port %d\n", port);

	/* mask this port's NIG interrupt while initializing */
	REG_WR(bp, NIG_REG_MASK_INTERRUPT_PORT0 + port*4, 0);

	bnx2x_init_block(bp, PXP_BLOCK, init_stage);
	bnx2x_init_block(bp, PXP2_BLOCK, init_stage);

	/* Timers bug workaround: disables the pf_master bit in pglue at
	 * common phase, we need to enable it here before any dmae access are
	 * attempted. Therefore we manually added the enable-master to the
	 * port phase (it also happens in the function phase)
	 */
	if (CHIP_IS_E2(bp))
		REG_WR(bp, PGLUE_B_REG_INTERNAL_PFID_ENABLE_MASTER, 1);

	bnx2x_init_block(bp, TCM_BLOCK, init_stage);
	bnx2x_init_block(bp, UCM_BLOCK, init_stage);
	bnx2x_init_block(bp, CCM_BLOCK, init_stage);
	bnx2x_init_block(bp, XCM_BLOCK, init_stage);

	/* QM cid (connection) count */
	bnx2x_qm_init_cid_count(bp, bp->qm_cid_count, INITOP_SET);

#ifdef BCM_CNIC
	/* CNIC timers: scan time and max active CID for line 0 */
	bnx2x_init_block(bp, TIMERS_BLOCK, init_stage);
	REG_WR(bp, TM_REG_LIN0_SCAN_TIME + port*4, 20);
	REG_WR(bp, TM_REG_LIN0_MAX_ACTIVE_CID + port*4, 31);
#endif

	bnx2x_init_block(bp, DQ_BLOCK, init_stage);

	if (CHIP_MODE_IS_4_PORT(bp))
		bnx2x_init_block(bp, QM_4PORT_BLOCK, init_stage);

	if (CHIP_IS_E1(bp) || CHIP_IS_E1H(bp)) {
		bnx2x_init_block(bp, BRB1_BLOCK, init_stage);
		if (CHIP_REV_IS_SLOW(bp) && CHIP_IS_E1(bp)) {
			/* no pause for emulation and FPGA */
			low = 0;
			high = 513;
		} else {
			/* BRB pause thresholds in 256-byte units, scaled by
			 * MF mode, single/dual-port flag and MTU */
			if (IS_MF(bp))
				low = ((bp->flags & ONE_PORT_FLAG) ? 160 : 246);
			else if (bp->dev->mtu > 4096) {
				if (bp->flags & ONE_PORT_FLAG)
					low = 160;
				else {
					val = bp->dev->mtu;
					/* (24*1024 + val*4)/256 */
					low = 96 + (val/64) +
					      ((val % 64) ? 1 : 0);
				}
			} else
				low = ((bp->flags & ONE_PORT_FLAG) ? 80 : 160);
			high = low + 56; /* 14*1024/256 */
		}
		REG_WR(bp, BRB1_REG_PAUSE_LOW_THRESHOLD_0 + port*4, low);
		REG_WR(bp, BRB1_REG_PAUSE_HIGH_THRESHOLD_0 + port*4, high);
	}

	if (CHIP_MODE_IS_4_PORT(bp)) {
		/* 4-port mode uses fixed XON/XOFF thresholds and a per-port
		 * guaranteed MAC credit */
		REG_WR(bp, BRB1_REG_PAUSE_0_XOFF_THRESHOLD_0 + port*8, 248);
		REG_WR(bp, BRB1_REG_PAUSE_0_XON_THRESHOLD_0 + port*8, 328);
		REG_WR(bp, (BP_PORT(bp) ? BRB1_REG_MAC_GUARANTIED_1 :
				   BRB1_REG_MAC_GUARANTIED_0), 40);
	}

	bnx2x_init_block(bp, PRS_BLOCK, init_stage);

	bnx2x_init_block(bp, TSDM_BLOCK, init_stage);
	bnx2x_init_block(bp, CSDM_BLOCK, init_stage);
	bnx2x_init_block(bp, USDM_BLOCK, init_stage);
	bnx2x_init_block(bp, XSDM_BLOCK, init_stage);

	bnx2x_init_block(bp, TSEM_BLOCK, init_stage);
	bnx2x_init_block(bp, USEM_BLOCK, init_stage);
	bnx2x_init_block(bp, CSEM_BLOCK, init_stage);
	bnx2x_init_block(bp, XSEM_BLOCK, init_stage);
	if (CHIP_MODE_IS_4_PORT(bp))
		bnx2x_init_block(bp, XSEM_4PORT_BLOCK, init_stage);

	bnx2x_init_block(bp, UPB_BLOCK, init_stage);
	bnx2x_init_block(bp, XPB_BLOCK, init_stage);

	bnx2x_init_block(bp, PBF_BLOCK, init_stage);

	if (!CHIP_IS_E2(bp)) {
		/* configure PBF to work without PAUSE mtu 9000 */
		REG_WR(bp, PBF_REG_P0_PAUSE_ENABLE + port*4, 0);

		/* update threshold */
		REG_WR(bp, PBF_REG_P0_ARB_THRSH + port*4, (9040/16));
		/* update init credit */
		REG_WR(bp, PBF_REG_P0_INIT_CRD + port*4, (9040/16) + 553 - 22);

		/* probe changes */
		REG_WR(bp, PBF_REG_INIT_P0 + port*4, 1);
		udelay(50);
		REG_WR(bp, PBF_REG_INIT_P0 + port*4, 0);
	}

#ifdef BCM_CNIC
	bnx2x_init_block(bp, SRCH_BLOCK, init_stage);
#endif
	bnx2x_init_block(bp, CDU_BLOCK, init_stage);
	bnx2x_init_block(bp, CFC_BLOCK, init_stage);

	if (CHIP_IS_E1(bp)) {
		/* clear HC leading/trailing edge masks on E1 before HC init */
		REG_WR(bp, HC_REG_LEADING_EDGE_0 + port*8, 0);
		REG_WR(bp, HC_REG_TRAILING_EDGE_0 + port*8, 0);
	}
	bnx2x_init_block(bp, HC_BLOCK, init_stage);

	bnx2x_init_block(bp, IGU_BLOCK, init_stage);

	bnx2x_init_block(bp, MISC_AEU_BLOCK, init_stage);
	/* init aeu_mask_attn_func_0/1:
	 * - SF mode: bits 3-7 are masked. only bits 0-2 are in use
	 * - MF mode: bit 3 is masked. bits 0-2 are in use as in SF
	 *            bits 4-7 are used for "per vn group attention" */
	val = IS_MF(bp) ? 0xF7 : 0x7;
	/* Enable DCBX attention for all but E1 */
	val |= CHIP_IS_E1(bp) ? 0 : 0x10;
	REG_WR(bp, MISC_REG_AEU_MASK_ATTN_FUNC_0 + port*4, val);

	bnx2x_init_block(bp, PXPCS_BLOCK, init_stage);
	bnx2x_init_block(bp, EMAC0_BLOCK, init_stage);
	bnx2x_init_block(bp, EMAC1_BLOCK, init_stage);
	bnx2x_init_block(bp, DBU_BLOCK, init_stage);
	bnx2x_init_block(bp, DBG_BLOCK, init_stage);

	bnx2x_init_block(bp, NIG_BLOCK, init_stage);

	REG_WR(bp, NIG_REG_XGXS_SERDES0_MODE_SEL + port*4, 1);

	if (!CHIP_IS_E1(bp)) {
		/* 0x2 disable mf_ov, 0x1 enable */
		REG_WR(bp, NIG_REG_LLH0_BRB1_DRV_MASK_MF + port*4,
		       (IS_MF_SD(bp) ? 0x1 : 0x2));

		if (CHIP_IS_E2(bp)) {
			/* LLH classification type: 0 - none, 1 - SD (outer
			 * vlan based), 2 - SI (mac based) */
			val = 0;
			switch (bp->mf_mode) {
			case MULTI_FUNCTION_SD:
				val = 1;
				break;
			case MULTI_FUNCTION_SI:
				val = 2;
				break;
			}

			REG_WR(bp, (BP_PORT(bp) ? NIG_REG_LLH1_CLS_TYPE :
						  NIG_REG_LLH0_CLS_TYPE), val);
		}
		{
			/* disable LLFC, enable plain PAUSE flow control */
			REG_WR(bp, NIG_REG_LLFC_ENABLE_0 + port*4, 0);
			REG_WR(bp, NIG_REG_LLFC_OUT_EN_0 + port*4, 0);
			REG_WR(bp, NIG_REG_PAUSE_ENABLE_0 + port*4, 1);
		}
	}

	bnx2x_init_block(bp, MCP_BLOCK, init_stage);
	bnx2x_init_block(bp, DMAE_BLOCK, init_stage);
	if (bnx2x_fan_failure_det_req(bp, bp->common.shmem_base,
				      bp->common.shmem2_base, port)) {
		/* board requires fan-failure detection: unmask the SPIO5
		 * attention input for this port */
		u32 reg_addr = (port ? MISC_REG_AEU_ENABLE1_FUNC_1_OUT_0 :
				       MISC_REG_AEU_ENABLE1_FUNC_0_OUT_0);
		val = REG_RD(bp, reg_addr);
		val |= AEU_INPUTS_ATTN_BITS_SPIO5;
		REG_WR(bp, reg_addr, val);
	}
	bnx2x__link_reset(bp);

	return 0;
}
5495
Eilon Greenstein34f80b02008-06-23 20:33:01 -07005496static void bnx2x_ilt_wr(struct bnx2x *bp, u32 index, dma_addr_t addr)
5497{
5498 int reg;
5499
Dmitry Kravkovf2e08992010-10-06 03:28:26 +00005500 if (CHIP_IS_E1(bp))
Eilon Greenstein34f80b02008-06-23 20:33:01 -07005501 reg = PXP2_REG_RQ_ONCHIP_AT + index*8;
Dmitry Kravkovf2e08992010-10-06 03:28:26 +00005502 else
5503 reg = PXP2_REG_RQ_ONCHIP_AT_B0 + index*8;
Eilon Greenstein34f80b02008-06-23 20:33:01 -07005504
5505 bnx2x_wb_wr(bp, reg, ONCHIP_ADDR1(addr), ONCHIP_ADDR2(addr));
5506}
5507
/* Clear the IGU status block @idu_sb_id.
 * Thin wrapper around bnx2x_igu_clear_sb_gen() selecting the PF (not VF)
 * variant via the boolean argument.
 */
static inline void bnx2x_igu_clear_sb(struct bnx2x *bp, u8 idu_sb_id)
{
	bnx2x_igu_clear_sb_gen(bp, idu_sb_id, true /*PF*/);
}
5512
5513static inline void bnx2x_clear_func_ilt(struct bnx2x *bp, u32 func)
5514{
5515 u32 i, base = FUNC_ILT_BASE(func);
5516 for (i = base; i < base + ILT_PER_FUNC; i++)
5517 bnx2x_ilt_wr(bp, i, 0);
5518}
5519
/* Per-function (PF) hardware init stage.
 *
 * Programs the function's CDU ILT lines, enables the function in the IGU
 * (E2), runs the FUNC0/FUNC1 init stage for all HW blocks, initializes the
 * interrupt controller (HC or IGU depending on bp->common.int_block) and
 * clears stale HC parity state on E1x before probing the PHY.
 * The block-init ordering follows the HW init sequence and must be kept.
 *
 * Returns 0 (no failure paths in this stage).
 */
static int bnx2x_init_hw_func(struct bnx2x *bp)
{
	int port = BP_PORT(bp);
	int func = BP_FUNC(bp);
	struct bnx2x_ilt *ilt = BP_ILT(bp);
	u16 cdu_ilt_start;
	u32 addr, val;
	u32 main_mem_base, main_mem_size, main_mem_prty_clr;
	int i, main_mem_width;

	DP(BNX2X_MSG_MCP, "starting func init func %d\n", func);

	/* set MSI reconfigure capability */
	if (bp->common.int_block == INT_BLOCK_HC) {
		addr = (port ? HC_REG_CONFIG_1 : HC_REG_CONFIG_0);
		val = REG_RD(bp, addr);
		val |= HC_CONFIG_0_REG_MSI_ATTN_EN_0;
		REG_WR(bp, addr, val);
	}

	/* point this function's CDU ILT lines at the pre-allocated context
	 * pages (one ILT line per context page) */
	ilt = BP_ILT(bp);
	cdu_ilt_start = ilt->clients[ILT_CLIENT_CDU].start;

	for (i = 0; i < L2_ILT_LINES(bp); i++) {
		ilt->lines[cdu_ilt_start + i].page =
			bp->context.vcxt + (ILT_PAGE_CIDS * i);
		ilt->lines[cdu_ilt_start + i].page_mapping =
			bp->context.cxt_mapping + (CDU_ILT_PAGE_SZ * i);
		/* cdu ilt pages are allocated manually so there's no need to
		set the size */
	}
	bnx2x_ilt_init_op(bp, INITOP_SET);

#ifdef BCM_CNIC
	bnx2x_src_init_t2(bp, bp->t2, bp->t2_mapping, SRC_CONN_NUM);

	/* T1 hash bits value determines the T1 number of entries */
	REG_WR(bp, SRC_REG_NUMBER_HASH_BITS0 + port*4, SRC_HASH_BITS);
#endif

#ifndef BCM_CNIC
	/* set NIC mode */
	REG_WR(bp, PRS_REG_NIC_MODE, 1);
#endif /* BCM_CNIC */

	if (CHIP_IS_E2(bp)) {
		u32 pf_conf = IGU_PF_CONF_FUNC_EN;

		/* Turn on a single ISR mode in IGU if driver is going to use
		 * INT#x or MSI
		 */
		if (!(bp->flags & USING_MSIX_FLAG))
			pf_conf |= IGU_PF_CONF_SINGLE_ISR_EN;
		/*
		 * Timers workaround bug: function init part.
		 * Need to wait 20msec after initializing ILT,
		 * needed to make sure there are no requests in
		 * one of the PXP internal queues with "old" ILT addresses
		 */
		msleep(20);
		/*
		 * Master enable - Due to WB DMAE writes performed before this
		 * register is re-initialized as part of the regular function
		 * init
		 */
		REG_WR(bp, PGLUE_B_REG_INTERNAL_PFID_ENABLE_MASTER, 1);
		/* Enable the function in IGU */
		REG_WR(bp, IGU_REG_PF_CONFIGURATION, pf_conf);
	}

	/* DMAE becomes usable from this point on */
	bp->dmae_ready = 1;

	bnx2x_init_block(bp, PGLUE_B_BLOCK, FUNC0_STAGE + func);

	if (CHIP_IS_E2(bp))
		REG_WR(bp, PGLUE_B_REG_WAS_ERROR_PF_7_0_CLR, func);

	bnx2x_init_block(bp, MISC_BLOCK, FUNC0_STAGE + func);
	bnx2x_init_block(bp, TCM_BLOCK, FUNC0_STAGE + func);
	bnx2x_init_block(bp, UCM_BLOCK, FUNC0_STAGE + func);
	bnx2x_init_block(bp, CCM_BLOCK, FUNC0_STAGE + func);
	bnx2x_init_block(bp, XCM_BLOCK, FUNC0_STAGE + func);
	bnx2x_init_block(bp, TSEM_BLOCK, FUNC0_STAGE + func);
	bnx2x_init_block(bp, USEM_BLOCK, FUNC0_STAGE + func);
	bnx2x_init_block(bp, CSEM_BLOCK, FUNC0_STAGE + func);
	bnx2x_init_block(bp, XSEM_BLOCK, FUNC0_STAGE + func);

	if (CHIP_IS_E2(bp)) {
		/* tell the storms which path (of the two-path E2) we are on */
		REG_WR(bp, BAR_XSTRORM_INTMEM + XSTORM_PATH_ID_OFFSET,
							BP_PATH(bp));
		REG_WR(bp, BAR_CSTRORM_INTMEM + CSTORM_PATH_ID_OFFSET,
							BP_PATH(bp));
	}

	if (CHIP_MODE_IS_4_PORT(bp))
		bnx2x_init_block(bp, XSEM_4PORT_BLOCK, FUNC0_STAGE + func);

	if (CHIP_IS_E2(bp))
		REG_WR(bp, QM_REG_PF_EN, 1);

	bnx2x_init_block(bp, QM_BLOCK, FUNC0_STAGE + func);

	if (CHIP_MODE_IS_4_PORT(bp))
		bnx2x_init_block(bp, QM_4PORT_BLOCK, FUNC0_STAGE + func);

	bnx2x_init_block(bp, TIMERS_BLOCK, FUNC0_STAGE + func);
	bnx2x_init_block(bp, DQ_BLOCK, FUNC0_STAGE + func);
	bnx2x_init_block(bp, BRB1_BLOCK, FUNC0_STAGE + func);
	bnx2x_init_block(bp, PRS_BLOCK, FUNC0_STAGE + func);
	bnx2x_init_block(bp, TSDM_BLOCK, FUNC0_STAGE + func);
	bnx2x_init_block(bp, CSDM_BLOCK, FUNC0_STAGE + func);
	bnx2x_init_block(bp, USDM_BLOCK, FUNC0_STAGE + func);
	bnx2x_init_block(bp, XSDM_BLOCK, FUNC0_STAGE + func);
	bnx2x_init_block(bp, UPB_BLOCK, FUNC0_STAGE + func);
	bnx2x_init_block(bp, XPB_BLOCK, FUNC0_STAGE + func);
	bnx2x_init_block(bp, PBF_BLOCK, FUNC0_STAGE + func);
	if (CHIP_IS_E2(bp))
		REG_WR(bp, PBF_REG_DISABLE_PF, 0);

	bnx2x_init_block(bp, CDU_BLOCK, FUNC0_STAGE + func);

	bnx2x_init_block(bp, CFC_BLOCK, FUNC0_STAGE + func);

	if (CHIP_IS_E2(bp))
		REG_WR(bp, CFC_REG_WEAK_ENABLE_PF, 1);

	if (IS_MF(bp)) {
		/* MF mode: enable the function in LLH and program its
		 * outer-vlan id */
		REG_WR(bp, NIG_REG_LLH0_FUNC_EN + port*8, 1);
		REG_WR(bp, NIG_REG_LLH0_FUNC_VLAN_ID + port*8, bp->mf_ov);
	}

	bnx2x_init_block(bp, MISC_AEU_BLOCK, FUNC0_STAGE + func);

	/* HC init per function */
	if (bp->common.int_block == INT_BLOCK_HC) {
		if (CHIP_IS_E1H(bp)) {
			REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_12 + func*4, 0);

			REG_WR(bp, HC_REG_LEADING_EDGE_0 + port*8, 0);
			REG_WR(bp, HC_REG_TRAILING_EDGE_0 + port*8, 0);
		}
		bnx2x_init_block(bp, HC_BLOCK, FUNC0_STAGE + func);

	} else {
		/* IGU-based interrupt handling (E1H backward-compatible or
		 * native E2 mode) */
		int num_segs, sb_idx, prod_offset;

		REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_12 + func*4, 0);

		if (CHIP_IS_E2(bp)) {
			REG_WR(bp, IGU_REG_LEADING_EDGE_LATCH, 0);
			REG_WR(bp, IGU_REG_TRAILING_EDGE_LATCH, 0);
		}

		bnx2x_init_block(bp, IGU_BLOCK, FUNC0_STAGE + func);

		if (CHIP_IS_E2(bp)) {
			int dsb_idx = 0;
			/**
			 * Producer memory:
			 * E2 mode: address 0-135 match to the mapping memory;
			 * 136 - PF0 default prod; 137 - PF1 default prod;
			 * 138 - PF2 default prod; 139 - PF3 default prod;
			 * 140 - PF0 attn prod;    141 - PF1 attn prod;
			 * 142 - PF2 attn prod;    143 - PF3 attn prod;
			 * 144-147 reserved.
			 *
			 * E1.5 mode - In backward compatible mode;
			 * for non default SB; each even line in the memory
			 * holds the U producer and each odd line hold
			 * the C producer. The first 128 producers are for
			 * NDSB (PF0 - 0-31; PF1 - 32-63 and so on). The last 20
			 * producers are for the DSB for each PF.
			 * Each PF has five segments: (the order inside each
			 * segment is PF0; PF1; PF2; PF3) - 128-131 U prods;
			 * 132-135 C prods; 136-139 X prods; 140-143 T prods;
			 * 144-147 attn prods;
			 */
			/* non-default-status-blocks */
			num_segs = CHIP_INT_MODE_IS_BC(bp) ?
				IGU_BC_NDSB_NUM_SEGS : IGU_NORM_NDSB_NUM_SEGS;
			for (sb_idx = 0; sb_idx < bp->igu_sb_cnt; sb_idx++) {
				prod_offset = (bp->igu_base_sb + sb_idx) *
					num_segs;

				/* zero all producer segments of this SB */
				for (i = 0; i < num_segs; i++) {
					addr = IGU_REG_PROD_CONS_MEMORY +
							(prod_offset + i) * 4;
					REG_WR(bp, addr, 0);
				}
				/* send consumer update with value 0 */
				bnx2x_ack_sb(bp, bp->igu_base_sb + sb_idx,
					     USTORM_ID, 0, IGU_INT_NOP, 1);
				bnx2x_igu_clear_sb(bp,
						   bp->igu_base_sb + sb_idx);
			}

			/* default-status-blocks */
			num_segs = CHIP_INT_MODE_IS_BC(bp) ?
				IGU_BC_DSB_NUM_SEGS : IGU_NORM_DSB_NUM_SEGS;

			if (CHIP_MODE_IS_4_PORT(bp))
				dsb_idx = BP_FUNC(bp);
			else
				dsb_idx = BP_E1HVN(bp);

			prod_offset = (CHIP_INT_MODE_IS_BC(bp) ?
				       IGU_BC_BASE_DSB_PROD + dsb_idx :
				       IGU_NORM_BASE_DSB_PROD + dsb_idx);

			for (i = 0; i < (num_segs * E1HVN_MAX);
			     i += E1HVN_MAX) {
				addr = IGU_REG_PROD_CONS_MEMORY +
							(prod_offset + i)*4;
				REG_WR(bp, addr, 0);
			}
			/* send consumer update with 0 */
			if (CHIP_INT_MODE_IS_BC(bp)) {
				bnx2x_ack_sb(bp, bp->igu_dsb_id,
					     USTORM_ID, 0, IGU_INT_NOP, 1);
				bnx2x_ack_sb(bp, bp->igu_dsb_id,
					     CSTORM_ID, 0, IGU_INT_NOP, 1);
				bnx2x_ack_sb(bp, bp->igu_dsb_id,
					     XSTORM_ID, 0, IGU_INT_NOP, 1);
				bnx2x_ack_sb(bp, bp->igu_dsb_id,
					     TSTORM_ID, 0, IGU_INT_NOP, 1);
				bnx2x_ack_sb(bp, bp->igu_dsb_id,
					     ATTENTION_ID, 0, IGU_INT_NOP, 1);
			} else {
				bnx2x_ack_sb(bp, bp->igu_dsb_id,
					     USTORM_ID, 0, IGU_INT_NOP, 1);
				bnx2x_ack_sb(bp, bp->igu_dsb_id,
					     ATTENTION_ID, 0, IGU_INT_NOP, 1);
			}
			bnx2x_igu_clear_sb(bp, bp->igu_dsb_id);

			/* !!! these should become driver const once
			   rf-tool supports split-68 const */
			REG_WR(bp, IGU_REG_SB_INT_BEFORE_MASK_LSB, 0);
			REG_WR(bp, IGU_REG_SB_INT_BEFORE_MASK_MSB, 0);
			REG_WR(bp, IGU_REG_SB_MASK_LSB, 0);
			REG_WR(bp, IGU_REG_SB_MASK_MSB, 0);
			REG_WR(bp, IGU_REG_PBA_STATUS_LSB, 0);
			REG_WR(bp, IGU_REG_PBA_STATUS_MSB, 0);
		}
	}

	/* Reset PCIE errors for debug */
	REG_WR(bp, 0x2114, 0xffffffff);
	REG_WR(bp, 0x2120, 0xffffffff);

	bnx2x_init_block(bp, EMAC0_BLOCK, FUNC0_STAGE + func);
	bnx2x_init_block(bp, EMAC1_BLOCK, FUNC0_STAGE + func);
	bnx2x_init_block(bp, DBU_BLOCK, FUNC0_STAGE + func);
	bnx2x_init_block(bp, DBG_BLOCK, FUNC0_STAGE + func);
	bnx2x_init_block(bp, MCP_BLOCK, FUNC0_STAGE + func);
	bnx2x_init_block(bp, DMAE_BLOCK, FUNC0_STAGE + func);

	if (CHIP_IS_E1x(bp)) {
		main_mem_size = HC_REG_MAIN_MEMORY_SIZE / 2; /*dwords*/
		main_mem_base = HC_REG_MAIN_MEMORY +
				BP_PORT(bp) * (main_mem_size * 4);
		main_mem_prty_clr = HC_REG_HC_PRTY_STS_CLR;
		main_mem_width = 8;

		/* read-to-clear the HC parity status before scrubbing */
		val = REG_RD(bp, main_mem_prty_clr);
		if (val)
			DP(BNX2X_MSG_MCP, "Hmmm... Parity errors in HC "
					  "block during "
					  "function init (0x%x)!\n", val);

		/* Clear "false" parity errors in MSI-X table */
		for (i = main_mem_base;
		     i < main_mem_base + main_mem_size * 4;
		     i += main_mem_width) {
			/* read-modify-write each row via DMAE to refresh
			 * its parity bits */
			bnx2x_read_dmae(bp, i, main_mem_width / 4);
			bnx2x_write_dmae(bp, bnx2x_sp_mapping(bp, wb_data),
					 i, main_mem_width / 4);
		}
		/* Clear HC parity attention */
		REG_RD(bp, main_mem_prty_clr);
	}

	bnx2x_phy_probe(&bp->link_params);

	return 0;
}
5806
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00005807int bnx2x_init_hw(struct bnx2x *bp, u32 load_code)
Eilon Greenstein34f80b02008-06-23 20:33:01 -07005808{
Dmitry Kravkov523224a2010-10-06 03:23:26 +00005809 int rc = 0;
Eilon Greenstein34f80b02008-06-23 20:33:01 -07005810
5811 DP(BNX2X_MSG_MCP, "function %d load_code %x\n",
Dmitry Kravkovf2e08992010-10-06 03:28:26 +00005812 BP_ABS_FUNC(bp), load_code);
Eilon Greenstein34f80b02008-06-23 20:33:01 -07005813
5814 bp->dmae_ready = 0;
Vladislav Zolotarov6e30dd42011-02-06 11:25:41 -08005815 spin_lock_init(&bp->dmae_lock);
Eilon Greenstein34f80b02008-06-23 20:33:01 -07005816
5817 switch (load_code) {
5818 case FW_MSG_CODE_DRV_LOAD_COMMON:
Dmitry Kravkovf2e08992010-10-06 03:28:26 +00005819 case FW_MSG_CODE_DRV_LOAD_COMMON_CHIP:
Dmitry Kravkov523224a2010-10-06 03:23:26 +00005820 rc = bnx2x_init_hw_common(bp, load_code);
Eilon Greenstein34f80b02008-06-23 20:33:01 -07005821 if (rc)
5822 goto init_hw_err;
5823 /* no break */
5824
5825 case FW_MSG_CODE_DRV_LOAD_PORT:
Dmitry Kravkov523224a2010-10-06 03:23:26 +00005826 rc = bnx2x_init_hw_port(bp);
Eilon Greenstein34f80b02008-06-23 20:33:01 -07005827 if (rc)
5828 goto init_hw_err;
5829 /* no break */
5830
5831 case FW_MSG_CODE_DRV_LOAD_FUNCTION:
Dmitry Kravkov523224a2010-10-06 03:23:26 +00005832 rc = bnx2x_init_hw_func(bp);
Eilon Greenstein34f80b02008-06-23 20:33:01 -07005833 if (rc)
5834 goto init_hw_err;
5835 break;
5836
5837 default:
5838 BNX2X_ERR("Unknown load_code (0x%x) from MCP\n", load_code);
5839 break;
5840 }
5841
5842 if (!BP_NOMCP(bp)) {
Dmitry Kravkovf2e08992010-10-06 03:28:26 +00005843 int mb_idx = BP_FW_MB_IDX(bp);
Eilon Greenstein34f80b02008-06-23 20:33:01 -07005844
5845 bp->fw_drv_pulse_wr_seq =
Dmitry Kravkovf2e08992010-10-06 03:28:26 +00005846 (SHMEM_RD(bp, func_mb[mb_idx].drv_pulse_mb) &
Eilon Greenstein34f80b02008-06-23 20:33:01 -07005847 DRV_PULSE_SEQ_MASK);
Eilon Greenstein6fe49bb2009-08-12 08:23:17 +00005848 DP(BNX2X_MSG_MCP, "drv_pulse 0x%x\n", bp->fw_drv_pulse_wr_seq);
5849 }
Eilon Greenstein34f80b02008-06-23 20:33:01 -07005850
Eilon Greenstein34f80b02008-06-23 20:33:01 -07005851init_hw_err:
5852 bnx2x_gunzip_end(bp);
5853
5854 return rc;
5855}
5856
/* Release all driver memory allocated by bnx2x_alloc_mem().
 *
 * Frees, in order: the gunzip scratch buffer, per-fastpath-queue memory,
 * the default/slowpath status blocks, the CDU context area, the ILT
 * tables and line array, the CNIC status block and T2 table (if BCM_CNIC),
 * the slowpath queue page, the event queue ring and the RSS indirection
 * table. Ordering mirrors the allocation path and should be preserved.
 */
void bnx2x_free_mem(struct bnx2x *bp)
{
	bnx2x_gunzip_end(bp);

	/* fastpath */
	bnx2x_free_fp_mem(bp);
	/* end of fastpath */

	BNX2X_PCI_FREE(bp->def_status_blk, bp->def_status_blk_mapping,
		       sizeof(struct host_sp_status_block));

	BNX2X_PCI_FREE(bp->slowpath, bp->slowpath_mapping,
		       sizeof(struct bnx2x_slowpath));

	BNX2X_PCI_FREE(bp->context.vcxt, bp->context.cxt_mapping,
		       bp->context.size);

	bnx2x_ilt_mem_op(bp, ILT_MEMOP_FREE);

	BNX2X_FREE(bp->ilt->lines);

#ifdef BCM_CNIC
	/* the CNIC status block layout differs between E2 and E1x */
	if (CHIP_IS_E2(bp))
		BNX2X_PCI_FREE(bp->cnic_sb.e2_sb, bp->cnic_sb_mapping,
			       sizeof(struct host_hc_status_block_e2));
	else
		BNX2X_PCI_FREE(bp->cnic_sb.e1x_sb, bp->cnic_sb_mapping,
			       sizeof(struct host_hc_status_block_e1x));

	BNX2X_PCI_FREE(bp->t2, bp->t2_mapping, SRC_T2_SZ);
#endif

	BNX2X_PCI_FREE(bp->spq, bp->spq_mapping, BCM_PAGE_SIZE);

	BNX2X_PCI_FREE(bp->eq_ring, bp->eq_mapping,
		       BCM_PAGE_SIZE * NUM_EQ_PAGES);

	BNX2X_FREE(bp->rx_indir_table);
}
5896
Dmitry Kravkovf2e08992010-10-06 03:28:26 +00005897
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00005898int bnx2x_alloc_mem(struct bnx2x *bp)
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02005899{
Dmitry Kravkovb3b83c32011-05-04 23:50:33 +00005900 if (bnx2x_gunzip_init(bp))
5901 return -ENOMEM;
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02005902
Dmitry Kravkov523224a2010-10-06 03:23:26 +00005903#ifdef BCM_CNIC
Dmitry Kravkovf2e08992010-10-06 03:28:26 +00005904 if (CHIP_IS_E2(bp))
5905 BNX2X_PCI_ALLOC(bp->cnic_sb.e2_sb, &bp->cnic_sb_mapping,
5906 sizeof(struct host_hc_status_block_e2));
5907 else
5908 BNX2X_PCI_ALLOC(bp->cnic_sb.e1x_sb, &bp->cnic_sb_mapping,
5909 sizeof(struct host_hc_status_block_e1x));
Dmitry Kravkov523224a2010-10-06 03:23:26 +00005910
5911 /* allocate searcher T2 table */
5912 BNX2X_PCI_ALLOC(bp->t2, &bp->t2_mapping, SRC_T2_SZ);
5913#endif
5914
5915
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02005916 BNX2X_PCI_ALLOC(bp->def_status_blk, &bp->def_status_blk_mapping,
Dmitry Kravkov523224a2010-10-06 03:23:26 +00005917 sizeof(struct host_sp_status_block));
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02005918
5919 BNX2X_PCI_ALLOC(bp->slowpath, &bp->slowpath_mapping,
5920 sizeof(struct bnx2x_slowpath));
5921
Dmitry Kravkov523224a2010-10-06 03:23:26 +00005922 bp->context.size = sizeof(union cdu_context) * bp->l2_cid_count;
Dmitry Kravkovf85582f2010-10-06 03:34:21 +00005923
Dmitry Kravkov523224a2010-10-06 03:23:26 +00005924 BNX2X_PCI_ALLOC(bp->context.vcxt, &bp->context.cxt_mapping,
5925 bp->context.size);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02005926
Dmitry Kravkov523224a2010-10-06 03:23:26 +00005927 BNX2X_ALLOC(bp->ilt->lines, sizeof(struct ilt_line) * ILT_MAX_LINES);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02005928
Dmitry Kravkov523224a2010-10-06 03:23:26 +00005929 if (bnx2x_ilt_mem_op(bp, ILT_MEMOP_ALLOC))
5930 goto alloc_mem_err;
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02005931
5932 /* Slow path ring */
5933 BNX2X_PCI_ALLOC(bp->spq, &bp->spq_mapping, BCM_PAGE_SIZE);
5934
Dmitry Kravkov523224a2010-10-06 03:23:26 +00005935 /* EQ */
5936 BNX2X_PCI_ALLOC(bp->eq_ring, &bp->eq_mapping,
5937 BCM_PAGE_SIZE * NUM_EQ_PAGES);
Tom Herbertab532cf2011-02-16 10:27:02 +00005938
5939 BNX2X_ALLOC(bp->rx_indir_table, sizeof(bp->rx_indir_table[0]) *
5940 TSTORM_INDIRECTION_TABLE_SIZE);
Dmitry Kravkovb3b83c32011-05-04 23:50:33 +00005941
5942 /* fastpath */
5943 /* need to be done at the end, since it's self adjusting to amount
5944 * of memory available for RSS queues
5945 */
5946 if (bnx2x_alloc_fp_mem(bp))
5947 goto alloc_mem_err;
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02005948 return 0;
5949
5950alloc_mem_err:
5951 bnx2x_free_mem(bp);
5952 return -ENOMEM;
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02005953}
5954
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02005955/*
5956 * Init service functions
5957 */
stephen hemminger8d962862010-10-21 07:50:56 +00005958static int bnx2x_wait_ramrod(struct bnx2x *bp, int state, int idx,
5959 int *state_p, int flags);
5960
Dmitry Kravkov523224a2010-10-06 03:23:26 +00005961int bnx2x_func_start(struct bnx2x *bp)
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02005962{
Dmitry Kravkov523224a2010-10-06 03:23:26 +00005963 bnx2x_sp_post(bp, RAMROD_CMD_ID_COMMON_FUNCTION_START, 0, 0, 0, 1);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02005964
Dmitry Kravkov523224a2010-10-06 03:23:26 +00005965 /* Wait for completion */
5966 return bnx2x_wait_ramrod(bp, BNX2X_STATE_FUNC_STARTED, 0, &(bp->state),
5967 WAIT_RAMROD_COMMON);
5968}
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02005969
stephen hemminger8d962862010-10-21 07:50:56 +00005970static int bnx2x_func_stop(struct bnx2x *bp)
Dmitry Kravkov523224a2010-10-06 03:23:26 +00005971{
5972 bnx2x_sp_post(bp, RAMROD_CMD_ID_COMMON_FUNCTION_STOP, 0, 0, 0, 1);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02005973
Dmitry Kravkov523224a2010-10-06 03:23:26 +00005974 /* Wait for completion */
5975 return bnx2x_wait_ramrod(bp, BNX2X_STATE_CLOSING_WAIT4_UNLOAD,
5976 0, &(bp->state), WAIT_RAMROD_COMMON);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02005977}
5978
Michael Chane665bfd2009-10-10 13:46:54 +00005979/**
Dmitry Kravkove8920672011-05-04 23:52:40 +00005980 * bnx2x_set_mac_addr_gen - set a MAC in a CAM for a few L2 Clients for E1x chips
Michael Chane665bfd2009-10-10 13:46:54 +00005981 *
Dmitry Kravkove8920672011-05-04 23:52:40 +00005982 * @bp: driver handle
5983 * @set: set or clear an entry (1 or 0)
5984 * @mac: pointer to a buffer containing a MAC
5985 * @cl_bit_vec: bit vector of clients to register a MAC for
5986 * @cam_offset: offset in a CAM to use
5987 * @is_bcast: is the set MAC a broadcast address (for E1 only)
Michael Chane665bfd2009-10-10 13:46:54 +00005988 */
Joe Perches215faf92010-12-21 02:16:10 -08005989static void bnx2x_set_mac_addr_gen(struct bnx2x *bp, int set, const u8 *mac,
Dmitry Kravkovf85582f2010-10-06 03:34:21 +00005990 u32 cl_bit_vec, u8 cam_offset,
5991 u8 is_bcast)
Eilon Greenstein34f80b02008-06-23 20:33:01 -07005992{
Dmitry Kravkov523224a2010-10-06 03:23:26 +00005993 struct mac_configuration_cmd *config =
5994 (struct mac_configuration_cmd *)bnx2x_sp(bp, mac_config);
5995 int ramrod_flags = WAIT_RAMROD_COMMON;
5996
5997 bp->set_mac_pending = 1;
Dmitry Kravkov523224a2010-10-06 03:23:26 +00005998
Eilon Greenstein8d9c5f32009-02-15 23:24:08 -08005999 config->hdr.length = 1;
Michael Chane665bfd2009-10-10 13:46:54 +00006000 config->hdr.offset = cam_offset;
6001 config->hdr.client_id = 0xff;
Vladislav Zolotarov6e30dd42011-02-06 11:25:41 -08006002 /* Mark the single MAC configuration ramrod as opposed to a
6003 * UC/MC list configuration).
6004 */
6005 config->hdr.echo = 1;
Eilon Greenstein34f80b02008-06-23 20:33:01 -07006006
6007 /* primary MAC */
6008 config->config_table[0].msb_mac_addr =
Michael Chane665bfd2009-10-10 13:46:54 +00006009 swab16(*(u16 *)&mac[0]);
Eilon Greenstein34f80b02008-06-23 20:33:01 -07006010 config->config_table[0].middle_mac_addr =
Michael Chane665bfd2009-10-10 13:46:54 +00006011 swab16(*(u16 *)&mac[2]);
Eilon Greenstein34f80b02008-06-23 20:33:01 -07006012 config->config_table[0].lsb_mac_addr =
Michael Chane665bfd2009-10-10 13:46:54 +00006013 swab16(*(u16 *)&mac[4]);
Eilon Greensteinca003922009-08-12 22:53:28 -07006014 config->config_table[0].clients_bit_vector =
Michael Chane665bfd2009-10-10 13:46:54 +00006015 cpu_to_le32(cl_bit_vec);
Eilon Greenstein34f80b02008-06-23 20:33:01 -07006016 config->config_table[0].vlan_id = 0;
Dmitry Kravkov523224a2010-10-06 03:23:26 +00006017 config->config_table[0].pf_id = BP_FUNC(bp);
Yitchak Gertner3101c2b2008-08-13 15:52:28 -07006018 if (set)
Dmitry Kravkov523224a2010-10-06 03:23:26 +00006019 SET_FLAG(config->config_table[0].flags,
6020 MAC_CONFIGURATION_ENTRY_ACTION_TYPE,
6021 T_ETH_MAC_COMMAND_SET);
Yitchak Gertner3101c2b2008-08-13 15:52:28 -07006022 else
Dmitry Kravkov523224a2010-10-06 03:23:26 +00006023 SET_FLAG(config->config_table[0].flags,
6024 MAC_CONFIGURATION_ENTRY_ACTION_TYPE,
6025 T_ETH_MAC_COMMAND_INVALIDATE);
Eilon Greenstein34f80b02008-06-23 20:33:01 -07006026
Dmitry Kravkov523224a2010-10-06 03:23:26 +00006027 if (is_bcast)
6028 SET_FLAG(config->config_table[0].flags,
6029 MAC_CONFIGURATION_ENTRY_BROADCAST, 1);
6030
6031 DP(NETIF_MSG_IFUP, "%s MAC (%04x:%04x:%04x) PF_ID %d CLID mask %d\n",
Yitchak Gertner3101c2b2008-08-13 15:52:28 -07006032 (set ? "setting" : "clearing"),
Eilon Greenstein34f80b02008-06-23 20:33:01 -07006033 config->config_table[0].msb_mac_addr,
6034 config->config_table[0].middle_mac_addr,
Dmitry Kravkov523224a2010-10-06 03:23:26 +00006035 config->config_table[0].lsb_mac_addr, BP_FUNC(bp), cl_bit_vec);
Eilon Greenstein34f80b02008-06-23 20:33:01 -07006036
Vladislav Zolotarov6e30dd42011-02-06 11:25:41 -08006037 mb();
6038
Dmitry Kravkov523224a2010-10-06 03:23:26 +00006039 bnx2x_sp_post(bp, RAMROD_CMD_ID_COMMON_SET_MAC, 0,
Eilon Greenstein34f80b02008-06-23 20:33:01 -07006040 U64_HI(bnx2x_sp_mapping(bp, mac_config)),
Dmitry Kravkov523224a2010-10-06 03:23:26 +00006041 U64_LO(bnx2x_sp_mapping(bp, mac_config)), 1);
6042
6043 /* Wait for a completion */
6044 bnx2x_wait_ramrod(bp, 0, 0, &bp->set_mac_pending, ramrod_flags);
Eilon Greenstein34f80b02008-06-23 20:33:01 -07006045}
6046
stephen hemminger8d962862010-10-21 07:50:56 +00006047static int bnx2x_wait_ramrod(struct bnx2x *bp, int state, int idx,
6048 int *state_p, int flags)
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02006049{
6050 /* can take a while if any port is running */
Eilon Greenstein8b3a0f02009-02-12 08:37:23 +00006051 int cnt = 5000;
Dmitry Kravkov523224a2010-10-06 03:23:26 +00006052 u8 poll = flags & WAIT_RAMROD_POLL;
6053 u8 common = flags & WAIT_RAMROD_COMMON;
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02006054
Eliezer Tamirc14423f2008-02-28 11:49:42 -08006055 DP(NETIF_MSG_IFUP, "%s for state to become %x on IDX [%d]\n",
6056 poll ? "polling" : "waiting", state, idx);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02006057
6058 might_sleep();
Eilon Greenstein34f80b02008-06-23 20:33:01 -07006059 while (cnt--) {
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02006060 if (poll) {
Dmitry Kravkov523224a2010-10-06 03:23:26 +00006061 if (common)
6062 bnx2x_eq_int(bp);
6063 else {
6064 bnx2x_rx_int(bp->fp, 10);
6065 /* if index is different from 0
6066 * the reply for some commands will
6067 * be on the non default queue
6068 */
6069 if (idx)
6070 bnx2x_rx_int(&bp->fp[idx], 10);
6071 }
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02006072 }
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02006073
Yitchak Gertner3101c2b2008-08-13 15:52:28 -07006074 mb(); /* state is changed by bnx2x_sp_event() */
Eilon Greenstein8b3a0f02009-02-12 08:37:23 +00006075 if (*state_p == state) {
6076#ifdef BNX2X_STOP_ON_ERROR
6077 DP(NETIF_MSG_IFUP, "exit (cnt %d)\n", 5000 - cnt);
6078#endif
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02006079 return 0;
Eilon Greenstein8b3a0f02009-02-12 08:37:23 +00006080 }
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02006081
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02006082 msleep(1);
Eilon Greensteine3553b22009-08-12 08:23:31 +00006083
6084 if (bp->panic)
6085 return -EIO;
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02006086 }
6087
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02006088 /* timeout! */
Eliezer Tamir49d66772008-02-28 11:53:13 -08006089 BNX2X_ERR("timeout %s for state %x on IDX [%d]\n",
6090 poll ? "polling" : "waiting", state, idx);
Eilon Greenstein34f80b02008-06-23 20:33:01 -07006091#ifdef BNX2X_STOP_ON_ERROR
6092 bnx2x_panic();
6093#endif
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02006094
Eliezer Tamir49d66772008-02-28 11:53:13 -08006095 return -EBUSY;
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02006096}
6097
stephen hemminger8d962862010-10-21 07:50:56 +00006098static u8 bnx2x_e1h_cam_offset(struct bnx2x *bp, u8 rel_offset)
Michael Chane665bfd2009-10-10 13:46:54 +00006099{
Dmitry Kravkovf2e08992010-10-06 03:28:26 +00006100 if (CHIP_IS_E1H(bp))
6101 return E1H_FUNC_MAX * rel_offset + BP_FUNC(bp);
6102 else if (CHIP_MODE_IS_4_PORT(bp))
Vladislav Zolotarov6e30dd42011-02-06 11:25:41 -08006103 return E2_FUNC_MAX * rel_offset + BP_FUNC(bp);
Dmitry Kravkovf2e08992010-10-06 03:28:26 +00006104 else
Vladislav Zolotarov6e30dd42011-02-06 11:25:41 -08006105 return E2_FUNC_MAX * rel_offset + BP_VN(bp);
Michael Chane665bfd2009-10-10 13:46:54 +00006106}
6107
Dmitry Kravkov0793f83f2010-12-01 12:39:28 -08006108/**
6109 * LLH CAM line allocations: currently only iSCSI and ETH macs are
6110 * relevant. In addition, current implementation is tuned for a
6111 * single ETH MAC.
Dmitry Kravkov0793f83f2010-12-01 12:39:28 -08006112 */
6113enum {
6114 LLH_CAM_ISCSI_ETH_LINE = 0,
6115 LLH_CAM_ETH_LINE,
6116 LLH_CAM_MAX_PF_LINE = NIG_REG_LLH1_FUNC_MEM_SIZE
6117};
6118
6119static void bnx2x_set_mac_in_nig(struct bnx2x *bp,
6120 int set,
6121 unsigned char *dev_addr,
6122 int index)
6123{
6124 u32 wb_data[2];
6125 u32 mem_offset, ena_offset, mem_index;
6126 /**
6127 * indexes mapping:
6128 * 0..7 - goes to MEM
6129 * 8..15 - goes to MEM2
6130 */
6131
6132 if (!IS_MF_SI(bp) || index > LLH_CAM_MAX_PF_LINE)
6133 return;
6134
6135 /* calculate memory start offset according to the mapping
6136 * and index in the memory */
6137 if (index < NIG_LLH_FUNC_MEM_MAX_OFFSET) {
6138 mem_offset = BP_PORT(bp) ? NIG_REG_LLH1_FUNC_MEM :
6139 NIG_REG_LLH0_FUNC_MEM;
6140 ena_offset = BP_PORT(bp) ? NIG_REG_LLH1_FUNC_MEM_ENABLE :
6141 NIG_REG_LLH0_FUNC_MEM_ENABLE;
6142 mem_index = index;
6143 } else {
6144 mem_offset = BP_PORT(bp) ? NIG_REG_P1_LLH_FUNC_MEM2 :
6145 NIG_REG_P0_LLH_FUNC_MEM2;
6146 ena_offset = BP_PORT(bp) ? NIG_REG_P1_LLH_FUNC_MEM2_ENABLE :
6147 NIG_REG_P0_LLH_FUNC_MEM2_ENABLE;
6148 mem_index = index - NIG_LLH_FUNC_MEM_MAX_OFFSET;
6149 }
6150
6151 if (set) {
6152 /* LLH_FUNC_MEM is a u64 WB register */
6153 mem_offset += 8*mem_index;
6154
6155 wb_data[0] = ((dev_addr[2] << 24) | (dev_addr[3] << 16) |
6156 (dev_addr[4] << 8) | dev_addr[5]);
6157 wb_data[1] = ((dev_addr[0] << 8) | dev_addr[1]);
6158
6159 REG_WR_DMAE(bp, mem_offset, wb_data, 2);
6160 }
6161
6162 /* enable/disable the entry */
6163 REG_WR(bp, ena_offset + 4*mem_index, set);
6164
6165}
6166
Dmitry Kravkov523224a2010-10-06 03:23:26 +00006167void bnx2x_set_eth_mac(struct bnx2x *bp, int set)
Michael Chane665bfd2009-10-10 13:46:54 +00006168{
Dmitry Kravkov523224a2010-10-06 03:23:26 +00006169 u8 cam_offset = (CHIP_IS_E1(bp) ? (BP_PORT(bp) ? 32 : 0) :
6170 bnx2x_e1h_cam_offset(bp, CAM_ETH_LINE));
6171
6172 /* networking MAC */
6173 bnx2x_set_mac_addr_gen(bp, set, bp->dev->dev_addr,
6174 (1 << bp->fp->cl_id), cam_offset , 0);
6175
Dmitry Kravkov0793f83f2010-12-01 12:39:28 -08006176 bnx2x_set_mac_in_nig(bp, set, bp->dev->dev_addr, LLH_CAM_ETH_LINE);
6177
Dmitry Kravkov523224a2010-10-06 03:23:26 +00006178 if (CHIP_IS_E1(bp)) {
6179 /* broadcast MAC */
Joe Perches215faf92010-12-21 02:16:10 -08006180 static const u8 bcast[ETH_ALEN] = {
6181 0xff, 0xff, 0xff, 0xff, 0xff, 0xff
6182 };
Dmitry Kravkov523224a2010-10-06 03:23:26 +00006183 bnx2x_set_mac_addr_gen(bp, set, bcast, 0, cam_offset + 1, 1);
6184 }
6185}
Vladislav Zolotarov6e30dd42011-02-06 11:25:41 -08006186
6187static inline u8 bnx2x_e1_cam_mc_offset(struct bnx2x *bp)
6188{
6189 return CHIP_REV_IS_SLOW(bp) ?
6190 (BNX2X_MAX_EMUL_MULTI * (1 + BP_PORT(bp))) :
6191 (BNX2X_MAX_MULTICAST * (1 + BP_PORT(bp)));
6192}
6193
6194/* set mc list, do not wait as wait implies sleep and
6195 * set_rx_mode can be invoked from non-sleepable context.
6196 *
6197 * Instead we use the same ramrod data buffer each time we need
6198 * to configure a list of addresses, and use the fact that the
6199 * list of MACs is changed in an incremental way and that the
6200 * function is called under the netif_addr_lock. A temporary
6201 * inconsistent CAM configuration (possible in case of a very fast
6202 * sequence of add/del/add on the host side) will shortly be
6203 * restored by the handler of the last ramrod.
6204 */
6205static int bnx2x_set_e1_mc_list(struct bnx2x *bp)
Dmitry Kravkov523224a2010-10-06 03:23:26 +00006206{
6207 int i = 0, old;
6208 struct net_device *dev = bp->dev;
Vladislav Zolotarov6e30dd42011-02-06 11:25:41 -08006209 u8 offset = bnx2x_e1_cam_mc_offset(bp);
Dmitry Kravkov523224a2010-10-06 03:23:26 +00006210 struct netdev_hw_addr *ha;
6211 struct mac_configuration_cmd *config_cmd = bnx2x_sp(bp, mcast_config);
6212 dma_addr_t config_cmd_map = bnx2x_sp_mapping(bp, mcast_config);
6213
Vladislav Zolotarov6e30dd42011-02-06 11:25:41 -08006214 if (netdev_mc_count(dev) > BNX2X_MAX_MULTICAST)
6215 return -EINVAL;
6216
Dmitry Kravkov523224a2010-10-06 03:23:26 +00006217 netdev_for_each_mc_addr(ha, dev) {
6218 /* copy mac */
6219 config_cmd->config_table[i].msb_mac_addr =
6220 swab16(*(u16 *)&bnx2x_mc_addr(ha)[0]);
6221 config_cmd->config_table[i].middle_mac_addr =
6222 swab16(*(u16 *)&bnx2x_mc_addr(ha)[2]);
6223 config_cmd->config_table[i].lsb_mac_addr =
6224 swab16(*(u16 *)&bnx2x_mc_addr(ha)[4]);
6225
6226 config_cmd->config_table[i].vlan_id = 0;
6227 config_cmd->config_table[i].pf_id = BP_FUNC(bp);
6228 config_cmd->config_table[i].clients_bit_vector =
6229 cpu_to_le32(1 << BP_L_ID(bp));
6230
6231 SET_FLAG(config_cmd->config_table[i].flags,
6232 MAC_CONFIGURATION_ENTRY_ACTION_TYPE,
6233 T_ETH_MAC_COMMAND_SET);
6234
6235 DP(NETIF_MSG_IFUP,
6236 "setting MCAST[%d] (%04x:%04x:%04x)\n", i,
6237 config_cmd->config_table[i].msb_mac_addr,
6238 config_cmd->config_table[i].middle_mac_addr,
6239 config_cmd->config_table[i].lsb_mac_addr);
6240 i++;
6241 }
6242 old = config_cmd->hdr.length;
6243 if (old > i) {
6244 for (; i < old; i++) {
6245 if (CAM_IS_INVALID(config_cmd->
6246 config_table[i])) {
6247 /* already invalidated */
6248 break;
6249 }
6250 /* invalidate */
6251 SET_FLAG(config_cmd->config_table[i].flags,
6252 MAC_CONFIGURATION_ENTRY_ACTION_TYPE,
6253 T_ETH_MAC_COMMAND_INVALIDATE);
6254 }
6255 }
6256
Vladislav Zolotarov6e30dd42011-02-06 11:25:41 -08006257 wmb();
6258
Dmitry Kravkov523224a2010-10-06 03:23:26 +00006259 config_cmd->hdr.length = i;
6260 config_cmd->hdr.offset = offset;
6261 config_cmd->hdr.client_id = 0xff;
Vladislav Zolotarov6e30dd42011-02-06 11:25:41 -08006262 /* Mark that this ramrod doesn't use bp->set_mac_pending for
6263 * synchronization.
6264 */
6265 config_cmd->hdr.echo = 0;
Dmitry Kravkov523224a2010-10-06 03:23:26 +00006266
Vladislav Zolotarov6e30dd42011-02-06 11:25:41 -08006267 mb();
Michael Chane665bfd2009-10-10 13:46:54 +00006268
Vladislav Zolotarov6e30dd42011-02-06 11:25:41 -08006269 return bnx2x_sp_post(bp, RAMROD_CMD_ID_COMMON_SET_MAC, 0,
Dmitry Kravkov523224a2010-10-06 03:23:26 +00006270 U64_HI(config_cmd_map), U64_LO(config_cmd_map), 1);
6271}
Vladislav Zolotarov6e30dd42011-02-06 11:25:41 -08006272
6273void bnx2x_invalidate_e1_mc_list(struct bnx2x *bp)
Dmitry Kravkov523224a2010-10-06 03:23:26 +00006274{
6275 int i;
6276 struct mac_configuration_cmd *config_cmd = bnx2x_sp(bp, mcast_config);
6277 dma_addr_t config_cmd_map = bnx2x_sp_mapping(bp, mcast_config);
6278 int ramrod_flags = WAIT_RAMROD_COMMON;
Vladislav Zolotarov6e30dd42011-02-06 11:25:41 -08006279 u8 offset = bnx2x_e1_cam_mc_offset(bp);
Dmitry Kravkov523224a2010-10-06 03:23:26 +00006280
Vladislav Zolotarov6e30dd42011-02-06 11:25:41 -08006281 for (i = 0; i < BNX2X_MAX_MULTICAST; i++)
Dmitry Kravkov523224a2010-10-06 03:23:26 +00006282 SET_FLAG(config_cmd->config_table[i].flags,
6283 MAC_CONFIGURATION_ENTRY_ACTION_TYPE,
6284 T_ETH_MAC_COMMAND_INVALIDATE);
6285
Vladislav Zolotarov6e30dd42011-02-06 11:25:41 -08006286 wmb();
6287
6288 config_cmd->hdr.length = BNX2X_MAX_MULTICAST;
6289 config_cmd->hdr.offset = offset;
6290 config_cmd->hdr.client_id = 0xff;
6291 /* We'll wait for a completion this time... */
6292 config_cmd->hdr.echo = 1;
6293
6294 bp->set_mac_pending = 1;
6295
6296 mb();
6297
Dmitry Kravkov523224a2010-10-06 03:23:26 +00006298 bnx2x_sp_post(bp, RAMROD_CMD_ID_COMMON_SET_MAC, 0,
6299 U64_HI(config_cmd_map), U64_LO(config_cmd_map), 1);
Michael Chane665bfd2009-10-10 13:46:54 +00006300
6301 /* Wait for a completion */
Dmitry Kravkov523224a2010-10-06 03:23:26 +00006302 bnx2x_wait_ramrod(bp, 0, 0, &bp->set_mac_pending,
6303 ramrod_flags);
6304
Michael Chane665bfd2009-10-10 13:46:54 +00006305}
6306
Vladislav Zolotarov6e30dd42011-02-06 11:25:41 -08006307/* Accept one or more multicasts */
6308static int bnx2x_set_e1h_mc_list(struct bnx2x *bp)
6309{
6310 struct net_device *dev = bp->dev;
6311 struct netdev_hw_addr *ha;
6312 u32 mc_filter[MC_HASH_SIZE];
6313 u32 crc, bit, regidx;
6314 int i;
6315
6316 memset(mc_filter, 0, 4 * MC_HASH_SIZE);
6317
6318 netdev_for_each_mc_addr(ha, dev) {
6319 DP(NETIF_MSG_IFUP, "Adding mcast MAC: %pM\n",
6320 bnx2x_mc_addr(ha));
6321
6322 crc = crc32c_le(0, bnx2x_mc_addr(ha),
6323 ETH_ALEN);
6324 bit = (crc >> 24) & 0xff;
6325 regidx = bit >> 5;
6326 bit &= 0x1f;
6327 mc_filter[regidx] |= (1 << bit);
6328 }
6329
6330 for (i = 0; i < MC_HASH_SIZE; i++)
6331 REG_WR(bp, MC_HASH_OFFSET(bp, i),
6332 mc_filter[i]);
6333
6334 return 0;
6335}
6336
6337void bnx2x_invalidate_e1h_mc_list(struct bnx2x *bp)
6338{
6339 int i;
6340
6341 for (i = 0; i < MC_HASH_SIZE; i++)
6342 REG_WR(bp, MC_HASH_OFFSET(bp, i), 0);
6343}
6344
Michael Chan993ac7b2009-10-10 13:46:56 +00006345#ifdef BCM_CNIC
6346/**
Dmitry Kravkove8920672011-05-04 23:52:40 +00006347 * bnx2x_set_iscsi_eth_mac_addr - set iSCSI MAC(s).
Michael Chan993ac7b2009-10-10 13:46:56 +00006348 *
Dmitry Kravkove8920672011-05-04 23:52:40 +00006349 * @bp: driver handle
6350 * @set: set or clear the CAM entry
Michael Chan993ac7b2009-10-10 13:46:56 +00006351 *
Dmitry Kravkove8920672011-05-04 23:52:40 +00006352 * This function will wait until the ramdord completion returns.
6353 * Return 0 if success, -ENODEV if ramrod doesn't return.
Michael Chan993ac7b2009-10-10 13:46:56 +00006354 */
stephen hemminger8d962862010-10-21 07:50:56 +00006355static int bnx2x_set_iscsi_eth_mac_addr(struct bnx2x *bp, int set)
Michael Chan993ac7b2009-10-10 13:46:56 +00006356{
Dmitry Kravkov523224a2010-10-06 03:23:26 +00006357 u8 cam_offset = (CHIP_IS_E1(bp) ? ((BP_PORT(bp) ? 32 : 0) + 2) :
6358 bnx2x_e1h_cam_offset(bp, CAM_ISCSI_ETH_LINE));
Vladislav Zolotarovec6ba942010-12-13 05:44:01 +00006359 u32 iscsi_l2_cl_id = BNX2X_ISCSI_ETH_CL_ID +
6360 BP_E1HVN(bp) * NONE_ETH_CONTEXT_USE;
Dmitry Kravkov523224a2010-10-06 03:23:26 +00006361 u32 cl_bit_vec = (1 << iscsi_l2_cl_id);
Vladislav Zolotarov2ba45142011-01-31 14:39:17 +00006362 u8 *iscsi_mac = bp->cnic_eth_dev.iscsi_mac;
Michael Chan993ac7b2009-10-10 13:46:56 +00006363
6364 /* Send a SET_MAC ramrod */
Vladislav Zolotarov2ba45142011-01-31 14:39:17 +00006365 bnx2x_set_mac_addr_gen(bp, set, iscsi_mac, cl_bit_vec,
Dmitry Kravkov523224a2010-10-06 03:23:26 +00006366 cam_offset, 0);
Dmitry Kravkov0793f83f2010-12-01 12:39:28 -08006367
Vladislav Zolotarov2ba45142011-01-31 14:39:17 +00006368 bnx2x_set_mac_in_nig(bp, set, iscsi_mac, LLH_CAM_ISCSI_ETH_LINE);
Vladislav Zolotarovec6ba942010-12-13 05:44:01 +00006369
6370 return 0;
6371}
6372
6373/**
Dmitry Kravkove8920672011-05-04 23:52:40 +00006374 * bnx2x_set_fip_eth_mac_addr - set FCoE L2 MAC(s)
Vladislav Zolotarovec6ba942010-12-13 05:44:01 +00006375 *
Dmitry Kravkove8920672011-05-04 23:52:40 +00006376 * @bp: driver handle
6377 * @set: set or clear the CAM entry
Vladislav Zolotarovec6ba942010-12-13 05:44:01 +00006378 *
Dmitry Kravkove8920672011-05-04 23:52:40 +00006379 * This function will wait until the ramrod completion returns.
6380 * Returns 0 if success, -ENODEV if ramrod doesn't return.
Vladislav Zolotarovec6ba942010-12-13 05:44:01 +00006381 */
6382int bnx2x_set_fip_eth_mac_addr(struct bnx2x *bp, int set)
6383{
6384 u32 cl_bit_vec = (1 << bnx2x_fcoe(bp, cl_id));
6385 /**
6386 * CAM allocation for E1H
6387 * eth unicasts: by func number
6388 * iscsi: by func number
6389 * fip unicast: by func number
6390 * fip multicast: by func number
6391 */
6392 bnx2x_set_mac_addr_gen(bp, set, bp->fip_mac,
6393 cl_bit_vec, bnx2x_e1h_cam_offset(bp, CAM_FIP_ETH_LINE), 0);
6394
6395 return 0;
6396}
6397
6398int bnx2x_set_all_enode_macs(struct bnx2x *bp, int set)
6399{
6400 u32 cl_bit_vec = (1 << bnx2x_fcoe(bp, cl_id));
6401
6402 /**
6403 * CAM allocation for E1H
6404 * eth unicasts: by func number
6405 * iscsi: by func number
6406 * fip unicast: by func number
6407 * fip multicast: by func number
6408 */
6409 bnx2x_set_mac_addr_gen(bp, set, ALL_ENODE_MACS, cl_bit_vec,
6410 bnx2x_e1h_cam_offset(bp, CAM_FIP_MCAST_LINE), 0);
6411
Michael Chan993ac7b2009-10-10 13:46:56 +00006412 return 0;
6413}
6414#endif
6415
Dmitry Kravkov523224a2010-10-06 03:23:26 +00006416static void bnx2x_fill_cl_init_data(struct bnx2x *bp,
6417 struct bnx2x_client_init_params *params,
6418 u8 activate,
6419 struct client_init_ramrod_data *data)
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02006420{
Dmitry Kravkov523224a2010-10-06 03:23:26 +00006421 /* Clear the buffer */
6422 memset(data, 0, sizeof(*data));
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02006423
Dmitry Kravkov523224a2010-10-06 03:23:26 +00006424 /* general */
6425 data->general.client_id = params->rxq_params.cl_id;
6426 data->general.statistics_counter_id = params->rxq_params.stat_id;
6427 data->general.statistics_en_flg =
6428 (params->rxq_params.flags & QUEUE_FLG_STATS) ? 1 : 0;
Vladislav Zolotarovec6ba942010-12-13 05:44:01 +00006429 data->general.is_fcoe_flg =
6430 (params->ramrod_params.flags & CLIENT_IS_FCOE) ? 1 : 0;
Dmitry Kravkov523224a2010-10-06 03:23:26 +00006431 data->general.activate_flg = activate;
6432 data->general.sp_client_id = params->rxq_params.spcl_id;
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02006433
Dmitry Kravkov523224a2010-10-06 03:23:26 +00006434 /* Rx data */
6435 data->rx.tpa_en_flg =
6436 (params->rxq_params.flags & QUEUE_FLG_TPA) ? 1 : 0;
6437 data->rx.vmqueue_mode_en_flg = 0;
6438 data->rx.cache_line_alignment_log_size =
6439 params->rxq_params.cache_line_log;
6440 data->rx.enable_dynamic_hc =
6441 (params->rxq_params.flags & QUEUE_FLG_DHC) ? 1 : 0;
6442 data->rx.max_sges_for_packet = params->rxq_params.max_sges_pkt;
6443 data->rx.client_qzone_id = params->rxq_params.cl_qzone_id;
6444 data->rx.max_agg_size = params->rxq_params.tpa_agg_sz;
6445
6446 /* We don't set drop flags */
6447 data->rx.drop_ip_cs_err_flg = 0;
6448 data->rx.drop_tcp_cs_err_flg = 0;
6449 data->rx.drop_ttl0_flg = 0;
6450 data->rx.drop_udp_cs_err_flg = 0;
6451
6452 data->rx.inner_vlan_removal_enable_flg =
6453 (params->rxq_params.flags & QUEUE_FLG_VLAN) ? 1 : 0;
6454 data->rx.outer_vlan_removal_enable_flg =
6455 (params->rxq_params.flags & QUEUE_FLG_OV) ? 1 : 0;
6456 data->rx.status_block_id = params->rxq_params.fw_sb_id;
6457 data->rx.rx_sb_index_number = params->rxq_params.sb_cq_index;
6458 data->rx.bd_buff_size = cpu_to_le16(params->rxq_params.buf_sz);
6459 data->rx.sge_buff_size = cpu_to_le16(params->rxq_params.sge_buf_sz);
6460 data->rx.mtu = cpu_to_le16(params->rxq_params.mtu);
6461 data->rx.bd_page_base.lo =
6462 cpu_to_le32(U64_LO(params->rxq_params.dscr_map));
6463 data->rx.bd_page_base.hi =
6464 cpu_to_le32(U64_HI(params->rxq_params.dscr_map));
6465 data->rx.sge_page_base.lo =
6466 cpu_to_le32(U64_LO(params->rxq_params.sge_map));
6467 data->rx.sge_page_base.hi =
6468 cpu_to_le32(U64_HI(params->rxq_params.sge_map));
6469 data->rx.cqe_page_base.lo =
6470 cpu_to_le32(U64_LO(params->rxq_params.rcq_map));
6471 data->rx.cqe_page_base.hi =
6472 cpu_to_le32(U64_HI(params->rxq_params.rcq_map));
6473 data->rx.is_leading_rss =
6474 (params->ramrod_params.flags & CLIENT_IS_LEADING_RSS) ? 1 : 0;
6475 data->rx.is_approx_mcast = data->rx.is_leading_rss;
6476
6477 /* Tx data */
6478 data->tx.enforce_security_flg = 0; /* VF specific */
6479 data->tx.tx_status_block_id = params->txq_params.fw_sb_id;
6480 data->tx.tx_sb_index_number = params->txq_params.sb_cq_index;
6481 data->tx.mtu = 0; /* VF specific */
6482 data->tx.tx_bd_page_base.lo =
6483 cpu_to_le32(U64_LO(params->txq_params.dscr_map));
6484 data->tx.tx_bd_page_base.hi =
6485 cpu_to_le32(U64_HI(params->txq_params.dscr_map));
6486
6487 /* flow control data */
6488 data->fc.cqe_pause_thr_low = cpu_to_le16(params->pause.rcq_th_lo);
6489 data->fc.cqe_pause_thr_high = cpu_to_le16(params->pause.rcq_th_hi);
6490 data->fc.bd_pause_thr_low = cpu_to_le16(params->pause.bd_th_lo);
6491 data->fc.bd_pause_thr_high = cpu_to_le16(params->pause.bd_th_hi);
6492 data->fc.sge_pause_thr_low = cpu_to_le16(params->pause.sge_th_lo);
6493 data->fc.sge_pause_thr_high = cpu_to_le16(params->pause.sge_th_hi);
6494 data->fc.rx_cos_mask = cpu_to_le16(params->pause.pri_map);
6495
6496 data->fc.safc_group_num = params->txq_params.cos;
6497 data->fc.safc_group_en_flg =
6498 (params->txq_params.flags & QUEUE_FLG_COS) ? 1 : 0;
Vladislav Zolotarovec6ba942010-12-13 05:44:01 +00006499 data->fc.traffic_type =
6500 (params->ramrod_params.flags & CLIENT_IS_FCOE) ?
6501 LLFC_TRAFFIC_TYPE_FCOE : LLFC_TRAFFIC_TYPE_NW;
Dmitry Kravkov523224a2010-10-06 03:23:26 +00006502}
6503
6504static inline void bnx2x_set_ctx_validation(struct eth_context *cxt, u32 cid)
6505{
6506 /* ustorm cxt validation */
6507 cxt->ustorm_ag_context.cdu_usage =
6508 CDU_RSRVD_VALUE_TYPE_A(cid, CDU_REGION_NUMBER_UCM_AG,
6509 ETH_CONNECTION_TYPE);
6510 /* xcontext validation */
6511 cxt->xstorm_ag_context.cdu_reserved =
6512 CDU_RSRVD_VALUE_TYPE_A(cid, CDU_REGION_NUMBER_XCM_AG,
6513 ETH_CONNECTION_TYPE);
6514}
6515
stephen hemminger8d962862010-10-21 07:50:56 +00006516static int bnx2x_setup_fw_client(struct bnx2x *bp,
6517 struct bnx2x_client_init_params *params,
6518 u8 activate,
6519 struct client_init_ramrod_data *data,
6520 dma_addr_t data_mapping)
Dmitry Kravkov523224a2010-10-06 03:23:26 +00006521{
6522 u16 hc_usec;
6523 int ramrod = RAMROD_CMD_ID_ETH_CLIENT_SETUP;
6524 int ramrod_flags = 0, rc;
6525
6526 /* HC and context validation values */
6527 hc_usec = params->txq_params.hc_rate ?
6528 1000000 / params->txq_params.hc_rate : 0;
6529 bnx2x_update_coalesce_sb_index(bp,
6530 params->txq_params.fw_sb_id,
6531 params->txq_params.sb_cq_index,
6532 !(params->txq_params.flags & QUEUE_FLG_HC),
6533 hc_usec);
6534
6535 *(params->ramrod_params.pstate) = BNX2X_FP_STATE_OPENING;
6536
6537 hc_usec = params->rxq_params.hc_rate ?
6538 1000000 / params->rxq_params.hc_rate : 0;
6539 bnx2x_update_coalesce_sb_index(bp,
6540 params->rxq_params.fw_sb_id,
6541 params->rxq_params.sb_cq_index,
6542 !(params->rxq_params.flags & QUEUE_FLG_HC),
6543 hc_usec);
6544
6545 bnx2x_set_ctx_validation(params->rxq_params.cxt,
6546 params->rxq_params.cid);
6547
6548 /* zero stats */
6549 if (params->txq_params.flags & QUEUE_FLG_STATS)
6550 storm_memset_xstats_zero(bp, BP_PORT(bp),
6551 params->txq_params.stat_id);
6552
6553 if (params->rxq_params.flags & QUEUE_FLG_STATS) {
6554 storm_memset_ustats_zero(bp, BP_PORT(bp),
6555 params->rxq_params.stat_id);
6556 storm_memset_tstats_zero(bp, BP_PORT(bp),
6557 params->rxq_params.stat_id);
6558 }
6559
6560 /* Fill the ramrod data */
6561 bnx2x_fill_cl_init_data(bp, params, activate, data);
6562
6563 /* SETUP ramrod.
6564 *
6565 * bnx2x_sp_post() takes a spin_lock thus no other explict memory
6566 * barrier except from mmiowb() is needed to impose a
6567 * proper ordering of memory operations.
6568 */
6569 mmiowb();
6570
6571
6572 bnx2x_sp_post(bp, ramrod, params->ramrod_params.cid,
6573 U64_HI(data_mapping), U64_LO(data_mapping), 0);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02006574
Eilon Greenstein34f80b02008-06-23 20:33:01 -07006575 /* Wait for completion */
Dmitry Kravkov523224a2010-10-06 03:23:26 +00006576 rc = bnx2x_wait_ramrod(bp, params->ramrod_params.state,
6577 params->ramrod_params.index,
6578 params->ramrod_params.pstate,
6579 ramrod_flags);
Eilon Greenstein34f80b02008-06-23 20:33:01 -07006580 return rc;
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02006581}
6582
/**
 * bnx2x_set_int_mode - configure interrupt mode
 *
 * @bp:		driver handle
 *
 * In case of MSI-X it will also try to enable MSI-X.
 *
 * Return: 0 on success; otherwise the error from bnx2x_enable_msix()
 * (the function still falls back to MSI or legacy INTx with a single
 * queue in that case, but the original rc is returned).
 */
static int __devinit bnx2x_set_int_mode(struct bnx2x *bp)
{
	int rc = 0;

	switch (bp->int_mode) {
	case INT_MODE_MSI:
		bnx2x_enable_msi(bp);
		/* falling through... */
	case INT_MODE_INTx:
		/* MSI and INTx use a single ethernet queue (plus any
		 * non-ethernet contexts). */
		bp->num_queues = 1 + NONE_ETH_CONTEXT_USE;
		DP(NETIF_MSG_IFUP, "set number of queues to 1\n");
		break;
	default:
		/* Set number of queues according to bp->multi_mode value */
		bnx2x_set_num_queues(bp);

		DP(NETIF_MSG_IFUP, "set number of queues to %d\n",
		   bp->num_queues);

		/* if we can't use MSI-X we only need one fp,
		 * so try to enable MSI-X with the requested number of fp's
		 * and fallback to MSI or legacy INTx with one fp
		 */
		rc = bnx2x_enable_msix(bp);
		if (rc) {
			/* failed to enable MSI-X */
			if (bp->multi_mode)
				DP(NETIF_MSG_IFUP,
					  "Multi requested but failed to "
					  "enable MSI-X (%d), "
					  "set number of queues to %d\n",
					  bp->num_queues,
					  1 + NONE_ETH_CONTEXT_USE);
			/* fall back to a single ethernet queue */
			bp->num_queues = 1 + NONE_ETH_CONTEXT_USE;

			/* try MSI unless it was explicitly disabled */
			if (!(bp->flags & DISABLE_MSI_FLAG))
				bnx2x_enable_msi(bp);
		}

		break;
	}

	return rc;
}
6634
/* Number of ILT lines needed for the L2 CIDs of this function;
 * must be called prior to any HW initializations */
static inline u16 bnx2x_cid_ilt_lines(struct bnx2x *bp)
{
	return L2_ILT_LINES(bp);
}
6640
/* Populate BP_ILT(bp) with this function's ILT layout: compute the
 * function's base line and carve out consecutive line ranges for the
 * CDU, QM (only when QM_INIT), SRC and TM clients. SRC and TM are only
 * configured when BCM_CNIC is defined; otherwise they are marked
 * SKIP_INIT | SKIP_MEM.
 */
void bnx2x_ilt_set_info(struct bnx2x *bp)
{
	struct ilt_client_info *ilt_client;
	struct bnx2x_ilt *ilt = BP_ILT(bp);
	u16 line = 0;	/* running line counter, relative to start_line */

	ilt->start_line = FUNC_ILT_BASE(BP_FUNC(bp));
	DP(BNX2X_MSG_SP, "ilt starts at line %d\n", ilt->start_line);

	/* CDU: covers the L2 contexts (plus CNIC contexts when enabled) */
	ilt_client = &ilt->clients[ILT_CLIENT_CDU];
	ilt_client->client_num = ILT_CLIENT_CDU;
	ilt_client->page_size = CDU_ILT_PAGE_SZ;
	ilt_client->flags = ILT_CLIENT_SKIP_MEM;
	ilt_client->start = line;
	line += L2_ILT_LINES(bp);
#ifdef BCM_CNIC
	line += CNIC_ILT_LINES;
#endif
	ilt_client->end = line - 1;

	DP(BNX2X_MSG_SP, "ilt client[CDU]: start %d, end %d, psz 0x%x, "
					 "flags 0x%x, hw psz %d\n",
	   ilt_client->start,
	   ilt_client->end,
	   ilt_client->page_size,
	   ilt_client->flags,
	   ilog2(ilt_client->page_size >> 12));

	/* QM */
	if (QM_INIT(bp->qm_cid_count)) {
		ilt_client = &ilt->clients[ILT_CLIENT_QM];
		ilt_client->client_num = ILT_CLIENT_QM;
		ilt_client->page_size = QM_ILT_PAGE_SZ;
		ilt_client->flags = 0;
		ilt_client->start = line;

		/* 4 bytes for each cid */
		line += DIV_ROUND_UP(bp->qm_cid_count * QM_QUEUES_PER_FUNC * 4,
							 QM_ILT_PAGE_SZ);

		ilt_client->end = line - 1;

		DP(BNX2X_MSG_SP, "ilt client[QM]: start %d, end %d, psz 0x%x, "
						 "flags 0x%x, hw psz %d\n",
		   ilt_client->start,
		   ilt_client->end,
		   ilt_client->page_size,
		   ilt_client->flags,
		   ilog2(ilt_client->page_size >> 12));

	}
	/* SRC: searcher, only used by CNIC */
	ilt_client = &ilt->clients[ILT_CLIENT_SRC];
#ifdef BCM_CNIC
	ilt_client->client_num = ILT_CLIENT_SRC;
	ilt_client->page_size = SRC_ILT_PAGE_SZ;
	ilt_client->flags = 0;
	ilt_client->start = line;
	line += SRC_ILT_LINES;
	ilt_client->end = line - 1;

	DP(BNX2X_MSG_SP, "ilt client[SRC]: start %d, end %d, psz 0x%x, "
					 "flags 0x%x, hw psz %d\n",
	   ilt_client->start,
	   ilt_client->end,
	   ilt_client->page_size,
	   ilt_client->flags,
	   ilog2(ilt_client->page_size >> 12));

#else
	ilt_client->flags = (ILT_CLIENT_SKIP_INIT | ILT_CLIENT_SKIP_MEM);
#endif

	/* TM: timers block, only used by CNIC */
	ilt_client = &ilt->clients[ILT_CLIENT_TM];
#ifdef BCM_CNIC
	ilt_client->client_num = ILT_CLIENT_TM;
	ilt_client->page_size = TM_ILT_PAGE_SZ;
	ilt_client->flags = 0;
	ilt_client->start = line;
	line += TM_ILT_LINES;
	ilt_client->end = line - 1;

	DP(BNX2X_MSG_SP, "ilt client[TM]: start %d, end %d, psz 0x%x, "
					 "flags 0x%x, hw psz %d\n",
	   ilt_client->start,
	   ilt_client->end,
	   ilt_client->page_size,
	   ilt_client->flags,
	   ilog2(ilt_client->page_size >> 12));

#else
	ilt_client->flags = (ILT_CLIENT_SKIP_INIT | ILT_CLIENT_SKIP_MEM);
#endif
}
Dmitry Kravkovf85582f2010-10-06 03:34:21 +00006737
/* Bring up one FW client (ethernet queue, or the FCoE L2 queue under
 * BCM_CNIC): ack the client's IGU status block (skipped for FCoE),
 * build the ramrod/queue-init parameters from the fastpath and send a
 * client-setup ramrod through bnx2x_setup_fw_client(), which waits for
 * the BNX2X_FP_STATE_OPEN transition.
 * Returns the bnx2x_setup_fw_client() result (0 on success).
 */
int bnx2x_setup_client(struct bnx2x *bp, struct bnx2x_fastpath *fp,
		       int is_leading)
{
	struct bnx2x_client_init_params params = { {0} };
	int rc;

	/* reset IGU state skip FCoE L2 queue */
	if (!IS_FCOE_FP(fp))
		bnx2x_ack_sb(bp, fp->igu_sb_id, USTORM_ID, 0,
			     IGU_INT_ENABLE, 0);

	params.ramrod_params.pstate = &fp->state;
	params.ramrod_params.state = BNX2X_FP_STATE_OPEN;
	params.ramrod_params.index = fp->index;
	params.ramrod_params.cid = fp->cid;

#ifdef BCM_CNIC
	if (IS_FCOE_FP(fp))
		params.ramrod_params.flags |= CLIENT_IS_FCOE;

#endif

	/* the leading client owns the RSS configuration */
	if (is_leading)
		params.ramrod_params.flags |= CLIENT_IS_LEADING_RSS;

	bnx2x_pf_rx_cl_prep(bp, fp, &params.pause, &params.rxq_params);

	bnx2x_pf_tx_cl_prep(bp, fp, &params.txq_params);

	rc = bnx2x_setup_fw_client(bp, &params, 1,
					bnx2x_sp(bp, client_init_data),
					bnx2x_sp_mapping(bp, client_init_data));
	return rc;
}
6772
/* Tear down a FW client with the mandatory ramrod sequence
 * HALT -> TERMINATE -> CFC DEL, waiting for the corresponding fastpath
 * state transition after each step (polling when p->poll is set).
 * Returns 0 on success, or the bnx2x_wait_ramrod() error (timeout) of
 * the step that failed.
 */
static int bnx2x_stop_fw_client(struct bnx2x *bp,
				struct bnx2x_client_ramrod_params *p)
{
	int rc;

	int poll_flag = p->poll ? WAIT_RAMROD_POLL : 0;

	/* halt the connection */
	*p->pstate = BNX2X_FP_STATE_HALTING;
	bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_HALT, p->cid, 0,
						  p->cl_id, 0);

	/* Wait for completion */
	rc = bnx2x_wait_ramrod(bp, BNX2X_FP_STATE_HALTED, p->index,
			       p->pstate, poll_flag);
	if (rc) /* timeout */
		return rc;

	/* terminate the connection */
	*p->pstate = BNX2X_FP_STATE_TERMINATING;
	bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_TERMINATE, p->cid, 0,
						       p->cl_id, 0);
	/* Wait for completion */
	rc = bnx2x_wait_ramrod(bp, BNX2X_FP_STATE_TERMINATED, p->index,
			       p->pstate, poll_flag);
	if (rc) /* timeout */
		return rc;


	/* delete cfc entry */
	bnx2x_sp_post(bp, RAMROD_CMD_ID_COMMON_CFC_DEL, p->cid, 0, 0, 1);

	/* Wait for completion */
	rc = bnx2x_wait_ramrod(bp, BNX2X_FP_STATE_CLOSED, p->index,
			       p->pstate, WAIT_RAMROD_COMMON);
	return rc;
}
6809
Dmitry Kravkov523224a2010-10-06 03:23:26 +00006810static int bnx2x_stop_client(struct bnx2x *bp, int index)
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02006811{
Dmitry Kravkov523224a2010-10-06 03:23:26 +00006812 struct bnx2x_client_ramrod_params client_stop = {0};
6813 struct bnx2x_fastpath *fp = &bp->fp[index];
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02006814
Dmitry Kravkov523224a2010-10-06 03:23:26 +00006815 client_stop.index = index;
6816 client_stop.cid = fp->cid;
6817 client_stop.cl_id = fp->cl_id;
6818 client_stop.pstate = &(fp->state);
6819 client_stop.poll = 0;
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02006820
Dmitry Kravkov523224a2010-10-06 03:23:26 +00006821 return bnx2x_stop_fw_client(bp, &client_stop);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02006822}
6823
Dmitry Kravkov523224a2010-10-06 03:23:26 +00006824
/* Bring this function's HW/FW resources to reset state: disable the
 * function in all four storm FW sections, mark its status blocks
 * disabled, reset the IGU/HC latched edges, stop the CNIC timer scan
 * and clear the function's ILT. The inline comment below documents the
 * assumption that bnx2x_reset_port() has already run.
 */
static void bnx2x_reset_func(struct bnx2x *bp)
{
	int port = BP_PORT(bp);
	int func = BP_FUNC(bp);
	int i;
	/* offset of the pf_id byte inside a status block's data,
	 * which differs between E2 and E1x layouts */
	int pfunc_offset_fp = offsetof(struct hc_sb_data, p_func) +
			(CHIP_IS_E2(bp) ?
			 offsetof(struct hc_status_block_data_e2, common) :
			 offsetof(struct hc_status_block_data_e1x, common));
	int pfunc_offset_sp = offsetof(struct hc_sp_status_block_data, p_func);
	int pfid_offset = offsetof(struct pci_entity, pf_id);

	/* Disable the function in the FW */
	REG_WR8(bp, BAR_XSTRORM_INTMEM + XSTORM_FUNC_EN_OFFSET(func), 0);
	REG_WR8(bp, BAR_CSTRORM_INTMEM + CSTORM_FUNC_EN_OFFSET(func), 0);
	REG_WR8(bp, BAR_TSTRORM_INTMEM + TSTORM_FUNC_EN_OFFSET(func), 0);
	REG_WR8(bp, BAR_USTRORM_INTMEM + USTORM_FUNC_EN_OFFSET(func), 0);

	/* FP SBs: mark every ethernet queue's status block disabled */
	for_each_eth_queue(bp, i) {
		struct bnx2x_fastpath *fp = &bp->fp[i];
		REG_WR8(bp,
			BAR_CSTRORM_INTMEM +
			CSTORM_STATUS_BLOCK_DATA_OFFSET(fp->fw_sb_id)
			+ pfunc_offset_fp + pfid_offset,
			HC_FUNCTION_DISABLED);
	}

	/* SP SB */
	REG_WR8(bp,
		BAR_CSTRORM_INTMEM +
		CSTORM_SP_STATUS_BLOCK_DATA_OFFSET(func) +
		pfunc_offset_sp + pfid_offset,
		HC_FUNCTION_DISABLED);


	/* NOTE(review): this loop writes the same dword
	 * XSTORM_SPQ_DATA_OFFSET(func) on every iteration - the loop
	 * index is not part of the offset. It looks like it is meant to
	 * clear the whole XSTORM_SPQ_DATA_SIZE area; verify against the
	 * FW interface definition.
	 */
	for (i = 0; i < XSTORM_SPQ_DATA_SIZE / 4; i++)
		REG_WR(bp, BAR_XSTRORM_INTMEM + XSTORM_SPQ_DATA_OFFSET(func),
		       0);

	/* Configure IGU: clear latched edges in HC or IGU mode */
	if (bp->common.int_block == INT_BLOCK_HC) {
		REG_WR(bp, HC_REG_LEADING_EDGE_0 + port*8, 0);
		REG_WR(bp, HC_REG_TRAILING_EDGE_0 + port*8, 0);
	} else {
		REG_WR(bp, IGU_REG_LEADING_EDGE_LATCH, 0);
		REG_WR(bp, IGU_REG_TRAILING_EDGE_LATCH, 0);
	}

#ifdef BCM_CNIC
	/* Disable Timer scan */
	REG_WR(bp, TM_REG_EN_LINEAR0_TIMER + port*4, 0);
	/*
	 * Wait for at least 10ms and up to 2 second for the timers scan to
	 * complete
	 */
	for (i = 0; i < 200; i++) {
		msleep(10);
		if (!REG_RD(bp, TM_REG_LIN0_SCAN_ON + port*4))
			break;
	}
#endif
	/* Clear ILT */
	bnx2x_clear_func_ilt(bp, func);

	/* Timers workaround bug for E2: if this is vnic-3,
	 * we need to set the entire ilt range for this timers.
	 */
	if (CHIP_IS_E2(bp) && BP_VN(bp) == 3) {
		struct ilt_client_info ilt_cli;
		/* use dummy TM client */
		memset(&ilt_cli, 0, sizeof(struct ilt_client_info));
		ilt_cli.start = 0;
		ilt_cli.end = ILT_NUM_PAGE_ENTRIES - 1;
		ilt_cli.client_num = ILT_CLIENT_TM;

		bnx2x_ilt_boundry_init_op(bp, &ilt_cli, 0, INITOP_CLEAR);
	}

	/* this assumes that reset_port() called before reset_func()*/
	if (CHIP_IS_E2(bp))
		bnx2x_pf_disable(bp);

	/* DMAE is no longer usable after the function reset */
	bp->dmae_ready = 0;
}
6910
6911static void bnx2x_reset_port(struct bnx2x *bp)
6912{
6913 int port = BP_PORT(bp);
6914 u32 val;
6915
6916 REG_WR(bp, NIG_REG_MASK_INTERRUPT_PORT0 + port*4, 0);
6917
6918 /* Do not rcv packets to BRB */
6919 REG_WR(bp, NIG_REG_LLH0_BRB1_DRV_MASK + port*4, 0x0);
6920 /* Do not direct rcv packets that are not for MCP to the BRB */
6921 REG_WR(bp, (port ? NIG_REG_LLH1_BRB1_NOT_MCP :
6922 NIG_REG_LLH0_BRB1_NOT_MCP), 0x0);
6923
6924 /* Configure AEU */
6925 REG_WR(bp, MISC_REG_AEU_MASK_ATTN_FUNC_0 + port*4, 0);
6926
6927 msleep(100);
6928 /* Check for BRB port occupancy */
6929 val = REG_RD(bp, BRB1_REG_PORT_NUM_OCC_BLOCKS_0 + port*4);
6930 if (val)
6931 DP(NETIF_MSG_IFDOWN,
Eilon Greenstein33471622008-08-13 15:59:08 -07006932 "BRB1 is not empty %d blocks are occupied\n", val);
Eilon Greenstein34f80b02008-06-23 20:33:01 -07006933
6934 /* TODO: Close Doorbell port? */
6935}
6936
Eilon Greenstein34f80b02008-06-23 20:33:01 -07006937static void bnx2x_reset_chip(struct bnx2x *bp, u32 reset_code)
6938{
6939 DP(BNX2X_MSG_MCP, "function %d reset_code %x\n",
Dmitry Kravkovf2e08992010-10-06 03:28:26 +00006940 BP_ABS_FUNC(bp), reset_code);
Eilon Greenstein34f80b02008-06-23 20:33:01 -07006941
6942 switch (reset_code) {
6943 case FW_MSG_CODE_DRV_UNLOAD_COMMON:
6944 bnx2x_reset_port(bp);
6945 bnx2x_reset_func(bp);
6946 bnx2x_reset_common(bp);
6947 break;
6948
6949 case FW_MSG_CODE_DRV_UNLOAD_PORT:
6950 bnx2x_reset_port(bp);
6951 bnx2x_reset_func(bp);
6952 break;
6953
6954 case FW_MSG_CODE_DRV_UNLOAD_FUNCTION:
6955 bnx2x_reset_func(bp);
6956 break;
6957
6958 default:
6959 BNX2X_ERR("Unknown reset_code (0x%x) from MCP\n", reset_code);
6960 break;
6961 }
6962}
6963
#ifdef BCM_CNIC
/* Remove the FCoE-related MAC configuration, if it was ever set
 * (FCOE_MACS_SET), and clear the flag afterwards.
 */
static inline void bnx2x_del_fcoe_eth_macs(struct bnx2x *bp)
{
	/* nothing to do unless the FCoE MACs were configured */
	if (!(bp->flags & FCOE_MACS_SET))
		return;

	if (!IS_MF_SD(bp))
		bnx2x_set_fip_eth_mac_addr(bp, 0);

	bnx2x_set_all_enode_macs(bp, 0);

	bp->flags &= ~FCOE_MACS_SET;
}
#endif
6977
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00006978void bnx2x_chip_cleanup(struct bnx2x *bp, int unload_mode)
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02006979{
Vladislav Zolotarovda5a6622008-08-13 15:50:00 -07006980 int port = BP_PORT(bp);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02006981 u32 reset_code = 0;
Vladislav Zolotarovda5a6622008-08-13 15:50:00 -07006982 int i, cnt, rc;
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02006983
Eilon Greenstein555f6c72009-02-12 08:36:11 +00006984 /* Wait until tx fastpath tasks complete */
Vladislav Zolotarovec6ba942010-12-13 05:44:01 +00006985 for_each_tx_queue(bp, i) {
Eliezer Tamir228241e2008-02-28 11:56:57 -08006986 struct bnx2x_fastpath *fp = &bp->fp[i];
6987
Eilon Greenstein34f80b02008-06-23 20:33:01 -07006988 cnt = 1000;
Vladislav Zolotarove8b5fc52009-01-26 12:36:42 -08006989 while (bnx2x_has_tx_work_unload(fp)) {
Vladislav Zolotarovda5a6622008-08-13 15:50:00 -07006990
Eilon Greenstein34f80b02008-06-23 20:33:01 -07006991 if (!cnt) {
6992 BNX2X_ERR("timeout waiting for queue[%d]\n",
6993 i);
6994#ifdef BNX2X_STOP_ON_ERROR
6995 bnx2x_panic();
6996 return -EBUSY;
6997#else
6998 break;
6999#endif
7000 }
7001 cnt--;
Vladislav Zolotarovda5a6622008-08-13 15:50:00 -07007002 msleep(1);
Eilon Greenstein34f80b02008-06-23 20:33:01 -07007003 }
Eliezer Tamir228241e2008-02-28 11:56:57 -08007004 }
Vladislav Zolotarovda5a6622008-08-13 15:50:00 -07007005 /* Give HW time to discard old tx messages */
7006 msleep(1);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02007007
Vladislav Zolotarov6e30dd42011-02-06 11:25:41 -08007008 bnx2x_set_eth_mac(bp, 0);
Yitchak Gertner65abd742008-08-25 15:26:24 -07007009
Vladislav Zolotarov6e30dd42011-02-06 11:25:41 -08007010 bnx2x_invalidate_uc_list(bp);
7011
7012 if (CHIP_IS_E1(bp))
7013 bnx2x_invalidate_e1_mc_list(bp);
7014 else {
7015 bnx2x_invalidate_e1h_mc_list(bp);
Yitchak Gertner65abd742008-08-25 15:26:24 -07007016 REG_WR(bp, NIG_REG_LLH0_FUNC_EN + port*8, 0);
Yitchak Gertner65abd742008-08-25 15:26:24 -07007017 }
Dmitry Kravkov523224a2010-10-06 03:23:26 +00007018
Michael Chan993ac7b2009-10-10 13:46:56 +00007019#ifdef BCM_CNIC
Vladislav Zolotarovec6ba942010-12-13 05:44:01 +00007020 bnx2x_del_fcoe_eth_macs(bp);
Michael Chan993ac7b2009-10-10 13:46:56 +00007021#endif
Yitchak Gertner65abd742008-08-25 15:26:24 -07007022
Vladislav Zolotarovda5a6622008-08-13 15:50:00 -07007023 if (unload_mode == UNLOAD_NORMAL)
7024 reset_code = DRV_MSG_CODE_UNLOAD_REQ_WOL_DIS;
Eliezer Tamir228241e2008-02-28 11:56:57 -08007025
Eilon Greenstein7d0446c2009-07-29 00:20:10 +00007026 else if (bp->flags & NO_WOL_FLAG)
Vladislav Zolotarovda5a6622008-08-13 15:50:00 -07007027 reset_code = DRV_MSG_CODE_UNLOAD_REQ_WOL_MCP;
Vladislav Zolotarovda5a6622008-08-13 15:50:00 -07007028
Eilon Greenstein7d0446c2009-07-29 00:20:10 +00007029 else if (bp->wol) {
Vladislav Zolotarovda5a6622008-08-13 15:50:00 -07007030 u32 emac_base = port ? GRCBASE_EMAC1 : GRCBASE_EMAC0;
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02007031 u8 *mac_addr = bp->dev->dev_addr;
Eilon Greenstein34f80b02008-06-23 20:33:01 -07007032 u32 val;
Eilon Greenstein34f80b02008-06-23 20:33:01 -07007033 /* The mac address is written to entries 1-4 to
7034 preserve entry 0 which is used by the PMF */
Vladislav Zolotarovda5a6622008-08-13 15:50:00 -07007035 u8 entry = (BP_E1HVN(bp) + 1)*8;
7036
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02007037 val = (mac_addr[0] << 8) | mac_addr[1];
Eilon Greenstein3196a882008-08-13 15:58:49 -07007038 EMAC_WR(bp, EMAC_REG_EMAC_MAC_MATCH + entry, val);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02007039
7040 val = (mac_addr[2] << 24) | (mac_addr[3] << 16) |
7041 (mac_addr[4] << 8) | mac_addr[5];
Eilon Greenstein3196a882008-08-13 15:58:49 -07007042 EMAC_WR(bp, EMAC_REG_EMAC_MAC_MATCH + entry + 4, val);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02007043
7044 reset_code = DRV_MSG_CODE_UNLOAD_REQ_WOL_EN;
Eliezer Tamir228241e2008-02-28 11:56:57 -08007045
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02007046 } else
7047 reset_code = DRV_MSG_CODE_UNLOAD_REQ_WOL_DIS;
7048
Eilon Greenstein34f80b02008-06-23 20:33:01 -07007049 /* Close multi and leading connections
7050 Completions for ramrods are collected in a synchronous way */
Dmitry Kravkov523224a2010-10-06 03:23:26 +00007051 for_each_queue(bp, i)
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02007052
Dmitry Kravkov523224a2010-10-06 03:23:26 +00007053 if (bnx2x_stop_client(bp, i))
Vladislav Zolotarovda5a6622008-08-13 15:50:00 -07007054#ifdef BNX2X_STOP_ON_ERROR
Dmitry Kravkov523224a2010-10-06 03:23:26 +00007055 return;
7056#else
7057 goto unload_error;
7058#endif
7059
7060 rc = bnx2x_func_stop(bp);
7061 if (rc) {
7062 BNX2X_ERR("Function stop failed!\n");
7063#ifdef BNX2X_STOP_ON_ERROR
7064 return;
Vladislav Zolotarovda5a6622008-08-13 15:50:00 -07007065#else
7066 goto unload_error;
Eilon Greenstein34f80b02008-06-23 20:33:01 -07007067#endif
Eliezer Tamir228241e2008-02-28 11:56:57 -08007068 }
Dmitry Kravkov523224a2010-10-06 03:23:26 +00007069#ifndef BNX2X_STOP_ON_ERROR
Eliezer Tamir228241e2008-02-28 11:56:57 -08007070unload_error:
Dmitry Kravkov523224a2010-10-06 03:23:26 +00007071#endif
Eilon Greenstein34f80b02008-06-23 20:33:01 -07007072 if (!BP_NOMCP(bp))
Yaniv Rosnera22f0782010-09-07 11:41:20 +00007073 reset_code = bnx2x_fw_command(bp, reset_code, 0);
Eilon Greenstein34f80b02008-06-23 20:33:01 -07007074 else {
Dmitry Kravkovf2e08992010-10-06 03:28:26 +00007075 DP(NETIF_MSG_IFDOWN, "NO MCP - load counts[%d] "
7076 "%d, %d, %d\n", BP_PATH(bp),
7077 load_count[BP_PATH(bp)][0],
7078 load_count[BP_PATH(bp)][1],
7079 load_count[BP_PATH(bp)][2]);
7080 load_count[BP_PATH(bp)][0]--;
7081 load_count[BP_PATH(bp)][1 + port]--;
7082 DP(NETIF_MSG_IFDOWN, "NO MCP - new load counts[%d] "
7083 "%d, %d, %d\n", BP_PATH(bp),
7084 load_count[BP_PATH(bp)][0], load_count[BP_PATH(bp)][1],
7085 load_count[BP_PATH(bp)][2]);
7086 if (load_count[BP_PATH(bp)][0] == 0)
Eilon Greenstein34f80b02008-06-23 20:33:01 -07007087 reset_code = FW_MSG_CODE_DRV_UNLOAD_COMMON;
Dmitry Kravkovf2e08992010-10-06 03:28:26 +00007088 else if (load_count[BP_PATH(bp)][1 + port] == 0)
Eilon Greenstein34f80b02008-06-23 20:33:01 -07007089 reset_code = FW_MSG_CODE_DRV_UNLOAD_PORT;
7090 else
7091 reset_code = FW_MSG_CODE_DRV_UNLOAD_FUNCTION;
7092 }
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02007093
Eilon Greenstein34f80b02008-06-23 20:33:01 -07007094 if ((reset_code == FW_MSG_CODE_DRV_UNLOAD_COMMON) ||
7095 (reset_code == FW_MSG_CODE_DRV_UNLOAD_PORT))
7096 bnx2x__link_reset(bp);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02007097
Dmitry Kravkov523224a2010-10-06 03:23:26 +00007098 /* Disable HW interrupts, NAPI */
7099 bnx2x_netif_stop(bp, 1);
7100
7101 /* Release IRQs */
Dmitry Kravkovd6214d72010-10-06 03:32:10 +00007102 bnx2x_free_irq(bp);
Dmitry Kravkov523224a2010-10-06 03:23:26 +00007103
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02007104 /* Reset the chip */
Eliezer Tamir228241e2008-02-28 11:56:57 -08007105 bnx2x_reset_chip(bp, reset_code);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02007106
7107 /* Report UNLOAD_DONE to MCP */
Eilon Greenstein34f80b02008-06-23 20:33:01 -07007108 if (!BP_NOMCP(bp))
Yaniv Rosnera22f0782010-09-07 11:41:20 +00007109 bnx2x_fw_command(bp, DRV_MSG_CODE_UNLOAD_DONE, 0);
Eilon Greenstein356e2382009-02-12 08:38:32 +00007110
Vladislav Zolotarov72fd0712010-04-19 01:13:12 +00007111}
7112
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00007113void bnx2x_disable_close_the_gate(struct bnx2x *bp)
Vladislav Zolotarov72fd0712010-04-19 01:13:12 +00007114{
7115 u32 val;
7116
7117 DP(NETIF_MSG_HW, "Disabling \"close the gates\"\n");
7118
7119 if (CHIP_IS_E1(bp)) {
7120 int port = BP_PORT(bp);
7121 u32 addr = port ? MISC_REG_AEU_MASK_ATTN_FUNC_1 :
7122 MISC_REG_AEU_MASK_ATTN_FUNC_0;
7123
7124 val = REG_RD(bp, addr);
7125 val &= ~(0x300);
7126 REG_WR(bp, addr, val);
7127 } else if (CHIP_IS_E1H(bp)) {
7128 val = REG_RD(bp, MISC_REG_AEU_GENERAL_MASK);
7129 val &= ~(MISC_AEU_GENERAL_MASK_REG_AEU_PXP_CLOSE_MASK |
7130 MISC_AEU_GENERAL_MASK_REG_AEU_NIG_CLOSE_MASK);
7131 REG_WR(bp, MISC_REG_AEU_GENERAL_MASK, val);
7132 }
7133}
7134
/* Close (close == true) or open gates #2, #3 and #4:
 * #2 - PXP internal writes discard, #4 - PXP doorbells discard (both
 * "not E1" only), #3 - HC config enable bit. Note the inverted polarity
 * for gate #3: closing it CLEARS the bit, while closing #2/#4 SETS the
 * discard bit. Ends with mmiowb() to push the writes out.
 */
static void bnx2x_set_234_gates(struct bnx2x *bp, bool close)
{
	u32 val, addr;

	/* Gates #2 and #4a are closed/opened for "not E1" only */
	if (!CHIP_IS_E1(bp)) {
		/* #4 */
		val = REG_RD(bp, PXP_REG_HST_DISCARD_DOORBELLS);
		REG_WR(bp, PXP_REG_HST_DISCARD_DOORBELLS,
		       close ? (val | 0x1) : (val & (~(u32)1)));
		/* #2 */
		val = REG_RD(bp, PXP_REG_HST_DISCARD_INTERNAL_WRITES);
		REG_WR(bp, PXP_REG_HST_DISCARD_INTERNAL_WRITES,
		       close ? (val | 0x1) : (val & (~(u32)1)));
	}

	/* #3 (inverted: clearing the bit closes the gate) */
	addr = BP_PORT(bp) ? HC_REG_CONFIG_1 : HC_REG_CONFIG_0;
	val = REG_RD(bp, addr);
	REG_WR(bp, addr, (!close) ? (val | 0x1) : (val & (~(u32)1)));

	DP(NETIF_MSG_HW, "%s gates #2, #3 and #4\n",
	   close ? "closing" : "opening");
	mmiowb();
}
7161
7162#define SHARED_MF_CLP_MAGIC 0x80000000 /* `magic' bit */
7163
7164static void bnx2x_clp_reset_prep(struct bnx2x *bp, u32 *magic_val)
7165{
7166 /* Do some magic... */
7167 u32 val = MF_CFG_RD(bp, shared_mf_config.clp_mb);
7168 *magic_val = val & SHARED_MF_CLP_MAGIC;
7169 MF_CFG_WR(bp, shared_mf_config.clp_mb, val | SHARED_MF_CLP_MAGIC);
7170}
7171
Dmitry Kravkove8920672011-05-04 23:52:40 +00007172/**
7173 * bnx2x_clp_reset_done - restore the value of the `magic' bit.
Vladislav Zolotarov72fd0712010-04-19 01:13:12 +00007174 *
Dmitry Kravkove8920672011-05-04 23:52:40 +00007175 * @bp: driver handle
7176 * @magic_val: old value of the `magic' bit.
Vladislav Zolotarov72fd0712010-04-19 01:13:12 +00007177 */
7178static void bnx2x_clp_reset_done(struct bnx2x *bp, u32 magic_val)
7179{
7180 /* Restore the `magic' bit value... */
Vladislav Zolotarov72fd0712010-04-19 01:13:12 +00007181 u32 val = MF_CFG_RD(bp, shared_mf_config.clp_mb);
7182 MF_CFG_WR(bp, shared_mf_config.clp_mb,
7183 (val & (~SHARED_MF_CLP_MAGIC)) | magic_val);
7184}
7185
/**
 * bnx2x_reset_mcp_prep - prepare for MCP reset.
 *
 * @bp:		driver handle
 * @magic_val:	old value of 'magic' bit.
 *
 * Takes care of CLP configurations: saves/sets the CLP `magic' bit
 * (not on E1) and clears the shmem validity map so the MCP re-validates
 * after the reset.
 */
static void bnx2x_reset_mcp_prep(struct bnx2x *bp, u32 *magic_val)
{
	u32 shmem;
	u32 validity_offset;

	DP(NETIF_MSG_HW, "Starting\n");

	/* Set `magic' bit in order to save MF config */
	if (!CHIP_IS_E1(bp))
		bnx2x_clp_reset_prep(bp, magic_val);

	/* Get shmem offset */
	shmem = REG_RD(bp, MISC_REG_SHARED_MEM_ADDR);
	validity_offset = offsetof(struct shmem_region, validity_map[0]);

	/* Clear validity map flags (only when shmem is published) */
	if (shmem > 0)
		REG_WR(bp, shmem + validity_offset, 0);
}
7213
7214#define MCP_TIMEOUT 5000 /* 5 seconds (in ms) */
7215#define MCP_ONE_TIMEOUT 100 /* 100 ms */
7216
Dmitry Kravkove8920672011-05-04 23:52:40 +00007217/**
7218 * bnx2x_mcp_wait_one - wait for MCP_ONE_TIMEOUT
Vladislav Zolotarov72fd0712010-04-19 01:13:12 +00007219 *
Dmitry Kravkove8920672011-05-04 23:52:40 +00007220 * @bp: driver handle
Vladislav Zolotarov72fd0712010-04-19 01:13:12 +00007221 */
7222static inline void bnx2x_mcp_wait_one(struct bnx2x *bp)
7223{
7224 /* special handling for emulation and FPGA,
7225 wait 10 times longer */
7226 if (CHIP_REV_IS_SLOW(bp))
7227 msleep(MCP_ONE_TIMEOUT*10);
7228 else
7229 msleep(MCP_ONE_TIMEOUT);
7230}
7231
/*
 * initializes bp->common.shmem_base and waits for validity signature to appear
 * Polls up to MCP_TIMEOUT (in MCP_ONE_TIMEOUT steps) for the MCP to
 * publish its shared memory and set SHR_MEM_VALIDITY_MB for this port.
 * Returns 0 when the signature shows up, -ENODEV on timeout.
 */
static int bnx2x_init_shmem(struct bnx2x *bp)
{
	int cnt = 0;
	u32 val = 0;

	do {
		bp->common.shmem_base = REG_RD(bp, MISC_REG_SHARED_MEM_ADDR);
		if (bp->common.shmem_base) {
			val = SHMEM_RD(bp, validity_map[BP_PORT(bp)]);
			if (val & SHR_MEM_VALIDITY_MB)
				return 0;
		}

		bnx2x_mcp_wait_one(bp);

	} while (cnt++ < (MCP_TIMEOUT / MCP_ONE_TIMEOUT));

	BNX2X_ERR("BAD MCP validity signature\n");

	return -ENODEV;
}
7256
7257static int bnx2x_reset_mcp_comp(struct bnx2x *bp, u32 magic_val)
7258{
7259 int rc = bnx2x_init_shmem(bp);
7260
Vladislav Zolotarov72fd0712010-04-19 01:13:12 +00007261 /* Restore the `magic' bit value */
7262 if (!CHIP_IS_E1(bp))
7263 bnx2x_clp_reset_done(bp, magic_val);
7264
7265 return rc;
7266}
7267
7268static void bnx2x_pxp_prep(struct bnx2x *bp)
7269{
7270 if (!CHIP_IS_E1(bp)) {
7271 REG_WR(bp, PXP2_REG_RD_START_INIT, 0);
7272 REG_WR(bp, PXP2_REG_RQ_RBC_DONE, 0);
7273 REG_WR(bp, PXP2_REG_RQ_CFG_DONE, 0);
7274 mmiowb();
7275 }
7276}
7277
/*
 * Reset the whole chip except for:
 *      - PCIE core
 *      - PCI Glue, PSWHST, PXP/PXP2 RF (all controlled by
 *              one reset bit)
 *      - IGU
 *      - MISC (including AEU)
 *      - GRC
 *      - RBCN, RBCP
 */
static void bnx2x_process_kill_chip_reset(struct bnx2x *bp)
{
	u32 not_reset_mask1, reset_mask1, not_reset_mask2, reset_mask2;

	/* blocks excluded from reset register 1 (see list above) */
	not_reset_mask1 =
		MISC_REGISTERS_RESET_REG_1_RST_HC |
		MISC_REGISTERS_RESET_REG_1_RST_PXPV |
		MISC_REGISTERS_RESET_REG_1_RST_PXP;

	/* blocks excluded from reset register 2 */
	not_reset_mask2 =
		MISC_REGISTERS_RESET_REG_2_RST_MDIO |
		MISC_REGISTERS_RESET_REG_2_RST_EMAC0_HARD_CORE |
		MISC_REGISTERS_RESET_REG_2_RST_EMAC1_HARD_CORE |
		MISC_REGISTERS_RESET_REG_2_RST_MISC_CORE |
		MISC_REGISTERS_RESET_REG_2_RST_RBCN |
		MISC_REGISTERS_RESET_REG_2_RST_GRC  |
		MISC_REGISTERS_RESET_REG_2_RST_MCP_N_RESET_REG_HARD_CORE |
		MISC_REGISTERS_RESET_REG_2_RST_MCP_N_HARD_CORE_RST_B;

	reset_mask1 = 0xffffffff;

	/* reset register 2 is narrower on E1 */
	if (CHIP_IS_E1(bp))
		reset_mask2 = 0xffff;
	else
		reset_mask2 = 0x1ffff;

	/* assert reset on everything except the excluded blocks... */
	REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_CLEAR,
	       reset_mask1 & (~not_reset_mask1));
	REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_2_CLEAR,
	       reset_mask2 & (~not_reset_mask2));

	/* make sure the CLEAR writes reach the chip before the SET writes */
	barrier();
	mmiowb();

	/* ...then release all blocks from reset */
	REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_SET, reset_mask1);
	REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_2_SET, reset_mask2);
	mmiowb();
}
7326
/* "Process kill" recovery flow: wait for the PXP2 read path (Tetris
 * buffer) to drain, close HW gates #2/#3/#4, reset the chip (except the
 * blocks listed at bnx2x_process_kill_chip_reset()), then bring the MCP
 * back up and reopen the gates.
 * Returns 0 on success, -EAGAIN if the Tetris buffer did not drain
 * within ~1s or the MCP did not recover.
 */
static int bnx2x_process_kill(struct bnx2x *bp)
{
	int cnt = 1000;
	u32 val = 0;	/* receives the saved CLP `magic' bit */
	u32 sr_cnt, blk_cnt, port_is_idle_0, port_is_idle_1, pgl_exp_rom2;


	/* Empty the Tetris buffer, wait for 1s */
	do {
		sr_cnt = REG_RD(bp, PXP2_REG_RD_SR_CNT);
		blk_cnt = REG_RD(bp, PXP2_REG_RD_BLK_CNT);
		port_is_idle_0 = REG_RD(bp, PXP2_REG_RD_PORT_IS_IDLE_0);
		port_is_idle_1 = REG_RD(bp, PXP2_REG_RD_PORT_IS_IDLE_1);
		pgl_exp_rom2 = REG_RD(bp, PXP2_REG_PGL_EXP_ROM2);
		/* expected idle values: all free SR/block entries,
		 * both ports idle, no pending expansion-ROM reads */
		if ((sr_cnt == 0x7e) && (blk_cnt == 0xa0) &&
		    ((port_is_idle_0 & 0x1) == 0x1) &&
		    ((port_is_idle_1 & 0x1) == 0x1) &&
		    (pgl_exp_rom2 == 0xffffffff))
			break;
		msleep(1);
	} while (cnt-- > 0);

	if (cnt <= 0) {
		DP(NETIF_MSG_HW, "Tetris buffer didn't get empty or there"
			  " are still"
			  " outstanding read requests after 1s!\n");
		DP(NETIF_MSG_HW, "sr_cnt=0x%08x, blk_cnt=0x%08x,"
			  " port_is_idle_0=0x%08x,"
			  " port_is_idle_1=0x%08x, pgl_exp_rom2=0x%08x\n",
			  sr_cnt, blk_cnt, port_is_idle_0, port_is_idle_1,
			  pgl_exp_rom2);
		return -EAGAIN;
	}

	barrier();

	/* Close gates #2, #3 and #4 */
	bnx2x_set_234_gates(bp, true);

	/* TBD: Indicate that "process kill" is in progress to MCP */

	/* Clear "unprepared" bit */
	REG_WR(bp, MISC_REG_UNPREPARED, 0);
	barrier();

	/* Make sure all is written to the chip before the reset */
	mmiowb();

	/* Wait for 1ms to empty GLUE and PCI-E core queues,
	 * PSWHST, GRC and PSWRD Tetris buffer.
	 */
	msleep(1);

	/* Prepare to chip reset: */
	/* MCP */
	bnx2x_reset_mcp_prep(bp, &val);

	/* PXP */
	bnx2x_pxp_prep(bp);
	barrier();

	/* reset the chip */
	bnx2x_process_kill_chip_reset(bp);
	barrier();

	/* Recover after reset: */
	/* MCP */
	if (bnx2x_reset_mcp_comp(bp, val))
		return -EAGAIN;

	/* PXP */
	bnx2x_pxp_prep(bp);

	/* Open the gates #2, #3 and #4 */
	bnx2x_set_234_gates(bp, false);

	/* TBD: IGU/AEU preparation bring back the AEU/IGU to a
	 * reset state, re-enable attentions. */

	return 0;
}
7408
7409static int bnx2x_leader_reset(struct bnx2x *bp)
7410{
7411 int rc = 0;
7412 /* Try to recover after the failure */
7413 if (bnx2x_process_kill(bp)) {
7414 printk(KERN_ERR "%s: Something bad had happen! Aii!\n",
7415 bp->dev->name);
7416 rc = -EAGAIN;
7417 goto exit_leader_reset;
7418 }
7419
7420 /* Clear "reset is in progress" bit and update the driver state */
7421 bnx2x_set_reset_done(bp);
7422 bp->recovery_state = BNX2X_RECOVERY_DONE;
7423
7424exit_leader_reset:
7425 bp->is_leader = 0;
7426 bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_RESERVED_08);
7427 smp_wmb();
7428 return rc;
7429}
7430
Vladislav Zolotarov72fd0712010-04-19 01:13:12 +00007431/* Assumption: runs under rtnl lock. This together with the fact
7432 * that it's called only from bnx2x_reset_task() ensure that it
7433 * will never be called when netif_running(bp->dev) is false.
7434 */
static void bnx2x_parity_recover(struct bnx2x *bp)
{
	/* HW parity-error recovery state machine.  Exactly one function per
	 * chip becomes the "leader" by winning the RESERVED_08 HW lock and
	 * performs the global "process kill" reset; all other functions
	 * only unload themselves and poll (by re-scheduling bp->reset_task)
	 * until the leader has finished.
	 */
	DP(NETIF_MSG_HW, "Handling parity\n");
	while (1) {
		switch (bp->recovery_state) {
		case BNX2X_RECOVERY_INIT:
			DP(NETIF_MSG_HW, "State is BNX2X_RECOVERY_INIT\n");
			/* Try to get a LEADER_LOCK HW lock */
			if (bnx2x_trylock_hw_lock(bp,
						  HW_LOCK_RESOURCE_RESERVED_08))
				bp->is_leader = 1;

			/* Stop the driver */
			/* If interface has been removed - break */
			if (bnx2x_nic_unload(bp, UNLOAD_RECOVERY))
				return;

			bp->recovery_state = BNX2X_RECOVERY_WAIT;
			/* Ensure "is_leader" and "recovery_state"
			 * update values are seen on other CPUs
			 */
			smp_wmb();
			/* break re-enters the loop and handles WAIT below */
			break;

		case BNX2X_RECOVERY_WAIT:
			DP(NETIF_MSG_HW, "State is BNX2X_RECOVERY_WAIT\n");
			if (bp->is_leader) {
				u32 load_counter = bnx2x_get_load_cnt(bp);
				if (load_counter) {
					/* Wait until all other functions get
					 * down.
					 */
					schedule_delayed_work(&bp->reset_task,
								HZ/10);
					return;
				} else {
					/* If all other functions got down -
					 * try to bring the chip back to
					 * normal. In any case it's an exit
					 * point for a leader.
					 */
					if (bnx2x_leader_reset(bp) ||
					    bnx2x_nic_load(bp, LOAD_NORMAL)) {
						printk(KERN_ERR"%s: Recovery "
						"has failed. Power cycle is "
						"needed.\n", bp->dev->name);
						/* Disconnect this device */
						netif_device_detach(bp->dev);
						/* Block ifup for all function
						 * of this ASIC until
						 * "process kill" or power
						 * cycle.
						 */
						bnx2x_set_reset_in_progress(bp);
						/* Shut down the power */
						bnx2x_set_power_state(bp,
								PCI_D3hot);
						return;
					}

					return;
				}
			} else { /* non-leader */
				if (!bnx2x_reset_is_done(bp)) {
					/* Try to get a LEADER_LOCK HW lock as
					 * long as a former leader may have
					 * been unloaded by the user or
					 * released a leadership by another
					 * reason.
					 */
					if (bnx2x_trylock_hw_lock(bp,
					    HW_LOCK_RESOURCE_RESERVED_08)) {
						/* I'm a leader now! Restart a
						 * switch case.
						 */
						bp->is_leader = 1;
						break;
					}

					/* Leader still busy - poll again in
					 * 100ms via the reset task.
					 */
					schedule_delayed_work(&bp->reset_task,
								HZ/10);
					return;

				} else { /* A leader has completed
					  * the "process kill". It's an exit
					  * point for a non-leader.
					  */
					bnx2x_nic_load(bp, LOAD_NORMAL);
					bp->recovery_state =
						BNX2X_RECOVERY_DONE;
					smp_wmb();
					return;
				}
			}
		default:
			return;
		}
	}
}
7534
7535/* bnx2x_nic_unload() flushes the bnx2x_wq, thus reset task is
7536 * scheduled on a general queue in order to prevent a dead lock.
7537 */
Eilon Greenstein34f80b02008-06-23 20:33:01 -07007538static void bnx2x_reset_task(struct work_struct *work)
7539{
Vladislav Zolotarov72fd0712010-04-19 01:13:12 +00007540 struct bnx2x *bp = container_of(work, struct bnx2x, reset_task.work);
Eilon Greenstein34f80b02008-06-23 20:33:01 -07007541
7542#ifdef BNX2X_STOP_ON_ERROR
7543 BNX2X_ERR("reset task called but STOP_ON_ERROR defined"
7544 " so reset not done to allow debug dump,\n"
Vladislav Zolotarov72fd0712010-04-19 01:13:12 +00007545 KERN_ERR " you will need to reboot when done\n");
Eilon Greenstein34f80b02008-06-23 20:33:01 -07007546 return;
7547#endif
7548
7549 rtnl_lock();
7550
7551 if (!netif_running(bp->dev))
7552 goto reset_task_exit;
7553
Vladislav Zolotarov72fd0712010-04-19 01:13:12 +00007554 if (unlikely(bp->recovery_state != BNX2X_RECOVERY_DONE))
7555 bnx2x_parity_recover(bp);
7556 else {
7557 bnx2x_nic_unload(bp, UNLOAD_NORMAL);
7558 bnx2x_nic_load(bp, LOAD_NORMAL);
7559 }
Eilon Greenstein34f80b02008-06-23 20:33:01 -07007560
7561reset_task_exit:
7562 rtnl_unlock();
7563}
7564
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02007565/* end of nic load/unload */
7566
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02007567/*
7568 * Init service functions
7569 */
7570
stephen hemminger8d962862010-10-21 07:50:56 +00007571static u32 bnx2x_get_pretend_reg(struct bnx2x *bp)
Eilon Greensteinf1ef27e2009-02-12 08:36:23 +00007572{
Dmitry Kravkovf2e08992010-10-06 03:28:26 +00007573 u32 base = PXP2_REG_PGL_PRETEND_FUNC_F0;
7574 u32 stride = PXP2_REG_PGL_PRETEND_FUNC_F1 - base;
7575 return base + (BP_ABS_FUNC(bp)) * stride;
Eilon Greensteinf1ef27e2009-02-12 08:36:23 +00007576}
7577
/* Disable HC interrupts on E1H+ chips by temporarily "pretending" to be
 * function 0 through the PGL pretend register, so the E1-style interrupt
 * disable sequence applies.  The write/read pairs and mmiowb() calls are
 * ordering-critical: each REG_RD flushes the preceding pretend write into
 * the chip before the next step.
 */
static void bnx2x_undi_int_disable_e1h(struct bnx2x *bp)
{
	u32 reg = bnx2x_get_pretend_reg(bp);

	/* Flush all outstanding writes */
	mmiowb();

	/* Pretend to be function 0 */
	REG_WR(bp, reg, 0);
	REG_RD(bp, reg);	/* Flush the GRC transaction (in the chip) */

	/* From now we are in the "like-E1" mode */
	bnx2x_int_disable(bp);

	/* Flush all outstanding writes */
	mmiowb();

	/* Restore the original function */
	REG_WR(bp, reg, BP_ABS_FUNC(bp));
	REG_RD(bp, reg);
}
7599
/* Chip-aware interrupt disable for the UNDI unload path: E1 can disable
 * interrupts directly, later chips must go through the "pretend" flow.
 */
static inline void bnx2x_undi_int_disable(struct bnx2x *bp)
{
	if (!CHIP_IS_E1(bp))
		bnx2x_undi_int_disable_e1h(bp);
	else
		bnx2x_int_disable(bp);
}
7607
/* Probe-time check for a leftover pre-boot UNDI driver.  If one is detected
 * (MISC_REG_UNPREPARED set and the DORQ normal-bell CID offset carrying the
 * UNDI signature 0x7), request firmware unload for both ports, quiesce input
 * traffic, reset the device (preserving NIG port-swap straps) and restore
 * this function's pf_num/fw_seq - so the regular driver starts clean.
 * The step ordering follows the MCP handshake and must not be changed.
 */
static void __devinit bnx2x_undi_unload(struct bnx2x *bp)
{
	u32 val;

	/* Check if there is any driver already loaded */
	val = REG_RD(bp, MISC_REG_UNPREPARED);
	if (val == 0x1) {
		/* Check if it is the UNDI driver
		 * UNDI driver initializes CID offset for normal bell to 0x7
		 */
		bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_UNDI);
		val = REG_RD(bp, DORQ_REG_NORM_CID_OFST);
		if (val == 0x7) {
			u32 reset_code = DRV_MSG_CODE_UNLOAD_REQ_WOL_DIS;
			/* save our pf_num */
			int orig_pf_num = bp->pf_num;
			u32 swap_en;
			u32 swap_val;

			/* clear the UNDI indication */
			REG_WR(bp, DORQ_REG_NORM_CID_OFST, 0);

			BNX2X_DEV_INFO("UNDI is active! reset device\n");

			/* try unload UNDI on port 0 */
			bp->pf_num = 0;
			bp->fw_seq =
			      (SHMEM_RD(bp, func_mb[bp->pf_num].drv_mb_header) &
				DRV_MSG_SEQ_NUMBER_MASK);
			reset_code = bnx2x_fw_command(bp, reset_code, 0);

			/* if UNDI is loaded on the other port */
			if (reset_code != FW_MSG_CODE_DRV_UNLOAD_COMMON) {

				/* send "DONE" for previous unload */
				bnx2x_fw_command(bp,
					     DRV_MSG_CODE_UNLOAD_DONE, 0);

				/* unload UNDI on port 1 */
				bp->pf_num = 1;
				bp->fw_seq =
			      (SHMEM_RD(bp, func_mb[bp->pf_num].drv_mb_header) &
					DRV_MSG_SEQ_NUMBER_MASK);
				reset_code = DRV_MSG_CODE_UNLOAD_REQ_WOL_DIS;

				bnx2x_fw_command(bp, reset_code, 0);
			}

			/* now it's safe to release the lock */
			bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_UNDI);

			/* mask interrupts before closing traffic */
			bnx2x_undi_int_disable(bp);

			/* close input traffic and wait for it */
			/* Do not rcv packets to BRB */
			REG_WR(bp,
			      (BP_PORT(bp) ? NIG_REG_LLH1_BRB1_DRV_MASK :
					     NIG_REG_LLH0_BRB1_DRV_MASK), 0x0);
			/* Do not direct rcv packets that are not for MCP to
			 * the BRB */
			REG_WR(bp,
			       (BP_PORT(bp) ? NIG_REG_LLH1_BRB1_NOT_MCP :
					      NIG_REG_LLH0_BRB1_NOT_MCP), 0x0);
			/* clear AEU */
			REG_WR(bp,
			     (BP_PORT(bp) ? MISC_REG_AEU_MASK_ATTN_FUNC_1 :
					    MISC_REG_AEU_MASK_ATTN_FUNC_0), 0);
			msleep(10);

			/* save NIG port swap info */
			swap_val = REG_RD(bp, NIG_REG_PORT_SWAP);
			swap_en = REG_RD(bp, NIG_REG_STRAP_OVERRIDE);
			/* reset device */
			REG_WR(bp,
			       GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_CLEAR,
			       0xd3ffffff);
			REG_WR(bp,
			       GRCBASE_MISC + MISC_REGISTERS_RESET_REG_2_CLEAR,
			       0x1403);
			/* take the NIG out of reset and restore swap values */
			REG_WR(bp,
			       GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_SET,
			       MISC_REGISTERS_RESET_REG_1_RST_NIG);
			REG_WR(bp, NIG_REG_PORT_SWAP, swap_val);
			REG_WR(bp, NIG_REG_STRAP_OVERRIDE, swap_en);

			/* send unload done to the MCP */
			bnx2x_fw_command(bp, DRV_MSG_CODE_UNLOAD_DONE, 0);

			/* restore our func and fw_seq */
			bp->pf_num = orig_pf_num;
			bp->fw_seq =
			      (SHMEM_RD(bp, func_mb[bp->pf_num].drv_mb_header) &
				DRV_MSG_SEQ_NUMBER_MASK);
		} else
			bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_UNDI);
	}
}
7706
/* Read chip-common (non-per-function) hardware information at probe time:
 * chip id/revision, port mode, doorbell size, flash size, shared-memory
 * bases, bootcode version and feature flags, WoL capability and the board
 * part number.  Sets NO_MCP_FLAG and returns early when no shmem base is
 * found (management CPU not active).
 */
static void __devinit bnx2x_get_common_hwinfo(struct bnx2x *bp)
{
	u32 val, val2, val3, val4, id;
	u16 pmc;

	/* Get the chip revision id and number. */
	/* chip num:16-31, rev:12-15, metal:4-11, bond_id:0-3 */
	val = REG_RD(bp, MISC_REG_CHIP_NUM);
	id = ((val & 0xffff) << 16);
	val = REG_RD(bp, MISC_REG_CHIP_REV);
	id |= ((val & 0xf) << 12);
	val = REG_RD(bp, MISC_REG_CHIP_METAL);
	id |= ((val & 0xff) << 4);
	val = REG_RD(bp, MISC_REG_BOND_ID);
	id |= (val & 0xf);
	bp->common.chip_id = id;

	/* Set doorbell size */
	bp->db_size = (1 << BNX2X_DB_SHIFT);

	if (CHIP_IS_E2(bp)) {
		/* Port-mode override register: bit 0 = override valid,
		 * bit 1 = overridden 4-port enable; otherwise read the
		 * strap value.
		 */
		val = REG_RD(bp, MISC_REG_PORT4MODE_EN_OVWR);
		if ((val & 1) == 0)
			val = REG_RD(bp, MISC_REG_PORT4MODE_EN);
		else
			val = (val >> 1) & 1;
		BNX2X_DEV_INFO("chip is in %s\n", val ? "4_PORT_MODE" :
						       "2_PORT_MODE");
		bp->common.chip_port_mode = val ? CHIP_4_PORT_MODE :
						 CHIP_2_PORT_MODE;

		if (CHIP_MODE_IS_4_PORT(bp))
			bp->pfid = (bp->pf_num >> 1);	/* 0..3 */
		else
			bp->pfid = (bp->pf_num & 0x6);	/* 0, 2, 4, 6 */
	} else {
		bp->common.chip_port_mode = CHIP_PORT_MODE_NONE; /* N/A */
		bp->pfid = bp->pf_num;			/* 0..7 */
	}

	/*
	 * set base FW non-default (fast path) status block id, this value is
	 * used to initialize the fw_sb_id saved on the fp/queue structure to
	 * determine the id used by the FW.
	 */
	if (CHIP_IS_E1x(bp))
		bp->base_fw_ndsb = BP_PORT(bp) * FP_SB_MAX_E1x;
	else /* E2 */
		bp->base_fw_ndsb = BP_PORT(bp) * FP_SB_MAX_E2;

	bp->link_params.chip_id = bp->common.chip_id;
	BNX2X_DEV_INFO("chip ID is 0x%x\n", id);

	/* single/dual port detection from bond-option bits */
	val = (REG_RD(bp, 0x2874) & 0x55);
	if ((bp->common.chip_id & 0x1) ||
	    (CHIP_IS_E1(bp) && val) || (CHIP_IS_E1H(bp) && (val == 0x55))) {
		bp->flags |= ONE_PORT_FLAG;
		BNX2X_DEV_INFO("single port device\n");
	}

	val = REG_RD(bp, MCP_REG_MCPR_NVM_CFG4);
	bp->common.flash_size = (NVRAM_1MB_SIZE <<
				 (val & MCPR_NVM_CFG4_FLASH_SIZE));
	BNX2X_DEV_INFO("flash_size 0x%x (%d)\n",
		       bp->common.flash_size, bp->common.flash_size);

	bnx2x_init_shmem(bp);

	/* shmem2 base lives in a per-path generic scratch register */
	bp->common.shmem2_base = REG_RD(bp, (BP_PATH(bp) ?
					MISC_REG_GENERIC_CR_1 :
					MISC_REG_GENERIC_CR_0));

	bp->link_params.shmem_base = bp->common.shmem_base;
	bp->link_params.shmem2_base = bp->common.shmem2_base;
	BNX2X_DEV_INFO("shmem offset 0x%x  shmem2 offset 0x%x\n",
		       bp->common.shmem_base, bp->common.shmem2_base);

	if (!bp->common.shmem_base) {
		BNX2X_DEV_INFO("MCP not active\n");
		bp->flags |= NO_MCP_FLAG;
		return;
	}

	bp->common.hw_config = SHMEM_RD(bp, dev_info.shared_hw_config.config);
	BNX2X_DEV_INFO("hw_config 0x%08x\n", bp->common.hw_config);

	bp->link_params.hw_led_mode = ((bp->common.hw_config &
					SHARED_HW_CFG_LED_MODE_MASK) >>
				       SHARED_HW_CFG_LED_MODE_SHIFT);

	bp->link_params.feature_config_flags = 0;
	val = SHMEM_RD(bp, dev_info.shared_feature_config.config);
	if (val & SHARED_FEAT_CFG_OVERRIDE_PREEMPHASIS_CFG_ENABLED)
		bp->link_params.feature_config_flags |=
				FEATURE_CONFIG_OVERRIDE_PREEMPHASIS_ENABLED;
	else
		bp->link_params.feature_config_flags &=
				~FEATURE_CONFIG_OVERRIDE_PREEMPHASIS_ENABLED;

	/* bootcode version check - warn only for now */
	val = SHMEM_RD(bp, dev_info.bc_rev) >> 8;
	bp->common.bc_ver = val;
	BNX2X_DEV_INFO("bc_ver %X\n", val);
	if (val < BNX2X_BC_VER) {
		/* for now only warn
		 * later we might need to enforce this */
		BNX2X_ERR("This driver needs bc_ver %X but found %X, "
			  "please upgrade BC\n", BNX2X_BC_VER, val);
	}
	bp->link_params.feature_config_flags |=
				(val >= REQ_BC_VER_4_VRFY_FIRST_PHY_OPT_MDL) ?
				FEATURE_CONFIG_BC_SUPPORTS_OPT_MDL_VRFY : 0;

	bp->link_params.feature_config_flags |=
		(val >= REQ_BC_VER_4_VRFY_SPECIFIC_PHY_OPT_MDL) ?
		FEATURE_CONFIG_BC_SUPPORTS_DUAL_PHY_OPT_MDL_VRFY : 0;

	/* WoL capability from the PCI PM capability (PME from D3cold) */
	pci_read_config_word(bp->pdev, bp->pm_cap + PCI_PM_PMC, &pmc);
	bp->flags |= (pmc & PCI_PM_CAP_PME_D3cold) ? 0 : NO_WOL_FLAG;

	BNX2X_DEV_INFO("%sWoL capable\n",
		       (bp->flags & NO_WOL_FLAG) ? "not " : "");

	val = SHMEM_RD(bp, dev_info.shared_hw_config.part_num);
	val2 = SHMEM_RD(bp, dev_info.shared_hw_config.part_num[4]);
	val3 = SHMEM_RD(bp, dev_info.shared_hw_config.part_num[8]);
	val4 = SHMEM_RD(bp, dev_info.shared_hw_config.part_num[12]);

	dev_info(&bp->pdev->dev, "part number %X-%X-%X-%X\n",
		 val, val2, val3, val4);
}
7837
Dmitry Kravkovf2e08992010-10-06 03:28:26 +00007838#define IGU_FID(val) GET_FIELD((val), IGU_REG_MAPPING_MEMORY_FID)
7839#define IGU_VEC(val) GET_FIELD((val), IGU_REG_MAPPING_MEMORY_VECTOR)
7840
/* Discover this function's IGU status-block resources.  In backward-
 * compatible interrupt mode the layout is fixed and computed directly;
 * in normal IGU mode the mapping CAM is scanned for entries belonging to
 * this PF.  Sets bp->igu_base_sb, bp->igu_sb_cnt and bp->igu_dsb_id.
 */
static void __devinit bnx2x_get_igu_cam_info(struct bnx2x *bp)
{
	int pfid = BP_FUNC(bp);
	int vn = BP_E1HVN(bp);
	int igu_sb_id;
	u32 val;
	u8 fid;

	/* 0xff = "not found yet" sentinel for the first non-default SB */
	bp->igu_base_sb = 0xff;
	bp->igu_sb_cnt = 0;
	if (CHIP_INT_MODE_IS_BC(bp)) {
		bp->igu_sb_cnt = min_t(u8, FP_SB_MAX_E1x,
				       NUM_IGU_SB_REQUIRED(bp->l2_cid_count));

		bp->igu_base_sb = (CHIP_MODE_IS_4_PORT(bp) ? pfid : vn) *
			FP_SB_MAX_E1x;

		bp->igu_dsb_id = E1HVN_MAX * FP_SB_MAX_E1x +
			(CHIP_MODE_IS_4_PORT(bp) ? pfid : vn);

		return;
	}

	/* IGU in normal mode - read CAM */
	for (igu_sb_id = 0; igu_sb_id < IGU_REG_MAPPING_MEMORY_SIZE;
	     igu_sb_id++) {
		val = REG_RD(bp, IGU_REG_MAPPING_MEMORY + igu_sb_id * 4);
		if (!(val & IGU_REG_MAPPING_MEMORY_VALID))
			continue;
		fid = IGU_FID(val);
		if ((fid & IGU_FID_ENCODE_IS_PF)) {
			/* only entries of our own PF are interesting */
			if ((fid & IGU_FID_PF_NUM_MASK) != pfid)
				continue;
			if (IGU_VEC(val) == 0)
				/* default status block */
				bp->igu_dsb_id = igu_sb_id;
			else {
				if (bp->igu_base_sb == 0xff)
					bp->igu_base_sb = igu_sb_id;
				bp->igu_sb_cnt++;
			}
		}
	}
	/* don't claim more SBs than the L2 CID count actually requires */
	bp->igu_sb_cnt = min_t(u8, bp->igu_sb_cnt,
			       NUM_IGU_SB_REQUIRED(bp->l2_cid_count));
	if (bp->igu_sb_cnt == 0)
		BNX2X_ERR("CAM configuration error\n");
}
7889
Eilon Greenstein34f80b02008-06-23 20:33:01 -07007890static void __devinit bnx2x_link_settings_supported(struct bnx2x *bp,
7891 u32 switch_cfg)
7892{
Yaniv Rosnera22f0782010-09-07 11:41:20 +00007893 int cfg_size = 0, idx, port = BP_PORT(bp);
7894
7895 /* Aggregation of supported attributes of all external phys */
7896 bp->port.supported[0] = 0;
7897 bp->port.supported[1] = 0;
Yaniv Rosnerb7737c92010-09-07 11:40:54 +00007898 switch (bp->link_params.num_phys) {
7899 case 1:
Yaniv Rosnera22f0782010-09-07 11:41:20 +00007900 bp->port.supported[0] = bp->link_params.phy[INT_PHY].supported;
7901 cfg_size = 1;
7902 break;
Yaniv Rosnerb7737c92010-09-07 11:40:54 +00007903 case 2:
Yaniv Rosnera22f0782010-09-07 11:41:20 +00007904 bp->port.supported[0] = bp->link_params.phy[EXT_PHY1].supported;
7905 cfg_size = 1;
7906 break;
7907 case 3:
7908 if (bp->link_params.multi_phy_config &
7909 PORT_HW_CFG_PHY_SWAPPED_ENABLED) {
7910 bp->port.supported[1] =
7911 bp->link_params.phy[EXT_PHY1].supported;
7912 bp->port.supported[0] =
7913 bp->link_params.phy[EXT_PHY2].supported;
7914 } else {
7915 bp->port.supported[0] =
7916 bp->link_params.phy[EXT_PHY1].supported;
7917 bp->port.supported[1] =
7918 bp->link_params.phy[EXT_PHY2].supported;
7919 }
7920 cfg_size = 2;
7921 break;
Yaniv Rosnerb7737c92010-09-07 11:40:54 +00007922 }
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02007923
Yaniv Rosnera22f0782010-09-07 11:41:20 +00007924 if (!(bp->port.supported[0] || bp->port.supported[1])) {
Yaniv Rosnerb7737c92010-09-07 11:40:54 +00007925 BNX2X_ERR("NVRAM config error. BAD phy config."
Yaniv Rosnera22f0782010-09-07 11:41:20 +00007926 "PHY1 config 0x%x, PHY2 config 0x%x\n",
Yaniv Rosnerb7737c92010-09-07 11:40:54 +00007927 SHMEM_RD(bp,
Yaniv Rosnera22f0782010-09-07 11:41:20 +00007928 dev_info.port_hw_config[port].external_phy_config),
7929 SHMEM_RD(bp,
7930 dev_info.port_hw_config[port].external_phy_config2));
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02007931 return;
Dmitry Kravkovf85582f2010-10-06 03:34:21 +00007932 }
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02007933
Yaniv Rosnerb7737c92010-09-07 11:40:54 +00007934 switch (switch_cfg) {
7935 case SWITCH_CFG_1G:
Eilon Greenstein34f80b02008-06-23 20:33:01 -07007936 bp->port.phy_addr = REG_RD(bp, NIG_REG_SERDES0_CTRL_PHY_ADDR +
7937 port*0x10);
7938 BNX2X_DEV_INFO("phy_addr 0x%x\n", bp->port.phy_addr);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02007939 break;
7940
7941 case SWITCH_CFG_10G:
Eilon Greenstein34f80b02008-06-23 20:33:01 -07007942 bp->port.phy_addr = REG_RD(bp, NIG_REG_XGXS0_CTRL_PHY_ADDR +
7943 port*0x18);
7944 BNX2X_DEV_INFO("phy_addr 0x%x\n", bp->port.phy_addr);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02007945 break;
7946
7947 default:
7948 BNX2X_ERR("BAD switch_cfg link_config 0x%x\n",
Yaniv Rosnera22f0782010-09-07 11:41:20 +00007949 bp->port.link_config[0]);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02007950 return;
7951 }
Yaniv Rosnera22f0782010-09-07 11:41:20 +00007952 /* mask what we support according to speed_cap_mask per configuration */
7953 for (idx = 0; idx < cfg_size; idx++) {
7954 if (!(bp->link_params.speed_cap_mask[idx] &
Yaniv Rosnerc18487e2008-06-23 20:27:52 -07007955 PORT_HW_CFG_SPEED_CAPABILITY_D0_10M_HALF))
Yaniv Rosnera22f0782010-09-07 11:41:20 +00007956 bp->port.supported[idx] &= ~SUPPORTED_10baseT_Half;
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02007957
Yaniv Rosnera22f0782010-09-07 11:41:20 +00007958 if (!(bp->link_params.speed_cap_mask[idx] &
Yaniv Rosnerc18487e2008-06-23 20:27:52 -07007959 PORT_HW_CFG_SPEED_CAPABILITY_D0_10M_FULL))
Yaniv Rosnera22f0782010-09-07 11:41:20 +00007960 bp->port.supported[idx] &= ~SUPPORTED_10baseT_Full;
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02007961
Yaniv Rosnera22f0782010-09-07 11:41:20 +00007962 if (!(bp->link_params.speed_cap_mask[idx] &
Yaniv Rosnerc18487e2008-06-23 20:27:52 -07007963 PORT_HW_CFG_SPEED_CAPABILITY_D0_100M_HALF))
Yaniv Rosnera22f0782010-09-07 11:41:20 +00007964 bp->port.supported[idx] &= ~SUPPORTED_100baseT_Half;
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02007965
Yaniv Rosnera22f0782010-09-07 11:41:20 +00007966 if (!(bp->link_params.speed_cap_mask[idx] &
Yaniv Rosnerc18487e2008-06-23 20:27:52 -07007967 PORT_HW_CFG_SPEED_CAPABILITY_D0_100M_FULL))
Yaniv Rosnera22f0782010-09-07 11:41:20 +00007968 bp->port.supported[idx] &= ~SUPPORTED_100baseT_Full;
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02007969
Yaniv Rosnera22f0782010-09-07 11:41:20 +00007970 if (!(bp->link_params.speed_cap_mask[idx] &
Yaniv Rosnerc18487e2008-06-23 20:27:52 -07007971 PORT_HW_CFG_SPEED_CAPABILITY_D0_1G))
Yaniv Rosnera22f0782010-09-07 11:41:20 +00007972 bp->port.supported[idx] &= ~(SUPPORTED_1000baseT_Half |
Dmitry Kravkovf85582f2010-10-06 03:34:21 +00007973 SUPPORTED_1000baseT_Full);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02007974
Yaniv Rosnera22f0782010-09-07 11:41:20 +00007975 if (!(bp->link_params.speed_cap_mask[idx] &
Yaniv Rosnerc18487e2008-06-23 20:27:52 -07007976 PORT_HW_CFG_SPEED_CAPABILITY_D0_2_5G))
Yaniv Rosnera22f0782010-09-07 11:41:20 +00007977 bp->port.supported[idx] &= ~SUPPORTED_2500baseX_Full;
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02007978
Yaniv Rosnera22f0782010-09-07 11:41:20 +00007979 if (!(bp->link_params.speed_cap_mask[idx] &
Yaniv Rosnerc18487e2008-06-23 20:27:52 -07007980 PORT_HW_CFG_SPEED_CAPABILITY_D0_10G))
Yaniv Rosnera22f0782010-09-07 11:41:20 +00007981 bp->port.supported[idx] &= ~SUPPORTED_10000baseT_Full;
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02007982
Yaniv Rosnera22f0782010-09-07 11:41:20 +00007983 }
7984
7985 BNX2X_DEV_INFO("supported 0x%x 0x%x\n", bp->port.supported[0],
7986 bp->port.supported[1]);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02007987}
7988
/* Translate the NVRAM link_config for each PHY configuration into the
 * driver's requested link parameters: req_line_speed, req_duplex,
 * req_flow_ctrl and the advertising mask.  A requested fixed speed that the
 * PHY does not support is reported as an NVRAM config error and aborts the
 * parse.  Flow-control AUTO is downgraded to NONE when autoneg is not
 * supported.
 */
static void __devinit bnx2x_link_settings_requested(struct bnx2x *bp)
{
	u32 link_config, idx, cfg_size = 0;
	bp->port.advertising[0] = 0;
	bp->port.advertising[1] = 0;
	/* one configuration per external PHY pair (see num_phys) */
	switch (bp->link_params.num_phys) {
	case 1:
	case 2:
		cfg_size = 1;
		break;
	case 3:
		cfg_size = 2;
		break;
	}
	for (idx = 0; idx < cfg_size; idx++) {
		bp->link_params.req_duplex[idx] = DUPLEX_FULL;
		link_config = bp->port.link_config[idx];
		switch (link_config & PORT_FEATURE_LINK_SPEED_MASK) {
		case PORT_FEATURE_LINK_SPEED_AUTO:
			if (bp->port.supported[idx] & SUPPORTED_Autoneg) {
				bp->link_params.req_line_speed[idx] =
					SPEED_AUTO_NEG;
				bp->port.advertising[idx] |=
					bp->port.supported[idx];
			} else {
				/* force 10G, no AN */
				bp->link_params.req_line_speed[idx] =
					SPEED_10000;
				bp->port.advertising[idx] |=
					(ADVERTISED_10000baseT_Full |
					 ADVERTISED_FIBRE);
				continue;
			}
			break;

		case PORT_FEATURE_LINK_SPEED_10M_FULL:
			if (bp->port.supported[idx] & SUPPORTED_10baseT_Full) {
				bp->link_params.req_line_speed[idx] =
					SPEED_10;
				bp->port.advertising[idx] |=
					(ADVERTISED_10baseT_Full |
					 ADVERTISED_TP);
			} else {
				BNX2X_ERROR("NVRAM config error. "
					    "Invalid link_config 0x%x"
					    " speed_cap_mask 0x%x\n",
					    link_config,
				    bp->link_params.speed_cap_mask[idx]);
				return;
			}
			break;

		case PORT_FEATURE_LINK_SPEED_10M_HALF:
			if (bp->port.supported[idx] & SUPPORTED_10baseT_Half) {
				bp->link_params.req_line_speed[idx] =
					SPEED_10;
				bp->link_params.req_duplex[idx] =
					DUPLEX_HALF;
				bp->port.advertising[idx] |=
					(ADVERTISED_10baseT_Half |
					 ADVERTISED_TP);
			} else {
				BNX2X_ERROR("NVRAM config error. "
					    "Invalid link_config 0x%x"
					    " speed_cap_mask 0x%x\n",
					    link_config,
					    bp->link_params.speed_cap_mask[idx]);
				return;
			}
			break;

		case PORT_FEATURE_LINK_SPEED_100M_FULL:
			if (bp->port.supported[idx] &
			    SUPPORTED_100baseT_Full) {
				bp->link_params.req_line_speed[idx] =
					SPEED_100;
				bp->port.advertising[idx] |=
					(ADVERTISED_100baseT_Full |
					 ADVERTISED_TP);
			} else {
				BNX2X_ERROR("NVRAM config error. "
					    "Invalid link_config 0x%x"
					    " speed_cap_mask 0x%x\n",
					    link_config,
					    bp->link_params.speed_cap_mask[idx]);
				return;
			}
			break;

		case PORT_FEATURE_LINK_SPEED_100M_HALF:
			if (bp->port.supported[idx] &
			    SUPPORTED_100baseT_Half) {
				bp->link_params.req_line_speed[idx] =
					SPEED_100;
				bp->link_params.req_duplex[idx] =
					DUPLEX_HALF;
				bp->port.advertising[idx] |=
					(ADVERTISED_100baseT_Half |
					 ADVERTISED_TP);
			} else {
				BNX2X_ERROR("NVRAM config error. "
					    "Invalid link_config 0x%x"
					    " speed_cap_mask 0x%x\n",
					    link_config,
					    bp->link_params.speed_cap_mask[idx]);
				return;
			}
			break;

		case PORT_FEATURE_LINK_SPEED_1G:
			if (bp->port.supported[idx] &
			    SUPPORTED_1000baseT_Full) {
				bp->link_params.req_line_speed[idx] =
					SPEED_1000;
				bp->port.advertising[idx] |=
					(ADVERTISED_1000baseT_Full |
					 ADVERTISED_TP);
			} else {
				BNX2X_ERROR("NVRAM config error. "
					    "Invalid link_config 0x%x"
					    " speed_cap_mask 0x%x\n",
					    link_config,
					    bp->link_params.speed_cap_mask[idx]);
				return;
			}
			break;

		case PORT_FEATURE_LINK_SPEED_2_5G:
			if (bp->port.supported[idx] &
			    SUPPORTED_2500baseX_Full) {
				bp->link_params.req_line_speed[idx] =
					SPEED_2500;
				bp->port.advertising[idx] |=
					(ADVERTISED_2500baseX_Full |
					 ADVERTISED_TP);
			} else {
				BNX2X_ERROR("NVRAM config error. "
					    "Invalid link_config 0x%x"
					    " speed_cap_mask 0x%x\n",
					    link_config,
					    bp->link_params.speed_cap_mask[idx]);
				return;
			}
			break;

		case PORT_FEATURE_LINK_SPEED_10G_CX4:
		case PORT_FEATURE_LINK_SPEED_10G_KX4:
		case PORT_FEATURE_LINK_SPEED_10G_KR:
			if (bp->port.supported[idx] &
			    SUPPORTED_10000baseT_Full) {
				bp->link_params.req_line_speed[idx] =
					SPEED_10000;
				bp->port.advertising[idx] |=
					(ADVERTISED_10000baseT_Full |
					 ADVERTISED_FIBRE);
			} else {
				BNX2X_ERROR("NVRAM config error. "
					    "Invalid link_config 0x%x"
					    " speed_cap_mask 0x%x\n",
					    link_config,
					    bp->link_params.speed_cap_mask[idx]);
				return;
			}
			break;

		default:
			/* unknown speed selection: fall back to autoneg
			 * with everything the PHY supports advertised
			 */
			BNX2X_ERROR("NVRAM config error. "
				    "BAD link speed link_config 0x%x\n",
				    link_config);
			bp->link_params.req_line_speed[idx] =
						SPEED_AUTO_NEG;
			bp->port.advertising[idx] =
						bp->port.supported[idx];
			break;
		}

		bp->link_params.req_flow_ctrl[idx] = (link_config &
					 PORT_FEATURE_FLOW_CONTROL_MASK);
		/* flow-control AUTO needs autoneg; otherwise disable it */
		if ((bp->link_params.req_flow_ctrl[idx] ==
		     BNX2X_FLOW_CTRL_AUTO) &&
		    !(bp->port.supported[idx] & SUPPORTED_Autoneg)) {
			bp->link_params.req_flow_ctrl[idx] =
				BNX2X_FLOW_CTRL_NONE;
		}

		BNX2X_DEV_INFO("req_line_speed %d req_duplex %d req_flow_ctrl"
			       " 0x%x advertising 0x%x\n",
			       bp->link_params.req_line_speed[idx],
			       bp->link_params.req_duplex[idx],
			       bp->link_params.req_flow_ctrl[idx],
			       bp->port.advertising[idx]);
	}
}
8182
Michael Chane665bfd2009-10-10 13:46:54 +00008183static void __devinit bnx2x_set_mac_buf(u8 *mac_buf, u32 mac_lo, u16 mac_hi)
8184{
8185 mac_hi = cpu_to_be16(mac_hi);
8186 mac_lo = cpu_to_be32(mac_lo);
8187 memcpy(mac_buf, &mac_hi, sizeof(mac_hi));
8188 memcpy(mac_buf + sizeof(mac_hi), &mac_lo, sizeof(mac_lo));
8189}
8190
/* bnx2x_get_port_hwinfo - read per-port link configuration from shmem.
 *
 * Fills bp->link_params and bp->port from the NVRAM-backed shared
 * memory (lane config, speed capability masks and link configs for
 * both PHYs, multi-PHY config), derives the WoL default, probes the
 * PHY(s) and resolves the requested link settings.  Also decides which
 * MDIO address (internal vs. external PHY) to expose via bp->mdio and
 * whether HW lock is needed for MDC/MDIO access.
 */
static void __devinit bnx2x_get_port_hwinfo(struct bnx2x *bp)
{
	int port = BP_PORT(bp);
	u32 config;
	u32 ext_phy_type, ext_phy_config;

	bp->link_params.bp = bp;
	bp->link_params.port = port;

	bp->link_params.lane_config =
		SHMEM_RD(bp, dev_info.port_hw_config[port].lane_config);

	/* Speed capability masks and link configs are read per PHY:
	 * index 0 for the first (or only) PHY, index 1 for the second
	 * one in dual-media configurations.
	 */
	bp->link_params.speed_cap_mask[0] =
		SHMEM_RD(bp,
			 dev_info.port_hw_config[port].speed_capability_mask);
	bp->link_params.speed_cap_mask[1] =
		SHMEM_RD(bp,
			 dev_info.port_hw_config[port].speed_capability_mask2);
	bp->port.link_config[0] =
		SHMEM_RD(bp, dev_info.port_feature_config[port].link_config);

	bp->port.link_config[1] =
		SHMEM_RD(bp, dev_info.port_feature_config[port].link_config2);

	bp->link_params.multi_phy_config =
		SHMEM_RD(bp, dev_info.port_hw_config[port].multi_phy_config);
	/* If the device is capable of WoL, set the default state according
	 * to the HW
	 */
	config = SHMEM_RD(bp, dev_info.port_feature_config[port].config);
	bp->wol = (!(bp->flags & NO_WOL_FLAG) &&
		   (config & PORT_FEATURE_WOL_ENABLED));

	BNX2X_DEV_INFO("lane_config 0x%08x "
		       "speed_cap_mask0 0x%08x link_config0 0x%08x\n",
		       bp->link_params.lane_config,
		       bp->link_params.speed_cap_mask[0],
		       bp->port.link_config[0]);

	bp->link_params.switch_cfg = (bp->port.link_config[0] &
				      PORT_FEATURE_CONNECTED_SWITCH_MASK);
	/* Probe PHYs before resolving supported/requested settings -
	 * bnx2x_link_settings_supported() depends on the probed PHY data.
	 */
	bnx2x_phy_probe(&bp->link_params);
	bnx2x_link_settings_supported(bp, bp->link_params.switch_cfg);

	bnx2x_link_settings_requested(bp);

	/*
	 * If connected directly, work with the internal PHY, otherwise, work
	 * with the external PHY
	 */
	ext_phy_config =
		SHMEM_RD(bp,
			 dev_info.port_hw_config[port].external_phy_config);
	ext_phy_type = XGXS_EXT_PHY_TYPE(ext_phy_config);
	if (ext_phy_type == PORT_HW_CFG_XGXS_EXT_PHY_TYPE_DIRECT)
		bp->mdio.prtad = bp->port.phy_addr;

	else if ((ext_phy_type != PORT_HW_CFG_XGXS_EXT_PHY_TYPE_FAILURE) &&
		 (ext_phy_type != PORT_HW_CFG_XGXS_EXT_PHY_TYPE_NOT_CONN))
		bp->mdio.prtad =
			XGXS_EXT_PHY_ADDR(ext_phy_config);

	/*
	 * Check if hw lock is required to access MDC/MDIO bus to the PHY(s)
	 * In MF mode, it is set to cover self test cases
	 */
	if (IS_MF(bp))
		bp->port.need_hw_lock = 1;
	else
		bp->port.need_hw_lock = bnx2x_hw_lock_required(bp,
					bp->common.shmem_base,
					bp->common.shmem2_base);
}
Eilon Greenstein01cd4522009-08-12 08:23:08 +00008264
#ifdef BCM_CNIC
/* bnx2x_get_cnic_info - read iSCSI/FCoE connection limits from shmem.
 *
 * The license key words are XOR-obfuscated with FW_ENCODE_32BIT_PATTERN;
 * decode them and extract the maximum allowed iSCSI and FCoE connection
 * counts.  A zero limit means the feature is not licensed, so the
 * corresponding NO_* flag is set on bp->flags.
 */
static void __devinit bnx2x_get_cnic_info(struct bnx2x *bp)
{
	u32 max_iscsi_conn = FW_ENCODE_32BIT_PATTERN ^ SHMEM_RD(bp,
				drv_lic_key[BP_PORT(bp)].max_iscsi_conn);
	u32 max_fcoe_conn = FW_ENCODE_32BIT_PATTERN ^ SHMEM_RD(bp,
				drv_lic_key[BP_PORT(bp)].max_fcoe_conn);

	/* Get the number of maximum allowed iSCSI and FCoE connections */
	bp->cnic_eth_dev.max_iscsi_conn =
		(max_iscsi_conn & BNX2X_MAX_ISCSI_INIT_CONN_MASK) >>
		BNX2X_MAX_ISCSI_INIT_CONN_SHIFT;

	bp->cnic_eth_dev.max_fcoe_conn =
		(max_fcoe_conn & BNX2X_MAX_FCOE_INIT_CONN_MASK) >>
		BNX2X_MAX_FCOE_INIT_CONN_SHIFT;

	BNX2X_DEV_INFO("max_iscsi_conn 0x%x max_fcoe_conn 0x%x\n",
		       bp->cnic_eth_dev.max_iscsi_conn,
		       bp->cnic_eth_dev.max_fcoe_conn);

	/* If maximum allowed number of connections is zero -
	 * disable the feature.
	 */
	if (!bp->cnic_eth_dev.max_iscsi_conn)
		bp->flags |= NO_ISCSI_OOO_FLAG | NO_ISCSI_FLAG;

	if (!bp->cnic_eth_dev.max_fcoe_conn)
		bp->flags |= NO_FCOE_FLAG;
}
#endif
8296
/* bnx2x_get_mac_hwinfo - derive the primary, iSCSI and FCoE MAC addresses.
 *
 * Source depends on the operating mode:
 *  - no MCP: generate a random MAC (debug workaround);
 *  - multi-function (MF): per-function MACs from the MF config, plus
 *    optional iSCSI/FCoE MACs in switch-independent (SI) mode;
 *  - single-function: MACs from the per-port HW config.
 * Finally validates the CNIC MACs and disables iSCSI/FCoE when their
 * MAC configuration is invalid.
 */
static void __devinit bnx2x_get_mac_hwinfo(struct bnx2x *bp)
{
	u32 val, val2;
	int func = BP_ABS_FUNC(bp);
	int port = BP_PORT(bp);
#ifdef BCM_CNIC
	u8 *iscsi_mac = bp->cnic_eth_dev.iscsi_mac;
	u8 *fip_mac = bp->fip_mac;
#endif

	if (BP_NOMCP(bp)) {
		BNX2X_ERROR("warning: random MAC workaround active\n");
		random_ether_addr(bp->dev->dev_addr);
	} else if (IS_MF(bp)) {
		/* mac_upper holds the top 2 bytes, mac_lower the bottom 4 */
		val2 = MF_CFG_RD(bp, func_mf_config[func].mac_upper);
		val = MF_CFG_RD(bp, func_mf_config[func].mac_lower);
		if ((val2 != FUNC_MF_CFG_UPPERMAC_DEFAULT) &&
		    (val != FUNC_MF_CFG_LOWERMAC_DEFAULT))
			bnx2x_set_mac_buf(bp->dev->dev_addr, val, val2);

#ifdef BCM_CNIC
		/* iSCSI and FCoE NPAR MACs: if either the iSCSI or the
		 * FCoE MAC is missing, the corresponding feature is
		 * disabled.
		 */
		if (IS_MF_SI(bp)) {
			u32 cfg = MF_CFG_RD(bp, func_ext_config[func].func_cfg);
			if (cfg & MACP_FUNC_CFG_FLAGS_ISCSI_OFFLOAD) {
				val2 = MF_CFG_RD(bp, func_ext_config[func].
						 iscsi_mac_addr_upper);
				val = MF_CFG_RD(bp, func_ext_config[func].
						iscsi_mac_addr_lower);
				BNX2X_DEV_INFO("Read iSCSI MAC: "
					       "0x%x:0x%04x\n", val2, val);
				bnx2x_set_mac_buf(iscsi_mac, val, val2);
			} else
				bp->flags |= NO_ISCSI_OOO_FLAG | NO_ISCSI_FLAG;

			if (cfg & MACP_FUNC_CFG_FLAGS_FCOE_OFFLOAD) {
				val2 = MF_CFG_RD(bp, func_ext_config[func].
						 fcoe_mac_addr_upper);
				val = MF_CFG_RD(bp, func_ext_config[func].
						fcoe_mac_addr_lower);
				BNX2X_DEV_INFO("Read FCoE MAC to "
					       "0x%x:0x%04x\n", val2, val);
				bnx2x_set_mac_buf(fip_mac, val, val2);

			} else
				bp->flags |= NO_FCOE_FLAG;
		}
#endif
	} else {
		/* in SF read MACs from port configuration */
		val2 = SHMEM_RD(bp, dev_info.port_hw_config[port].mac_upper);
		val = SHMEM_RD(bp, dev_info.port_hw_config[port].mac_lower);
		bnx2x_set_mac_buf(bp->dev->dev_addr, val, val2);

#ifdef BCM_CNIC
		val2 = SHMEM_RD(bp, dev_info.port_hw_config[port].
				iscsi_mac_upper);
		val = SHMEM_RD(bp, dev_info.port_hw_config[port].
			       iscsi_mac_lower);
		bnx2x_set_mac_buf(iscsi_mac, val, val2);
#endif
	}

	memcpy(bp->link_params.mac_addr, bp->dev->dev_addr, ETH_ALEN);
	memcpy(bp->dev->perm_addr, bp->dev->dev_addr, ETH_ALEN);

#ifdef BCM_CNIC
	/* Set the FCoE MAC in modes other than MF_SI (where it was read
	 * from the MF config above): SD mode reuses the primary MAC,
	 * non-MF reuses the iSCSI MAC.
	 */
	if (!CHIP_IS_E1x(bp)) {
		if (IS_MF_SD(bp))
			memcpy(fip_mac, bp->dev->dev_addr, ETH_ALEN);
		else if (!IS_MF(bp))
			memcpy(fip_mac, iscsi_mac, ETH_ALEN);
	}

	/* Disable iSCSI if MAC configuration is
	 * invalid.
	 */
	if (!is_valid_ether_addr(iscsi_mac)) {
		bp->flags |= NO_ISCSI_FLAG;
		memset(iscsi_mac, 0, ETH_ALEN);
	}

	/* Disable FCoE if MAC configuration is
	 * invalid.
	 */
	if (!is_valid_ether_addr(fip_mac)) {
		bp->flags |= NO_FCOE_FLAG;
		memset(bp->fip_mac, 0, ETH_ALEN);
	}
#endif
}
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02008391
/* bnx2x_get_hwinfo - top-level HW/NVRAM probe during device init.
 *
 * Reads the common (chip-wide) info, configures the interrupt block
 * (HC for E1x, IGU for newer chips), detects and validates the
 * multi-function (MF) mode from the MF configuration in shmem, adjusts
 * the status-block budget, then reads port and MAC information.
 *
 * Returns 0 on success or -EPERM when the MF configuration is invalid
 * (no valid outer VLAN in SD mode, or a non-zero VN in SF mode).
 */
static int __devinit bnx2x_get_hwinfo(struct bnx2x *bp)
{
	int /* absolute (chip-wide) function number */func = BP_ABS_FUNC(bp);
	int vn;
	u32 val = 0;
	int rc = 0;

	bnx2x_get_common_hwinfo(bp);

	if (CHIP_IS_E1x(bp)) {
		/* E1/E1H use the legacy HC interrupt controller */
		bp->common.int_block = INT_BLOCK_HC;

		bp->igu_dsb_id = DEF_SB_IGU_ID;
		bp->igu_base_sb = 0;
		bp->igu_sb_cnt = min_t(u8, FP_SB_MAX_E1x,
				       NUM_IGU_SB_REQUIRED(bp->l2_cid_count));
	} else {
		bp->common.int_block = INT_BLOCK_IGU;
		val = REG_RD(bp, IGU_REG_BLOCK_CONFIGURATION);
		if (val & IGU_BLOCK_CONFIGURATION_REG_BACKWARD_COMP_EN) {
			DP(NETIF_MSG_PROBE, "IGU Backward Compatible Mode\n");
			bp->common.int_block |= INT_BLOCK_MODE_BW_COMP;
		} else
			DP(NETIF_MSG_PROBE, "IGU Normal Mode\n");

		bnx2x_get_igu_cam_info(bp);

	}
	DP(NETIF_MSG_PROBE, "igu_dsb_id %d igu_base_sb %d igu_sb_cnt %d\n",
	   bp->igu_dsb_id, bp->igu_base_sb, bp->igu_sb_cnt);

	/*
	 * Initialize MF configuration
	 */

	bp->mf_ov = 0;
	bp->mf_mode = 0;
	vn = BP_E1HVN(bp);

	if (!CHIP_IS_E1(bp) && !BP_NOMCP(bp)) {
		DP(NETIF_MSG_PROBE,
			    "shmem2base 0x%x, size %d, mfcfg offset %d\n",
			    bp->common.shmem2_base, SHMEM2_RD(bp, size),
			    (u32)offsetof(struct shmem2_region, mf_cfg_addr));
		/* Locate the MF config: either advertised in shmem2 or at
		 * the legacy fixed offset past the function mailboxes.
		 */
		if (SHMEM2_HAS(bp, mf_cfg_addr))
			bp->common.mf_cfg_base = SHMEM2_RD(bp, mf_cfg_addr);
		else
			bp->common.mf_cfg_base = bp->common.shmem_base +
				offsetof(struct shmem_region, func_mb) +
				E1H_FUNC_MAX * sizeof(struct drv_func_mb);
		/*
		 * get mf configuration:
		 * 1. existence of MF configuration
		 * 2. MAC address must be legal (check only upper bytes)
		 *    for Switch-Independent mode;
		 *    OVLAN must be legal for Switch-Dependent mode
		 * 3. SF_MODE configures specific MF mode
		 */
		if (bp->common.mf_cfg_base != SHMEM_MF_CFG_ADDR_NONE) {
			/* get mf configuration */
			val = SHMEM_RD(bp,
				       dev_info.shared_feature_config.config);
			val &= SHARED_FEAT_CFG_FORCE_SF_MODE_MASK;

			switch (val) {
			case SHARED_FEAT_CFG_FORCE_SF_MODE_SWITCH_INDEPT:
				val = MF_CFG_RD(bp, func_mf_config[func].
						mac_upper);
				/* check for legal mac (upper bytes)*/
				if (val != 0xffff) {
					bp->mf_mode = MULTI_FUNCTION_SI;
					bp->mf_config[vn] = MF_CFG_RD(bp,
						   func_mf_config[func].config);
				} else
					DP(NETIF_MSG_PROBE, "illegal MAC "
						    "address for SI\n");
				break;
			case SHARED_FEAT_CFG_FORCE_SF_MODE_MF_ALLOWED:
				/* get OV configuration */
				val = MF_CFG_RD(bp,
					func_mf_config[FUNC_0].e1hov_tag);
				val &= FUNC_MF_CFG_E1HOV_TAG_MASK;

				if (val != FUNC_MF_CFG_E1HOV_TAG_DEFAULT) {
					bp->mf_mode = MULTI_FUNCTION_SD;
					bp->mf_config[vn] = MF_CFG_RD(bp,
						func_mf_config[func].config);
				} else
					DP(NETIF_MSG_PROBE, "illegal OV for "
						    "SD\n");
				break;
			default:
				/* Unknown configuration: reset mf_config */
				bp->mf_config[vn] = 0;
				DP(NETIF_MSG_PROBE, "Unknown MF mode 0x%x\n",
				   val);
			}
		}

		BNX2X_DEV_INFO("%s function mode\n",
			       IS_MF(bp) ? "multi" : "single");

		/* Validate the detected MF mode for this function */
		switch (bp->mf_mode) {
		case MULTI_FUNCTION_SD:
			val = MF_CFG_RD(bp, func_mf_config[func].e1hov_tag) &
			      FUNC_MF_CFG_E1HOV_TAG_MASK;
			if (val != FUNC_MF_CFG_E1HOV_TAG_DEFAULT) {
				bp->mf_ov = val;
				BNX2X_DEV_INFO("MF OV for func %d is %d"
					       " (0x%04x)\n", func,
					       bp->mf_ov, bp->mf_ov);
			} else {
				BNX2X_ERR("No valid MF OV for func %d,"
					  " aborting\n", func);
				rc = -EPERM;
			}
			break;
		case MULTI_FUNCTION_SI:
			BNX2X_DEV_INFO("func %d is in MF "
				       "switch-independent mode\n", func);
			break;
		default:
			/* single-function mode: only VN 0 is legal */
			if (vn) {
				BNX2X_ERR("VN %d in single function mode,"
					  " aborting\n", vn);
				rc = -EPERM;
			}
			break;
		}

	}

	/* adjust igu_sb_cnt to MF for E1x */
	if (CHIP_IS_E1x(bp) && IS_MF(bp))
		bp->igu_sb_cnt /= E1HVN_MAX;

	/*
	 * adjust E2 sb count: to be removed when FW will support
	 * more then 16 L2 clients
	 */
#define MAX_L2_CLIENTS				16
	if (CHIP_IS_E2(bp))
		bp->igu_sb_cnt = min_t(u8, bp->igu_sb_cnt,
				       MAX_L2_CLIENTS / (IS_MF(bp) ? 4 : 1));

	if (!BP_NOMCP(bp)) {
		bnx2x_get_port_hwinfo(bp);

		bp->fw_seq =
		    (SHMEM_RD(bp, func_mb[BP_FW_MB_IDX(bp)].drv_mb_header) &
		     DRV_MSG_SEQ_NUMBER_MASK);
		BNX2X_DEV_INFO("fw_seq 0x%08x\n", bp->fw_seq);
	}

	/* Get MAC addresses */
	bnx2x_get_mac_hwinfo(bp);

#ifdef BCM_CNIC
	bnx2x_get_cnic_info(bp);
#endif

	return rc;
}
8555
/* bnx2x_read_fwinfo - read the OEM firmware version string from PCI VPD.
 *
 * Parses the device's Vital Product Data: finds the read-only LRDT tag,
 * checks the manufacturer-ID keyword against Dell's vendor ID (both
 * lower- and upper-case hex forms), and if it matches copies the V0
 * vendor-specific field into bp->fw_ver (followed by a space, so the
 * caller can append more version info).  Any parse failure leaves
 * bp->fw_ver zeroed - this is a best-effort, informational read.
 */
static void __devinit bnx2x_read_fwinfo(struct bnx2x *bp)
{
	int cnt, i, block_end, rodi;
	char vpd_data[BNX2X_VPD_LEN+1];
	char str_id_reg[VENDOR_ID_LEN+1];
	char str_id_cap[VENDOR_ID_LEN+1];
	u8 len;

	cnt = pci_read_vpd(bp->pdev, 0, BNX2X_VPD_LEN, vpd_data);
	memset(bp->fw_ver, 0, sizeof(bp->fw_ver));

	/* require a full VPD read; a short read means no usable data */
	if (cnt < BNX2X_VPD_LEN)
		goto out_not_found;

	i = pci_vpd_find_tag(vpd_data, 0, BNX2X_VPD_LEN,
			     PCI_VPD_LRDT_RO_DATA);
	if (i < 0)
		goto out_not_found;


	block_end = i + PCI_VPD_LRDT_TAG_SIZE +
		    pci_vpd_lrdt_size(&vpd_data[i]);

	i += PCI_VPD_LRDT_TAG_SIZE;

	/* the RO block must fit entirely inside the data we read */
	if (block_end > BNX2X_VPD_LEN)
		goto out_not_found;

	rodi = pci_vpd_find_info_keyword(vpd_data, i, block_end,
				   PCI_VPD_RO_KEYWORD_MFR_ID);
	if (rodi < 0)
		goto out_not_found;

	len = pci_vpd_info_field_size(&vpd_data[rodi]);

	if (len != VENDOR_ID_LEN)
		goto out_not_found;

	rodi += PCI_VPD_INFO_FLD_HDR_SIZE;

	/* vendor specific info */
	snprintf(str_id_reg, VENDOR_ID_LEN + 1, "%04x", PCI_VENDOR_ID_DELL);
	snprintf(str_id_cap, VENDOR_ID_LEN + 1, "%04X", PCI_VENDOR_ID_DELL);
	if (!strncmp(str_id_reg, &vpd_data[rodi], VENDOR_ID_LEN) ||
	    !strncmp(str_id_cap, &vpd_data[rodi], VENDOR_ID_LEN)) {

		rodi = pci_vpd_find_info_keyword(vpd_data, i, block_end,
						PCI_VPD_RO_KEYWORD_VENDOR0);
		if (rodi >= 0) {
			len = pci_vpd_info_field_size(&vpd_data[rodi]);

			rodi += PCI_VPD_INFO_FLD_HDR_SIZE;

			/* bound by fw_ver's size (32) and the VPD buffer */
			if (len < 32 && (len + rodi) <= BNX2X_VPD_LEN) {
				memcpy(bp->fw_ver, &vpd_data[rodi], len);
				bp->fw_ver[len] = ' ';
			}
		}
		return;
	}
out_not_found:
	return;
}
8619
/* bnx2x_init_bp - one-time software initialization of the bnx2x instance.
 *
 * Initializes locks and work items, probes HW/NVRAM info, allocates
 * the per-bp memory, reads the VPD firmware info, resets a chip left
 * active by a previous UNDI driver, and sets the module-parameter-
 * driven defaults (multi/int mode, TPA, dropless FC, MRRS, coalescing,
 * timer, DCBX).  Returns 0 or a negative error from the HW probe /
 * allocation steps.
 */
static int __devinit bnx2x_init_bp(struct bnx2x *bp)
{
	int func;
	int timer_interval;
	int rc;

	mutex_init(&bp->port.phy_mutex);
	mutex_init(&bp->fw_mb_mutex);
	spin_lock_init(&bp->stats_lock);
#ifdef BCM_CNIC
	mutex_init(&bp->cnic_mutex);
#endif

	INIT_DELAYED_WORK(&bp->sp_task, bnx2x_sp_task);
	INIT_DELAYED_WORK(&bp->reset_task, bnx2x_reset_task);

	rc = bnx2x_get_hwinfo(bp);

	if (!rc)
		rc = bnx2x_alloc_mem_bp(bp);

	bnx2x_read_fwinfo(bp);

	func = BP_FUNC(bp);

	/* need to reset chip if undi was active */
	if (!BP_NOMCP(bp))
		bnx2x_undi_unload(bp);

	if (CHIP_REV_IS_FPGA(bp))
		dev_err(&bp->pdev->dev, "FPGA detected\n");

	if (BP_NOMCP(bp) && (func == 0))
		dev_err(&bp->pdev->dev, "MCP disabled, "
					"must load devices in order!\n");

	bp->multi_mode = multi_mode;
	bp->int_mode = int_mode;

	/* Set TPA flags */
	if (disable_tpa) {
		bp->flags &= ~TPA_ENABLE_FLAG;
		bp->dev->features &= ~NETIF_F_LRO;
	} else {
		bp->flags |= TPA_ENABLE_FLAG;
		bp->dev->features |= NETIF_F_LRO;
	}
	bp->disable_tpa = disable_tpa;

	/* dropless flow control is not supported on E1 */
	if (CHIP_IS_E1(bp))
		bp->dropless_fc = 0;
	else
		bp->dropless_fc = dropless_fc;

	bp->mrrs = mrrs;

	bp->tx_ring_size = MAX_TX_AVAIL;

	/* make sure that the numbers are in the right granularity */
	bp->tx_ticks = (50 / BNX2X_BTR) * BNX2X_BTR;
	bp->rx_ticks = (25 / BNX2X_BTR) * BNX2X_BTR;

	timer_interval = (CHIP_REV_IS_SLOW(bp) ? 5*HZ : HZ);
	bp->current_interval = (poll ? poll : timer_interval);

	init_timer(&bp->timer);
	bp->timer.expires = jiffies + bp->current_interval;
	bp->timer.data = (unsigned long) bp;
	bp->timer.function = bnx2x_timer;

	bnx2x_dcbx_set_state(bp, true, BNX2X_DCBX_ENABLED_ON_NEG_ON);
	bnx2x_dcbx_init_params(bp);

	return rc;
}
8695
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02008696
Dmitry Kravkovde0c62d2010-07-27 12:35:24 +00008697/****************************************************************************
8698* General service functions
8699****************************************************************************/
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02008700
/* ndo_open callback: power the device up, complete a pending recovery
 * flow if one is in progress, then load the NIC.
 * called with rtnl_lock
 */
static int bnx2x_open(struct net_device *dev)
{
	struct bnx2x *bp = netdev_priv(dev);

	netif_carrier_off(dev);

	bnx2x_set_power_state(bp, PCI_D0);

	if (!bnx2x_reset_is_done(bp)) {
		/* do { } while (0) is used only as a break-out scope */
		do {
			/* Reset MCP mail box sequence if there is on going
			 * recovery
			 */
			bp->fw_seq = 0;

			/* If this is the first function to load and reset
			 * done is still not cleared, take leadership and run
			 * "process kill".  We don't check the attention
			 * state here because it may have already been
			 * cleared by a "common" reset, but we shall proceed
			 * with "process kill" anyway.
			 */
			if ((bnx2x_get_load_cnt(bp) == 0) &&
				bnx2x_trylock_hw_lock(bp,
				HW_LOCK_RESOURCE_RESERVED_08) &&
				(!bnx2x_leader_reset(bp))) {
				DP(NETIF_MSG_HW, "Recovered in open\n");
				break;
			}

			/* recovery not possible now - power down and bail */
			bnx2x_set_power_state(bp, PCI_D3hot);

			printk(KERN_ERR"%s: Recovery flow hasn't been properly"
			" completed yet. Try again later. If u still see this"
			" message after a few retries then power cycle is"
			" required.\n", bp->dev->name);

			return -EAGAIN;
		} while (0);
	}

	bp->recovery_state = BNX2X_RECOVERY_DONE;

	return bnx2x_nic_load(bp, LOAD_OPEN);
}
8746
/* ndo_stop callback: unload the NIC and put the device into low-power
 * state.  Always succeeds.
 * called with rtnl_lock
 */
static int bnx2x_close(struct net_device *dev)
{
	struct bnx2x *bp = netdev_priv(dev);

	/* Unload the driver, release IRQs */
	bnx2x_nic_unload(bp, UNLOAD_CLOSE);
	bnx2x_set_power_state(bp, PCI_D3hot);

	return 0;
}
8758
Vladislav Zolotarov6e30dd42011-02-06 11:25:41 -08008759#define E1_MAX_UC_LIST 29
8760#define E1H_MAX_UC_LIST 30
8761#define E2_MAX_UC_LIST 14
8762static inline u8 bnx2x_max_uc_list(struct bnx2x *bp)
8763{
8764 if (CHIP_IS_E1(bp))
8765 return E1_MAX_UC_LIST;
8766 else if (CHIP_IS_E1H(bp))
8767 return E1H_MAX_UC_LIST;
8768 else
8769 return E2_MAX_UC_LIST;
8770}
8771
8772
8773static inline u8 bnx2x_uc_list_cam_offset(struct bnx2x *bp)
8774{
8775 if (CHIP_IS_E1(bp))
8776 /* CAM Entries for Port0:
8777 * 0 - prim ETH MAC
8778 * 1 - BCAST MAC
8779 * 2 - iSCSI L2 ring ETH MAC
8780 * 3-31 - UC MACs
8781 *
8782 * Port1 entries are allocated the same way starting from
8783 * entry 32.
8784 */
8785 return 3 + 32 * BP_PORT(bp);
8786 else if (CHIP_IS_E1H(bp)) {
8787 /* CAM Entries:
8788 * 0-7 - prim ETH MAC for each function
8789 * 8-15 - iSCSI L2 ring ETH MAC for each function
8790 * 16 till 255 UC MAC lists for each function
8791 *
8792 * Remark: There is no FCoE support for E1H, thus FCoE related
8793 * MACs are not considered.
8794 */
8795 return E1H_FUNC_MAX * (CAM_ISCSI_ETH_LINE + 1) +
8796 bnx2x_max_uc_list(bp) * BP_FUNC(bp);
8797 } else {
8798 /* CAM Entries (there is a separate CAM per engine):
8799 * 0-4 - prim ETH MAC for each function
8800 * 4-7 - iSCSI L2 ring ETH MAC for each function
8801 * 8-11 - FIP ucast L2 MAC for each function
8802 * 12-15 - ALL_ENODE_MACS mcast MAC for each function
8803 * 16 till 71 UC MAC lists for each function
8804 */
8805 u8 func_idx =
8806 (CHIP_MODE_IS_4_PORT(bp) ? BP_FUNC(bp) : BP_VN(bp));
8807
8808 return E2_FUNC_MAX * (CAM_MAX_PF_LINE + 1) +
8809 bnx2x_max_uc_list(bp) * func_idx;
8810 }
8811}
8812
/* set uc list, do not wait as wait implies sleep and
 * set_rx_mode can be invoked from non-sleepable context.
 *
 * Instead we use the same ramrod data buffer each time we need
 * to configure a list of addresses, and use the fact that the
 * list of MACs is changed in an incremental way and that the
 * function is called under the netif_addr_lock. A temporary
 * inconsistent CAM configuration (possible in case of very fast
 * sequence of add/del/add on the host side) will shortly be
 * restored by the handler of the last ramrod.
 *
 * Returns -EINVAL when the device has more UC addresses than CAM
 * entries, otherwise the return value of bnx2x_sp_post().
 */
static int bnx2x_set_uc_list(struct bnx2x *bp)
{
	int i = 0, old;
	struct net_device *dev = bp->dev;
	u8 offset = bnx2x_uc_list_cam_offset(bp);
	struct netdev_hw_addr *ha;
	struct mac_configuration_cmd *config_cmd = bnx2x_sp(bp, uc_mac_config);
	dma_addr_t config_cmd_map = bnx2x_sp_mapping(bp, uc_mac_config);

	if (netdev_uc_count(dev) > bnx2x_max_uc_list(bp))
		return -EINVAL;

	netdev_for_each_uc_addr(ha, dev) {
		/* copy mac: the FW expects each 16-bit word byte-swapped */
		config_cmd->config_table[i].msb_mac_addr =
			swab16(*(u16 *)&bnx2x_uc_addr(ha)[0]);
		config_cmd->config_table[i].middle_mac_addr =
			swab16(*(u16 *)&bnx2x_uc_addr(ha)[2]);
		config_cmd->config_table[i].lsb_mac_addr =
			swab16(*(u16 *)&bnx2x_uc_addr(ha)[4]);

		config_cmd->config_table[i].vlan_id = 0;
		config_cmd->config_table[i].pf_id = BP_FUNC(bp);
		config_cmd->config_table[i].clients_bit_vector =
			cpu_to_le32(1 << BP_L_ID(bp));

		SET_FLAG(config_cmd->config_table[i].flags,
			MAC_CONFIGURATION_ENTRY_ACTION_TYPE,
			T_ETH_MAC_COMMAND_SET);

		DP(NETIF_MSG_IFUP,
		   "setting UCAST[%d] (%04x:%04x:%04x)\n", i,
		   config_cmd->config_table[i].msb_mac_addr,
		   config_cmd->config_table[i].middle_mac_addr,
		   config_cmd->config_table[i].lsb_mac_addr);

		i++;

		/* Set uc MAC in NIG */
		bnx2x_set_mac_in_nig(bp, 1, bnx2x_uc_addr(ha),
				     LLH_CAM_ETH_LINE + i);
	}
	/* invalidate any trailing entries left over from a previous,
	 * longer list (until a break on an already-invalid entry)
	 */
	old = config_cmd->hdr.length;
	if (old > i) {
		for (; i < old; i++) {
			if (CAM_IS_INVALID(config_cmd->
					   config_table[i])) {
				/* already invalidated */
				break;
			}
			/* invalidate */
			SET_FLAG(config_cmd->config_table[i].flags,
				MAC_CONFIGURATION_ENTRY_ACTION_TYPE,
				T_ETH_MAC_COMMAND_INVALIDATE);
		}
	}

	/* make sure the table is fully written before the header update */
	wmb();

	config_cmd->hdr.length = i;
	config_cmd->hdr.offset = offset;
	config_cmd->hdr.client_id = 0xff;
	/* Mark that this ramrod doesn't use bp->set_mac_pending for
	 * synchronization.
	 */
	config_cmd->hdr.echo = 0;

	mb();

	return bnx2x_sp_post(bp, RAMROD_CMD_ID_COMMON_SET_MAC, 0,
		   U64_HI(config_cmd_map), U64_LO(config_cmd_map), 1);

}
8897
/* bnx2x_invalidate_uc_list - remove all unicast MACs of this function.
 *
 * Marks every entry in the function's UC CAM region invalid, clears
 * the corresponding NIG entries, posts the SET_MAC ramrod and - unlike
 * bnx2x_set_uc_list() - waits for its completion (echo = 1 requests a
 * completion via bp->set_mac_pending).  Must be called from a context
 * that may sleep.
 */
void bnx2x_invalidate_uc_list(struct bnx2x *bp)
{
	int i;
	struct mac_configuration_cmd *config_cmd = bnx2x_sp(bp, uc_mac_config);
	dma_addr_t config_cmd_map = bnx2x_sp_mapping(bp, uc_mac_config);
	int ramrod_flags = WAIT_RAMROD_COMMON;
	u8 offset = bnx2x_uc_list_cam_offset(bp);
	u8 max_list_size = bnx2x_max_uc_list(bp);

	for (i = 0; i < max_list_size; i++) {
		SET_FLAG(config_cmd->config_table[i].flags,
			MAC_CONFIGURATION_ENTRY_ACTION_TYPE,
			T_ETH_MAC_COMMAND_INVALIDATE);
		bnx2x_set_mac_in_nig(bp, 0, NULL, LLH_CAM_ETH_LINE + 1 + i);
	}

	/* make sure the table is fully written before the header update */
	wmb();

	config_cmd->hdr.length = max_list_size;
	config_cmd->hdr.offset = offset;
	config_cmd->hdr.client_id = 0xff;
	/* We'll wait for a completion this time... */
	config_cmd->hdr.echo = 1;

	bp->set_mac_pending = 1;

	mb();

	bnx2x_sp_post(bp, RAMROD_CMD_ID_COMMON_SET_MAC, 0,
		      U64_HI(config_cmd_map), U64_LO(config_cmd_map), 1);

	/* Wait for a completion */
	bnx2x_wait_ramrod(bp, 0, 0, &bp->set_mac_pending,
				ramrod_flags);

}
8934
/* Program the multicast list using the scheme this chip family supports:
 * E1 uses its own CAM-based path, E1H and newer use the MC hash.
 */
static inline int bnx2x_set_mc_list(struct bnx2x *bp)
{
	if (CHIP_IS_E1(bp))
		return bnx2x_set_e1_mc_list(bp);

	/* E1H and newer */
	return bnx2x_set_e1h_mc_list(bp);
}
8944
Eilon Greensteinf5372252009-02-12 08:38:30 +00008945/* called with netif_tx_lock from dev_mcast.c */
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00008946void bnx2x_set_rx_mode(struct net_device *dev)
Eilon Greenstein34f80b02008-06-23 20:33:01 -07008947{
8948 struct bnx2x *bp = netdev_priv(dev);
8949 u32 rx_mode = BNX2X_RX_MODE_NORMAL;
Eilon Greenstein34f80b02008-06-23 20:33:01 -07008950
8951 if (bp->state != BNX2X_STATE_OPEN) {
8952 DP(NETIF_MSG_IFUP, "state is %x, returning\n", bp->state);
8953 return;
8954 }
8955
8956 DP(NETIF_MSG_IFUP, "dev->flags = %x\n", dev->flags);
8957
8958 if (dev->flags & IFF_PROMISC)
8959 rx_mode = BNX2X_RX_MODE_PROMISC;
Vladislav Zolotarov6e30dd42011-02-06 11:25:41 -08008960 else if (dev->flags & IFF_ALLMULTI)
Eilon Greenstein34f80b02008-06-23 20:33:01 -07008961 rx_mode = BNX2X_RX_MODE_ALLMULTI;
Vladislav Zolotarov6e30dd42011-02-06 11:25:41 -08008962 else {
8963 /* some multicasts */
8964 if (bnx2x_set_mc_list(bp))
8965 rx_mode = BNX2X_RX_MODE_ALLMULTI;
Eilon Greenstein34f80b02008-06-23 20:33:01 -07008966
Vladislav Zolotarov6e30dd42011-02-06 11:25:41 -08008967 /* some unicasts */
8968 if (bnx2x_set_uc_list(bp))
8969 rx_mode = BNX2X_RX_MODE_PROMISC;
Eilon Greenstein34f80b02008-06-23 20:33:01 -07008970 }
8971
8972 bp->rx_mode = rx_mode;
8973 bnx2x_set_storm_rx_mode(bp);
8974}
8975
Yaniv Rosnerc18487e2008-06-23 20:27:52 -07008976/* called with rtnl_lock */
Eilon Greenstein01cd4522009-08-12 08:23:08 +00008977static int bnx2x_mdio_read(struct net_device *netdev, int prtad,
8978 int devad, u16 addr)
8979{
8980 struct bnx2x *bp = netdev_priv(netdev);
8981 u16 value;
8982 int rc;
Eilon Greenstein01cd4522009-08-12 08:23:08 +00008983
8984 DP(NETIF_MSG_LINK, "mdio_read: prtad 0x%x, devad 0x%x, addr 0x%x\n",
8985 prtad, devad, addr);
8986
Eilon Greenstein01cd4522009-08-12 08:23:08 +00008987 /* The HW expects different devad if CL22 is used */
8988 devad = (devad == MDIO_DEVAD_NONE) ? DEFAULT_PHY_DEV_ADDR : devad;
8989
8990 bnx2x_acquire_phy_lock(bp);
Yaniv Rosnere10bc842010-09-07 11:40:50 +00008991 rc = bnx2x_phy_read(&bp->link_params, prtad, devad, addr, &value);
Eilon Greenstein01cd4522009-08-12 08:23:08 +00008992 bnx2x_release_phy_lock(bp);
8993 DP(NETIF_MSG_LINK, "mdio_read_val 0x%x rc = 0x%x\n", value, rc);
8994
8995 if (!rc)
8996 rc = value;
8997 return rc;
8998}
8999
9000/* called with rtnl_lock */
9001static int bnx2x_mdio_write(struct net_device *netdev, int prtad, int devad,
9002 u16 addr, u16 value)
9003{
9004 struct bnx2x *bp = netdev_priv(netdev);
Eilon Greenstein01cd4522009-08-12 08:23:08 +00009005 int rc;
9006
9007 DP(NETIF_MSG_LINK, "mdio_write: prtad 0x%x, devad 0x%x, addr 0x%x,"
9008 " value 0x%x\n", prtad, devad, addr, value);
9009
Eilon Greenstein01cd4522009-08-12 08:23:08 +00009010 /* The HW expects different devad if CL22 is used */
9011 devad = (devad == MDIO_DEVAD_NONE) ? DEFAULT_PHY_DEV_ADDR : devad;
9012
9013 bnx2x_acquire_phy_lock(bp);
Yaniv Rosnere10bc842010-09-07 11:40:50 +00009014 rc = bnx2x_phy_write(&bp->link_params, prtad, devad, addr, value);
Eilon Greenstein01cd4522009-08-12 08:23:08 +00009015 bnx2x_release_phy_lock(bp);
9016 return rc;
9017}
9018
9019/* called with rtnl_lock */
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02009020static int bnx2x_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
9021{
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02009022 struct bnx2x *bp = netdev_priv(dev);
Eilon Greenstein01cd4522009-08-12 08:23:08 +00009023 struct mii_ioctl_data *mdio = if_mii(ifr);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02009024
Eilon Greenstein01cd4522009-08-12 08:23:08 +00009025 DP(NETIF_MSG_LINK, "ioctl: phy id 0x%x, reg 0x%x, val_in 0x%x\n",
9026 mdio->phy_id, mdio->reg_num, mdio->val_in);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02009027
Eilon Greenstein01cd4522009-08-12 08:23:08 +00009028 if (!netif_running(dev))
9029 return -EAGAIN;
Yaniv Rosnerc18487e2008-06-23 20:27:52 -07009030
Eilon Greenstein01cd4522009-08-12 08:23:08 +00009031 return mdio_mii_ioctl(&bp->mdio, mdio, cmd);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02009032}
9033
Alexey Dobriyan257ddbd2010-01-27 10:17:41 +00009034#ifdef CONFIG_NET_POLL_CONTROLLER
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02009035static void poll_bnx2x(struct net_device *dev)
9036{
9037 struct bnx2x *bp = netdev_priv(dev);
9038
9039 disable_irq(bp->pdev->irq);
9040 bnx2x_interrupt(bp->pdev->irq, dev);
9041 enable_irq(bp->pdev->irq);
9042}
9043#endif
9044
/* Network stack entry points (struct net_device_ops) for this driver.
 * poll_bnx2x is only compiled in when CONFIG_NET_POLL_CONTROLLER is set.
 */
static const struct net_device_ops bnx2x_netdev_ops = {
	.ndo_open		= bnx2x_open,
	.ndo_stop		= bnx2x_close,
	.ndo_start_xmit		= bnx2x_start_xmit,
	.ndo_select_queue	= bnx2x_select_queue,
	.ndo_set_rx_mode	= bnx2x_set_rx_mode,
	.ndo_set_mac_address	= bnx2x_change_mac_addr,
	.ndo_validate_addr	= eth_validate_addr,
	.ndo_do_ioctl		= bnx2x_ioctl,
	.ndo_change_mtu		= bnx2x_change_mtu,
	.ndo_fix_features	= bnx2x_fix_features,
	.ndo_set_features	= bnx2x_set_features,
	.ndo_tx_timeout		= bnx2x_tx_timeout,
#ifdef CONFIG_NET_POLL_CONTROLLER
	.ndo_poll_controller	= poll_bnx2x,
#endif
};
9062
/**
 * bnx2x_init_dev - low-level PCI/BAR bring-up for one adapter.
 * @pdev: the PCI device being probed
 * @dev:  the (already allocated, zeroed) net_device backing it
 *
 * Enables the PCI device, claims and maps BAR0 (registers) and BAR2
 * (doorbells), configures DMA masks, wires up netdev/ethtool/mdio ops and
 * feature flags.  Returns 0 on success or a negative errno; on failure all
 * partially acquired resources are released via the goto-cleanup chain.
 *
 * NOTE(review): enable_cnt == 1 guards region request/master setup so a
 * second function sharing the PCI device does not redo them — presumably
 * mirrored by the same check in the release paths; verify against callers.
 */
static int __devinit bnx2x_init_dev(struct pci_dev *pdev,
				    struct net_device *dev)
{
	struct bnx2x *bp;
	int rc;

	SET_NETDEV_DEV(dev, &pdev->dev);
	bp = netdev_priv(dev);

	bp->dev = dev;
	bp->pdev = pdev;
	bp->flags = 0;
	/* PCI function number identifies this PF on multi-function devices */
	bp->pf_num = PCI_FUNC(pdev->devfn);

	rc = pci_enable_device(pdev);
	if (rc) {
		dev_err(&bp->pdev->dev,
			"Cannot enable PCI device, aborting\n");
		goto err_out;
	}

	/* BAR0 (registers) and BAR2 (doorbells) must both be memory BARs */
	if (!(pci_resource_flags(pdev, 0) & IORESOURCE_MEM)) {
		dev_err(&bp->pdev->dev,
			"Cannot find PCI device base address, aborting\n");
		rc = -ENODEV;
		goto err_out_disable;
	}

	if (!(pci_resource_flags(pdev, 2) & IORESOURCE_MEM)) {
		dev_err(&bp->pdev->dev, "Cannot find second PCI device"
			" base address, aborting\n");
		rc = -ENODEV;
		goto err_out_disable;
	}

	/* Only the first function to enable the device claims the regions */
	if (atomic_read(&pdev->enable_cnt) == 1) {
		rc = pci_request_regions(pdev, DRV_MODULE_NAME);
		if (rc) {
			dev_err(&bp->pdev->dev,
				"Cannot obtain PCI resources, aborting\n");
			goto err_out_disable;
		}

		pci_set_master(pdev);
		pci_save_state(pdev);
	}

	bp->pm_cap = pci_find_capability(pdev, PCI_CAP_ID_PM);
	if (bp->pm_cap == 0) {
		dev_err(&bp->pdev->dev,
			"Cannot find power management capability, aborting\n");
		rc = -EIO;
		goto err_out_release;
	}

	bp->pcie_cap = pci_find_capability(pdev, PCI_CAP_ID_EXP);
	if (bp->pcie_cap == 0) {
		dev_err(&bp->pdev->dev,
			"Cannot find PCI Express capability, aborting\n");
		rc = -EIO;
		goto err_out_release;
	}

	/* Prefer 64-bit DMA; fall back to 32-bit, else bail out */
	if (dma_set_mask(&pdev->dev, DMA_BIT_MASK(64)) == 0) {
		bp->flags |= USING_DAC_FLAG;
		if (dma_set_coherent_mask(&pdev->dev, DMA_BIT_MASK(64)) != 0) {
			dev_err(&bp->pdev->dev, "dma_set_coherent_mask"
				" failed, aborting\n");
			rc = -EIO;
			goto err_out_release;
		}

	} else if (dma_set_mask(&pdev->dev, DMA_BIT_MASK(32)) != 0) {
		dev_err(&bp->pdev->dev,
			"System does not support DMA, aborting\n");
		rc = -EIO;
		goto err_out_release;
	}

	dev->mem_start = pci_resource_start(pdev, 0);
	dev->base_addr = dev->mem_start;
	dev->mem_end = pci_resource_end(pdev, 0);

	dev->irq = pdev->irq;

	bp->regview = pci_ioremap_bar(pdev, 0);
	if (!bp->regview) {
		dev_err(&bp->pdev->dev,
			"Cannot map register space, aborting\n");
		rc = -ENOMEM;
		goto err_out_release;
	}

	/* Map at most BNX2X_DB_SIZE of the doorbell BAR (it may be larger) */
	bp->doorbells = ioremap_nocache(pci_resource_start(pdev, 2),
					min_t(u64, BNX2X_DB_SIZE(bp),
					      pci_resource_len(pdev, 2)));
	if (!bp->doorbells) {
		dev_err(&bp->pdev->dev,
			"Cannot map doorbell space, aborting\n");
		rc = -ENOMEM;
		goto err_out_unmap;
	}

	bnx2x_set_power_state(bp, PCI_D0);

	/* clean indirect addresses */
	pci_write_config_dword(bp->pdev, PCICFG_GRC_ADDRESS,
			       PCICFG_VENDOR_ID_OFFSET);
	REG_WR(bp, PXP2_REG_PGL_ADDR_88_F0 + BP_PORT(bp)*16, 0);
	REG_WR(bp, PXP2_REG_PGL_ADDR_8C_F0 + BP_PORT(bp)*16, 0);
	REG_WR(bp, PXP2_REG_PGL_ADDR_90_F0 + BP_PORT(bp)*16, 0);
	REG_WR(bp, PXP2_REG_PGL_ADDR_94_F0 + BP_PORT(bp)*16, 0);

	/* Reset the load counter */
	bnx2x_clear_load_cnt(bp);

	dev->watchdog_timeo = TX_TIMEOUT;

	dev->netdev_ops = &bnx2x_netdev_ops;
	bnx2x_set_ethtool_ops(dev);

	dev->hw_features = NETIF_F_SG | NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM |
		NETIF_F_TSO | NETIF_F_TSO_ECN | NETIF_F_TSO6 |
		NETIF_F_RXCSUM | NETIF_F_LRO | NETIF_F_HW_VLAN_TX;

	dev->vlan_features = NETIF_F_SG | NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM |
		NETIF_F_TSO | NETIF_F_TSO_ECN | NETIF_F_TSO6 | NETIF_F_HIGHDMA;

	dev->features |= dev->hw_features | NETIF_F_HW_VLAN_RX;
	if (bp->flags & USING_DAC_FLAG)
		dev->features |= NETIF_F_HIGHDMA;

	/* Add Loopback capability to the device */
	dev->hw_features |= NETIF_F_LOOPBACK;

#ifdef BCM_DCBNL
	dev->dcbnl_ops = &bnx2x_dcbnl_ops;
#endif

	/* get_port_hwinfo() will set prtad and mmds properly */
	bp->mdio.prtad = MDIO_PRTAD_NONE;
	bp->mdio.mmds = 0;
	bp->mdio.mode_support = MDIO_SUPPORTS_C45 | MDIO_EMULATE_C22;
	bp->mdio.dev = dev;
	bp->mdio.mdio_read = bnx2x_mdio_read;
	bp->mdio.mdio_write = bnx2x_mdio_write;

	return 0;

err_out_unmap:
	if (bp->regview) {
		iounmap(bp->regview);
		bp->regview = NULL;
	}
	if (bp->doorbells) {
		iounmap(bp->doorbells);
		bp->doorbells = NULL;
	}

err_out_release:
	if (atomic_read(&pdev->enable_cnt) == 1)
		pci_release_regions(pdev);

err_out_disable:
	pci_disable_device(pdev);
	pci_set_drvdata(pdev, NULL);

err_out:
	return rc;
}
9233
Eilon Greenstein37f9ce62009-08-12 08:23:34 +00009234static void __devinit bnx2x_get_pcie_width_speed(struct bnx2x *bp,
9235 int *width, int *speed)
Eliezer Tamir25047952008-02-28 11:50:16 -08009236{
9237 u32 val = REG_RD(bp, PCICFG_OFFSET + PCICFG_LINK_CONTROL);
9238
Eilon Greenstein37f9ce62009-08-12 08:23:34 +00009239 *width = (val & PCICFG_LINK_WIDTH) >> PCICFG_LINK_WIDTH_SHIFT;
9240
9241 /* return value of 1=2.5GHz 2=5GHz */
9242 *speed = (val & PCICFG_LINK_SPEED) >> PCICFG_LINK_SPEED_SHIFT;
Eliezer Tamir25047952008-02-28 11:50:16 -08009243}
9244
Dmitry Kravkov6891dd22010-08-03 21:49:40 +00009245static int bnx2x_check_firmware(struct bnx2x *bp)
Vladislav Zolotarov94a78b72009-04-27 03:27:43 -07009246{
Eilon Greenstein37f9ce62009-08-12 08:23:34 +00009247 const struct firmware *firmware = bp->firmware;
Vladislav Zolotarov94a78b72009-04-27 03:27:43 -07009248 struct bnx2x_fw_file_hdr *fw_hdr;
9249 struct bnx2x_fw_file_section *sections;
Vladislav Zolotarov94a78b72009-04-27 03:27:43 -07009250 u32 offset, len, num_ops;
Eilon Greenstein37f9ce62009-08-12 08:23:34 +00009251 u16 *ops_offsets;
Vladislav Zolotarov94a78b72009-04-27 03:27:43 -07009252 int i;
Eilon Greenstein37f9ce62009-08-12 08:23:34 +00009253 const u8 *fw_ver;
Vladislav Zolotarov94a78b72009-04-27 03:27:43 -07009254
9255 if (firmware->size < sizeof(struct bnx2x_fw_file_hdr))
9256 return -EINVAL;
9257
9258 fw_hdr = (struct bnx2x_fw_file_hdr *)firmware->data;
9259 sections = (struct bnx2x_fw_file_section *)fw_hdr;
9260
9261 /* Make sure none of the offsets and sizes make us read beyond
9262 * the end of the firmware data */
9263 for (i = 0; i < sizeof(*fw_hdr) / sizeof(*sections); i++) {
9264 offset = be32_to_cpu(sections[i].offset);
9265 len = be32_to_cpu(sections[i].len);
9266 if (offset + len > firmware->size) {
Vladislav Zolotarovcdaa7cb2010-04-19 01:13:57 +00009267 dev_err(&bp->pdev->dev,
9268 "Section %d length is out of bounds\n", i);
Vladislav Zolotarov94a78b72009-04-27 03:27:43 -07009269 return -EINVAL;
9270 }
9271 }
9272
9273 /* Likewise for the init_ops offsets */
9274 offset = be32_to_cpu(fw_hdr->init_ops_offsets.offset);
9275 ops_offsets = (u16 *)(firmware->data + offset);
9276 num_ops = be32_to_cpu(fw_hdr->init_ops.len) / sizeof(struct raw_op);
9277
9278 for (i = 0; i < be32_to_cpu(fw_hdr->init_ops_offsets.len) / 2; i++) {
9279 if (be16_to_cpu(ops_offsets[i]) > num_ops) {
Vladislav Zolotarovcdaa7cb2010-04-19 01:13:57 +00009280 dev_err(&bp->pdev->dev,
9281 "Section offset %d is out of bounds\n", i);
Vladislav Zolotarov94a78b72009-04-27 03:27:43 -07009282 return -EINVAL;
9283 }
9284 }
9285
9286 /* Check FW version */
9287 offset = be32_to_cpu(fw_hdr->fw_version.offset);
9288 fw_ver = firmware->data + offset;
9289 if ((fw_ver[0] != BCM_5710_FW_MAJOR_VERSION) ||
9290 (fw_ver[1] != BCM_5710_FW_MINOR_VERSION) ||
9291 (fw_ver[2] != BCM_5710_FW_REVISION_VERSION) ||
9292 (fw_ver[3] != BCM_5710_FW_ENGINEERING_VERSION)) {
Vladislav Zolotarovcdaa7cb2010-04-19 01:13:57 +00009293 dev_err(&bp->pdev->dev,
9294 "Bad FW version:%d.%d.%d.%d. Should be %d.%d.%d.%d\n",
Vladislav Zolotarov94a78b72009-04-27 03:27:43 -07009295 fw_ver[0], fw_ver[1], fw_ver[2],
9296 fw_ver[3], BCM_5710_FW_MAJOR_VERSION,
9297 BCM_5710_FW_MINOR_VERSION,
9298 BCM_5710_FW_REVISION_VERSION,
9299 BCM_5710_FW_ENGINEERING_VERSION);
Eilon Greensteinab6ad5a2009-08-12 08:24:29 +00009300 return -EINVAL;
Vladislav Zolotarov94a78b72009-04-27 03:27:43 -07009301 }
9302
9303 return 0;
9304}
9305
Eilon Greensteinab6ad5a2009-08-12 08:24:29 +00009306static inline void be32_to_cpu_n(const u8 *_source, u8 *_target, u32 n)
Vladislav Zolotarov94a78b72009-04-27 03:27:43 -07009307{
Eilon Greensteinab6ad5a2009-08-12 08:24:29 +00009308 const __be32 *source = (const __be32 *)_source;
9309 u32 *target = (u32 *)_target;
Vladislav Zolotarov94a78b72009-04-27 03:27:43 -07009310 u32 i;
Vladislav Zolotarov94a78b72009-04-27 03:27:43 -07009311
9312 for (i = 0; i < n/4; i++)
9313 target[i] = be32_to_cpu(source[i]);
9314}
9315
9316/*
9317 Ops array is stored in the following format:
9318 {op(8bit), offset(24bit, big endian), data(32bit, big endian)}
9319 */
Eilon Greensteinab6ad5a2009-08-12 08:24:29 +00009320static inline void bnx2x_prep_ops(const u8 *_source, u8 *_target, u32 n)
Vladislav Zolotarov94a78b72009-04-27 03:27:43 -07009321{
Eilon Greensteinab6ad5a2009-08-12 08:24:29 +00009322 const __be32 *source = (const __be32 *)_source;
9323 struct raw_op *target = (struct raw_op *)_target;
Vladislav Zolotarov94a78b72009-04-27 03:27:43 -07009324 u32 i, j, tmp;
Vladislav Zolotarov94a78b72009-04-27 03:27:43 -07009325
Eilon Greensteinab6ad5a2009-08-12 08:24:29 +00009326 for (i = 0, j = 0; i < n/8; i++, j += 2) {
Vladislav Zolotarov94a78b72009-04-27 03:27:43 -07009327 tmp = be32_to_cpu(source[j]);
9328 target[i].op = (tmp >> 24) & 0xff;
Vladislav Zolotarovcdaa7cb2010-04-19 01:13:57 +00009329 target[i].offset = tmp & 0xffffff;
9330 target[i].raw_data = be32_to_cpu(source[j + 1]);
Vladislav Zolotarov94a78b72009-04-27 03:27:43 -07009331 }
9332}
Eilon Greensteinab6ad5a2009-08-12 08:24:29 +00009333
Dmitry Kravkov523224a2010-10-06 03:23:26 +00009334/**
9335 * IRO array is stored in the following format:
9336 * {base(24bit), m1(16bit), m2(16bit), m3(16bit), size(16bit) }
9337 */
9338static inline void bnx2x_prep_iro(const u8 *_source, u8 *_target, u32 n)
9339{
9340 const __be32 *source = (const __be32 *)_source;
9341 struct iro *target = (struct iro *)_target;
9342 u32 i, j, tmp;
9343
9344 for (i = 0, j = 0; i < n/sizeof(struct iro); i++) {
9345 target[i].base = be32_to_cpu(source[j]);
9346 j++;
9347 tmp = be32_to_cpu(source[j]);
9348 target[i].m1 = (tmp >> 16) & 0xffff;
9349 target[i].m2 = tmp & 0xffff;
9350 j++;
9351 tmp = be32_to_cpu(source[j]);
9352 target[i].m3 = (tmp >> 16) & 0xffff;
9353 target[i].size = tmp & 0xffff;
9354 j++;
9355 }
9356}
9357
Eilon Greensteinab6ad5a2009-08-12 08:24:29 +00009358static inline void be16_to_cpu_n(const u8 *_source, u8 *_target, u32 n)
Vladislav Zolotarov94a78b72009-04-27 03:27:43 -07009359{
Eilon Greensteinab6ad5a2009-08-12 08:24:29 +00009360 const __be16 *source = (const __be16 *)_source;
9361 u16 *target = (u16 *)_target;
Vladislav Zolotarov94a78b72009-04-27 03:27:43 -07009362 u32 i;
Vladislav Zolotarov94a78b72009-04-27 03:27:43 -07009363
9364 for (i = 0; i < n/2; i++)
9365 target[i] = be16_to_cpu(source[i]);
9366}
9367
/* Allocate bp->arr and fill it from the firmware image: "func" converts
 * the big-endian on-file section "arr" into host order; on allocation
 * failure control jumps to label "lbl".  Relies on "bp" and "fw_hdr"
 * being in scope at the expansion site (see bnx2x_init_firmware()).
 */
#define BNX2X_ALLOC_AND_SET(arr, lbl, func) \
do { \
	u32 len = be32_to_cpu(fw_hdr->arr.len); \
	bp->arr = kmalloc(len, GFP_KERNEL); \
	if (!bp->arr) { \
		pr_err("Failed to allocate %d bytes for "#arr"\n", len); \
		goto lbl; \
	} \
	func(bp->firmware->data + be32_to_cpu(fw_hdr->arr.offset), \
	     (u8 *)bp->arr, len); \
} while (0)
Vladislav Zolotarov94a78b72009-04-27 03:27:43 -07009379
/**
 * bnx2x_init_firmware - load and unpack the chip firmware image.
 * @bp: driver handle
 *
 * Requests the per-chip firmware file, validates it, converts the
 * endian-sensitive sections into allocated host-order arrays
 * (init_data, init_ops, init_ops_offsets, iro_arr) and records direct
 * pointers into the firmware blob for the STORM program/table data.
 *
 * Returns 0 on success or a negative errno.  The goto labels unwind
 * allocations in reverse order; BNX2X_ALLOC_AND_SET jumps to them on OOM.
 */
int bnx2x_init_firmware(struct bnx2x *bp)
{
	const char *fw_file_name;
	struct bnx2x_fw_file_hdr *fw_hdr;
	int rc;

	/* Pick the firmware file matching the chip generation */
	if (CHIP_IS_E1(bp))
		fw_file_name = FW_FILE_NAME_E1;
	else if (CHIP_IS_E1H(bp))
		fw_file_name = FW_FILE_NAME_E1H;
	else if (CHIP_IS_E2(bp))
		fw_file_name = FW_FILE_NAME_E2;
	else {
		BNX2X_ERR("Unsupported chip revision\n");
		return -EINVAL;
	}

	BNX2X_DEV_INFO("Loading %s\n", fw_file_name);

	rc = request_firmware(&bp->firmware, fw_file_name, &bp->pdev->dev);
	if (rc) {
		BNX2X_ERR("Can't load firmware file %s\n", fw_file_name);
		goto request_firmware_exit;
	}

	rc = bnx2x_check_firmware(bp);
	if (rc) {
		BNX2X_ERR("Corrupt firmware file %s\n", fw_file_name);
		goto request_firmware_exit;
	}

	fw_hdr = (struct bnx2x_fw_file_hdr *)bp->firmware->data;

	/* Initialize the pointers to the init arrays */
	/* Blob */
	BNX2X_ALLOC_AND_SET(init_data, request_firmware_exit, be32_to_cpu_n);

	/* Opcodes */
	BNX2X_ALLOC_AND_SET(init_ops, init_ops_alloc_err, bnx2x_prep_ops);

	/* Offsets */
	BNX2X_ALLOC_AND_SET(init_ops_offsets, init_offsets_alloc_err,
			    be16_to_cpu_n);

	/* STORMs firmware: these point straight into the firmware blob,
	 * so bp->firmware must stay held as long as they are used.
	 */
	INIT_TSEM_INT_TABLE_DATA(bp) = bp->firmware->data +
			be32_to_cpu(fw_hdr->tsem_int_table_data.offset);
	INIT_TSEM_PRAM_DATA(bp)      = bp->firmware->data +
			be32_to_cpu(fw_hdr->tsem_pram_data.offset);
	INIT_USEM_INT_TABLE_DATA(bp) = bp->firmware->data +
			be32_to_cpu(fw_hdr->usem_int_table_data.offset);
	INIT_USEM_PRAM_DATA(bp)      = bp->firmware->data +
			be32_to_cpu(fw_hdr->usem_pram_data.offset);
	INIT_XSEM_INT_TABLE_DATA(bp) = bp->firmware->data +
			be32_to_cpu(fw_hdr->xsem_int_table_data.offset);
	INIT_XSEM_PRAM_DATA(bp)      = bp->firmware->data +
			be32_to_cpu(fw_hdr->xsem_pram_data.offset);
	INIT_CSEM_INT_TABLE_DATA(bp) = bp->firmware->data +
			be32_to_cpu(fw_hdr->csem_int_table_data.offset);
	INIT_CSEM_PRAM_DATA(bp)      = bp->firmware->data +
			be32_to_cpu(fw_hdr->csem_pram_data.offset);
	/* IRO */
	BNX2X_ALLOC_AND_SET(iro_arr, iro_alloc_err, bnx2x_prep_iro);

	return 0;

iro_alloc_err:
	kfree(bp->init_ops_offsets);
init_offsets_alloc_err:
	kfree(bp->init_ops);
init_ops_alloc_err:
	kfree(bp->init_data);
request_firmware_exit:
	release_firmware(bp->firmware);

	return rc;
}
9457
Dmitry Kravkov523224a2010-10-06 03:23:26 +00009458static inline int bnx2x_set_qm_cid_count(struct bnx2x *bp, int l2_cid_count)
9459{
9460 int cid_count = L2_FP_COUNT(l2_cid_count);
Vladislav Zolotarov94a78b72009-04-27 03:27:43 -07009461
Dmitry Kravkov523224a2010-10-06 03:23:26 +00009462#ifdef BCM_CNIC
9463 cid_count += CNIC_CID_MAX;
9464#endif
9465 return roundup(cid_count, QM_CID_ROUND);
9466}
Dmitry Kravkovf85582f2010-10-06 03:34:21 +00009467
/**
 * bnx2x_init_one - PCI probe entry point for one bnx2x adapter.
 * @pdev: the PCI device
 * @ent:  matched device-table entry; driver_data holds the board type
 *
 * Sizes the CID/queue space for the board family, allocates the
 * multi-queue net_device, performs low-level init (bnx2x_init_dev),
 * driver-state init (bnx2x_init_bp), chooses the interrupt mode and
 * registers the netdev.  Returns 0 or a negative errno; the
 * init_one_exit label unwinds everything acquired after init_dev.
 */
static int __devinit bnx2x_init_one(struct pci_dev *pdev,
				    const struct pci_device_id *ent)
{
	struct net_device *dev = NULL;
	struct bnx2x *bp;
	int pcie_width, pcie_speed;
	int rc, cid_count;

	/* Fastpath status-block count depends on the chip generation */
	switch (ent->driver_data) {
	case BCM57710:
	case BCM57711:
	case BCM57711E:
		cid_count = FP_SB_MAX_E1x;
		break;

	case BCM57712:
	case BCM57712E:
		cid_count = FP_SB_MAX_E2;
		break;

	default:
		pr_err("Unknown board_type (%ld), aborting\n",
			   ent->driver_data);
		return -ENODEV;
	}

	/* Reserve contexts for the non-ethernet (CNIC/FCoE) clients too */
	cid_count += NONE_ETH_CONTEXT_USE + CNIC_CONTEXT_USE;

	/* dev zeroed in init_etherdev */
	dev = alloc_etherdev_mq(sizeof(*bp), cid_count);
	if (!dev) {
		dev_err(&pdev->dev, "Cannot allocate net device\n");
		return -ENOMEM;
	}

	bp = netdev_priv(dev);
	bp->msg_enable = debug;

	pci_set_drvdata(pdev, dev);

	bp->l2_cid_count = cid_count;

	rc = bnx2x_init_dev(pdev, dev);
	if (rc < 0) {
		/* init_dev cleaned up after itself; only the netdev remains */
		free_netdev(dev);
		return rc;
	}

	rc = bnx2x_init_bp(bp);
	if (rc)
		goto init_one_exit;

	/* calc qm_cid_count */
	bp->qm_cid_count = bnx2x_set_qm_cid_count(bp, cid_count);

#ifdef BCM_CNIC
	/* disable FCOE L2 queue for E1x*/
	if (CHIP_IS_E1x(bp))
		bp->flags |= NO_FCOE_FLAG;

#endif

	/* Configure interrupt mode: try to enable MSI-X/MSI if
	 * needed, set bp->num_queues appropriately.
	 */
	bnx2x_set_int_mode(bp);

	/* Add all NAPI objects */
	bnx2x_add_all_napi(bp);

	rc = register_netdev(dev);
	if (rc) {
		dev_err(&pdev->dev, "Cannot register net device\n");
		goto init_one_exit;
	}

#ifdef BCM_CNIC
	if (!NO_FCOE(bp)) {
		/* Add storage MAC address */
		rtnl_lock();
		dev_addr_add(bp->dev, bp->fip_mac, NETDEV_HW_ADDR_T_SAN);
		rtnl_unlock();
	}
#endif

	bnx2x_get_pcie_width_speed(bp, &pcie_width, &pcie_speed);

	netdev_info(dev, "%s (%c%d) PCI-E x%d %s found at mem %lx,"
	       " IRQ %d, ", board_info[ent->driver_data].name,
	       (CHIP_REV(bp) >> 12) + 'A', (CHIP_METAL(bp) >> 4),
	       pcie_width,
	       ((!CHIP_IS_E2(bp) && pcie_speed == 2) ||
		 (CHIP_IS_E2(bp) && pcie_speed == 1)) ?
	       "5GHz (Gen2)" : "2.5GHz",
	       dev->base_addr, bp->pdev->irq);
	pr_cont("node addr %pM\n", dev->dev_addr);

	return 0;

init_one_exit:
	if (bp->regview)
		iounmap(bp->regview);

	if (bp->doorbells)
		iounmap(bp->doorbells);

	free_netdev(dev);

	if (atomic_read(&pdev->enable_cnt) == 1)
		pci_release_regions(pdev);

	pci_disable_device(pdev);
	pci_set_drvdata(pdev, NULL);

	return rc;
}
9584
/**
 * bnx2x_remove_one - PCI remove entry point; undoes bnx2x_init_one.
 * @pdev: the PCI device being removed
 *
 * Tears the device down in reverse probe order: storage MAC / DCB
 * bookkeeping, netdev unregistration, NAPI, interrupts, power state,
 * pending work, BAR mappings, driver memory, and finally the PCI device.
 */
static void __devexit bnx2x_remove_one(struct pci_dev *pdev)
{
	struct net_device *dev = pci_get_drvdata(pdev);
	struct bnx2x *bp;

	if (!dev) {
		dev_err(&pdev->dev, "BAD net device from bnx2x_init_one\n");
		return;
	}
	bp = netdev_priv(dev);

#ifdef BCM_CNIC
	/* Delete storage MAC address */
	if (!NO_FCOE(bp)) {
		rtnl_lock();
		dev_addr_del(bp->dev, bp->fip_mac, NETDEV_HW_ADDR_T_SAN);
		rtnl_unlock();
	}
#endif

#ifdef BCM_DCBNL
	/* Delete app tlvs from dcbnl */
	bnx2x_dcbnl_update_applist(bp, true);
#endif

	unregister_netdev(dev);

	/* Delete all NAPI objects */
	bnx2x_del_all_napi(bp);

	/* Power on: we can't let PCI layer write to us while we are in D3 */
	bnx2x_set_power_state(bp, PCI_D0);

	/* Disable MSI/MSI-X */
	bnx2x_disable_msi(bp);

	/* Power off */
	bnx2x_set_power_state(bp, PCI_D3hot);

	/* Make sure RESET task is not scheduled before continuing */
	cancel_delayed_work_sync(&bp->reset_task);

	if (bp->regview)
		iounmap(bp->regview);

	if (bp->doorbells)
		iounmap(bp->doorbells);

	bnx2x_free_mem_bp(bp);

	free_netdev(dev);

	if (atomic_read(&pdev->enable_cnt) == 1)
		pci_release_regions(pdev);

	pci_disable_device(pdev);
	pci_set_drvdata(pdev, NULL);
}
9643
/* bnx2x_eeh_nic_unload - minimal NIC teardown used on a PCI bus error.
 *
 * Unlike the regular unload path, no MCP/firmware handshake is attempted
 * (the device may be unreachable); only driver-side state and host
 * memory are released.  Leaves bp->state == BNX2X_STATE_CLOSED.
 * Always returns 0.
 */
static int bnx2x_eeh_nic_unload(struct bnx2x *bp)
{
	int i;

	bp->state = BNX2X_STATE_ERROR;

	bp->rx_mode = BNX2X_RX_MODE_NONE;

	bnx2x_netif_stop(bp, 0);
	netif_carrier_off(bp->dev);

	/* Stop the periodic timer before tearing down statistics */
	del_timer_sync(&bp->timer);
	bp->stats_state = STATS_STATE_DISABLED;
	DP(BNX2X_MSG_STATS, "stats_state - DISABLED\n");

	/* Release IRQs */
	bnx2x_free_irq(bp);

	/* Free SKBs, SGEs, TPA pool and driver internals */
	bnx2x_free_skbs(bp);

	for_each_rx_queue(bp, i)
		bnx2x_free_rx_sge_range(bp, bp->fp + i, NUM_RX_SGE);

	bnx2x_free_mem(bp);

	bp->state = BNX2X_STATE_CLOSED;

	return 0;
}
9674
/* bnx2x_eeh_recover - re-establish MCP communication after a PCI reset.
 *
 * Re-reads the shared memory base from the chip and validates the MCP's
 * validity signature.  If shared memory looks absent or out of range,
 * the device is marked NO_MCP_FLAG and the driver proceeds without
 * management firmware.  Otherwise the firmware sequence number is
 * resynchronized from the mailbox.
 */
static void bnx2x_eeh_recover(struct bnx2x *bp)
{
	u32 val;

	mutex_init(&bp->port.phy_mutex);

	bp->common.shmem_base = REG_RD(bp, MISC_REG_SHARED_MEM_ADDR);
	bp->link_params.shmem_base = bp->common.shmem_base;
	BNX2X_DEV_INFO("shmem offset is 0x%x\n", bp->common.shmem_base);

	/* Shared memory must lie inside the expected window; anything
	 * else means the MCP is not running.
	 */
	if (!bp->common.shmem_base ||
	    (bp->common.shmem_base < 0xA0000) ||
	    (bp->common.shmem_base >= 0xC0000)) {
		BNX2X_DEV_INFO("MCP not active\n");
		bp->flags |= NO_MCP_FLAG;
		return;
	}

	val = SHMEM_RD(bp, validity_map[BP_PORT(bp)]);
	if ((val & (SHR_MEM_VALIDITY_DEV_INFO | SHR_MEM_VALIDITY_MB))
		!= (SHR_MEM_VALIDITY_DEV_INFO | SHR_MEM_VALIDITY_MB))
		BNX2X_ERR("BAD MCP validity signature\n");

	if (!BP_NOMCP(bp)) {
		bp->fw_seq =
		    (SHMEM_RD(bp, func_mb[BP_FW_MB_IDX(bp)].drv_mb_header) &
		    DRV_MSG_SEQ_NUMBER_MASK);
		BNX2X_DEV_INFO("fw_seq 0x%08x\n", bp->fw_seq);
	}
}
9705
/**
 * bnx2x_io_error_detected - called when PCI error is detected
 * @pdev: Pointer to PCI device
 * @state: The current pci connection state
 *
 * This function is called after a PCI bus error affecting
 * this device has been detected.
 *
 * Returns %PCI_ERS_RESULT_DISCONNECT on permanent failure,
 * otherwise %PCI_ERS_RESULT_NEED_RESET to request a slot reset.
 */
static pci_ers_result_t bnx2x_io_error_detected(struct pci_dev *pdev,
						pci_channel_state_t state)
{
	struct net_device *dev = pci_get_drvdata(pdev);
	struct bnx2x *bp = netdev_priv(dev);

	rtnl_lock();

	netif_device_detach(dev);

	/* Permanent failure: no point in attempting a reset */
	if (state == pci_channel_io_perm_failure) {
		rtnl_unlock();
		return PCI_ERS_RESULT_DISCONNECT;
	}

	if (netif_running(dev))
		bnx2x_eeh_nic_unload(bp);

	pci_disable_device(pdev);

	rtnl_unlock();

	/* Request a slot reset */
	return PCI_ERS_RESULT_NEED_RESET;
}
9739
/**
 * bnx2x_io_slot_reset - called after the PCI bus has been reset
 * @pdev: Pointer to PCI device
 *
 * Restart the card from scratch, as if from a cold-boot.
 *
 * Returns %PCI_ERS_RESULT_DISCONNECT if the device cannot be
 * re-enabled, otherwise %PCI_ERS_RESULT_RECOVERED.
 */
static pci_ers_result_t bnx2x_io_slot_reset(struct pci_dev *pdev)
{
	struct net_device *dev = pci_get_drvdata(pdev);
	struct bnx2x *bp = netdev_priv(dev);

	rtnl_lock();

	if (pci_enable_device(pdev)) {
		dev_err(&pdev->dev,
			"Cannot re-enable PCI device after reset\n");
		rtnl_unlock();
		return PCI_ERS_RESULT_DISCONNECT;
	}

	/* Restore bus mastering and saved config space after the reset */
	pci_set_master(pdev);
	pci_restore_state(pdev);

	if (netif_running(dev))
		bnx2x_set_power_state(bp, PCI_D0);

	rtnl_unlock();

	return PCI_ERS_RESULT_RECOVERED;
}
9770
/**
 * bnx2x_io_resume - called when traffic can start flowing again
 * @pdev: Pointer to PCI device
 *
 * This callback is called when the error recovery driver tells us that
 * its OK to resume normal operation.
 */
static void bnx2x_io_resume(struct pci_dev *pdev)
{
	struct net_device *dev = pci_get_drvdata(pdev);
	struct bnx2x *bp = netdev_priv(dev);

	/* Don't race with an in-progress parity-error recovery; the
	 * device stays detached until that flow completes.
	 */
	if (bp->recovery_state != BNX2X_RECOVERY_DONE) {
		printk(KERN_ERR "Handling parity error recovery. "
				"Try again later\n");
		return;
	}

	rtnl_lock();

	bnx2x_eeh_recover(bp);

	if (netif_running(dev))
		bnx2x_nic_load(bp, LOAD_NORMAL);

	netif_device_attach(dev);

	rtnl_unlock();
}
9800
/* PCI error recovery callbacks (EEH / AER) */
static struct pci_error_handlers bnx2x_err_handler = {
	.error_detected = bnx2x_io_error_detected,
	.slot_reset     = bnx2x_io_slot_reset,
	.resume         = bnx2x_io_resume,
};
9806
/* PCI driver registration table: probe/remove, PM and error recovery */
static struct pci_driver bnx2x_pci_driver = {
	.name        = DRV_MODULE_NAME,
	.id_table    = bnx2x_pci_tbl,
	.probe       = bnx2x_init_one,
	.remove      = __devexit_p(bnx2x_remove_one),
	.suspend     = bnx2x_suspend,
	.resume      = bnx2x_resume,
	.err_handler = &bnx2x_err_handler,
};
9816
/* Module entry point: create the driver workqueue, then register the
 * PCI driver.  The workqueue must exist before any probe can run.
 * Returns 0 on success or a negative errno.
 */
static int __init bnx2x_init(void)
{
	int ret;

	pr_info("%s", version);

	bnx2x_wq = create_singlethread_workqueue("bnx2x");
	if (bnx2x_wq == NULL) {
		pr_err("Cannot create workqueue\n");
		return -ENOMEM;
	}

	ret = pci_register_driver(&bnx2x_pci_driver);
	if (ret) {
		pr_err("Cannot register driver\n");
		/* Undo workqueue creation on registration failure */
		destroy_workqueue(bnx2x_wq);
	}
	return ret;
}
9836
/* Module exit point: unregister the PCI driver (which removes all
 * devices and flushes their work) before destroying the workqueue.
 */
static void __exit bnx2x_cleanup(void)
{
	pci_unregister_driver(&bnx2x_pci_driver);

	destroy_workqueue(bnx2x_wq);
}

module_init(bnx2x_init);
module_exit(bnx2x_cleanup);
9846
Michael Chan993ac7b2009-10-10 13:46:56 +00009847#ifdef BCM_CNIC
9848
/* count denotes the number of new completions we have seen */
/* bnx2x_cnic_sp_post - credit back @count completed SPEs and drain as
 * many queued CNIC kernel-work entries onto the slowpath queue as the
 * per-type credit pools (cq_spq_left / eq_spq_left / cnic_spq_pending)
 * allow.  Runs under bp->spq_lock.
 */
static void bnx2x_cnic_sp_post(struct bnx2x *bp, int count)
{
	struct eth_spe *spe;

#ifdef BNX2X_STOP_ON_ERROR
	if (unlikely(bp->panic))
		return;
#endif

	spin_lock_bh(&bp->spq_lock);
	BUG_ON(bp->cnic_spq_pending < count);
	bp->cnic_spq_pending -= count;


	for (; bp->cnic_kwq_pending; bp->cnic_kwq_pending--) {
		u16 type = (le16_to_cpu(bp->cnic_kwq_cons->hdr.type)
				& SPE_HDR_CONN_TYPE) >>
				SPE_HDR_CONN_TYPE_SHIFT;

		/* Set validation for iSCSI L2 client before sending SETUP
		 * ramrod
		 */
		if (type == ETH_CONNECTION_TYPE) {
			u8 cmd = (le32_to_cpu(bp->cnic_kwq_cons->
					     hdr.conn_and_cmd_data) >>
				SPE_HDR_CMD_ID_SHIFT) & 0xff;

			if (cmd == RAMROD_CMD_ID_ETH_CLIENT_SETUP)
				bnx2x_set_ctx_validation(&bp->context.
						vcxt[BNX2X_ISCSI_ETH_CID].eth,
					HW_CID(bp, BNX2X_ISCSI_ETH_CID));
		}

		/* There may be not more than 8 L2 and not more than 8 L5 SPEs
		 * We also check that the number of outstanding
		 * COMMON ramrods is not more than the EQ and SPQ can
		 * accommodate.
		 */
		if (type == ETH_CONNECTION_TYPE) {
			if (!atomic_read(&bp->cq_spq_left))
				break;
			else
				atomic_dec(&bp->cq_spq_left);
		} else if (type == NONE_CONNECTION_TYPE) {
			if (!atomic_read(&bp->eq_spq_left))
				break;
			else
				atomic_dec(&bp->eq_spq_left);
		} else if ((type == ISCSI_CONNECTION_TYPE) ||
			   (type == FCOE_CONNECTION_TYPE)) {
			if (bp->cnic_spq_pending >=
			    bp->cnic_eth_dev.max_kwqe_pending)
				break;
			else
				bp->cnic_spq_pending++;
		} else {
			/* Unexpected type: stop posting rather than corrupt
			 * the credit accounting.
			 */
			BNX2X_ERR("Unknown SPE type: %d\n", type);
			bnx2x_panic();
			break;
		}

		/* Copy the entry into the next free SPQ slot */
		spe = bnx2x_sp_get_next(bp);
		*spe = *bp->cnic_kwq_cons;

		DP(NETIF_MSG_TIMER, "pending on SPQ %d, on KWQ %d count %d\n",
		   bp->cnic_spq_pending, bp->cnic_kwq_pending, count);

		/* Advance the KWQ consumer, wrapping at the ring end */
		if (bp->cnic_kwq_cons == bp->cnic_kwq_last)
			bp->cnic_kwq_cons = bp->cnic_kwq;
		else
			bp->cnic_kwq_cons++;
	}
	bnx2x_sp_prod_update(bp);
	spin_unlock_bh(&bp->spq_lock);
}
9925
/* bnx2x_cnic_sp_queue - CNIC callback: enqueue up to @count 16-byte
 * kernel-work entries onto the driver's KWQ ring and kick posting if
 * the slowpath queue has room.  Returns the number of entries accepted
 * (may be less than @count when the ring fills), or -EIO on panic.
 */
static int bnx2x_cnic_sp_queue(struct net_device *dev,
			       struct kwqe_16 *kwqes[], u32 count)
{
	struct bnx2x *bp = netdev_priv(dev);
	int i;

#ifdef BNX2X_STOP_ON_ERROR
	if (unlikely(bp->panic))
		return -EIO;
#endif

	spin_lock_bh(&bp->spq_lock);

	for (i = 0; i < count; i++) {
		struct eth_spe *spe = (struct eth_spe *)kwqes[i];

		/* Ring full - stop early; caller sees the short count */
		if (bp->cnic_kwq_pending == MAX_SP_DESC_CNT)
			break;

		*bp->cnic_kwq_prod = *spe;

		bp->cnic_kwq_pending++;

		DP(NETIF_MSG_TIMER, "L5 SPQE %x %x %x:%x pos %d\n",
		   spe->hdr.conn_and_cmd_data, spe->hdr.type,
		   spe->data.update_data_addr.hi,
		   spe->data.update_data_addr.lo,
		   bp->cnic_kwq_pending);

		/* Advance the producer, wrapping at the ring end */
		if (bp->cnic_kwq_prod == bp->cnic_kwq_last)
			bp->cnic_kwq_prod = bp->cnic_kwq;
		else
			bp->cnic_kwq_prod++;
	}

	spin_unlock_bh(&bp->spq_lock);

	if (bp->cnic_spq_pending < bp->cnic_eth_dev.max_kwqe_pending)
		bnx2x_cnic_sp_post(bp, 0);

	return i;
}
9968
9969static int bnx2x_cnic_ctl_send(struct bnx2x *bp, struct cnic_ctl_info *ctl)
9970{
9971 struct cnic_ops *c_ops;
9972 int rc = 0;
9973
9974 mutex_lock(&bp->cnic_mutex);
Eric Dumazet13707f92011-01-26 19:28:23 +00009975 c_ops = rcu_dereference_protected(bp->cnic_ops,
9976 lockdep_is_held(&bp->cnic_mutex));
Michael Chan993ac7b2009-10-10 13:46:56 +00009977 if (c_ops)
9978 rc = c_ops->cnic_ctl(bp->cnic_data, ctl);
9979 mutex_unlock(&bp->cnic_mutex);
9980
9981 return rc;
9982}
9983
9984static int bnx2x_cnic_ctl_send_bh(struct bnx2x *bp, struct cnic_ctl_info *ctl)
9985{
9986 struct cnic_ops *c_ops;
9987 int rc = 0;
9988
9989 rcu_read_lock();
9990 c_ops = rcu_dereference(bp->cnic_ops);
9991 if (c_ops)
9992 rc = c_ops->cnic_ctl(bp->cnic_data, ctl);
9993 rcu_read_unlock();
9994
9995 return rc;
9996}
9997
9998/*
9999 * for commands that have no data
10000 */
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +000010001int bnx2x_cnic_notify(struct bnx2x *bp, int cmd)
Michael Chan993ac7b2009-10-10 13:46:56 +000010002{
10003 struct cnic_ctl_info ctl = {0};
10004
10005 ctl.cmd = cmd;
10006
10007 return bnx2x_cnic_ctl_send(bp, &ctl);
10008}
10009
10010static void bnx2x_cnic_cfc_comp(struct bnx2x *bp, int cid)
10011{
10012 struct cnic_ctl_info ctl;
10013
10014 /* first we tell CNIC and only then we count this as a completion */
10015 ctl.cmd = CNIC_CTL_COMPLETION_CMD;
10016 ctl.data.comp.cid = cid;
10017
10018 bnx2x_cnic_ctl_send_bh(bp, &ctl);
Dmitry Kravkovc2bff632010-10-06 03:33:18 +000010019 bnx2x_cnic_sp_post(bp, 0);
Michael Chan993ac7b2009-10-10 13:46:56 +000010020}
10021
/* bnx2x_drv_ctl - CNIC -> driver control dispatcher.
 *
 * Handles context-table writes, SPQ credit returns and iSCSI L2 ring
 * start/stop requests issued by the CNIC module.  Returns 0 on success
 * or -EINVAL for an unknown command.
 */
static int bnx2x_drv_ctl(struct net_device *dev, struct drv_ctl_info *ctl)
{
	struct bnx2x *bp = netdev_priv(dev);
	int rc = 0;

	switch (ctl->cmd) {
	case DRV_CTL_CTXTBL_WR_CMD: {
		u32 index = ctl->data.io.offset;
		dma_addr_t addr = ctl->data.io.dma_addr;

		bnx2x_ilt_wr(bp, index, addr);
		break;
	}

	case DRV_CTL_RET_L5_SPQ_CREDIT_CMD: {
		int count = ctl->data.credit.credit_count;

		/* Return L5 credits and drain any queued entries */
		bnx2x_cnic_sp_post(bp, count);
		break;
	}

	/* rtnl_lock is held. */
	case DRV_CTL_START_L2_CMD: {
		u32 cli = ctl->data.ring.client_id;

		/* Clear FCoE FIP and ALL ENODE MACs addresses first */
		bnx2x_del_fcoe_eth_macs(bp);

		/* Set iSCSI MAC address */
		bnx2x_set_iscsi_eth_mac_addr(bp, 1);

		mmiowb();
		barrier();

		/* Start accepting on iSCSI L2 ring. Accept all multicasts
		 * because it's the only way for UIO Client to accept
		 * multicasts (in non-promiscuous mode only one Client per
		 * function will receive multicast packets (leading in our
		 * case).
		 */
		bnx2x_rxq_set_mac_filters(bp, cli,
			BNX2X_ACCEPT_UNICAST |
			BNX2X_ACCEPT_BROADCAST |
			BNX2X_ACCEPT_ALL_MULTICAST);
		storm_memset_mac_filters(bp, &bp->mac_filters, BP_FUNC(bp));

		break;
	}

	/* rtnl_lock is held. */
	case DRV_CTL_STOP_L2_CMD: {
		u32 cli = ctl->data.ring.client_id;

		/* Stop accepting on iSCSI L2 ring */
		bnx2x_rxq_set_mac_filters(bp, cli, BNX2X_ACCEPT_NONE);
		storm_memset_mac_filters(bp, &bp->mac_filters, BP_FUNC(bp));

		mmiowb();
		barrier();

		/* Unset iSCSI L2 MAC */
		bnx2x_set_iscsi_eth_mac_addr(bp, 0);
		break;
	}
	case DRV_CTL_RET_L2_SPQ_CREDIT_CMD: {
		int count = ctl->data.credit.credit_count;

		/* Full barriers around the credit return so posting paths
		 * observe the updated counter.
		 */
		smp_mb__before_atomic_inc();
		atomic_add(count, &bp->cq_spq_left);
		smp_mb__after_atomic_inc();
		break;
	}

	case DRV_CTL_ISCSI_STOPPED_CMD: {
		bnx2x_dcbx_set_params(bp, BNX2X_DCBX_STATE_ISCSI_STOPPED);
		break;
	}

	default:
		BNX2X_ERR("unknown command %x\n", ctl->cmd);
		rc = -EINVAL;
	}

	return rc;
}
10107
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +000010108void bnx2x_setup_cnic_irq_info(struct bnx2x *bp)
Michael Chan993ac7b2009-10-10 13:46:56 +000010109{
10110 struct cnic_eth_dev *cp = &bp->cnic_eth_dev;
10111
10112 if (bp->flags & USING_MSIX_FLAG) {
10113 cp->drv_state |= CNIC_DRV_STATE_USING_MSIX;
10114 cp->irq_arr[0].irq_flags |= CNIC_IRQ_FL_MSIX;
10115 cp->irq_arr[0].vector = bp->msix_table[1].vector;
10116 } else {
10117 cp->drv_state &= ~CNIC_DRV_STATE_USING_MSIX;
10118 cp->irq_arr[0].irq_flags &= ~CNIC_IRQ_FL_MSIX;
10119 }
Dmitry Kravkovf2e08992010-10-06 03:28:26 +000010120 if (CHIP_IS_E2(bp))
10121 cp->irq_arr[0].status_blk = (void *)bp->cnic_sb.e2_sb;
10122 else
10123 cp->irq_arr[0].status_blk = (void *)bp->cnic_sb.e1x_sb;
10124
Michael Chan993ac7b2009-10-10 13:46:56 +000010125 cp->irq_arr[0].status_blk_num = CNIC_SB_ID(bp);
Dmitry Kravkov523224a2010-10-06 03:23:26 +000010126 cp->irq_arr[0].status_blk_num2 = CNIC_IGU_SB_ID(bp);
Michael Chan993ac7b2009-10-10 13:46:56 +000010127 cp->irq_arr[1].status_blk = bp->def_status_blk;
10128 cp->irq_arr[1].status_blk_num = DEF_SB_ID;
Dmitry Kravkov523224a2010-10-06 03:23:26 +000010129 cp->irq_arr[1].status_blk_num2 = DEF_SB_IGU_ID;
Michael Chan993ac7b2009-10-10 13:46:56 +000010130
10131 cp->num_irq = 2;
10132}
10133
/* bnx2x_register_cnic - CNIC callback: attach a CNIC driver instance.
 *
 * Allocates the kernel-work queue ring and initializes its cursors
 * before publishing @ops; the rcu_assign_pointer() must come last so
 * RCU readers never see a half-initialized state.
 * Returns 0, -EINVAL if @ops is NULL, or -ENOMEM.
 */
static int bnx2x_register_cnic(struct net_device *dev, struct cnic_ops *ops,
			       void *data)
{
	struct bnx2x *bp = netdev_priv(dev);
	struct cnic_eth_dev *cp = &bp->cnic_eth_dev;

	if (ops == NULL)
		return -EINVAL;

	bp->cnic_kwq = kzalloc(PAGE_SIZE, GFP_KERNEL);
	if (!bp->cnic_kwq)
		return -ENOMEM;

	bp->cnic_kwq_cons = bp->cnic_kwq;
	bp->cnic_kwq_prod = bp->cnic_kwq;
	bp->cnic_kwq_last = bp->cnic_kwq + MAX_SP_DESC_CNT;

	bp->cnic_spq_pending = 0;
	bp->cnic_kwq_pending = 0;

	bp->cnic_data = data;

	cp->num_irq = 0;
	cp->drv_state = CNIC_DRV_STATE_REGD;
	cp->iro_arr = bp->iro_arr;

	bnx2x_setup_cnic_irq_info(bp);

	/* Publish ops last - makes the registration visible to readers */
	rcu_assign_pointer(bp->cnic_ops, ops);

	return 0;
}
10166
/* bnx2x_unregister_cnic - CNIC callback: detach the CNIC driver.
 *
 * Clears the ops pointer under cnic_mutex, then waits for all RCU
 * readers (bnx2x_cnic_ctl_send_bh) to finish before freeing the KWQ
 * ring.  Always returns 0.
 */
static int bnx2x_unregister_cnic(struct net_device *dev)
{
	struct bnx2x *bp = netdev_priv(dev);
	struct cnic_eth_dev *cp = &bp->cnic_eth_dev;

	mutex_lock(&bp->cnic_mutex);
	cp->drv_state = 0;
	rcu_assign_pointer(bp->cnic_ops, NULL);
	mutex_unlock(&bp->cnic_mutex);
	/* Wait out in-flight RCU readers before tearing down the ring */
	synchronize_rcu();
	kfree(bp->cnic_kwq);
	bp->cnic_kwq = NULL;

	return 0;
}
10182
/* bnx2x_cnic_probe - entry point used by the CNIC module to discover
 * this device's offload capabilities.
 *
 * Fills in the cnic_eth_dev descriptor (BARs, context-table geometry,
 * callbacks, iSCSI/FCoE CIDs and disabled-feature flags) and returns
 * it, or NULL when both iSCSI and FCoE are disabled.
 */
struct cnic_eth_dev *bnx2x_cnic_probe(struct net_device *dev)
{
	struct bnx2x *bp = netdev_priv(dev);
	struct cnic_eth_dev *cp = &bp->cnic_eth_dev;

	/* If both iSCSI and FCoE are disabled - return NULL in
	 * order to indicate CNIC that it should not try to work
	 * with this device.
	 */
	if (NO_ISCSI(bp) && NO_FCOE(bp))
		return NULL;

	cp->drv_owner = THIS_MODULE;
	cp->chip_id = CHIP_ID(bp);
	cp->pdev = bp->pdev;
	cp->io_base = bp->regview;
	cp->io_base2 = bp->doorbells;
	cp->max_kwqe_pending = 8;
	cp->ctx_blk_size = CDU_ILT_PAGE_SZ;
	/* CNIC contexts live in the ILT right after the L2 CID lines */
	cp->ctx_tbl_offset = FUNC_ILT_BASE(BP_FUNC(bp)) +
			     bnx2x_cid_ilt_lines(bp);
	cp->ctx_tbl_len = CNIC_ILT_LINES;
	cp->starting_cid = bnx2x_cid_ilt_lines(bp) * ILT_PAGE_CIDS;
	cp->drv_submit_kwqes_16 = bnx2x_cnic_sp_queue;
	cp->drv_ctl = bnx2x_drv_ctl;
	cp->drv_register_cnic = bnx2x_register_cnic;
	cp->drv_unregister_cnic = bnx2x_unregister_cnic;
	cp->fcoe_init_cid = BNX2X_FCOE_ETH_CID;
	cp->iscsi_l2_client_id = BNX2X_ISCSI_ETH_CL_ID +
		BP_E1HVN(bp) * NONE_ETH_CONTEXT_USE;
	cp->iscsi_l2_cid = BNX2X_ISCSI_ETH_CID;

	/* Advertise which offload features this function has disabled */
	if (NO_ISCSI_OOO(bp))
		cp->drv_state |= CNIC_DRV_STATE_NO_ISCSI_OOO;

	if (NO_ISCSI(bp))
		cp->drv_state |= CNIC_DRV_STATE_NO_ISCSI;

	if (NO_FCOE(bp))
		cp->drv_state |= CNIC_DRV_STATE_NO_FCOE;

	DP(BNX2X_MSG_SP, "page_size %d, tbl_offset %d, tbl_lines %d, "
			 "starting cid %d\n",
	   cp->ctx_blk_size,
	   cp->ctx_tbl_offset,
	   cp->ctx_tbl_len,
	   cp->starting_cid);
	return cp;
}
EXPORT_SYMBOL(bnx2x_cnic_probe);
10233
10234#endif /* BCM_CNIC */
Vladislav Zolotarov94a78b72009-04-27 03:27:43 -070010235