/* bnx2x_cmn.h: Broadcom Everest network driver.
 *
 * Copyright (c) 2007-2010 Broadcom Corporation
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation.
 *
 * Maintained by: Eilon Greenstein <eilong@broadcom.com>
 * Written by: Eliezer Tamir
 * Based on code from Michael Chan's bnx2 driver
 * UDP CSUM errata workaround by Arik Gendelman
 * Slowpath and fastpath rework by Vladislav Zolotarov
 * Statistics and Link management by Yitchak Gertner
 *
 */
#ifndef BNX2X_CMN_H
#define BNX2X_CMN_H

#include <linux/types.h>
#include <linux/netdevice.h>

#include "bnx2x.h"

/*********************** Interfaces ****************************
 * Functions that need to be implemented by each driver version
 */

/**
 * Initialize the link parameters structure variables.
 *
 * @param bp
 * @param load_mode
 *
 * @return u8
 */
u8 bnx2x_initial_phy_init(struct bnx2x *bp, int load_mode);

/**
 * Configure HW according to the link parameters structure.
 *
 * @param bp
 */
void bnx2x_link_set(struct bnx2x *bp);

/**
 * Query link status.
 *
 * @param bp
 * @param is_serdes
 *
 * @return 0 - link is UP
 */
u8 bnx2x_link_test(struct bnx2x *bp, u8 is_serdes);

/**
 * Handle a link status change.
 *
 * @param bp
 */
void bnx2x__link_status_update(struct bnx2x *bp);

/**
 * MSI-X slowpath interrupt handler.
 *
 * @param irq
 * @param dev_instance
 *
 * @return irqreturn_t
 */
irqreturn_t bnx2x_msix_sp_int(int irq, void *dev_instance);

/**
 * Non-MSI-X interrupt handler.
 *
 * @param irq
 * @param dev_instance
 *
 * @return irqreturn_t
 */
irqreturn_t bnx2x_interrupt(int irq, void *dev_instance);
#ifdef BCM_CNIC

/**
 * Send a command to the cnic driver.
 *
 * @param bp
 * @param cmd
 */
int bnx2x_cnic_notify(struct bnx2x *bp, int cmd);

/**
 * Provide cnic information for proper interrupt handling.
 *
 * @param bp
 */
void bnx2x_setup_cnic_irq_info(struct bnx2x *bp);
#endif

/**
 * Enable HW interrupts.
 *
 * @param bp
 */
void bnx2x_int_enable(struct bnx2x *bp);

/**
 * Disable HW interrupts.
 *
 * @param bp
 */
void bnx2x_int_disable(struct bnx2x *bp);

/**
 * Disable interrupts. This function ensures that no ISRs or
 * SP DPCs (sp_task) are running after it returns.
 *
 * @param bp
 * @param disable_hw if true, disable HW interrupts.
 */
void bnx2x_int_disable_sync(struct bnx2x *bp, int disable_hw);

/**
 * Load the device firmware.
 *
 * @param bp
 *
 * @return int
 */
int bnx2x_init_firmware(struct bnx2x *bp);

/**
 * Init HW blocks according to the current initialization stage:
 * COMMON, PORT or FUNCTION.
 *
 * @param bp
 * @param load_code COMMON, PORT or FUNCTION
 *
 * @return int
 */
int bnx2x_init_hw(struct bnx2x *bp, u32 load_code);

/**
 * Init driver internals:
 *  - rings
 *  - status blocks
 *  - etc.
 *
 * @param bp
 * @param load_code COMMON, PORT or FUNCTION
 */
void bnx2x_nic_init(struct bnx2x *bp, u32 load_code);

/**
 * Allocate driver's memory.
 *
 * @param bp
 *
 * @return int
 */
int bnx2x_alloc_mem(struct bnx2x *bp);

/**
 * Release driver's memory.
 *
 * @param bp
 */
void bnx2x_free_mem(struct bnx2x *bp);

/**
 * Set up an eth client.
 *
 * @param bp
 * @param fp
 * @param is_leading
 *
 * @return int
 */
int bnx2x_setup_client(struct bnx2x *bp, struct bnx2x_fastpath *fp,
                       int is_leading);

/**
 * Bring down an eth client.
 *
 * @param bp
 * @param p
 *
 * @return int
 */
int bnx2x_stop_fw_client(struct bnx2x *bp,
                         struct bnx2x_client_ramrod_params *p);

/**
 * Set the number of queues according to mode.
 *
 * @param bp
 */
void bnx2x_set_num_queues_msix(struct bnx2x *bp);

/**
 * Clean up chip internals:
 *  - Clean up MAC configuration.
 *  - Close clients.
 *  - etc.
 *
 * @param bp
 * @param unload_mode
 */
void bnx2x_chip_cleanup(struct bnx2x *bp, int unload_mode);

/**
 * Acquire HW lock.
 *
 * @param bp driver handle
 * @param resource resource bit to lock
 *
 * @return int
 */
int bnx2x_acquire_hw_lock(struct bnx2x *bp, u32 resource);

/**
 * Release HW lock.
 *
 * @param bp driver handle
 * @param resource resource bit which was locked
 *
 * @return int
 */
int bnx2x_release_hw_lock(struct bnx2x *bp, u32 resource);

/**
 * Configure the eth MAC address in the HW according to the value in
 * netdev->dev_addr for 57711.
 *
 * @param bp driver handle
 * @param set
 */
void bnx2x_set_eth_mac(struct bnx2x *bp, int set);

#ifdef BCM_CNIC
/**
 * Set iSCSI MAC(s) at the next entries in the CAM after the ETH
 * MAC(s). The function will wait until the ramrod completion
 * returns.
 *
 * @param bp driver handle
 * @param set set or clear the CAM entry
 *
 * @return 0 on success, -ENODEV if the ramrod doesn't complete.
 */
int bnx2x_set_iscsi_eth_mac_addr(struct bnx2x *bp, int set);
#endif

/**
 * Initialize a status block in FW and HW.
 *
 * @param bp driver handle
 * @param mapping
 * @param vfid
 * @param vf_valid
 * @param fw_sb_id
 * @param igu_sb_id
 */
void bnx2x_init_sb(struct bnx2x *bp, dma_addr_t mapping, int vfid,
                   u8 vf_valid, int fw_sb_id, int igu_sb_id);

/**
 * Reconfigure FW/HW according to the dev->flags rx mode.
 *
 * @param dev net_device
 */
void bnx2x_set_rx_mode(struct net_device *dev);

/**
 * Configure MAC filtering rules in the FW.
 *
 * @param bp driver handle
 */
void bnx2x_set_storm_rx_mode(struct bnx2x *bp);

/* Parity errors related */
void bnx2x_inc_load_cnt(struct bnx2x *bp);
u32 bnx2x_dec_load_cnt(struct bnx2x *bp);
bool bnx2x_chk_parity_attn(struct bnx2x *bp);
bool bnx2x_reset_is_done(struct bnx2x *bp);
void bnx2x_disable_close_the_gate(struct bnx2x *bp);

/**
 * Perform statistics handling according to event.
 *
 * @param bp driver handle
 * @param event bnx2x_stats_event
 */
void bnx2x_stats_handle(struct bnx2x *bp, enum bnx2x_stats_event event);

/**
 * Handle SP events.
 *
 * @param fp fastpath handle for the event
 * @param rr_cqe eth_rx_cqe
 */
void bnx2x_sp_event(struct bnx2x_fastpath *fp, union eth_rx_cqe *rr_cqe);

/**
 * Init/halt the function before/after sending
 * CLIENT_SETUP/CFC_DEL for the first/last client.
 *
 * @param bp
 *
 * @return int
 */
int bnx2x_func_start(struct bnx2x *bp);
int bnx2x_func_stop(struct bnx2x *bp);

/**
 * Prepare ILT configurations according to current driver
 * parameters.
 *
 * @param bp
 */
void bnx2x_ilt_set_info(struct bnx2x *bp);

static inline void bnx2x_update_fpsb_idx(struct bnx2x_fastpath *fp)
{
        barrier(); /* status block is written to by the chip */
        fp->fp_hc_idx = fp->sb_running_index[SM_RX_ID];
}

static inline void bnx2x_update_rx_prod(struct bnx2x *bp,
                                        struct bnx2x_fastpath *fp,
                                        u16 bd_prod, u16 rx_comp_prod,
                                        u16 rx_sge_prod)
{
        struct ustorm_eth_rx_producers rx_prods = {0};
        int i;

        /* Update producers */
        rx_prods.bd_prod = bd_prod;
        rx_prods.cqe_prod = rx_comp_prod;
        rx_prods.sge_prod = rx_sge_prod;

        /*
         * Make sure that the BD and SGE data is updated before updating the
         * producers since FW might read the BD/SGE right after the producer
         * is updated.
         * This is only applicable for weak-ordered memory model archs such
         * as IA-64. The following barrier is also mandatory since FW assumes
         * BDs must have buffers.
         */
        wmb();

        for (i = 0; i < sizeof(struct ustorm_eth_rx_producers)/4; i++)
                REG_WR(bp,
                       BAR_USTRORM_INTMEM + fp->ustorm_rx_prods_offset + i*4,
                       ((u32 *)&rx_prods)[i]);

        mmiowb(); /* keep prod updates ordered */

        DP(NETIF_MSG_RX_STATUS,
           "queue[%d]: wrote bd_prod %u cqe_prod %u sge_prod %u\n",
           fp->index, bd_prod, rx_comp_prod, rx_sge_prod);
}
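
/*
 * Usage sketch (illustrative only, not taken from this file): once the RX
 * path has refilled its rings, it publishes all three producers in one
 * shot.  The fp->rx_bd_prod/rx_comp_prod/rx_sge_prod counters are assumed
 * to be maintained elsewhere by that RX path:
 *
 *      bnx2x_update_rx_prod(bp, fp, fp->rx_bd_prod, fp->rx_comp_prod,
 *                           fp->rx_sge_prod);
 *
 * The wmb() above is what makes the single call safe: every BD/SGE write
 * issued before it is visible to the FW by the time the producers land.
 */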

static inline void bnx2x_igu_ack_sb_gen(struct bnx2x *bp, u8 igu_sb_id,
                                        u8 segment, u16 index, u8 op,
                                        u8 update, u32 igu_addr)
{
        struct igu_regular cmd_data = {0};

        cmd_data.sb_id_and_flags =
                        ((index << IGU_REGULAR_SB_INDEX_SHIFT) |
                         (segment << IGU_REGULAR_SEGMENT_ACCESS_SHIFT) |
                         (update << IGU_REGULAR_BUPDATE_SHIFT) |
                         (op << IGU_REGULAR_ENABLE_INT_SHIFT));

        DP(NETIF_MSG_HW, "write 0x%08x to IGU addr 0x%x\n",
           cmd_data.sb_id_and_flags, igu_addr);
        REG_WR(bp, igu_addr, cmd_data.sb_id_and_flags);

        /* Make sure that ACK is written */
        mmiowb();
        barrier();
}

static inline void bnx2x_igu_clear_sb_gen(struct bnx2x *bp,
                                          u8 idu_sb_id, bool is_Pf)
{
        u32 data, ctl, cnt = 100;
        u32 igu_addr_data = IGU_REG_COMMAND_REG_32LSB_DATA;
        u32 igu_addr_ctl = IGU_REG_COMMAND_REG_CTRL;
        u32 igu_addr_ack = IGU_REG_CSTORM_TYPE_0_SB_CLEANUP + (idu_sb_id/32)*4;
        u32 sb_bit = 1 << (idu_sb_id%32);
        u32 func_encode = BP_FUNC(bp) |
                        ((is_Pf ? 1 : 0) << IGU_FID_ENCODE_IS_PF_SHIFT);
        u32 addr_encode = IGU_CMD_E2_PROD_UPD_BASE + idu_sb_id;

        /* Not supported in BC mode */
        if (CHIP_INT_MODE_IS_BC(bp))
                return;

        data = (IGU_USE_REGISTER_cstorm_type_0_sb_cleanup
                        << IGU_REGULAR_CLEANUP_TYPE_SHIFT) |
                IGU_REGULAR_CLEANUP_SET |
                IGU_REGULAR_BCLEANUP;

        ctl = addr_encode << IGU_CTRL_REG_ADDRESS_SHIFT |
              func_encode << IGU_CTRL_REG_FID_SHIFT |
              IGU_CTRL_CMD_TYPE_WR << IGU_CTRL_REG_TYPE_SHIFT;

        DP(NETIF_MSG_HW, "write 0x%08x to IGU(via GRC) addr 0x%x\n",
           data, igu_addr_data);
        REG_WR(bp, igu_addr_data, data);
        mmiowb();
        barrier();
        DP(NETIF_MSG_HW, "write 0x%08x to IGU(via GRC) addr 0x%x\n",
           ctl, igu_addr_ctl);
        REG_WR(bp, igu_addr_ctl, ctl);
        mmiowb();
        barrier();

        /* wait for clean up to finish */
        while (!(REG_RD(bp, igu_addr_ack) & sb_bit) && --cnt)
                msleep(20);

        if (!(REG_RD(bp, igu_addr_ack) & sb_bit)) {
                DP(NETIF_MSG_HW, "Unable to finish IGU cleanup: "
                   "idu_sb_id %d offset %d bit %d (cnt %d)\n",
                   idu_sb_id, idu_sb_id/32, idu_sb_id%32, cnt);
        }
}

static inline void bnx2x_hc_ack_sb(struct bnx2x *bp, u8 sb_id,
                                   u8 storm, u16 index, u8 op, u8 update)
{
        u32 hc_addr = (HC_REG_COMMAND_REG + BP_PORT(bp)*32 +
                       COMMAND_REG_INT_ACK);
        struct igu_ack_register igu_ack;

        igu_ack.status_block_index = index;
        igu_ack.sb_id_and_flags =
                        ((sb_id << IGU_ACK_REGISTER_STATUS_BLOCK_ID_SHIFT) |
                         (storm << IGU_ACK_REGISTER_STORM_ID_SHIFT) |
                         (update << IGU_ACK_REGISTER_UPDATE_INDEX_SHIFT) |
                         (op << IGU_ACK_REGISTER_INTERRUPT_MODE_SHIFT));

        DP(BNX2X_MSG_OFF, "write 0x%08x to HC addr 0x%x\n",
           (*(u32 *)&igu_ack), hc_addr);
        REG_WR(bp, hc_addr, (*(u32 *)&igu_ack));

        /* Make sure that ACK is written */
        mmiowb();
        barrier();
}

static inline void bnx2x_igu_ack_sb(struct bnx2x *bp, u8 igu_sb_id, u8 segment,
                                    u16 index, u8 op, u8 update)
{
        u32 igu_addr = BAR_IGU_INTMEM + (IGU_CMD_INT_ACK_BASE + igu_sb_id)*8;

        bnx2x_igu_ack_sb_gen(bp, igu_sb_id, segment, index, op, update,
                             igu_addr);
}

static inline void bnx2x_ack_sb(struct bnx2x *bp, u8 igu_sb_id, u8 storm,
                                u16 index, u8 op, u8 update)
{
        if (bp->common.int_block == INT_BLOCK_HC)
                bnx2x_hc_ack_sb(bp, igu_sb_id, storm, index, op, update);
        else {
                u8 segment;

                if (CHIP_INT_MODE_IS_BC(bp))
                        segment = storm;
                else if (igu_sb_id != bp->igu_dsb_id)
                        segment = IGU_SEG_ACCESS_DEF;
                else if (storm == ATTENTION_ID)
                        segment = IGU_SEG_ACCESS_ATTN;
                else
                        segment = IGU_SEG_ACCESS_DEF;
                bnx2x_igu_ack_sb(bp, igu_sb_id, segment, index, op, update);
        }
}
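
/*
 * Usage sketch (hypothetical): re-enabling the status block interrupt at
 * the end of a NAPI poll.  fp->igu_sb_id and fp->fp_hc_idx are assumed to
 * be maintained by the fastpath code; USTORM_ID and IGU_INT_ENABLE are
 * the usual ack parameters:
 *
 *      bnx2x_ack_sb(bp, fp->igu_sb_id, USTORM_ID,
 *                   le16_to_cpu(fp->fp_hc_idx), IGU_INT_ENABLE, 1);
 *
 * The helper hides whether the chip routes acks through the HC or the
 * IGU interrupt block, and picks the IGU segment accordingly.
 */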

static inline u16 bnx2x_hc_ack_int(struct bnx2x *bp)
{
        u32 hc_addr = (HC_REG_COMMAND_REG + BP_PORT(bp)*32 +
                       COMMAND_REG_SIMD_MASK);
        u32 result = REG_RD(bp, hc_addr);

        DP(BNX2X_MSG_OFF, "read 0x%08x from HC addr 0x%x\n",
           result, hc_addr);

        barrier();
        return result;
}

static inline u16 bnx2x_igu_ack_int(struct bnx2x *bp)
{
        u32 igu_addr = (BAR_IGU_INTMEM + IGU_REG_SISR_MDPC_WMASK_LSB_UPPER*8);
        u32 result = REG_RD(bp, igu_addr);

        DP(NETIF_MSG_HW, "read 0x%08x from IGU addr 0x%x\n",
           result, igu_addr);

        barrier();
        return result;
}

static inline u16 bnx2x_ack_int(struct bnx2x *bp)
{
        barrier();
        if (bp->common.int_block == INT_BLOCK_HC)
                return bnx2x_hc_ack_int(bp);
        else
                return bnx2x_igu_ack_int(bp);
}
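
/*
 * Usage sketch (hypothetical): a non-MSI-X ISR would typically start by
 * acking the interrupt and reading the pending-status mask in one go:
 *
 *      u16 status = bnx2x_ack_int(bp);
 *
 *      if (unlikely(status == 0))      // shared IRQ, not ours
 *              return IRQ_NONE;
 *
 * Each set bit then identifies a status block with work pending.
 */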

/*
 * fast path service functions
 */
static inline int bnx2x_has_tx_work_unload(struct bnx2x_fastpath *fp)
{
        /* Tell compiler that consumer and producer can change */
        barrier();
        return fp->tx_pkt_prod != fp->tx_pkt_cons;
}

static inline u16 bnx2x_tx_avail(struct bnx2x_fastpath *fp)
{
        s16 used;
        u16 prod;
        u16 cons;

        prod = fp->tx_bd_prod;
        cons = fp->tx_bd_cons;

        /* NUM_TX_RINGS = number of "next-page" entries;
           it will be used as a threshold */
        used = SUB_S16(prod, cons) + (s16)NUM_TX_RINGS;

#ifdef BNX2X_STOP_ON_ERROR
        WARN_ON(used < 0);
        WARN_ON(used > fp->bp->tx_ring_size);
        WARN_ON((fp->bp->tx_ring_size - used) > MAX_TX_AVAIL);
#endif

        return (s16)(fp->bp->tx_ring_size) - used;
}
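
/*
 * Worked example (hypothetical numbers): with tx_ring_size = 4078,
 * prod = 110, cons = 100 and NUM_TX_RINGS = 16, then
 * used = (110 - 100) + 16 = 26, so 4078 - 26 = 4052 BDs are reported
 * free.  Biasing "used" by NUM_TX_RINGS reserves the per-page
 * "next-page" BDs, which can never carry packet data.
 */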

static inline int bnx2x_has_tx_work(struct bnx2x_fastpath *fp)
{
        u16 hw_cons;

        /* Tell compiler that status block fields can change */
        barrier();
        hw_cons = le16_to_cpu(*fp->tx_cons_sb);
        return hw_cons != fp->tx_pkt_cons;
}

static inline int bnx2x_has_rx_work(struct bnx2x_fastpath *fp)
{
        u16 rx_cons_sb;

        /* Tell compiler that status block fields can change */
        barrier();
        rx_cons_sb = le16_to_cpu(*fp->rx_cons_sb);
        if ((rx_cons_sb & MAX_RCQ_DESC_CNT) == MAX_RCQ_DESC_CNT)
                rx_cons_sb++;
        return (fp->rx_comp_cons != rx_cons_sb);
}
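
/*
 * Example (hypothetical value): if MAX_RCQ_DESC_CNT were 127, a status
 * block consumer of 127 would point at a "next-page" CQE, which never
 * carries a completion; bumping it to 128 makes the comparison against
 * rx_comp_cons see only real completions.
 */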

/**
 * Disable TX from the stack's point of view.
 *
 * @param bp
 */
static inline void bnx2x_tx_disable(struct bnx2x *bp)
{
        netif_tx_disable(bp->dev);
        netif_carrier_off(bp->dev);
}

static inline void bnx2x_free_rx_sge(struct bnx2x *bp,
                                     struct bnx2x_fastpath *fp, u16 index)
{
        struct sw_rx_page *sw_buf = &fp->rx_page_ring[index];
        struct page *page = sw_buf->page;
        struct eth_rx_sge *sge = &fp->rx_sge_ring[index];

        /* Skip "next page" elements */
        if (!page)
                return;

        dma_unmap_page(&bp->pdev->dev, dma_unmap_addr(sw_buf, mapping),
                       SGE_PAGE_SIZE*PAGES_PER_SGE, DMA_FROM_DEVICE);
        __free_pages(page, PAGES_PER_SGE_SHIFT);

        sw_buf->page = NULL;
        sge->addr_hi = 0;
        sge->addr_lo = 0;
}

static inline void bnx2x_clear_sge_mask_next_elems(struct bnx2x_fastpath *fp)
{
        int i, j;

        for (i = 1; i <= NUM_RX_SGE_PAGES; i++) {
                int idx = RX_SGE_CNT * i - 1;

                for (j = 0; j < 2; j++) {
                        SGE_MASK_CLEAR_BIT(fp, idx);
                        idx--;
                }
        }
}

static inline void bnx2x_init_sge_ring_bit_mask(struct bnx2x_fastpath *fp)
{
        /* Set the mask to all 1-s: it's faster to compare to 0 than to 0xf-s */
        memset(fp->sge_mask, 0xff,
               (NUM_RX_SGE >> RX_SGE_MASK_ELEM_SHIFT)*sizeof(u64));

        /* Clear the two last indices in the page to 1:
           these are the indices that correspond to the "next" element,
           hence will never be indicated and should be removed from
           the calculations. */
        bnx2x_clear_sge_mask_next_elems(fp);
}
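
/*
 * Sketch of the resulting layout: fp->sge_mask is an array of u64 words
 * with one bit per SGE.  Everything starts at 1, and only the two
 * trailing entries of each SGE page are cleared, because they hold the
 * next-page link (written by bnx2x_set_next_page_sgl() below) rather
 * than a real buffer, so the producer accounting must never count them.
 */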

static inline int bnx2x_alloc_rx_sge(struct bnx2x *bp,
                                     struct bnx2x_fastpath *fp, u16 index)
{
        struct page *page = alloc_pages(GFP_ATOMIC, PAGES_PER_SGE_SHIFT);
        struct sw_rx_page *sw_buf = &fp->rx_page_ring[index];
        struct eth_rx_sge *sge = &fp->rx_sge_ring[index];
        dma_addr_t mapping;

        if (unlikely(page == NULL))
                return -ENOMEM;

        mapping = dma_map_page(&bp->pdev->dev, page, 0,
                               SGE_PAGE_SIZE*PAGES_PER_SGE, DMA_FROM_DEVICE);
        if (unlikely(dma_mapping_error(&bp->pdev->dev, mapping))) {
                __free_pages(page, PAGES_PER_SGE_SHIFT);
                return -ENOMEM;
        }

        sw_buf->page = page;
        dma_unmap_addr_set(sw_buf, mapping, mapping);

        sge->addr_hi = cpu_to_le32(U64_HI(mapping));
        sge->addr_lo = cpu_to_le32(U64_LO(mapping));

        return 0;
}

static inline int bnx2x_alloc_rx_skb(struct bnx2x *bp,
                                     struct bnx2x_fastpath *fp, u16 index)
{
        struct sk_buff *skb;
        struct sw_rx_bd *rx_buf = &fp->rx_buf_ring[index];
        struct eth_rx_bd *rx_bd = &fp->rx_desc_ring[index];
        dma_addr_t mapping;

        skb = netdev_alloc_skb(bp->dev, bp->rx_buf_size);
        if (unlikely(skb == NULL))
                return -ENOMEM;

        mapping = dma_map_single(&bp->pdev->dev, skb->data, bp->rx_buf_size,
                                 DMA_FROM_DEVICE);
        if (unlikely(dma_mapping_error(&bp->pdev->dev, mapping))) {
                dev_kfree_skb(skb);
                return -ENOMEM;
        }

        rx_buf->skb = skb;
        dma_unmap_addr_set(rx_buf, mapping, mapping);

        rx_bd->addr_hi = cpu_to_le32(U64_HI(mapping));
        rx_bd->addr_lo = cpu_to_le32(U64_LO(mapping));

        return 0;
}
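
/*
 * Refill sketch (hypothetical; "prod", "missing" and NEXT_RX_IDX() are
 * assumed from the RX path in bnx2x.h): a refill loop stops on the first
 * allocation failure and lets a later pass retry, rather than failing
 * the whole batch:
 *
 *      for (i = 0; i < missing; i++) {
 *              if (bnx2x_alloc_rx_skb(bp, fp, prod) < 0)
 *                      break;
 *              prod = NEXT_RX_IDX(prod);
 *      }
 */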

/* note that we are not allocating a new skb,
 * we are just moving one from cons to prod;
 * we are not creating a new mapping,
 * so there is no need to check for dma_mapping_error().
 */
static inline void bnx2x_reuse_rx_skb(struct bnx2x_fastpath *fp,
                                      u16 cons, u16 prod)
{
        struct bnx2x *bp = fp->bp;
        struct sw_rx_bd *cons_rx_buf = &fp->rx_buf_ring[cons];
        struct sw_rx_bd *prod_rx_buf = &fp->rx_buf_ring[prod];
        struct eth_rx_bd *cons_bd = &fp->rx_desc_ring[cons];
        struct eth_rx_bd *prod_bd = &fp->rx_desc_ring[prod];

        dma_sync_single_for_device(&bp->pdev->dev,
                                   dma_unmap_addr(cons_rx_buf, mapping),
                                   RX_COPY_THRESH, DMA_FROM_DEVICE);

        prod_rx_buf->skb = cons_rx_buf->skb;
        dma_unmap_addr_set(prod_rx_buf, mapping,
                           dma_unmap_addr(cons_rx_buf, mapping));
        *prod_bd = *cons_bd;
}
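
/*
 * Typical use (sketch, based on the usual RX fast path): when a packet
 * is dropped, or copied out because it is shorter than RX_COPY_THRESH,
 * the same buffer can be recycled straight to a producer slot:
 *
 *      bnx2x_reuse_rx_skb(fp, bd_cons, bd_prod);
 *
 * The dma_sync_single_for_device() above hands the first RX_COPY_THRESH
 * bytes back to the device, mirroring the CPU-side sync the RX path is
 * assumed to have done before peeking at the headers.
 */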

static inline void bnx2x_free_rx_sge_range(struct bnx2x *bp,
                                           struct bnx2x_fastpath *fp, int last)
{
        int i;

        for (i = 0; i < last; i++)
                bnx2x_free_rx_sge(bp, fp, i);
}

static inline void bnx2x_free_tpa_pool(struct bnx2x *bp,
                                       struct bnx2x_fastpath *fp, int last)
{
        int i;

        for (i = 0; i < last; i++) {
                struct sw_rx_bd *rx_buf = &(fp->tpa_pool[i]);
                struct sk_buff *skb = rx_buf->skb;

                if (skb == NULL) {
                        DP(NETIF_MSG_IFDOWN, "tpa bin %d empty on free\n", i);
                        continue;
                }

                if (fp->tpa_state[i] == BNX2X_TPA_START)
                        dma_unmap_single(&bp->pdev->dev,
                                         dma_unmap_addr(rx_buf, mapping),
                                         bp->rx_buf_size, DMA_FROM_DEVICE);

                dev_kfree_skb(skb);
                rx_buf->skb = NULL;
        }
}

static inline void bnx2x_init_tx_rings(struct bnx2x *bp)
{
        int i, j;

        for_each_queue(bp, j) {
                struct bnx2x_fastpath *fp = &bp->fp[j];

                for (i = 1; i <= NUM_TX_RINGS; i++) {
                        struct eth_tx_next_bd *tx_next_bd =
                                &fp->tx_desc_ring[TX_DESC_CNT * i - 1].next_bd;

                        tx_next_bd->addr_hi =
                                cpu_to_le32(U64_HI(fp->tx_desc_mapping +
                                            BCM_PAGE_SIZE*(i % NUM_TX_RINGS)));
                        tx_next_bd->addr_lo =
                                cpu_to_le32(U64_LO(fp->tx_desc_mapping +
                                            BCM_PAGE_SIZE*(i % NUM_TX_RINGS)));
                }

                SET_FLAG(fp->tx_db.data.header.header, DOORBELL_HDR_DB_TYPE, 1);
                fp->tx_db.data.zero_fill1 = 0;
                fp->tx_db.data.prod = 0;

                fp->tx_pkt_prod = 0;
                fp->tx_pkt_cons = 0;
                fp->tx_bd_prod = 0;
                fp->tx_bd_cons = 0;
                fp->tx_pkt = 0;
        }
}

static inline void bnx2x_set_next_page_rx_bd(struct bnx2x_fastpath *fp)
{
        int i;

        for (i = 1; i <= NUM_RX_RINGS; i++) {
                struct eth_rx_bd *rx_bd;

                rx_bd = &fp->rx_desc_ring[RX_DESC_CNT * i - 2];
                rx_bd->addr_hi =
                        cpu_to_le32(U64_HI(fp->rx_desc_mapping +
                                    BCM_PAGE_SIZE*(i % NUM_RX_RINGS)));
                rx_bd->addr_lo =
                        cpu_to_le32(U64_LO(fp->rx_desc_mapping +
                                    BCM_PAGE_SIZE*(i % NUM_RX_RINGS)));
        }
}

static inline void bnx2x_set_next_page_sgl(struct bnx2x_fastpath *fp)
{
        int i;

        for (i = 1; i <= NUM_RX_SGE_PAGES; i++) {
                struct eth_rx_sge *sge;

                sge = &fp->rx_sge_ring[RX_SGE_CNT * i - 2];
                sge->addr_hi =
                        cpu_to_le32(U64_HI(fp->rx_sge_mapping +
                                    BCM_PAGE_SIZE*(i % NUM_RX_SGE_PAGES)));

                sge->addr_lo =
                        cpu_to_le32(U64_LO(fp->rx_sge_mapping +
                                    BCM_PAGE_SIZE*(i % NUM_RX_SGE_PAGES)));
        }
}

static inline void bnx2x_set_next_page_rx_cq(struct bnx2x_fastpath *fp)
{
        int i;

        for (i = 1; i <= NUM_RCQ_RINGS; i++) {
                struct eth_rx_cqe_next_page *nextpg;

                nextpg = (struct eth_rx_cqe_next_page *)
                        &fp->rx_comp_ring[RCQ_DESC_CNT * i - 1];
                nextpg->addr_hi =
                        cpu_to_le32(U64_HI(fp->rx_comp_mapping +
                                    BCM_PAGE_SIZE*(i % NUM_RCQ_RINGS)));
                nextpg->addr_lo =
                        cpu_to_le32(U64_LO(fp->rx_comp_mapping +
                                    BCM_PAGE_SIZE*(i % NUM_RCQ_RINGS)));
        }
}

static inline void __storm_memset_struct(struct bnx2x *bp,
                                         u32 addr, size_t size, u32 *data)
{
        int i;

        for (i = 0; i < size/4; i++)
                REG_WR(bp, addr + (i * 4), data[i]);
}

static inline void storm_memset_mac_filters(struct bnx2x *bp,
                        struct tstorm_eth_mac_filter_config *mac_filters,
                        u16 abs_fid)
{
        size_t size = sizeof(struct tstorm_eth_mac_filter_config);

        u32 addr = BAR_TSTRORM_INTMEM +
                        TSTORM_MAC_FILTER_CONFIG_OFFSET(abs_fid);

        __storm_memset_struct(bp, addr, size, (u32 *)mac_filters);
}

static inline void storm_memset_cmng(struct bnx2x *bp,
                                     struct cmng_struct_per_port *cmng,
                                     u8 port)
{
        size_t size = sizeof(struct cmng_struct_per_port);

        u32 addr = BAR_XSTRORM_INTMEM +
                        XSTORM_CMNG_PER_PORT_VARS_OFFSET(port);

        __storm_memset_struct(bp, addr, size, (u32 *)cmng);
}

/* HW lock for shared dual-port PHYs */
void bnx2x_acquire_phy_lock(struct bnx2x *bp);
void bnx2x_release_phy_lock(struct bnx2x *bp);

void bnx2x_link_report(struct bnx2x *bp);
int bnx2x_rx_int(struct bnx2x_fastpath *fp, int budget);
int bnx2x_tx_int(struct bnx2x_fastpath *fp);
void bnx2x_init_rx_rings(struct bnx2x *bp);
netdev_tx_t bnx2x_start_xmit(struct sk_buff *skb, struct net_device *dev);

int bnx2x_change_mac_addr(struct net_device *dev, void *p);
void bnx2x_tx_timeout(struct net_device *dev);
void bnx2x_vlan_rx_register(struct net_device *dev, struct vlan_group *vlgrp);
void bnx2x_netif_start(struct bnx2x *bp);
void bnx2x_netif_stop(struct bnx2x *bp, int disable_hw);
void bnx2x_free_irq(struct bnx2x *bp, bool disable_only);
int bnx2x_suspend(struct pci_dev *pdev, pm_message_t state);
int bnx2x_resume(struct pci_dev *pdev);
void bnx2x_free_skbs(struct bnx2x *bp);
int bnx2x_change_mtu(struct net_device *dev, int new_mtu);
int bnx2x_nic_unload(struct bnx2x *bp, int unload_mode);
int bnx2x_nic_load(struct bnx2x *bp, int load_mode);
int bnx2x_set_power_state(struct bnx2x *bp, pci_power_t state);

/**
 * Allocate/release memories outside the main driver structure.
 *
 * @param bp
 *
 * @return int
 */
int __devinit bnx2x_alloc_mem_bp(struct bnx2x *bp);
void bnx2x_free_mem_bp(struct bnx2x *bp);

#define BNX2X_FW_IP_HDR_ALIGN_PAD       2 /* FW places hdr with this padding */

#endif /* BNX2X_CMN_H */
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +0000901#endif /* BNX2X_CMN_H */