/* bnx2x_stats.c: Broadcom Everest network driver.
 *
 * Copyright (c) 2007-2013 Broadcom Corporation
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation.
 *
 * Maintained by: Eilon Greenstein <eilong@broadcom.com>
 * Written by: Eliezer Tamir
 * Based on code from Michael Chan's bnx2 driver
 * UDP CSUM errata workaround by Arik Gendelman
 * Slowpath and fastpath rework by Vladislav Zolotarov
 * Statistics and Link management by Yitchak Gertner
 *
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include "bnx2x_stats.h"
#include "bnx2x_cmn.h"
#include "bnx2x_sriov.h"

/* Statistics */

/*
 * General service functions
 */

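/* Many device counters are kept as {hi, lo} pairs of 32-bit words;
 * bnx2x_hilo() folds such a pair into a single value. hiref must point at
 * the hi word, with the lo word stored immediately after it. A worked
 * example (assuming HILO_U64(hi, lo) has the usual ((u64)hi << 32) + lo
 * definition): hi = 0x1, lo = 0x2 yields 0x100000002 on 64-bit kernels,
 * while 32-bit kernels return only the lo word, 0x2.
 */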
static inline long bnx2x_hilo(u32 *hiref)
{
	u32 lo = *(hiref + 1);
#if (BITS_PER_LONG == 64)
	u32 hi = *hiref;

	return HILO_U64(hi, lo);
#else
	return lo;
#endif
}

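/* Compute, in 32-bit dwords, how much of struct host_port_stats should be
 * DMAed to/from the MFW. Newer bootcodes advertise the exact size via
 * shmem2; older ones only support the fields up to 'not_used' (plus the
 * PFC counters when BC_SUPPORTS_PFC_STATS is set).
 */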
static inline u16 bnx2x_get_port_stats_dma_len(struct bnx2x *bp)
{
	u16 res = 0;

	/* 'newest' convention - shmem2 contains the size of the port stats */
	if (SHMEM2_HAS(bp, sizeof_port_stats)) {
		u32 size = SHMEM2_RD(bp, sizeof_port_stats);
		if (size)
			res = size;

		/* prevent newer BC from causing buffer overflow */
		if (res > sizeof(struct host_port_stats))
			res = sizeof(struct host_port_stats);
	}

	/* Older convention - all BCs support the port stats' fields up until
	 * the 'not_used' field
	 */
	if (!res) {
		res = offsetof(struct host_port_stats, not_used) + 4;

		/* if PFC stats are supported by the MFW, DMA them as well */
		if (bp->flags & BC_SUPPORTS_PFC_STATS) {
			res += offsetof(struct host_port_stats,
					pfc_frames_rx_lo) -
			       offsetof(struct host_port_stats,
					pfc_frames_tx_hi) + 4;
		}
	}

	res >>= 2;

	WARN_ON(res > 2 * DMAE_LEN32_RD_MAX);
	return res;
}

/*
 * Init service functions
 */

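/* Log the FW statistics request (header plus each per-queue query) under
 * the BNX2X_MSG_STATS debug level.
 */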
static void bnx2x_dp_stats(struct bnx2x *bp)
{
	int i;

	DP(BNX2X_MSG_STATS, "dumping stats:\n"
	   "fw_stats_req\n"
	   "    hdr\n"
	   "        cmd_num %d\n"
	   "        reserved0 %d\n"
	   "        drv_stats_counter %d\n"
	   "        reserved1 %d\n"
	   "        stats_counters_addrs %x %x\n",
	   bp->fw_stats_req->hdr.cmd_num,
	   bp->fw_stats_req->hdr.reserved0,
	   bp->fw_stats_req->hdr.drv_stats_counter,
	   bp->fw_stats_req->hdr.reserved1,
	   bp->fw_stats_req->hdr.stats_counters_addrs.hi,
	   bp->fw_stats_req->hdr.stats_counters_addrs.lo);

	for (i = 0; i < bp->fw_stats_req->hdr.cmd_num; i++) {
		DP(BNX2X_MSG_STATS,
		   "query[%d]\n"
		   "    kind %d\n"
		   "    index %d\n"
		   "    funcID %d\n"
		   "    reserved %d\n"
		   "    address %x %x\n",
		   i, bp->fw_stats_req->query[i].kind,
		   bp->fw_stats_req->query[i].index,
		   bp->fw_stats_req->query[i].funcID,
		   bp->fw_stats_req->query[i].reserved,
		   bp->fw_stats_req->query[i].address.hi,
		   bp->fw_stats_req->query[i].address.lo);
	}
}

/* Post the next statistics ramrod. Protect it with the spin lock in
 * order to ensure the strict order between statistics ramrods
 * (each ramrod has a sequence number passed in a
 * bp->fw_stats_req->hdr.drv_stats_counter and ramrods must be
 * sent in order).
 */
static void bnx2x_storm_stats_post(struct bnx2x *bp)
{
	if (!bp->stats_pending) {
		int rc;

		spin_lock_bh(&bp->stats_lock);

		if (bp->stats_pending) {
			spin_unlock_bh(&bp->stats_lock);
			return;
		}

		bp->fw_stats_req->hdr.drv_stats_counter =
			cpu_to_le16(bp->stats_counter++);

		DP(BNX2X_MSG_STATS, "Sending statistics ramrod %d\n",
		   bp->fw_stats_req->hdr.drv_stats_counter);

		/* adjust the ramrod to include VF queues statistics */
		bnx2x_iov_adjust_stats_req(bp);
		bnx2x_dp_stats(bp);

		/* send FW stats ramrod */
		rc = bnx2x_sp_post(bp, RAMROD_CMD_ID_COMMON_STAT_QUERY, 0,
				   U64_HI(bp->fw_stats_req_mapping),
				   U64_LO(bp->fw_stats_req_mapping),
				   NONE_CONNECTION_TYPE);
		if (rc == 0)
			bp->stats_pending = 1;

		spin_unlock_bh(&bp->stats_lock);
	}
}

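/* Fire the DMAE commands prepared in the slowpath buffer: when several
 * commands are queued (bp->executer_idx != 0), a loader command chains
 * them through the DMAE "go" registers so they execute back-to-back;
 * otherwise only the function statistics block is posted directly.
 */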
static void bnx2x_hw_stats_post(struct bnx2x *bp)
{
	struct dmae_command *dmae = &bp->stats_dmae;
	u32 *stats_comp = bnx2x_sp(bp, stats_comp);

	*stats_comp = DMAE_COMP_VAL;
	if (CHIP_REV_IS_SLOW(bp))
		return;

	/* Update MCP's statistics if possible */
	if (bp->func_stx)
		memcpy(bnx2x_sp(bp, func_stats), &bp->func_stats,
		       sizeof(bp->func_stats));

	/* loader */
	if (bp->executer_idx) {
		int loader_idx = PMF_DMAE_C(bp);
		u32 opcode = bnx2x_dmae_opcode(bp, DMAE_SRC_PCI, DMAE_DST_GRC,
					       true, DMAE_COMP_GRC);
		opcode = bnx2x_dmae_opcode_clr_src_reset(opcode);

		memset(dmae, 0, sizeof(struct dmae_command));
		dmae->opcode = opcode;
		dmae->src_addr_lo = U64_LO(bnx2x_sp_mapping(bp, dmae[0]));
		dmae->src_addr_hi = U64_HI(bnx2x_sp_mapping(bp, dmae[0]));
		dmae->dst_addr_lo = (DMAE_REG_CMD_MEM +
				     sizeof(struct dmae_command) *
				     (loader_idx + 1)) >> 2;
		dmae->dst_addr_hi = 0;
		dmae->len = sizeof(struct dmae_command) >> 2;
		if (CHIP_IS_E1(bp))
			dmae->len--;
		dmae->comp_addr_lo = dmae_reg_go_c[loader_idx + 1] >> 2;
		dmae->comp_addr_hi = 0;
		dmae->comp_val = 1;

		*stats_comp = 0;
		bnx2x_post_dmae(bp, dmae, loader_idx);

	} else if (bp->func_stx) {
		*stats_comp = 0;
		bnx2x_post_dmae(bp, dmae, INIT_DMAE_C(bp));
	}
}

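/* Wait (up to roughly 10-20ms) for the last posted DMAE chain to write
 * DMAE_COMP_VAL into the slowpath completion word.
 */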
static int bnx2x_stats_comp(struct bnx2x *bp)
{
	u32 *stats_comp = bnx2x_sp(bp, stats_comp);
	int cnt = 10;

	might_sleep();
	while (*stats_comp != DMAE_COMP_VAL) {
		if (!cnt) {
			BNX2X_ERR("timeout waiting for stats finished\n");
			break;
		}
		cnt--;
		usleep_range(1000, 2000);
	}
	return 1;
}

/*
 * Statistics service functions
 */

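/* PMF statistics readback: pull the port statistics block back from the
 * MCP's shmem area in two DMAE reads, since a single DMAE read is capped
 * at DMAE_LEN32_RD_MAX dwords.
 */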
/* should be called under stats_sema */
static void __bnx2x_stats_pmf_update(struct bnx2x *bp)
{
	struct dmae_command *dmae;
	u32 opcode;
	int loader_idx = PMF_DMAE_C(bp);
	u32 *stats_comp = bnx2x_sp(bp, stats_comp);

	/* sanity */
	if (!bp->port.pmf || !bp->port.port_stx) {
		BNX2X_ERR("BUG!\n");
		return;
	}

	bp->executer_idx = 0;

	opcode = bnx2x_dmae_opcode(bp, DMAE_SRC_GRC, DMAE_DST_PCI, false, 0);

	dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
	dmae->opcode = bnx2x_dmae_opcode_add_comp(opcode, DMAE_COMP_GRC);
	dmae->src_addr_lo = bp->port.port_stx >> 2;
	dmae->src_addr_hi = 0;
	dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, port_stats));
	dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, port_stats));
	dmae->len = DMAE_LEN32_RD_MAX;
	dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
	dmae->comp_addr_hi = 0;
	dmae->comp_val = 1;

	dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
	dmae->opcode = bnx2x_dmae_opcode_add_comp(opcode, DMAE_COMP_PCI);
	dmae->src_addr_lo = (bp->port.port_stx >> 2) + DMAE_LEN32_RD_MAX;
	dmae->src_addr_hi = 0;
	dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, port_stats) +
				   DMAE_LEN32_RD_MAX * 4);
	dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, port_stats) +
				   DMAE_LEN32_RD_MAX * 4);
	dmae->len = bnx2x_get_port_stats_dma_len(bp) - DMAE_LEN32_RD_MAX;

	dmae->comp_addr_lo = U64_LO(bnx2x_sp_mapping(bp, stats_comp));
	dmae->comp_addr_hi = U64_HI(bnx2x_sp_mapping(bp, stats_comp));
	dmae->comp_val = DMAE_COMP_VAL;

	*stats_comp = 0;
	bnx2x_hw_stats_post(bp);
	bnx2x_stats_comp(bp);
}

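/* Build the PMF DMAE chain for a full statistics round: write the current
 * port/function stats out to the MCP, then read the fresh MAC
 * (EMAC/BMAC/MSTAT) and NIG counters back into the slowpath buffers.
 * Only the final command completes to PCI, which is what
 * bnx2x_stats_comp() polls for.
 */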
static void bnx2x_port_stats_init(struct bnx2x *bp)
{
	struct dmae_command *dmae;
	int port = BP_PORT(bp);
	u32 opcode;
	int loader_idx = PMF_DMAE_C(bp);
	u32 mac_addr;
	u32 *stats_comp = bnx2x_sp(bp, stats_comp);

	/* sanity */
	if (!bp->link_vars.link_up || !bp->port.pmf) {
		BNX2X_ERR("BUG!\n");
		return;
	}

	bp->executer_idx = 0;

	/* MCP */
	opcode = bnx2x_dmae_opcode(bp, DMAE_SRC_PCI, DMAE_DST_GRC,
				   true, DMAE_COMP_GRC);

	if (bp->port.port_stx) {

		dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
		dmae->opcode = opcode;
		dmae->src_addr_lo = U64_LO(bnx2x_sp_mapping(bp, port_stats));
		dmae->src_addr_hi = U64_HI(bnx2x_sp_mapping(bp, port_stats));
		dmae->dst_addr_lo = bp->port.port_stx >> 2;
		dmae->dst_addr_hi = 0;
		dmae->len = bnx2x_get_port_stats_dma_len(bp);
		dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
		dmae->comp_addr_hi = 0;
		dmae->comp_val = 1;
	}

	if (bp->func_stx) {

		dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
		dmae->opcode = opcode;
		dmae->src_addr_lo = U64_LO(bnx2x_sp_mapping(bp, func_stats));
		dmae->src_addr_hi = U64_HI(bnx2x_sp_mapping(bp, func_stats));
		dmae->dst_addr_lo = bp->func_stx >> 2;
		dmae->dst_addr_hi = 0;
		dmae->len = sizeof(struct host_func_stats) >> 2;
		dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
		dmae->comp_addr_hi = 0;
		dmae->comp_val = 1;
	}

	/* MAC */
	opcode = bnx2x_dmae_opcode(bp, DMAE_SRC_GRC, DMAE_DST_PCI,
				   true, DMAE_COMP_GRC);

	/* EMAC is special */
	if (bp->link_vars.mac_type == MAC_TYPE_EMAC) {
		mac_addr = (port ? GRCBASE_EMAC1 : GRCBASE_EMAC0);

		/* EMAC_REG_EMAC_RX_STAT_AC (EMAC_REG_EMAC_RX_STAT_AC_COUNT)*/
		dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
		dmae->opcode = opcode;
		dmae->src_addr_lo = (mac_addr +
				     EMAC_REG_EMAC_RX_STAT_AC) >> 2;
		dmae->src_addr_hi = 0;
		dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, mac_stats));
		dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, mac_stats));
		dmae->len = EMAC_REG_EMAC_RX_STAT_AC_COUNT;
		dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
		dmae->comp_addr_hi = 0;
		dmae->comp_val = 1;

		/* EMAC_REG_EMAC_RX_STAT_AC_28 */
		dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
		dmae->opcode = opcode;
		dmae->src_addr_lo = (mac_addr +
				     EMAC_REG_EMAC_RX_STAT_AC_28) >> 2;
		dmae->src_addr_hi = 0;
		dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, mac_stats) +
		     offsetof(struct emac_stats, rx_stat_falsecarriererrors));
		dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, mac_stats) +
		     offsetof(struct emac_stats, rx_stat_falsecarriererrors));
		dmae->len = 1;
		dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
		dmae->comp_addr_hi = 0;
		dmae->comp_val = 1;

		/* EMAC_REG_EMAC_TX_STAT_AC (EMAC_REG_EMAC_TX_STAT_AC_COUNT)*/
		dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
		dmae->opcode = opcode;
		dmae->src_addr_lo = (mac_addr +
				     EMAC_REG_EMAC_TX_STAT_AC) >> 2;
		dmae->src_addr_hi = 0;
		dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, mac_stats) +
			offsetof(struct emac_stats, tx_stat_ifhcoutoctets));
		dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, mac_stats) +
			offsetof(struct emac_stats, tx_stat_ifhcoutoctets));
		dmae->len = EMAC_REG_EMAC_TX_STAT_AC_COUNT;
		dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
		dmae->comp_addr_hi = 0;
		dmae->comp_val = 1;
	} else {
		u32 tx_src_addr_lo, rx_src_addr_lo;
		u16 rx_len, tx_len;

		/* configure the params according to MAC type */
		switch (bp->link_vars.mac_type) {
		case MAC_TYPE_BMAC:
			mac_addr = (port ? NIG_REG_INGRESS_BMAC1_MEM :
					   NIG_REG_INGRESS_BMAC0_MEM);

			/* BIGMAC_REGISTER_TX_STAT_GTPKT ..
			   BIGMAC_REGISTER_TX_STAT_GTBYT */
			if (CHIP_IS_E1x(bp)) {
				tx_src_addr_lo = (mac_addr +
					BIGMAC_REGISTER_TX_STAT_GTPKT) >> 2;
				tx_len = (8 + BIGMAC_REGISTER_TX_STAT_GTBYT -
					  BIGMAC_REGISTER_TX_STAT_GTPKT) >> 2;
				rx_src_addr_lo = (mac_addr +
					BIGMAC_REGISTER_RX_STAT_GR64) >> 2;
				rx_len = (8 + BIGMAC_REGISTER_RX_STAT_GRIPJ -
					  BIGMAC_REGISTER_RX_STAT_GR64) >> 2;
			} else {
				tx_src_addr_lo = (mac_addr +
					BIGMAC2_REGISTER_TX_STAT_GTPOK) >> 2;
				tx_len = (8 + BIGMAC2_REGISTER_TX_STAT_GTBYT -
					  BIGMAC2_REGISTER_TX_STAT_GTPOK) >> 2;
				rx_src_addr_lo = (mac_addr +
					BIGMAC2_REGISTER_RX_STAT_GR64) >> 2;
				rx_len = (8 + BIGMAC2_REGISTER_RX_STAT_GRIPJ -
					  BIGMAC2_REGISTER_RX_STAT_GR64) >> 2;
			}
			break;

		case MAC_TYPE_UMAC: /* handled by MSTAT */
		case MAC_TYPE_XMAC: /* handled by MSTAT */
		default:
			mac_addr = port ? GRCBASE_MSTAT1 : GRCBASE_MSTAT0;
			tx_src_addr_lo = (mac_addr +
					  MSTAT_REG_TX_STAT_GTXPOK_LO) >> 2;
			rx_src_addr_lo = (mac_addr +
					  MSTAT_REG_RX_STAT_GR64_LO) >> 2;
			tx_len = sizeof(bp->slowpath->
					mac_stats.mstat_stats.stats_tx) >> 2;
			rx_len = sizeof(bp->slowpath->
					mac_stats.mstat_stats.stats_rx) >> 2;
			break;
		}

		/* TX stats */
		dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
		dmae->opcode = opcode;
		dmae->src_addr_lo = tx_src_addr_lo;
		dmae->src_addr_hi = 0;
		dmae->len = tx_len;
		dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, mac_stats));
		dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, mac_stats));
		dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
		dmae->comp_addr_hi = 0;
		dmae->comp_val = 1;

		/* RX stats */
		dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
		dmae->opcode = opcode;
		dmae->src_addr_hi = 0;
		dmae->src_addr_lo = rx_src_addr_lo;
		dmae->dst_addr_lo =
			U64_LO(bnx2x_sp_mapping(bp, mac_stats) + (tx_len << 2));
		dmae->dst_addr_hi =
			U64_HI(bnx2x_sp_mapping(bp, mac_stats) + (tx_len << 2));
		dmae->len = rx_len;
		dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
		dmae->comp_addr_hi = 0;
		dmae->comp_val = 1;
	}

	/* NIG */
	if (!CHIP_IS_E3(bp)) {
		dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
		dmae->opcode = opcode;
		dmae->src_addr_lo = (port ? NIG_REG_STAT1_EGRESS_MAC_PKT0 :
					    NIG_REG_STAT0_EGRESS_MAC_PKT0) >> 2;
		dmae->src_addr_hi = 0;
		dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, nig_stats) +
				offsetof(struct nig_stats, egress_mac_pkt0_lo));
		dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, nig_stats) +
				offsetof(struct nig_stats, egress_mac_pkt0_lo));
		dmae->len = (2*sizeof(u32)) >> 2;
		dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
		dmae->comp_addr_hi = 0;
		dmae->comp_val = 1;

		dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
		dmae->opcode = opcode;
		dmae->src_addr_lo = (port ? NIG_REG_STAT1_EGRESS_MAC_PKT1 :
					    NIG_REG_STAT0_EGRESS_MAC_PKT1) >> 2;
		dmae->src_addr_hi = 0;
		dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, nig_stats) +
				offsetof(struct nig_stats, egress_mac_pkt1_lo));
		dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, nig_stats) +
				offsetof(struct nig_stats, egress_mac_pkt1_lo));
		dmae->len = (2*sizeof(u32)) >> 2;
		dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
		dmae->comp_addr_hi = 0;
		dmae->comp_val = 1;
	}

	dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
	dmae->opcode = bnx2x_dmae_opcode(bp, DMAE_SRC_GRC, DMAE_DST_PCI,
					 true, DMAE_COMP_PCI);
	dmae->src_addr_lo = (port ? NIG_REG_STAT1_BRB_DISCARD :
				    NIG_REG_STAT0_BRB_DISCARD) >> 2;
	dmae->src_addr_hi = 0;
	dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, nig_stats));
	dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, nig_stats));
	dmae->len = (sizeof(struct nig_stats) - 4*sizeof(u32)) >> 2;

	dmae->comp_addr_lo = U64_LO(bnx2x_sp_mapping(bp, stats_comp));
	dmae->comp_addr_hi = U64_HI(bnx2x_sp_mapping(bp, stats_comp));
	dmae->comp_val = DMAE_COMP_VAL;

	*stats_comp = 0;
}

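/* Single-command variant used when this function is not the PMF: only the
 * function statistics block is written out to its shmem area.
 */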
static void bnx2x_func_stats_init(struct bnx2x *bp)
{
	struct dmae_command *dmae = &bp->stats_dmae;
	u32 *stats_comp = bnx2x_sp(bp, stats_comp);

	/* sanity */
	if (!bp->func_stx) {
		BNX2X_ERR("BUG!\n");
		return;
	}

	bp->executer_idx = 0;
	memset(dmae, 0, sizeof(struct dmae_command));

	dmae->opcode = bnx2x_dmae_opcode(bp, DMAE_SRC_PCI, DMAE_DST_GRC,
					 true, DMAE_COMP_PCI);
	dmae->src_addr_lo = U64_LO(bnx2x_sp_mapping(bp, func_stats));
	dmae->src_addr_hi = U64_HI(bnx2x_sp_mapping(bp, func_stats));
	dmae->dst_addr_lo = bp->func_stx >> 2;
	dmae->dst_addr_hi = 0;
	dmae->len = sizeof(struct host_func_stats) >> 2;
	dmae->comp_addr_lo = U64_LO(bnx2x_sp_mapping(bp, stats_comp));
	dmae->comp_addr_hi = U64_HI(bnx2x_sp_mapping(bp, stats_comp));
	dmae->comp_val = DMAE_COMP_VAL;

	*stats_comp = 0;
}

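/* Start a statistics round: (re)build the DMAE programs and post the
 * first HW and storm requests. On a VF there is no HW collection, so it
 * only marks statistics as started.
 */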
/* should be called under stats_sema */
static void __bnx2x_stats_start(struct bnx2x *bp)
{
	if (IS_PF(bp)) {
		if (bp->port.pmf)
			bnx2x_port_stats_init(bp);

		else if (bp->func_stx)
			bnx2x_func_stats_init(bp);

		bnx2x_hw_stats_post(bp);
		bnx2x_storm_stats_post(bp);
	}

	bp->stats_started = true;
}

static void bnx2x_stats_start(struct bnx2x *bp)
{
	if (down_timeout(&bp->stats_sema, HZ/10))
		BNX2X_ERR("Unable to acquire stats lock\n");
	__bnx2x_stats_start(bp);
	up(&bp->stats_sema);
}

static void bnx2x_stats_pmf_start(struct bnx2x *bp)
{
	if (down_timeout(&bp->stats_sema, HZ/10))
		BNX2X_ERR("Unable to acquire stats lock\n");
	bnx2x_stats_comp(bp);
	__bnx2x_stats_pmf_update(bp);
	__bnx2x_stats_start(bp);
	up(&bp->stats_sema);
}

static void bnx2x_stats_pmf_update(struct bnx2x *bp)
{
	if (down_timeout(&bp->stats_sema, HZ/10))
		BNX2X_ERR("Unable to acquire stats lock\n");
	__bnx2x_stats_pmf_update(bp);
	up(&bp->stats_sema);
}

static void bnx2x_stats_restart(struct bnx2x *bp)
{
	/* vfs travel through here as part of the statistics FSM, but no action
	 * is required
	 */
	if (IS_VF(bp))
		return;
	if (down_timeout(&bp->stats_sema, HZ/10))
		BNX2X_ERR("Unable to acquire stats lock\n");
	bnx2x_stats_comp(bp);
	__bnx2x_stats_start(bp);
	up(&bp->stats_sema);
}

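/* Fold the B-MAC counters accumulated by DMAE into the port stats: the
 * bmac1 layout is used on E1x chips, bmac2 elsewhere, and only bmac2
 * provides the PFC frame counters.
 */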
static void bnx2x_bmac_stats_update(struct bnx2x *bp)
{
	struct host_port_stats *pstats = bnx2x_sp(bp, port_stats);
	struct bnx2x_eth_stats *estats = &bp->eth_stats;
	struct {
		u32 lo;
		u32 hi;
	} diff;

	if (CHIP_IS_E1x(bp)) {
		struct bmac1_stats *new = bnx2x_sp(bp, mac_stats.bmac1_stats);

		/* the macros below will use "bmac1_stats" type */
		UPDATE_STAT64(rx_stat_grerb, rx_stat_ifhcinbadoctets);
		UPDATE_STAT64(rx_stat_grfcs, rx_stat_dot3statsfcserrors);
		UPDATE_STAT64(rx_stat_grund, rx_stat_etherstatsundersizepkts);
		UPDATE_STAT64(rx_stat_grovr, rx_stat_dot3statsframestoolong);
		UPDATE_STAT64(rx_stat_grfrg, rx_stat_etherstatsfragments);
		UPDATE_STAT64(rx_stat_grjbr, rx_stat_etherstatsjabbers);
		UPDATE_STAT64(rx_stat_grxcf, rx_stat_maccontrolframesreceived);
		UPDATE_STAT64(rx_stat_grxpf, rx_stat_xoffstateentered);
		UPDATE_STAT64(rx_stat_grxpf, rx_stat_mac_xpf);

		UPDATE_STAT64(tx_stat_gtxpf, tx_stat_outxoffsent);
		UPDATE_STAT64(tx_stat_gtxpf, tx_stat_flowcontroldone);
		UPDATE_STAT64(tx_stat_gt64, tx_stat_etherstatspkts64octets);
		UPDATE_STAT64(tx_stat_gt127,
				tx_stat_etherstatspkts65octetsto127octets);
		UPDATE_STAT64(tx_stat_gt255,
				tx_stat_etherstatspkts128octetsto255octets);
		UPDATE_STAT64(tx_stat_gt511,
				tx_stat_etherstatspkts256octetsto511octets);
		UPDATE_STAT64(tx_stat_gt1023,
				tx_stat_etherstatspkts512octetsto1023octets);
		UPDATE_STAT64(tx_stat_gt1518,
				tx_stat_etherstatspkts1024octetsto1522octets);
		UPDATE_STAT64(tx_stat_gt2047, tx_stat_mac_2047);
		UPDATE_STAT64(tx_stat_gt4095, tx_stat_mac_4095);
		UPDATE_STAT64(tx_stat_gt9216, tx_stat_mac_9216);
		UPDATE_STAT64(tx_stat_gt16383, tx_stat_mac_16383);
		UPDATE_STAT64(tx_stat_gterr,
				tx_stat_dot3statsinternalmactransmiterrors);
		UPDATE_STAT64(tx_stat_gtufl, tx_stat_mac_ufl);

	} else {
		struct bmac2_stats *new = bnx2x_sp(bp, mac_stats.bmac2_stats);

		/* the macros below will use "bmac2_stats" type */
		UPDATE_STAT64(rx_stat_grerb, rx_stat_ifhcinbadoctets);
		UPDATE_STAT64(rx_stat_grfcs, rx_stat_dot3statsfcserrors);
		UPDATE_STAT64(rx_stat_grund, rx_stat_etherstatsundersizepkts);
		UPDATE_STAT64(rx_stat_grovr, rx_stat_dot3statsframestoolong);
		UPDATE_STAT64(rx_stat_grfrg, rx_stat_etherstatsfragments);
		UPDATE_STAT64(rx_stat_grjbr, rx_stat_etherstatsjabbers);
		UPDATE_STAT64(rx_stat_grxcf, rx_stat_maccontrolframesreceived);
		UPDATE_STAT64(rx_stat_grxpf, rx_stat_xoffstateentered);
		UPDATE_STAT64(rx_stat_grxpf, rx_stat_mac_xpf);
		UPDATE_STAT64(tx_stat_gtxpf, tx_stat_outxoffsent);
		UPDATE_STAT64(tx_stat_gtxpf, tx_stat_flowcontroldone);
		UPDATE_STAT64(tx_stat_gt64, tx_stat_etherstatspkts64octets);
		UPDATE_STAT64(tx_stat_gt127,
				tx_stat_etherstatspkts65octetsto127octets);
		UPDATE_STAT64(tx_stat_gt255,
				tx_stat_etherstatspkts128octetsto255octets);
		UPDATE_STAT64(tx_stat_gt511,
				tx_stat_etherstatspkts256octetsto511octets);
		UPDATE_STAT64(tx_stat_gt1023,
				tx_stat_etherstatspkts512octetsto1023octets);
		UPDATE_STAT64(tx_stat_gt1518,
				tx_stat_etherstatspkts1024octetsto1522octets);
		UPDATE_STAT64(tx_stat_gt2047, tx_stat_mac_2047);
		UPDATE_STAT64(tx_stat_gt4095, tx_stat_mac_4095);
		UPDATE_STAT64(tx_stat_gt9216, tx_stat_mac_9216);
		UPDATE_STAT64(tx_stat_gt16383, tx_stat_mac_16383);
		UPDATE_STAT64(tx_stat_gterr,
				tx_stat_dot3statsinternalmactransmiterrors);
		UPDATE_STAT64(tx_stat_gtufl, tx_stat_mac_ufl);

		/* collect PFC stats */
		pstats->pfc_frames_tx_hi = new->tx_stat_gtpp_hi;
		pstats->pfc_frames_tx_lo = new->tx_stat_gtpp_lo;

		pstats->pfc_frames_rx_hi = new->rx_stat_grpp_hi;
		pstats->pfc_frames_rx_lo = new->rx_stat_grpp_lo;
	}

	estats->pause_frames_received_hi =
		pstats->mac_stx[1].rx_stat_mac_xpf_hi;
	estats->pause_frames_received_lo =
		pstats->mac_stx[1].rx_stat_mac_xpf_lo;

	estats->pause_frames_sent_hi =
		pstats->mac_stx[1].tx_stat_outxoffsent_hi;
	estats->pause_frames_sent_lo =
		pstats->mac_stx[1].tx_stat_outxoffsent_lo;

	estats->pfc_frames_received_hi =
		pstats->pfc_frames_rx_hi;
	estats->pfc_frames_received_lo =
		pstats->pfc_frames_rx_lo;
	estats->pfc_frames_sent_hi =
		pstats->pfc_frames_tx_hi;
	estats->pfc_frames_sent_lo =
		pstats->pfc_frames_tx_lo;
}

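/* Fold the MSTAT counters (UMAC/XMAC MACs) into the port stats. MSTAT
 * splits TX sizes into finer buckets, so everything above 1522 octets is
 * re-accumulated into etherstatspktsover1522octets below.
 */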
static void bnx2x_mstat_stats_update(struct bnx2x *bp)
{
	struct host_port_stats *pstats = bnx2x_sp(bp, port_stats);
	struct bnx2x_eth_stats *estats = &bp->eth_stats;

	struct mstat_stats *new = bnx2x_sp(bp, mac_stats.mstat_stats);

	ADD_STAT64(stats_rx.rx_grerb, rx_stat_ifhcinbadoctets);
	ADD_STAT64(stats_rx.rx_grfcs, rx_stat_dot3statsfcserrors);
	ADD_STAT64(stats_rx.rx_grund, rx_stat_etherstatsundersizepkts);
	ADD_STAT64(stats_rx.rx_grovr, rx_stat_dot3statsframestoolong);
	ADD_STAT64(stats_rx.rx_grfrg, rx_stat_etherstatsfragments);
	ADD_STAT64(stats_rx.rx_grxcf, rx_stat_maccontrolframesreceived);
	ADD_STAT64(stats_rx.rx_grxpf, rx_stat_xoffstateentered);
	ADD_STAT64(stats_rx.rx_grxpf, rx_stat_mac_xpf);
	ADD_STAT64(stats_tx.tx_gtxpf, tx_stat_outxoffsent);
	ADD_STAT64(stats_tx.tx_gtxpf, tx_stat_flowcontroldone);

	/* collect pfc stats */
	ADD_64(pstats->pfc_frames_tx_hi, new->stats_tx.tx_gtxpp_hi,
	       pstats->pfc_frames_tx_lo, new->stats_tx.tx_gtxpp_lo);
	ADD_64(pstats->pfc_frames_rx_hi, new->stats_rx.rx_grxpp_hi,
	       pstats->pfc_frames_rx_lo, new->stats_rx.rx_grxpp_lo);

	ADD_STAT64(stats_tx.tx_gt64, tx_stat_etherstatspkts64octets);
	ADD_STAT64(stats_tx.tx_gt127,
			tx_stat_etherstatspkts65octetsto127octets);
	ADD_STAT64(stats_tx.tx_gt255,
			tx_stat_etherstatspkts128octetsto255octets);
	ADD_STAT64(stats_tx.tx_gt511,
			tx_stat_etherstatspkts256octetsto511octets);
	ADD_STAT64(stats_tx.tx_gt1023,
			tx_stat_etherstatspkts512octetsto1023octets);
	ADD_STAT64(stats_tx.tx_gt1518,
			tx_stat_etherstatspkts1024octetsto1522octets);
	ADD_STAT64(stats_tx.tx_gt2047, tx_stat_mac_2047);

	ADD_STAT64(stats_tx.tx_gt4095, tx_stat_mac_4095);
	ADD_STAT64(stats_tx.tx_gt9216, tx_stat_mac_9216);
	ADD_STAT64(stats_tx.tx_gt16383, tx_stat_mac_16383);

	ADD_STAT64(stats_tx.tx_gterr,
			tx_stat_dot3statsinternalmactransmiterrors);
	ADD_STAT64(stats_tx.tx_gtufl, tx_stat_mac_ufl);

	estats->etherstatspkts1024octetsto1522octets_hi =
	    pstats->mac_stx[1].tx_stat_etherstatspkts1024octetsto1522octets_hi;
	estats->etherstatspkts1024octetsto1522octets_lo =
	    pstats->mac_stx[1].tx_stat_etherstatspkts1024octetsto1522octets_lo;

	estats->etherstatspktsover1522octets_hi =
	    pstats->mac_stx[1].tx_stat_mac_2047_hi;
	estats->etherstatspktsover1522octets_lo =
	    pstats->mac_stx[1].tx_stat_mac_2047_lo;

	ADD_64(estats->etherstatspktsover1522octets_hi,
	       pstats->mac_stx[1].tx_stat_mac_4095_hi,
	       estats->etherstatspktsover1522octets_lo,
	       pstats->mac_stx[1].tx_stat_mac_4095_lo);

	ADD_64(estats->etherstatspktsover1522octets_hi,
	       pstats->mac_stx[1].tx_stat_mac_9216_hi,
	       estats->etherstatspktsover1522octets_lo,
	       pstats->mac_stx[1].tx_stat_mac_9216_lo);

	ADD_64(estats->etherstatspktsover1522octets_hi,
	       pstats->mac_stx[1].tx_stat_mac_16383_hi,
	       estats->etherstatspktsover1522octets_lo,
	       pstats->mac_stx[1].tx_stat_mac_16383_lo);

	estats->pause_frames_received_hi =
		pstats->mac_stx[1].rx_stat_mac_xpf_hi;
	estats->pause_frames_received_lo =
		pstats->mac_stx[1].rx_stat_mac_xpf_lo;

	estats->pause_frames_sent_hi =
		pstats->mac_stx[1].tx_stat_outxoffsent_hi;
	estats->pause_frames_sent_lo =
		pstats->mac_stx[1].tx_stat_outxoffsent_lo;

	estats->pfc_frames_received_hi =
		pstats->pfc_frames_rx_hi;
	estats->pfc_frames_received_lo =
		pstats->pfc_frames_rx_lo;
	estats->pfc_frames_sent_hi =
		pstats->pfc_frames_tx_hi;
	estats->pfc_frames_sent_lo =
		pstats->pfc_frames_tx_lo;
}

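/* Fold the EMAC (1G MAC) counters into the port stats; pause statistics
 * are built from the xon/xoff counters at the bottom.
 */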
static void bnx2x_emac_stats_update(struct bnx2x *bp)
{
	struct emac_stats *new = bnx2x_sp(bp, mac_stats.emac_stats);
	struct host_port_stats *pstats = bnx2x_sp(bp, port_stats);
	struct bnx2x_eth_stats *estats = &bp->eth_stats;

	UPDATE_EXTEND_STAT(rx_stat_ifhcinbadoctets);
	UPDATE_EXTEND_STAT(tx_stat_ifhcoutbadoctets);
	UPDATE_EXTEND_STAT(rx_stat_dot3statsfcserrors);
	UPDATE_EXTEND_STAT(rx_stat_dot3statsalignmenterrors);
	UPDATE_EXTEND_STAT(rx_stat_dot3statscarriersenseerrors);
	UPDATE_EXTEND_STAT(rx_stat_falsecarriererrors);
	UPDATE_EXTEND_STAT(rx_stat_etherstatsundersizepkts);
	UPDATE_EXTEND_STAT(rx_stat_dot3statsframestoolong);
	UPDATE_EXTEND_STAT(rx_stat_etherstatsfragments);
	UPDATE_EXTEND_STAT(rx_stat_etherstatsjabbers);
	UPDATE_EXTEND_STAT(rx_stat_maccontrolframesreceived);
	UPDATE_EXTEND_STAT(rx_stat_xoffstateentered);
	UPDATE_EXTEND_STAT(rx_stat_xonpauseframesreceived);
	UPDATE_EXTEND_STAT(rx_stat_xoffpauseframesreceived);
	UPDATE_EXTEND_STAT(tx_stat_outxonsent);
	UPDATE_EXTEND_STAT(tx_stat_outxoffsent);
	UPDATE_EXTEND_STAT(tx_stat_flowcontroldone);
	UPDATE_EXTEND_STAT(tx_stat_etherstatscollisions);
	UPDATE_EXTEND_STAT(tx_stat_dot3statssinglecollisionframes);
	UPDATE_EXTEND_STAT(tx_stat_dot3statsmultiplecollisionframes);
	UPDATE_EXTEND_STAT(tx_stat_dot3statsdeferredtransmissions);
	UPDATE_EXTEND_STAT(tx_stat_dot3statsexcessivecollisions);
	UPDATE_EXTEND_STAT(tx_stat_dot3statslatecollisions);
	UPDATE_EXTEND_STAT(tx_stat_etherstatspkts64octets);
	UPDATE_EXTEND_STAT(tx_stat_etherstatspkts65octetsto127octets);
	UPDATE_EXTEND_STAT(tx_stat_etherstatspkts128octetsto255octets);
	UPDATE_EXTEND_STAT(tx_stat_etherstatspkts256octetsto511octets);
	UPDATE_EXTEND_STAT(tx_stat_etherstatspkts512octetsto1023octets);
	UPDATE_EXTEND_STAT(tx_stat_etherstatspkts1024octetsto1522octets);
	UPDATE_EXTEND_STAT(tx_stat_etherstatspktsover1522octets);
	UPDATE_EXTEND_STAT(tx_stat_dot3statsinternalmactransmiterrors);

	estats->pause_frames_received_hi =
		pstats->mac_stx[1].rx_stat_xonpauseframesreceived_hi;
	estats->pause_frames_received_lo =
		pstats->mac_stx[1].rx_stat_xonpauseframesreceived_lo;
	ADD_64(estats->pause_frames_received_hi,
	       pstats->mac_stx[1].rx_stat_xoffpauseframesreceived_hi,
	       estats->pause_frames_received_lo,
	       pstats->mac_stx[1].rx_stat_xoffpauseframesreceived_lo);

	estats->pause_frames_sent_hi =
		pstats->mac_stx[1].tx_stat_outxonsent_hi;
	estats->pause_frames_sent_lo =
		pstats->mac_stx[1].tx_stat_outxonsent_lo;
	ADD_64(estats->pause_frames_sent_hi,
	       pstats->mac_stx[1].tx_stat_outxoffsent_hi,
	       estats->pause_frames_sent_lo,
	       pstats->mac_stx[1].tx_stat_outxoffsent_lo);
}

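/* Process the results of the HW statistics DMAE round: dispatch to the
 * MAC-specific update routine, fold in the NIG discard/truncate deltas
 * and, on E3, accumulate the EEE LPI counter. Returns -1 if no MAC was
 * active while the DMAE ran.
 */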
static int bnx2x_hw_stats_update(struct bnx2x *bp)
{
	struct nig_stats *new = bnx2x_sp(bp, nig_stats);
	struct nig_stats *old = &(bp->port.old_nig_stats);
	struct host_port_stats *pstats = bnx2x_sp(bp, port_stats);
	struct bnx2x_eth_stats *estats = &bp->eth_stats;
	struct {
		u32 lo;
		u32 hi;
	} diff;

	switch (bp->link_vars.mac_type) {
	case MAC_TYPE_BMAC:
		bnx2x_bmac_stats_update(bp);
		break;

	case MAC_TYPE_EMAC:
		bnx2x_emac_stats_update(bp);
		break;

	case MAC_TYPE_UMAC:
	case MAC_TYPE_XMAC:
		bnx2x_mstat_stats_update(bp);
		break;

	case MAC_TYPE_NONE: /* unreached */
		DP(BNX2X_MSG_STATS,
		   "stats updated by DMAE but no MAC active\n");
		return -1;

	default: /* unreached */
		BNX2X_ERR("Unknown MAC type\n");
	}

	ADD_EXTEND_64(pstats->brb_drop_hi, pstats->brb_drop_lo,
		      new->brb_discard - old->brb_discard);
	ADD_EXTEND_64(estats->brb_truncate_hi, estats->brb_truncate_lo,
		      new->brb_truncate - old->brb_truncate);

	if (!CHIP_IS_E3(bp)) {
		UPDATE_STAT64_NIG(egress_mac_pkt0,
					etherstatspkts1024octetsto1522octets);
		UPDATE_STAT64_NIG(egress_mac_pkt1,
					etherstatspktsover1522octets);
	}

	memcpy(old, new, sizeof(struct nig_stats));

	memcpy(&(estats->rx_stat_ifhcinbadoctets_hi), &(pstats->mac_stx[1]),
	       sizeof(struct mac_stx));
	estats->brb_drop_hi = pstats->brb_drop_hi;
	estats->brb_drop_lo = pstats->brb_drop_lo;

	pstats->host_port_stats_counter++;

	if (CHIP_IS_E3(bp)) {
		u32 lpi_reg = BP_PORT(bp) ? MISC_REG_CPMU_LP_SM_ENT_CNT_P1
					  : MISC_REG_CPMU_LP_SM_ENT_CNT_P0;
		estats->eee_tx_lpi += REG_RD(bp, lpi_reg);
	}

	if (!BP_NOMCP(bp)) {
		u32 nig_timer_max =
			SHMEM_RD(bp, port_mb[BP_PORT(bp)].stat_nig_timer);
		if (nig_timer_max != estats->nig_timer_max) {
			estats->nig_timer_max = nig_timer_max;
			BNX2X_ERR("NIG timer max (%u)\n",
				  estats->nig_timer_max);
		}
	}

	return 0;
}

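/* Each storm echoes the driver's stats counter when it has finished
 * serving a statistics ramrod; the storm data is coherent only once all
 * four storms have echoed the counter of the last ramrod sent.
 */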
static int bnx2x_storm_stats_validate_counters(struct bnx2x *bp)
{
	struct stats_counter *counters = &bp->fw_stats_data->storm_counters;
	u16 cur_stats_counter;
	/* Make sure we use the value of the counter
	 * used for sending the last stats ramrod.
	 */
	cur_stats_counter = bp->stats_counter - 1;

	/* are storm stats valid? */
	if (le16_to_cpu(counters->xstats_counter) != cur_stats_counter) {
		DP(BNX2X_MSG_STATS,
		   "stats not updated by xstorm xstorm counter (0x%x) != stats_counter (0x%x)\n",
		   le16_to_cpu(counters->xstats_counter), bp->stats_counter);
		return -EAGAIN;
	}

	if (le16_to_cpu(counters->ustats_counter) != cur_stats_counter) {
		DP(BNX2X_MSG_STATS,
		   "stats not updated by ustorm ustorm counter (0x%x) != stats_counter (0x%x)\n",
		   le16_to_cpu(counters->ustats_counter), bp->stats_counter);
		return -EAGAIN;
	}

	if (le16_to_cpu(counters->cstats_counter) != cur_stats_counter) {
		DP(BNX2X_MSG_STATS,
		   "stats not updated by cstorm cstorm counter (0x%x) != stats_counter (0x%x)\n",
		   le16_to_cpu(counters->cstats_counter), bp->stats_counter);
		return -EAGAIN;
	}

	if (le16_to_cpu(counters->tstats_counter) != cur_stats_counter) {
		DP(BNX2X_MSG_STATS,
		   "stats not updated by tstorm tstorm counter (0x%x) != stats_counter (0x%x)\n",
		   le16_to_cpu(counters->tstats_counter), bp->stats_counter);
		return -EAGAIN;
	}
	return 0;
}

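/* Aggregate the per-queue tstorm/ustorm/xstorm counters into the
 * per-queue, per-function and global ethernet statistics; returns -EAGAIN
 * without touching anything if the storms have not yet caught up with the
 * last statistics ramrod.
 */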
static int bnx2x_storm_stats_update(struct bnx2x *bp)
{
	struct tstorm_per_port_stats *tport =
		&bp->fw_stats_data->port.tstorm_port_statistics;
	struct tstorm_per_pf_stats *tfunc =
		&bp->fw_stats_data->pf.tstorm_pf_statistics;
	struct host_func_stats *fstats = &bp->func_stats;
	struct bnx2x_eth_stats *estats = &bp->eth_stats;
	struct bnx2x_eth_stats_old *estats_old = &bp->eth_stats_old;
	int i;

	/* vfs stat counter is managed by pf */
	if (IS_PF(bp) && bnx2x_storm_stats_validate_counters(bp))
		return -EAGAIN;

	estats->error_bytes_received_hi = 0;
	estats->error_bytes_received_lo = 0;

	for_each_eth_queue(bp, i) {
		struct bnx2x_fastpath *fp = &bp->fp[i];
		struct tstorm_per_queue_stats *tclient =
			&bp->fw_stats_data->queue_stats[i].
			tstorm_queue_statistics;
		struct tstorm_per_queue_stats *old_tclient =
			&bnx2x_fp_stats(bp, fp)->old_tclient;
		struct ustorm_per_queue_stats *uclient =
			&bp->fw_stats_data->queue_stats[i].
			ustorm_queue_statistics;
		struct ustorm_per_queue_stats *old_uclient =
			&bnx2x_fp_stats(bp, fp)->old_uclient;
		struct xstorm_per_queue_stats *xclient =
			&bp->fw_stats_data->queue_stats[i].
			xstorm_queue_statistics;
		struct xstorm_per_queue_stats *old_xclient =
			&bnx2x_fp_stats(bp, fp)->old_xclient;
		struct bnx2x_eth_q_stats *qstats =
			&bnx2x_fp_stats(bp, fp)->eth_q_stats;
		struct bnx2x_eth_q_stats_old *qstats_old =
			&bnx2x_fp_stats(bp, fp)->eth_q_stats_old;

		u32 diff;

		DP(BNX2X_MSG_STATS, "queue[%d]: ucast_sent 0x%x, bcast_sent 0x%x mcast_sent 0x%x\n",
		   i, xclient->ucast_pkts_sent,
		   xclient->bcast_pkts_sent, xclient->mcast_pkts_sent);

		DP(BNX2X_MSG_STATS, "---------------\n");

		UPDATE_QSTAT(tclient->rcv_bcast_bytes,
			     total_broadcast_bytes_received);
		UPDATE_QSTAT(tclient->rcv_mcast_bytes,
			     total_multicast_bytes_received);
		UPDATE_QSTAT(tclient->rcv_ucast_bytes,
			     total_unicast_bytes_received);

		/*
		 * sum to total_bytes_received all
		 * unicast/multicast/broadcast
		 */
		qstats->total_bytes_received_hi =
			qstats->total_broadcast_bytes_received_hi;
		qstats->total_bytes_received_lo =
			qstats->total_broadcast_bytes_received_lo;

		ADD_64(qstats->total_bytes_received_hi,
		       qstats->total_multicast_bytes_received_hi,
		       qstats->total_bytes_received_lo,
		       qstats->total_multicast_bytes_received_lo);

		ADD_64(qstats->total_bytes_received_hi,
		       qstats->total_unicast_bytes_received_hi,
		       qstats->total_bytes_received_lo,
		       qstats->total_unicast_bytes_received_lo);

		qstats->valid_bytes_received_hi =
			qstats->total_bytes_received_hi;
		qstats->valid_bytes_received_lo =
			qstats->total_bytes_received_lo;

		UPDATE_EXTEND_TSTAT(rcv_ucast_pkts,
				    total_unicast_packets_received);
		UPDATE_EXTEND_TSTAT(rcv_mcast_pkts,
				    total_multicast_packets_received);
		UPDATE_EXTEND_TSTAT(rcv_bcast_pkts,
				    total_broadcast_packets_received);
		UPDATE_EXTEND_E_TSTAT(pkts_too_big_discard,
				      etherstatsoverrsizepkts, 32);
		UPDATE_EXTEND_E_TSTAT(no_buff_discard, no_buff_discard, 16);

		SUB_EXTEND_USTAT(ucast_no_buff_pkts,
				 total_unicast_packets_received);
		SUB_EXTEND_USTAT(mcast_no_buff_pkts,
				 total_multicast_packets_received);
		SUB_EXTEND_USTAT(bcast_no_buff_pkts,
				 total_broadcast_packets_received);
		UPDATE_EXTEND_E_USTAT(ucast_no_buff_pkts, no_buff_discard);
		UPDATE_EXTEND_E_USTAT(mcast_no_buff_pkts, no_buff_discard);
		UPDATE_EXTEND_E_USTAT(bcast_no_buff_pkts, no_buff_discard);

		UPDATE_QSTAT(xclient->bcast_bytes_sent,
			     total_broadcast_bytes_transmitted);
		UPDATE_QSTAT(xclient->mcast_bytes_sent,
			     total_multicast_bytes_transmitted);
		UPDATE_QSTAT(xclient->ucast_bytes_sent,
			     total_unicast_bytes_transmitted);

		/*
		 * sum to total_bytes_transmitted all
		 * unicast/multicast/broadcast
		 */
		qstats->total_bytes_transmitted_hi =
			qstats->total_unicast_bytes_transmitted_hi;
		qstats->total_bytes_transmitted_lo =
			qstats->total_unicast_bytes_transmitted_lo;

		ADD_64(qstats->total_bytes_transmitted_hi,
		       qstats->total_broadcast_bytes_transmitted_hi,
		       qstats->total_bytes_transmitted_lo,
		       qstats->total_broadcast_bytes_transmitted_lo);

		ADD_64(qstats->total_bytes_transmitted_hi,
		       qstats->total_multicast_bytes_transmitted_hi,
		       qstats->total_bytes_transmitted_lo,
		       qstats->total_multicast_bytes_transmitted_lo);

		UPDATE_EXTEND_XSTAT(ucast_pkts_sent,
				    total_unicast_packets_transmitted);
		UPDATE_EXTEND_XSTAT(mcast_pkts_sent,
				    total_multicast_packets_transmitted);
		UPDATE_EXTEND_XSTAT(bcast_pkts_sent,
				    total_broadcast_packets_transmitted);

		UPDATE_EXTEND_TSTAT(checksum_discard,
				    total_packets_received_checksum_discarded);
		UPDATE_EXTEND_TSTAT(ttl0_discard,
				    total_packets_received_ttl0_discarded);

		UPDATE_EXTEND_XSTAT(error_drop_pkts,
				    total_transmitted_dropped_packets_error);

		/* TPA aggregations completed */
		UPDATE_EXTEND_E_USTAT(coalesced_events, total_tpa_aggregations);
		/* Number of network frames aggregated by TPA */
		UPDATE_EXTEND_E_USTAT(coalesced_pkts,
				      total_tpa_aggregated_frames);
		/* Total number of bytes in completed TPA aggregations */
		UPDATE_QSTAT(uclient->coalesced_bytes, total_tpa_bytes);

		UPDATE_ESTAT_QSTAT_64(total_tpa_bytes);

		UPDATE_FSTAT_QSTAT(total_bytes_received);
		UPDATE_FSTAT_QSTAT(total_bytes_transmitted);
		UPDATE_FSTAT_QSTAT(total_unicast_packets_received);
		UPDATE_FSTAT_QSTAT(total_multicast_packets_received);
		UPDATE_FSTAT_QSTAT(total_broadcast_packets_received);
		UPDATE_FSTAT_QSTAT(total_unicast_packets_transmitted);
		UPDATE_FSTAT_QSTAT(total_multicast_packets_transmitted);
		UPDATE_FSTAT_QSTAT(total_broadcast_packets_transmitted);
		UPDATE_FSTAT_QSTAT(valid_bytes_received);
	}

	ADD_64(estats->total_bytes_received_hi,
	       estats->rx_stat_ifhcinbadoctets_hi,
	       estats->total_bytes_received_lo,
	       estats->rx_stat_ifhcinbadoctets_lo);

	ADD_64_LE(estats->total_bytes_received_hi,
		  tfunc->rcv_error_bytes.hi,
		  estats->total_bytes_received_lo,
		  tfunc->rcv_error_bytes.lo);

	ADD_64_LE(estats->error_bytes_received_hi,
		  tfunc->rcv_error_bytes.hi,
		  estats->error_bytes_received_lo,
		  tfunc->rcv_error_bytes.lo);

	UPDATE_ESTAT(etherstatsoverrsizepkts, rx_stat_dot3statsframestoolong);

	ADD_64(estats->error_bytes_received_hi,
	       estats->rx_stat_ifhcinbadoctets_hi,
	       estats->error_bytes_received_lo,
	       estats->rx_stat_ifhcinbadoctets_lo);

	if (bp->port.pmf) {
		struct bnx2x_fw_port_stats_old *fwstats = &bp->fw_stats_old;
		UPDATE_FW_STAT(mac_filter_discard);
		UPDATE_FW_STAT(mf_tag_discard);
		UPDATE_FW_STAT(brb_truncate_discard);
		UPDATE_FW_STAT(mac_discard);
	}

	fstats->host_func_stats_start = ++fstats->host_func_stats_end;

	bp->stats_pending = 0;

	return 0;
}

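/* Translate the accumulated driver statistics into the generic
 * net_device_stats counters exposed through bp->dev->stats.
 */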
static void bnx2x_net_stats_update(struct bnx2x *bp)
{
	struct bnx2x_eth_stats *estats = &bp->eth_stats;
	struct net_device_stats *nstats = &bp->dev->stats;
	unsigned long tmp;
	int i;

	nstats->rx_packets =
		bnx2x_hilo(&estats->total_unicast_packets_received_hi) +
		bnx2x_hilo(&estats->total_multicast_packets_received_hi) +
		bnx2x_hilo(&estats->total_broadcast_packets_received_hi);

	nstats->tx_packets =
		bnx2x_hilo(&estats->total_unicast_packets_transmitted_hi) +
		bnx2x_hilo(&estats->total_multicast_packets_transmitted_hi) +
		bnx2x_hilo(&estats->total_broadcast_packets_transmitted_hi);

	nstats->rx_bytes = bnx2x_hilo(&estats->total_bytes_received_hi);

	nstats->tx_bytes = bnx2x_hilo(&estats->total_bytes_transmitted_hi);

	tmp = estats->mac_discard;
	for_each_rx_queue(bp, i) {
		struct tstorm_per_queue_stats *old_tclient =
			&bp->fp_stats[i].old_tclient;
		tmp += le32_to_cpu(old_tclient->checksum_discard);
	}
	nstats->rx_dropped = tmp + bp->net_stats_old.rx_dropped;

	nstats->tx_dropped = 0;

	nstats->multicast =
		bnx2x_hilo(&estats->total_multicast_packets_received_hi);

	nstats->collisions =
		bnx2x_hilo(&estats->tx_stat_etherstatscollisions_hi);

	nstats->rx_length_errors =
		bnx2x_hilo(&estats->rx_stat_etherstatsundersizepkts_hi) +
		bnx2x_hilo(&estats->etherstatsoverrsizepkts_hi);
	nstats->rx_over_errors = bnx2x_hilo(&estats->brb_drop_hi) +
				 bnx2x_hilo(&estats->brb_truncate_hi);
	nstats->rx_crc_errors =
		bnx2x_hilo(&estats->rx_stat_dot3statsfcserrors_hi);
	nstats->rx_frame_errors =
		bnx2x_hilo(&estats->rx_stat_dot3statsalignmenterrors_hi);
	nstats->rx_fifo_errors = bnx2x_hilo(&estats->no_buff_discard_hi);
	nstats->rx_missed_errors = 0;

	nstats->rx_errors = nstats->rx_length_errors +
			    nstats->rx_over_errors +
			    nstats->rx_crc_errors +
			    nstats->rx_frame_errors +
			    nstats->rx_fifo_errors +
			    nstats->rx_missed_errors;

	nstats->tx_aborted_errors =
		bnx2x_hilo(&estats->tx_stat_dot3statslatecollisions_hi) +
		bnx2x_hilo(&estats->tx_stat_dot3statsexcessivecollisions_hi);
	nstats->tx_carrier_errors =
		bnx2x_hilo(&estats->rx_stat_dot3statscarriersenseerrors_hi);
	nstats->tx_fifo_errors = 0;
	nstats->tx_heartbeat_errors = 0;
	nstats->tx_window_errors = 0;

	nstats->tx_errors = nstats->tx_aborted_errors +
			    nstats->tx_carrier_errors +
	    bnx2x_hilo(&estats->tx_stat_dot3statsinternalmactransmiterrors_hi);
}

static void bnx2x_drv_stats_update(struct bnx2x *bp)
{
	struct bnx2x_eth_stats *estats = &bp->eth_stats;
	int i;

	for_each_queue(bp, i) {
		struct bnx2x_eth_q_stats *qstats = &bp->fp_stats[i].eth_q_stats;
		struct bnx2x_eth_q_stats_old *qstats_old =
			&bp->fp_stats[i].eth_q_stats_old;

		UPDATE_ESTAT_QSTAT(driver_xoff);
		UPDATE_ESTAT_QSTAT(rx_err_discard_pkt);
		UPDATE_ESTAT_QSTAT(rx_skb_alloc_failed);
		UPDATE_ESTAT_QSTAT(hw_csum_err);
		UPDATE_ESTAT_QSTAT(driver_filtered_tx_pkt);
	}
}

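/* Check whether the edebug interface in shmem2 currently requests that
 * statistics collection be disabled.
 */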
static bool bnx2x_edebug_stats_stopped(struct bnx2x *bp)
{
	u32 val;

	if (SHMEM2_HAS(bp, edebug_driver_if[1])) {
		val = SHMEM2_RD(bp, edebug_driver_if[1]);

		if (val == EDEBUG_DRIVER_IF_OP_CODE_DISABLE_STAT)
			return true;
	}

	return false;
}

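/* Timer-driven step of the statistics FSM: skip the round if a state
 * transition is in progress, otherwise fold in the completed HW/storm
 * results and post the next round of requests.
 */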
Dmitry Kravkov6c719d02010-07-27 12:36:15 +00001246static void bnx2x_stats_update(struct bnx2x *bp)
1247{
1248 u32 *stats_comp = bnx2x_sp(bp, stats_comp);
1249
Dmitry Kravkov507393e2013-08-13 02:24:59 +03001250 /* we run update from timer context, so give up
1251 * if somebody is in the middle of transition
1252 */
1253 if (down_trylock(&bp->stats_sema))
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03001254 return;
1255
Dmitry Kravkov507393e2013-08-13 02:24:59 +03001256 if (bnx2x_edebug_stats_stopped(bp) || !bp->stats_started)
1257 goto out;
1258
Ariel Elior67c431a2013-01-01 05:22:36 +00001259 if (IS_PF(bp)) {
1260 if (*stats_comp != DMAE_COMP_VAL)
Dmitry Kravkov507393e2013-08-13 02:24:59 +03001261 goto out;
Dmitry Kravkov6c719d02010-07-27 12:36:15 +00001262
Ariel Elior67c431a2013-01-01 05:22:36 +00001263 if (bp->port.pmf)
1264 bnx2x_hw_stats_update(bp);
Dmitry Kravkov6c719d02010-07-27 12:36:15 +00001265
Ariel Elior67c431a2013-01-01 05:22:36 +00001266 if (bnx2x_storm_stats_update(bp)) {
1267 if (bp->stats_pending++ == 3) {
1268 BNX2X_ERR("storm stats were not updated for 3 times\n");
1269 bnx2x_panic();
1270 }
Dmitry Kravkov507393e2013-08-13 02:24:59 +03001271 goto out;
Dmitry Kravkovbef05402012-09-11 04:34:08 +00001272 }
Ariel Elior67c431a2013-01-01 05:22:36 +00001273 } else {
1274 /* vf doesn't collect HW statistics, and doesn't get completions
1275 * perform only update
1276 */
1277 bnx2x_storm_stats_update(bp);
Dmitry Kravkov6c719d02010-07-27 12:36:15 +00001278 }
1279
1280 bnx2x_net_stats_update(bp);
1281 bnx2x_drv_stats_update(bp);
1282
Ariel Elior67c431a2013-01-01 05:22:36 +00001283 /* vf is done */
1284 if (IS_VF(bp))
Dmitry Kravkov507393e2013-08-13 02:24:59 +03001285 goto out;
Ariel Elior67c431a2013-01-01 05:22:36 +00001286
Dmitry Kravkov6c719d02010-07-27 12:36:15 +00001287 if (netif_msg_timer(bp)) {
1288 struct bnx2x_eth_stats *estats = &bp->eth_stats;
Dmitry Kravkov6c719d02010-07-27 12:36:15 +00001289
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03001290 netdev_dbg(bp->dev, "brb drops %u brb truncate %u\n",
Dmitry Kravkov6c719d02010-07-27 12:36:15 +00001291 estats->brb_drop_lo, estats->brb_truncate_lo);
Dmitry Kravkov6c719d02010-07-27 12:36:15 +00001292 }
1293
1294 bnx2x_hw_stats_post(bp);
1295 bnx2x_storm_stats_post(bp);
Dmitry Kravkov507393e2013-08-13 02:24:59 +03001296
1297out:
1298 up(&bp->stats_sema);
Dmitry Kravkov6c719d02010-07-27 12:36:15 +00001299}

static void bnx2x_port_stats_stop(struct bnx2x *bp)
{
	struct dmae_command *dmae;
	u32 opcode;
	int loader_idx = PMF_DMAE_C(bp);
	u32 *stats_comp = bnx2x_sp(bp, stats_comp);

	bp->executer_idx = 0;

	opcode = bnx2x_dmae_opcode(bp, DMAE_SRC_PCI, DMAE_DST_GRC, false, 0);

	if (bp->port.port_stx) {

		dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
		if (bp->func_stx)
			dmae->opcode = bnx2x_dmae_opcode_add_comp(
						opcode, DMAE_COMP_GRC);
		else
			dmae->opcode = bnx2x_dmae_opcode_add_comp(
						opcode, DMAE_COMP_PCI);

		dmae->src_addr_lo = U64_LO(bnx2x_sp_mapping(bp, port_stats));
		dmae->src_addr_hi = U64_HI(bnx2x_sp_mapping(bp, port_stats));
		dmae->dst_addr_lo = bp->port.port_stx >> 2;
		dmae->dst_addr_hi = 0;
		dmae->len = bnx2x_get_port_stats_dma_len(bp);
		if (bp->func_stx) {
			dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
			dmae->comp_addr_hi = 0;
			dmae->comp_val = 1;
		} else {
			dmae->comp_addr_lo =
				U64_LO(bnx2x_sp_mapping(bp, stats_comp));
			dmae->comp_addr_hi =
				U64_HI(bnx2x_sp_mapping(bp, stats_comp));
			dmae->comp_val = DMAE_COMP_VAL;

			*stats_comp = 0;
		}
	}

	if (bp->func_stx) {

		dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
		dmae->opcode =
			bnx2x_dmae_opcode_add_comp(opcode, DMAE_COMP_PCI);
		dmae->src_addr_lo = U64_LO(bnx2x_sp_mapping(bp, func_stats));
		dmae->src_addr_hi = U64_HI(bnx2x_sp_mapping(bp, func_stats));
		dmae->dst_addr_lo = bp->func_stx >> 2;
		dmae->dst_addr_hi = 0;
		dmae->len = sizeof(struct host_func_stats) >> 2;
		dmae->comp_addr_lo = U64_LO(bnx2x_sp_mapping(bp, stats_comp));
		dmae->comp_addr_hi = U64_HI(bnx2x_sp_mapping(bp, stats_comp));
		dmae->comp_val = DMAE_COMP_VAL;

		*stats_comp = 0;
	}
}
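
/* A note on the completion scheme above (inferred from the code, so treat
 * it as a hedged reading rather than authoritative documentation): when both
 * port and function stats exist, the first DMAE completes to GRC
 * (DMAE_COMP_GRC, comp_val 1) so that the write to the loader "go" register
 * kicks off the next command in the chain, and only the final DMAE completes
 * to PCI memory (DMAE_COMP_PCI), setting *stats_comp to DMAE_COMP_VAL, which
 * bnx2x_stats_comp() polls for.
 */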

static void bnx2x_stats_stop(struct bnx2x *bp)
{
	int update = 0;

	if (down_timeout(&bp->stats_sema, HZ/10))
		BNX2X_ERR("Unable to acquire stats lock\n");

	bp->stats_started = false;

	bnx2x_stats_comp(bp);

	if (bp->port.pmf)
		update = (bnx2x_hw_stats_update(bp) == 0);

	update |= (bnx2x_storm_stats_update(bp) == 0);

	if (update) {
		bnx2x_net_stats_update(bp);

		if (bp->port.pmf)
			bnx2x_port_stats_stop(bp);

		bnx2x_hw_stats_post(bp);
		bnx2x_stats_comp(bp);
	}

	up(&bp->stats_sema);
}

static void bnx2x_stats_do_nothing(struct bnx2x *bp)
{
}

static const struct {
	void (*action)(struct bnx2x *bp);
	enum bnx2x_stats_state next_state;
} bnx2x_stats_stm[STATS_STATE_MAX][STATS_EVENT_MAX] = {
/* state	event	*/
{
/* DISABLED	PMF	*/ {bnx2x_stats_pmf_update, STATS_STATE_DISABLED},
/*		LINK_UP	*/ {bnx2x_stats_start,      STATS_STATE_ENABLED},
/*		UPDATE	*/ {bnx2x_stats_do_nothing, STATS_STATE_DISABLED},
/*		STOP	*/ {bnx2x_stats_do_nothing, STATS_STATE_DISABLED}
},
{
/* ENABLED	PMF	*/ {bnx2x_stats_pmf_start,  STATS_STATE_ENABLED},
/*		LINK_UP	*/ {bnx2x_stats_restart,    STATS_STATE_ENABLED},
/*		UPDATE	*/ {bnx2x_stats_update,     STATS_STATE_ENABLED},
/*		STOP	*/ {bnx2x_stats_stop,       STATS_STATE_DISABLED}
}
};

void bnx2x_stats_handle(struct bnx2x *bp, enum bnx2x_stats_event event)
{
	enum bnx2x_stats_state state;
	void (*action)(struct bnx2x *bp);

	if (unlikely(bp->panic))
		return;

	spin_lock_bh(&bp->stats_lock);
	state = bp->stats_state;
	bp->stats_state = bnx2x_stats_stm[state][event].next_state;
	action = bnx2x_stats_stm[state][event].action;
	spin_unlock_bh(&bp->stats_lock);

	action(bp);

	if ((event != STATS_EVENT_UPDATE) || netif_msg_timer(bp))
		DP(BNX2X_MSG_STATS, "state %d -> event %d -> state %d\n",
		   state, event, bp->stats_state);
}
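
/* Hedged usage note: other parts of the driver feed events into the state
 * machine above rather than calling the stats routines directly; e.g. a
 * link-state or load path would kick the machine with something like
 *
 *	bnx2x_stats_handle(bp, STATS_EVENT_LINK_UP);
 *
 * and the table picks the action appropriate for the current state while
 * the lock keeps the state/action pair consistent.
 */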

static void bnx2x_port_stats_base_init(struct bnx2x *bp)
{
	struct dmae_command *dmae;
	u32 *stats_comp = bnx2x_sp(bp, stats_comp);

	/* sanity */
	if (!bp->port.pmf || !bp->port.port_stx) {
		BNX2X_ERR("BUG!\n");
		return;
	}

	bp->executer_idx = 0;

	dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
	dmae->opcode = bnx2x_dmae_opcode(bp, DMAE_SRC_PCI, DMAE_DST_GRC,
					 true, DMAE_COMP_PCI);
	dmae->src_addr_lo = U64_LO(bnx2x_sp_mapping(bp, port_stats));
	dmae->src_addr_hi = U64_HI(bnx2x_sp_mapping(bp, port_stats));
	dmae->dst_addr_lo = bp->port.port_stx >> 2;
	dmae->dst_addr_hi = 0;
	dmae->len = bnx2x_get_port_stats_dma_len(bp);
	dmae->comp_addr_lo = U64_LO(bnx2x_sp_mapping(bp, stats_comp));
	dmae->comp_addr_hi = U64_HI(bnx2x_sp_mapping(bp, stats_comp));
	dmae->comp_val = DMAE_COMP_VAL;

	*stats_comp = 0;
	bnx2x_hw_stats_post(bp);
	bnx2x_stats_comp(bp);
}

/* This function prepares the statistics ramrod data in advance, so that at
 * run time we only have to increment the statistics counter and send the
 * ramrod each time we have to.
 */
static void bnx2x_prep_fw_stats_req(struct bnx2x *bp)
{
	int i;
	int first_queue_query_index;
	struct stats_query_header *stats_hdr = &bp->fw_stats_req->hdr;

	dma_addr_t cur_data_offset;
	struct stats_query_entry *cur_query_entry;

	stats_hdr->cmd_num = bp->fw_stats_num;
	stats_hdr->drv_stats_counter = 0;

	/* The storm_counters struct contains the counters of completed
	 * statistics requests per storm, which are incremented by the FW
	 * each time it completes handling a statistics ramrod. We will
	 * check these counters in the timer handler and discard a stale
	 * (statistics) ramrod completion.
	 */
	cur_data_offset = bp->fw_stats_data_mapping +
		offsetof(struct bnx2x_fw_stats_data, storm_counters);

	stats_hdr->stats_counters_addrs.hi =
		cpu_to_le32(U64_HI(cur_data_offset));
	stats_hdr->stats_counters_addrs.lo =
		cpu_to_le32(U64_LO(cur_data_offset));

	/* Prepare for the first stats ramrod (which will be completed with
	 * counters equal to zero) - init the counters to something different.
	 */
	memset(&bp->fw_stats_data->storm_counters, 0xff,
	       sizeof(struct stats_counter));

	/**** Port FW statistics data ****/
	cur_data_offset = bp->fw_stats_data_mapping +
		offsetof(struct bnx2x_fw_stats_data, port);

	cur_query_entry = &bp->fw_stats_req->query[BNX2X_PORT_QUERY_IDX];

	cur_query_entry->kind = STATS_TYPE_PORT;
	/* For port query index is a DONT CARE */
	cur_query_entry->index = BP_PORT(bp);
	/* For port query funcID is a DONT CARE */
	cur_query_entry->funcID = cpu_to_le16(BP_FUNC(bp));
	cur_query_entry->address.hi = cpu_to_le32(U64_HI(cur_data_offset));
	cur_query_entry->address.lo = cpu_to_le32(U64_LO(cur_data_offset));

	/**** PF FW statistics data ****/
	cur_data_offset = bp->fw_stats_data_mapping +
		offsetof(struct bnx2x_fw_stats_data, pf);

	cur_query_entry = &bp->fw_stats_req->query[BNX2X_PF_QUERY_IDX];

	cur_query_entry->kind = STATS_TYPE_PF;
	/* For PF query index is a DONT CARE */
	cur_query_entry->index = BP_PORT(bp);
	cur_query_entry->funcID = cpu_to_le16(BP_FUNC(bp));
	cur_query_entry->address.hi = cpu_to_le32(U64_HI(cur_data_offset));
	cur_query_entry->address.lo = cpu_to_le32(U64_LO(cur_data_offset));

	/**** FCoE FW statistics data ****/
	if (!NO_FCOE(bp)) {
		cur_data_offset = bp->fw_stats_data_mapping +
			offsetof(struct bnx2x_fw_stats_data, fcoe);

		cur_query_entry =
			&bp->fw_stats_req->query[BNX2X_FCOE_QUERY_IDX];

		cur_query_entry->kind = STATS_TYPE_FCOE;
		/* For FCoE query index is a DONT CARE */
		cur_query_entry->index = BP_PORT(bp);
		cur_query_entry->funcID = cpu_to_le16(BP_FUNC(bp));
		cur_query_entry->address.hi =
			cpu_to_le32(U64_HI(cur_data_offset));
		cur_query_entry->address.lo =
			cpu_to_le32(U64_LO(cur_data_offset));
	}

	/**** Clients' queries ****/
	cur_data_offset = bp->fw_stats_data_mapping +
		offsetof(struct bnx2x_fw_stats_data, queue_stats);

	/* The first queue query index depends on whether an FCoE offloaded
	 * request is included in the ramrod.
	 */
	if (!NO_FCOE(bp))
		first_queue_query_index = BNX2X_FIRST_QUEUE_QUERY_IDX;
	else
		first_queue_query_index = BNX2X_FIRST_QUEUE_QUERY_IDX - 1;

	for_each_eth_queue(bp, i) {
		cur_query_entry =
			&bp->fw_stats_req->
			query[first_queue_query_index + i];

		cur_query_entry->kind = STATS_TYPE_QUEUE;
		cur_query_entry->index = bnx2x_stats_id(&bp->fp[i]);
		cur_query_entry->funcID = cpu_to_le16(BP_FUNC(bp));
		cur_query_entry->address.hi =
			cpu_to_le32(U64_HI(cur_data_offset));
		cur_query_entry->address.lo =
			cpu_to_le32(U64_LO(cur_data_offset));

		cur_data_offset += sizeof(struct per_queue_stats);
	}

	/* add the FCoE queue query if needed */
	if (!NO_FCOE(bp)) {
		cur_query_entry =
			&bp->fw_stats_req->
			query[first_queue_query_index + i];

		cur_query_entry->kind = STATS_TYPE_QUEUE;
		cur_query_entry->index = bnx2x_stats_id(&bp->fp[FCOE_IDX(bp)]);
		cur_query_entry->funcID = cpu_to_le16(BP_FUNC(bp));
		cur_query_entry->address.hi =
			cpu_to_le32(U64_HI(cur_data_offset));
		cur_query_entry->address.lo =
			cpu_to_le32(U64_LO(cur_data_offset));
	}
}
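
/* Hedged sketch of the resulting query[] layout (indices inferred from the
 * BNX2X_*_QUERY_IDX constants used above, shown here with FCoE present and
 * n ethernet queues):
 *
 *	query[BNX2X_PORT_QUERY_IDX]              port stats
 *	query[BNX2X_PF_QUERY_IDX]                PF stats
 *	query[BNX2X_FCOE_QUERY_IDX]              FCoE function stats
 *	query[BNX2X_FIRST_QUEUE_QUERY_IDX + i]   i-th ethernet queue stats
 *	query[BNX2X_FIRST_QUEUE_QUERY_IDX + n]   FCoE queue stats
 *
 * Without FCoE, the queue entries simply shift down by one slot.
 */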

void bnx2x_memset_stats(struct bnx2x *bp)
{
	int i;

	/* function stats */
	for_each_queue(bp, i) {
		struct bnx2x_fp_stats *fp_stats = &bp->fp_stats[i];

		memset(&fp_stats->old_tclient, 0,
		       sizeof(fp_stats->old_tclient));
		memset(&fp_stats->old_uclient, 0,
		       sizeof(fp_stats->old_uclient));
		memset(&fp_stats->old_xclient, 0,
		       sizeof(fp_stats->old_xclient));
		if (bp->stats_init) {
			memset(&fp_stats->eth_q_stats, 0,
			       sizeof(fp_stats->eth_q_stats));
			memset(&fp_stats->eth_q_stats_old, 0,
			       sizeof(fp_stats->eth_q_stats_old));
		}
	}

	memset(&bp->dev->stats, 0, sizeof(bp->dev->stats));

	if (bp->stats_init) {
		memset(&bp->net_stats_old, 0, sizeof(bp->net_stats_old));
		memset(&bp->fw_stats_old, 0, sizeof(bp->fw_stats_old));
		memset(&bp->eth_stats_old, 0, sizeof(bp->eth_stats_old));
		memset(&bp->eth_stats, 0, sizeof(bp->eth_stats));
		memset(&bp->func_stats, 0, sizeof(bp->func_stats));
	}

	bp->stats_state = STATS_STATE_DISABLED;

	if (bp->port.pmf && bp->port.port_stx)
		bnx2x_port_stats_base_init(bp);

	/* mark the end of statistics initialization */
	bp->stats_init = false;
}

void bnx2x_stats_init(struct bnx2x *bp)
{
	int /*abs*/port = BP_PORT(bp);
	int mb_idx = BP_FW_MB_IDX(bp);

	bp->stats_pending = 0;
	bp->executer_idx = 0;
	bp->stats_counter = 0;

	/* port and func stats for management */
	if (!BP_NOMCP(bp)) {
		bp->port.port_stx = SHMEM_RD(bp, port_mb[port].port_stx);
		bp->func_stx = SHMEM_RD(bp, func_mb[mb_idx].fw_mb_param);

	} else {
		bp->port.port_stx = 0;
		bp->func_stx = 0;
	}
	DP(BNX2X_MSG_STATS, "port_stx 0x%x func_stx 0x%x\n",
	   bp->port.port_stx, bp->func_stx);

	/* the pmf should retrieve port statistics from SP on a non-init run */
	if (!bp->stats_init && bp->port.pmf && bp->port.port_stx)
		bnx2x_stats_handle(bp, STATS_EVENT_PMF);

	port = BP_PORT(bp);
	/* port stats */
	memset(&(bp->port.old_nig_stats), 0, sizeof(struct nig_stats));
	bp->port.old_nig_stats.brb_discard =
			REG_RD(bp, NIG_REG_STAT0_BRB_DISCARD + port*0x38);
	bp->port.old_nig_stats.brb_truncate =
			REG_RD(bp, NIG_REG_STAT0_BRB_TRUNCATE + port*0x38);
	if (!CHIP_IS_E3(bp)) {
		REG_RD_DMAE(bp, NIG_REG_STAT0_EGRESS_MAC_PKT0 + port*0x50,
			    &(bp->port.old_nig_stats.egress_mac_pkt0_lo), 2);
		REG_RD_DMAE(bp, NIG_REG_STAT0_EGRESS_MAC_PKT1 + port*0x50,
			    &(bp->port.old_nig_stats.egress_mac_pkt1_lo), 2);
	}

	/* Prepare statistics ramrod data */
	bnx2x_prep_fw_stats_req(bp);

	/* Clean SP from previous statistics */
	if (bp->stats_init) {
		if (bp->func_stx) {
			memset(bnx2x_sp(bp, func_stats), 0,
			       sizeof(struct host_func_stats));
			bnx2x_func_stats_init(bp);
			bnx2x_hw_stats_post(bp);
			bnx2x_stats_comp(bp);
		}
	}

	bnx2x_memset_stats(bp);
}

void bnx2x_save_statistics(struct bnx2x *bp)
{
	int i;
	struct net_device_stats *nstats = &bp->dev->stats;

	/* save queue statistics */
	for_each_eth_queue(bp, i) {
		struct bnx2x_fastpath *fp = &bp->fp[i];
		struct bnx2x_eth_q_stats *qstats =
			&bnx2x_fp_stats(bp, fp)->eth_q_stats;
		struct bnx2x_eth_q_stats_old *qstats_old =
			&bnx2x_fp_stats(bp, fp)->eth_q_stats_old;

		UPDATE_QSTAT_OLD(total_unicast_bytes_received_hi);
		UPDATE_QSTAT_OLD(total_unicast_bytes_received_lo);
		UPDATE_QSTAT_OLD(total_broadcast_bytes_received_hi);
		UPDATE_QSTAT_OLD(total_broadcast_bytes_received_lo);
		UPDATE_QSTAT_OLD(total_multicast_bytes_received_hi);
		UPDATE_QSTAT_OLD(total_multicast_bytes_received_lo);
		UPDATE_QSTAT_OLD(total_unicast_bytes_transmitted_hi);
		UPDATE_QSTAT_OLD(total_unicast_bytes_transmitted_lo);
		UPDATE_QSTAT_OLD(total_broadcast_bytes_transmitted_hi);
		UPDATE_QSTAT_OLD(total_broadcast_bytes_transmitted_lo);
		UPDATE_QSTAT_OLD(total_multicast_bytes_transmitted_hi);
		UPDATE_QSTAT_OLD(total_multicast_bytes_transmitted_lo);
		UPDATE_QSTAT_OLD(total_tpa_bytes_hi);
		UPDATE_QSTAT_OLD(total_tpa_bytes_lo);
	}

	/* save net_device_stats statistics */
	bp->net_stats_old.rx_dropped = nstats->rx_dropped;

	/* store port firmware statistics */
	if (bp->port.pmf && IS_MF(bp)) {
		struct bnx2x_eth_stats *estats = &bp->eth_stats;
		struct bnx2x_fw_port_stats_old *fwstats = &bp->fw_stats_old;

		UPDATE_FW_STAT_OLD(mac_filter_discard);
		UPDATE_FW_STAT_OLD(mf_tag_discard);
		UPDATE_FW_STAT_OLD(brb_truncate_discard);
		UPDATE_FW_STAT_OLD(mac_discard);
	}
}
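
/* The UPDATE_*_OLD() macros used above are defined in bnx2x_stats.h; as a
 * hedged sketch of their intent (not the verbatim definitions), they take a
 * plain snapshot into the *_old structures so the values survive a reload:
 *
 *	#define UPDATE_QSTAT_OLD(f)	(qstats_old->f = qstats->f)
 *	#define UPDATE_FW_STAT_OLD(f)	(fwstats->f = estats->f)
 */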

void bnx2x_afex_collect_stats(struct bnx2x *bp, void *void_afex_stats,
			      u32 stats_type)
{
	int i;
	struct afex_stats *afex_stats = (struct afex_stats *)void_afex_stats;
	struct bnx2x_eth_stats *estats = &bp->eth_stats;
	struct per_queue_stats *fcoe_q_stats =
		&bp->fw_stats_data->queue_stats[FCOE_IDX(bp)];

	struct tstorm_per_queue_stats *fcoe_q_tstorm_stats =
		&fcoe_q_stats->tstorm_queue_statistics;

	struct ustorm_per_queue_stats *fcoe_q_ustorm_stats =
		&fcoe_q_stats->ustorm_queue_statistics;

	struct xstorm_per_queue_stats *fcoe_q_xstorm_stats =
		&fcoe_q_stats->xstorm_queue_statistics;

	struct fcoe_statistics_params *fw_fcoe_stat =
		&bp->fw_stats_data->fcoe;

	memset(afex_stats, 0, sizeof(struct afex_stats));

	for_each_eth_queue(bp, i) {
		struct bnx2x_eth_q_stats *qstats = &bp->fp_stats[i].eth_q_stats;

		ADD_64(afex_stats->rx_unicast_bytes_hi,
		       qstats->total_unicast_bytes_received_hi,
		       afex_stats->rx_unicast_bytes_lo,
		       qstats->total_unicast_bytes_received_lo);

		ADD_64(afex_stats->rx_broadcast_bytes_hi,
		       qstats->total_broadcast_bytes_received_hi,
		       afex_stats->rx_broadcast_bytes_lo,
		       qstats->total_broadcast_bytes_received_lo);

		ADD_64(afex_stats->rx_multicast_bytes_hi,
		       qstats->total_multicast_bytes_received_hi,
		       afex_stats->rx_multicast_bytes_lo,
		       qstats->total_multicast_bytes_received_lo);

		ADD_64(afex_stats->rx_unicast_frames_hi,
		       qstats->total_unicast_packets_received_hi,
		       afex_stats->rx_unicast_frames_lo,
		       qstats->total_unicast_packets_received_lo);

		ADD_64(afex_stats->rx_broadcast_frames_hi,
		       qstats->total_broadcast_packets_received_hi,
		       afex_stats->rx_broadcast_frames_lo,
		       qstats->total_broadcast_packets_received_lo);

		ADD_64(afex_stats->rx_multicast_frames_hi,
		       qstats->total_multicast_packets_received_hi,
		       afex_stats->rx_multicast_frames_lo,
		       qstats->total_multicast_packets_received_lo);

		/* sum into rx_frames_discarded all packets discarded
		 * due to size, ttl0 and checksum
		 */
		ADD_64(afex_stats->rx_frames_discarded_hi,
		       qstats->total_packets_received_checksum_discarded_hi,
		       afex_stats->rx_frames_discarded_lo,
		       qstats->total_packets_received_checksum_discarded_lo);

		ADD_64(afex_stats->rx_frames_discarded_hi,
		       qstats->total_packets_received_ttl0_discarded_hi,
		       afex_stats->rx_frames_discarded_lo,
		       qstats->total_packets_received_ttl0_discarded_lo);

		ADD_64(afex_stats->rx_frames_discarded_hi,
		       qstats->etherstatsoverrsizepkts_hi,
		       afex_stats->rx_frames_discarded_lo,
		       qstats->etherstatsoverrsizepkts_lo);

		ADD_64(afex_stats->rx_frames_dropped_hi,
		       qstats->no_buff_discard_hi,
		       afex_stats->rx_frames_dropped_lo,
		       qstats->no_buff_discard_lo);

		ADD_64(afex_stats->tx_unicast_bytes_hi,
		       qstats->total_unicast_bytes_transmitted_hi,
		       afex_stats->tx_unicast_bytes_lo,
		       qstats->total_unicast_bytes_transmitted_lo);

		ADD_64(afex_stats->tx_broadcast_bytes_hi,
		       qstats->total_broadcast_bytes_transmitted_hi,
		       afex_stats->tx_broadcast_bytes_lo,
		       qstats->total_broadcast_bytes_transmitted_lo);

		ADD_64(afex_stats->tx_multicast_bytes_hi,
		       qstats->total_multicast_bytes_transmitted_hi,
		       afex_stats->tx_multicast_bytes_lo,
		       qstats->total_multicast_bytes_transmitted_lo);

		ADD_64(afex_stats->tx_unicast_frames_hi,
		       qstats->total_unicast_packets_transmitted_hi,
		       afex_stats->tx_unicast_frames_lo,
		       qstats->total_unicast_packets_transmitted_lo);

		ADD_64(afex_stats->tx_broadcast_frames_hi,
		       qstats->total_broadcast_packets_transmitted_hi,
		       afex_stats->tx_broadcast_frames_lo,
		       qstats->total_broadcast_packets_transmitted_lo);

		ADD_64(afex_stats->tx_multicast_frames_hi,
		       qstats->total_multicast_packets_transmitted_hi,
		       afex_stats->tx_multicast_frames_lo,
		       qstats->total_multicast_packets_transmitted_lo);

		ADD_64(afex_stats->tx_frames_dropped_hi,
		       qstats->total_transmitted_dropped_packets_error_hi,
		       afex_stats->tx_frames_dropped_lo,
		       qstats->total_transmitted_dropped_packets_error_lo);
	}

	/* now add FCoE statistics, which are collected separately
	 * (both offloaded and non-offloaded)
	 */
	if (!NO_FCOE(bp)) {
		ADD_64_LE(afex_stats->rx_unicast_bytes_hi,
			  LE32_0,
			  afex_stats->rx_unicast_bytes_lo,
			  fw_fcoe_stat->rx_stat0.fcoe_rx_byte_cnt);

		ADD_64_LE(afex_stats->rx_unicast_bytes_hi,
			  fcoe_q_tstorm_stats->rcv_ucast_bytes.hi,
			  afex_stats->rx_unicast_bytes_lo,
			  fcoe_q_tstorm_stats->rcv_ucast_bytes.lo);

		ADD_64_LE(afex_stats->rx_broadcast_bytes_hi,
			  fcoe_q_tstorm_stats->rcv_bcast_bytes.hi,
			  afex_stats->rx_broadcast_bytes_lo,
			  fcoe_q_tstorm_stats->rcv_bcast_bytes.lo);

		ADD_64_LE(afex_stats->rx_multicast_bytes_hi,
			  fcoe_q_tstorm_stats->rcv_mcast_bytes.hi,
			  afex_stats->rx_multicast_bytes_lo,
			  fcoe_q_tstorm_stats->rcv_mcast_bytes.lo);

		ADD_64_LE(afex_stats->rx_unicast_frames_hi,
			  LE32_0,
			  afex_stats->rx_unicast_frames_lo,
			  fw_fcoe_stat->rx_stat0.fcoe_rx_pkt_cnt);

		ADD_64_LE(afex_stats->rx_unicast_frames_hi,
			  LE32_0,
			  afex_stats->rx_unicast_frames_lo,
			  fcoe_q_tstorm_stats->rcv_ucast_pkts);

		ADD_64_LE(afex_stats->rx_broadcast_frames_hi,
			  LE32_0,
			  afex_stats->rx_broadcast_frames_lo,
			  fcoe_q_tstorm_stats->rcv_bcast_pkts);

		ADD_64_LE(afex_stats->rx_multicast_frames_hi,
			  LE32_0,
			  afex_stats->rx_multicast_frames_lo,
			  fcoe_q_tstorm_stats->rcv_mcast_pkts);

		ADD_64_LE(afex_stats->rx_frames_discarded_hi,
			  LE32_0,
			  afex_stats->rx_frames_discarded_lo,
			  fcoe_q_tstorm_stats->checksum_discard);

		ADD_64_LE(afex_stats->rx_frames_discarded_hi,
			  LE32_0,
			  afex_stats->rx_frames_discarded_lo,
			  fcoe_q_tstorm_stats->pkts_too_big_discard);

		ADD_64_LE(afex_stats->rx_frames_discarded_hi,
			  LE32_0,
			  afex_stats->rx_frames_discarded_lo,
			  fcoe_q_tstorm_stats->ttl0_discard);

		ADD_64_LE16(afex_stats->rx_frames_dropped_hi,
			    LE16_0,
			    afex_stats->rx_frames_dropped_lo,
			    fcoe_q_tstorm_stats->no_buff_discard);

		ADD_64_LE(afex_stats->rx_frames_dropped_hi,
			  LE32_0,
			  afex_stats->rx_frames_dropped_lo,
			  fcoe_q_ustorm_stats->ucast_no_buff_pkts);

		ADD_64_LE(afex_stats->rx_frames_dropped_hi,
			  LE32_0,
			  afex_stats->rx_frames_dropped_lo,
			  fcoe_q_ustorm_stats->mcast_no_buff_pkts);

		ADD_64_LE(afex_stats->rx_frames_dropped_hi,
			  LE32_0,
			  afex_stats->rx_frames_dropped_lo,
			  fcoe_q_ustorm_stats->bcast_no_buff_pkts);

		ADD_64_LE(afex_stats->rx_frames_dropped_hi,
			  LE32_0,
			  afex_stats->rx_frames_dropped_lo,
			  fw_fcoe_stat->rx_stat1.fcoe_rx_drop_pkt_cnt);

		ADD_64_LE(afex_stats->rx_frames_dropped_hi,
			  LE32_0,
			  afex_stats->rx_frames_dropped_lo,
			  fw_fcoe_stat->rx_stat2.fcoe_rx_drop_pkt_cnt);

		ADD_64_LE(afex_stats->tx_unicast_bytes_hi,
			  LE32_0,
			  afex_stats->tx_unicast_bytes_lo,
			  fw_fcoe_stat->tx_stat.fcoe_tx_byte_cnt);

		ADD_64_LE(afex_stats->tx_unicast_bytes_hi,
			  fcoe_q_xstorm_stats->ucast_bytes_sent.hi,
			  afex_stats->tx_unicast_bytes_lo,
			  fcoe_q_xstorm_stats->ucast_bytes_sent.lo);

		ADD_64_LE(afex_stats->tx_broadcast_bytes_hi,
			  fcoe_q_xstorm_stats->bcast_bytes_sent.hi,
			  afex_stats->tx_broadcast_bytes_lo,
			  fcoe_q_xstorm_stats->bcast_bytes_sent.lo);

		ADD_64_LE(afex_stats->tx_multicast_bytes_hi,
			  fcoe_q_xstorm_stats->mcast_bytes_sent.hi,
			  afex_stats->tx_multicast_bytes_lo,
			  fcoe_q_xstorm_stats->mcast_bytes_sent.lo);

		ADD_64_LE(afex_stats->tx_unicast_frames_hi,
			  LE32_0,
			  afex_stats->tx_unicast_frames_lo,
			  fw_fcoe_stat->tx_stat.fcoe_tx_pkt_cnt);

		ADD_64_LE(afex_stats->tx_unicast_frames_hi,
			  LE32_0,
			  afex_stats->tx_unicast_frames_lo,
			  fcoe_q_xstorm_stats->ucast_pkts_sent);

		ADD_64_LE(afex_stats->tx_broadcast_frames_hi,
			  LE32_0,
			  afex_stats->tx_broadcast_frames_lo,
			  fcoe_q_xstorm_stats->bcast_pkts_sent);

		ADD_64_LE(afex_stats->tx_multicast_frames_hi,
			  LE32_0,
			  afex_stats->tx_multicast_frames_lo,
			  fcoe_q_xstorm_stats->mcast_pkts_sent);

		ADD_64_LE(afex_stats->tx_frames_dropped_hi,
			  LE32_0,
			  afex_stats->tx_frames_dropped_lo,
			  fcoe_q_xstorm_stats->error_drop_pkts);
	}

	/* If port stats are requested, add them to the PMF stats, since
	 * they will anyway be accumulated by the MCP before being sent to
	 * the switch.
	 */
	if ((bp->port.pmf) && (stats_type == VICSTATST_UIF_INDEX)) {
		ADD_64(afex_stats->rx_frames_dropped_hi,
		       0,
		       afex_stats->rx_frames_dropped_lo,
		       estats->mac_filter_discard);
		ADD_64(afex_stats->rx_frames_dropped_hi,
		       0,
		       afex_stats->rx_frames_dropped_lo,
		       estats->brb_truncate_discard);
		ADD_64(afex_stats->rx_frames_discarded_hi,
		       0,
		       afex_stats->rx_frames_discarded_lo,
		       estats->mac_discard);
	}
}
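
/* The ADD_64()/ADD_64_LE() helpers used above come from the driver headers;
 * as a hedged sketch (not the verbatim macro), ADD_64 performs a 64-bit add
 * over split hi/lo 32-bit halves with a manual carry:
 *
 *	#define ADD_64(s_hi, a_hi, s_lo, a_lo) \
 *		do { \
 *			s_lo += a_lo; \
 *			s_hi += a_hi + ((s_lo < a_lo) ? 1 : 0); \
 *		} while (0)
 *
 * The _LE/_LE16 variants additionally convert little-endian firmware values
 * to host order before adding.
 */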

void bnx2x_stats_safe_exec(struct bnx2x *bp,
			   void (func_to_exec)(void *cookie),
			   void *cookie)
{
	if (down_timeout(&bp->stats_sema, HZ/10))
		BNX2X_ERR("Unable to acquire stats lock\n");
	bnx2x_stats_comp(bp);
	func_to_exec(cookie);
	__bnx2x_stats_start(bp);
	up(&bp->stats_sema);
}
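
/* Hedged usage sketch (the callback and cookie below are hypothetical, not
 * taken from the driver): a caller that must touch statistics state while
 * no stats DMAE/ramrod is in flight wraps the work in a callback and lets
 * bnx2x_stats_safe_exec() serialize it against the stats machinery:
 *
 *	static void my_stats_work(void *cookie)
 *	{
 *		struct bnx2x *bp = cookie;
 *		// safe to read/adjust stats buffers here
 *	}
 *	...
 *	bnx2x_stats_safe_exec(bp, my_stats_work, bp);
 */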