/* bnx2x_stats.c: Broadcom Everest network driver.
 *
 * Copyright (c) 2007-2013 Broadcom Corporation
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation.
 *
 * Maintained by: Ariel Elior <ariel.elior@qlogic.com>
 * Written by: Eliezer Tamir
 * Based on code from Michael Chan's bnx2 driver
 * UDP CSUM errata workaround by Arik Gendelman
 * Slowpath and fastpath rework by Vladislav Zolotarov
 * Statistics and Link management by Yitchak Gertner
 *
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include "bnx2x_stats.h"
#include "bnx2x_cmn.h"
#include "bnx2x_sriov.h"

/* Statistics */

/*
 * General service functions
 */

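/* Collapse a {hi, lo} statistics pair into one counter. 64-bit counters
 * are kept as two adjacent 32-bit words with the high word first, so
 * 'hiref' points at the high word and 'hiref + 1' at the low one. On
 * 64-bit kernels HILO_U64() merges the two words (presumably
 * ((u64)hi << 32) | lo); on 32-bit kernels only the low word is used.
 */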
static inline long bnx2x_hilo(u32 *hiref)
{
	u32 lo = *(hiref + 1);
#if (BITS_PER_LONG == 64)
	u32 hi = *hiref;

	return HILO_U64(hi, lo);
#else
	return lo;
#endif
}

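/* Length, in 32-bit dwords, of the host_port_stats block DMAed to the
 * MFW. Newer bootcodes publish the exact byte size through shmem2;
 * older ones only guarantee the fields up to 'not_used', plus the PFC
 * counters when BC_SUPPORTS_PFC_STATS is set.
 */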
static inline u16 bnx2x_get_port_stats_dma_len(struct bnx2x *bp)
{
	u16 res = 0;

	/* 'newest' convention - shmem2 contains the size of the port stats */
	if (SHMEM2_HAS(bp, sizeof_port_stats)) {
		u32 size = SHMEM2_RD(bp, sizeof_port_stats);
		if (size)
			res = size;

		/* prevent newer BC from causing buffer overflow */
		if (res > sizeof(struct host_port_stats))
			res = sizeof(struct host_port_stats);
	}

	/* Older convention - all BCs support the port stats' fields up until
	 * the 'not_used' field
	 */
	if (!res) {
		res = offsetof(struct host_port_stats, not_used) + 4;

		/* if PFC stats are supported by the MFW, DMA them as well */
		if (bp->flags & BC_SUPPORTS_PFC_STATS) {
			res += offsetof(struct host_port_stats,
					pfc_frames_rx_lo) -
			       offsetof(struct host_port_stats,
					pfc_frames_tx_hi) + 4;
		}
	}

	res >>= 2;

	WARN_ON(res > 2 * DMAE_LEN32_RD_MAX);
	return res;
}

/*
 * Init service functions
 */

static void bnx2x_dp_stats(struct bnx2x *bp)
{
	int i;

	DP(BNX2X_MSG_STATS, "dumping stats:\n"
	   "fw_stats_req\n"
	   "    hdr\n"
	   "        cmd_num %d\n"
	   "        reserved0 %d\n"
	   "        drv_stats_counter %d\n"
	   "        reserved1 %d\n"
	   "        stats_counters_addrs %x %x\n",
	   bp->fw_stats_req->hdr.cmd_num,
	   bp->fw_stats_req->hdr.reserved0,
	   bp->fw_stats_req->hdr.drv_stats_counter,
	   bp->fw_stats_req->hdr.reserved1,
	   bp->fw_stats_req->hdr.stats_counters_addrs.hi,
	   bp->fw_stats_req->hdr.stats_counters_addrs.lo);

	for (i = 0; i < bp->fw_stats_req->hdr.cmd_num; i++) {
		DP(BNX2X_MSG_STATS,
		   "query[%d]\n"
		   "    kind %d\n"
		   "    index %d\n"
		   "    funcID %d\n"
		   "    reserved %d\n"
		   "    address %x %x\n",
		   i, bp->fw_stats_req->query[i].kind,
		   bp->fw_stats_req->query[i].index,
		   bp->fw_stats_req->query[i].funcID,
		   bp->fw_stats_req->query[i].reserved,
		   bp->fw_stats_req->query[i].address.hi,
		   bp->fw_stats_req->query[i].address.lo);
	}
}

/* Post the next statistics ramrod. Protect it with the spin lock in
 * order to ensure the strict ordering between statistics ramrods
 * (each ramrod has a sequence number passed in
 * bp->fw_stats_req->hdr.drv_stats_counter and ramrods must be
 * sent in order).
 */
static void bnx2x_storm_stats_post(struct bnx2x *bp)
{
	int rc;

	if (bp->stats_pending)
		return;

	bp->fw_stats_req->hdr.drv_stats_counter =
		cpu_to_le16(bp->stats_counter++);

	DP(BNX2X_MSG_STATS, "Sending statistics ramrod %d\n",
	   le16_to_cpu(bp->fw_stats_req->hdr.drv_stats_counter));

	/* adjust the ramrod to include VF queues statistics */
	bnx2x_iov_adjust_stats_req(bp);
	bnx2x_dp_stats(bp);

	/* send FW stats ramrod */
	rc = bnx2x_sp_post(bp, RAMROD_CMD_ID_COMMON_STAT_QUERY, 0,
			   U64_HI(bp->fw_stats_req_mapping),
			   U64_LO(bp->fw_stats_req_mapping),
			   NONE_CONNECTION_TYPE);
	if (rc == 0)
		bp->stats_pending = 1;
}

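/* Execute the DMAE commands prepared by the *_stats_init() functions.
 * When several commands are queued (bp->executer_idx != 0), a "loader"
 * command is posted first; each queued command completes to a
 * dmae_reg_go_c[] GRC register, which triggers the next command in the
 * chain, and only the last one completes to the 'stats_comp' word in
 * host memory. A lone function-stats command is issued synchronously
 * with a PCI completion instead.
 */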
static void bnx2x_hw_stats_post(struct bnx2x *bp)
{
	struct dmae_command *dmae = &bp->stats_dmae;
	u32 *stats_comp = bnx2x_sp(bp, stats_comp);

	*stats_comp = DMAE_COMP_VAL;
	if (CHIP_REV_IS_SLOW(bp))
		return;

	/* Update MCP's statistics if possible */
	if (bp->func_stx)
		memcpy(bnx2x_sp(bp, func_stats), &bp->func_stats,
		       sizeof(bp->func_stats));

	/* loader */
	if (bp->executer_idx) {
		int loader_idx = PMF_DMAE_C(bp);
		u32 opcode = bnx2x_dmae_opcode(bp, DMAE_SRC_PCI, DMAE_DST_GRC,
					       true, DMAE_COMP_GRC);
		opcode = bnx2x_dmae_opcode_clr_src_reset(opcode);

		memset(dmae, 0, sizeof(struct dmae_command));
		dmae->opcode = opcode;
		dmae->src_addr_lo = U64_LO(bnx2x_sp_mapping(bp, dmae[0]));
		dmae->src_addr_hi = U64_HI(bnx2x_sp_mapping(bp, dmae[0]));
		dmae->dst_addr_lo = (DMAE_REG_CMD_MEM +
				     sizeof(struct dmae_command) *
				     (loader_idx + 1)) >> 2;
		dmae->dst_addr_hi = 0;
		dmae->len = sizeof(struct dmae_command) >> 2;
		if (CHIP_IS_E1(bp))
			dmae->len--;
		dmae->comp_addr_lo = dmae_reg_go_c[loader_idx + 1] >> 2;
		dmae->comp_addr_hi = 0;
		dmae->comp_val = 1;

		*stats_comp = 0;
		bnx2x_post_dmae(bp, dmae, loader_idx);

	} else if (bp->func_stx) {
		*stats_comp = 0;
		bnx2x_issue_dmae_with_comp(bp, dmae, stats_comp);
	}
}

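/* Wait for a posted DMAE chain to finish: poll the 'stats_comp' word
 * for DMAE_COMP_VAL, sleeping 1-2 ms between tries and giving up, with
 * an error log rather than a hard failure, after 10 tries.
 */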
static void bnx2x_stats_comp(struct bnx2x *bp)
{
	u32 *stats_comp = bnx2x_sp(bp, stats_comp);
	int cnt = 10;

	might_sleep();
	while (*stats_comp != DMAE_COMP_VAL) {
		if (!cnt) {
			BNX2X_ERR("timeout waiting for stats finished\n");
			break;
		}
		cnt--;
		usleep_range(1000, 2000);
	}
}

/*
 * Statistics service functions
 */

/* should be called under stats_sema */
static void bnx2x_stats_pmf_update(struct bnx2x *bp)
{
	struct dmae_command *dmae;
	u32 opcode;
	int loader_idx = PMF_DMAE_C(bp);
	u32 *stats_comp = bnx2x_sp(bp, stats_comp);

	/* sanity */
	if (!bp->port.pmf || !bp->port.port_stx) {
		BNX2X_ERR("BUG!\n");
		return;
	}

	bp->executer_idx = 0;

	opcode = bnx2x_dmae_opcode(bp, DMAE_SRC_GRC, DMAE_DST_PCI, false, 0);

	dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
	dmae->opcode = bnx2x_dmae_opcode_add_comp(opcode, DMAE_COMP_GRC);
	dmae->src_addr_lo = bp->port.port_stx >> 2;
	dmae->src_addr_hi = 0;
	dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, port_stats));
	dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, port_stats));
	dmae->len = DMAE_LEN32_RD_MAX;
	dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
	dmae->comp_addr_hi = 0;
	dmae->comp_val = 1;

	dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
	dmae->opcode = bnx2x_dmae_opcode_add_comp(opcode, DMAE_COMP_PCI);
	dmae->src_addr_lo = (bp->port.port_stx >> 2) + DMAE_LEN32_RD_MAX;
	dmae->src_addr_hi = 0;
	dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, port_stats) +
				   DMAE_LEN32_RD_MAX * 4);
	dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, port_stats) +
				   DMAE_LEN32_RD_MAX * 4);
	dmae->len = bnx2x_get_port_stats_dma_len(bp) - DMAE_LEN32_RD_MAX;

	dmae->comp_addr_lo = U64_LO(bnx2x_sp_mapping(bp, stats_comp));
	dmae->comp_addr_hi = U64_HI(bnx2x_sp_mapping(bp, stats_comp));
	dmae->comp_val = DMAE_COMP_VAL;

	*stats_comp = 0;
	bnx2x_hw_stats_post(bp);
	bnx2x_stats_comp(bp);
}

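/* Queue the DMAE commands a PMF needs on every statistics tick: host
 * port/function stats out to the MCP areas, and the active MAC's
 * counters (EMAC, BMAC or the E3 MSTAT block) plus the NIG counters
 * into the slowpath buffers. The commands are only prepared here; they
 * are executed by bnx2x_hw_stats_post().
 */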
static void bnx2x_port_stats_init(struct bnx2x *bp)
{
	struct dmae_command *dmae;
	int port = BP_PORT(bp);
	u32 opcode;
	int loader_idx = PMF_DMAE_C(bp);
	u32 mac_addr;
	u32 *stats_comp = bnx2x_sp(bp, stats_comp);

	/* sanity */
	if (!bp->link_vars.link_up || !bp->port.pmf) {
		BNX2X_ERR("BUG!\n");
		return;
	}

	bp->executer_idx = 0;

	/* MCP */
	opcode = bnx2x_dmae_opcode(bp, DMAE_SRC_PCI, DMAE_DST_GRC,
				   true, DMAE_COMP_GRC);

	if (bp->port.port_stx) {

		dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
		dmae->opcode = opcode;
		dmae->src_addr_lo = U64_LO(bnx2x_sp_mapping(bp, port_stats));
		dmae->src_addr_hi = U64_HI(bnx2x_sp_mapping(bp, port_stats));
		dmae->dst_addr_lo = bp->port.port_stx >> 2;
		dmae->dst_addr_hi = 0;
		dmae->len = bnx2x_get_port_stats_dma_len(bp);
		dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
		dmae->comp_addr_hi = 0;
		dmae->comp_val = 1;
	}

	if (bp->func_stx) {

		dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
		dmae->opcode = opcode;
		dmae->src_addr_lo = U64_LO(bnx2x_sp_mapping(bp, func_stats));
		dmae->src_addr_hi = U64_HI(bnx2x_sp_mapping(bp, func_stats));
		dmae->dst_addr_lo = bp->func_stx >> 2;
		dmae->dst_addr_hi = 0;
		dmae->len = sizeof(struct host_func_stats) >> 2;
		dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
		dmae->comp_addr_hi = 0;
		dmae->comp_val = 1;
	}

	/* MAC */
	opcode = bnx2x_dmae_opcode(bp, DMAE_SRC_GRC, DMAE_DST_PCI,
				   true, DMAE_COMP_GRC);

	/* EMAC is special */
	if (bp->link_vars.mac_type == MAC_TYPE_EMAC) {
		mac_addr = (port ? GRCBASE_EMAC1 : GRCBASE_EMAC0);

		/* EMAC_REG_EMAC_RX_STAT_AC (EMAC_REG_EMAC_RX_STAT_AC_COUNT)*/
		dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
		dmae->opcode = opcode;
		dmae->src_addr_lo = (mac_addr +
				     EMAC_REG_EMAC_RX_STAT_AC) >> 2;
		dmae->src_addr_hi = 0;
		dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, mac_stats));
		dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, mac_stats));
		dmae->len = EMAC_REG_EMAC_RX_STAT_AC_COUNT;
		dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
		dmae->comp_addr_hi = 0;
		dmae->comp_val = 1;

		/* EMAC_REG_EMAC_RX_STAT_AC_28 */
		dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
		dmae->opcode = opcode;
		dmae->src_addr_lo = (mac_addr +
				     EMAC_REG_EMAC_RX_STAT_AC_28) >> 2;
		dmae->src_addr_hi = 0;
		dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, mac_stats) +
		     offsetof(struct emac_stats, rx_stat_falsecarriererrors));
		dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, mac_stats) +
		     offsetof(struct emac_stats, rx_stat_falsecarriererrors));
		dmae->len = 1;
		dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
		dmae->comp_addr_hi = 0;
		dmae->comp_val = 1;

		/* EMAC_REG_EMAC_TX_STAT_AC (EMAC_REG_EMAC_TX_STAT_AC_COUNT)*/
		dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
		dmae->opcode = opcode;
		dmae->src_addr_lo = (mac_addr +
				     EMAC_REG_EMAC_TX_STAT_AC) >> 2;
		dmae->src_addr_hi = 0;
		dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, mac_stats) +
			offsetof(struct emac_stats, tx_stat_ifhcoutoctets));
		dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, mac_stats) +
			offsetof(struct emac_stats, tx_stat_ifhcoutoctets));
		dmae->len = EMAC_REG_EMAC_TX_STAT_AC_COUNT;
		dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
		dmae->comp_addr_hi = 0;
		dmae->comp_val = 1;
	} else {
		u32 tx_src_addr_lo, rx_src_addr_lo;
		u16 rx_len, tx_len;

		/* configure the params according to MAC type */
		switch (bp->link_vars.mac_type) {
		case MAC_TYPE_BMAC:
			mac_addr = (port ? NIG_REG_INGRESS_BMAC1_MEM :
					   NIG_REG_INGRESS_BMAC0_MEM);

			/* BIGMAC_REGISTER_TX_STAT_GTPKT ..
			   BIGMAC_REGISTER_TX_STAT_GTBYT */
			if (CHIP_IS_E1x(bp)) {
				tx_src_addr_lo = (mac_addr +
					BIGMAC_REGISTER_TX_STAT_GTPKT) >> 2;
				tx_len = (8 + BIGMAC_REGISTER_TX_STAT_GTBYT -
					  BIGMAC_REGISTER_TX_STAT_GTPKT) >> 2;
				rx_src_addr_lo = (mac_addr +
					BIGMAC_REGISTER_RX_STAT_GR64) >> 2;
				rx_len = (8 + BIGMAC_REGISTER_RX_STAT_GRIPJ -
					  BIGMAC_REGISTER_RX_STAT_GR64) >> 2;
			} else {
				tx_src_addr_lo = (mac_addr +
					BIGMAC2_REGISTER_TX_STAT_GTPOK) >> 2;
				tx_len = (8 + BIGMAC2_REGISTER_TX_STAT_GTBYT -
					  BIGMAC2_REGISTER_TX_STAT_GTPOK) >> 2;
				rx_src_addr_lo = (mac_addr +
					BIGMAC2_REGISTER_RX_STAT_GR64) >> 2;
				rx_len = (8 + BIGMAC2_REGISTER_RX_STAT_GRIPJ -
					  BIGMAC2_REGISTER_RX_STAT_GR64) >> 2;
			}
			break;

		case MAC_TYPE_UMAC: /* handled by MSTAT */
		case MAC_TYPE_XMAC: /* handled by MSTAT */
		default:
			mac_addr = port ? GRCBASE_MSTAT1 : GRCBASE_MSTAT0;
			tx_src_addr_lo = (mac_addr +
					  MSTAT_REG_TX_STAT_GTXPOK_LO) >> 2;
			rx_src_addr_lo = (mac_addr +
					  MSTAT_REG_RX_STAT_GR64_LO) >> 2;
			tx_len = sizeof(bp->slowpath->
					mac_stats.mstat_stats.stats_tx) >> 2;
			rx_len = sizeof(bp->slowpath->
					mac_stats.mstat_stats.stats_rx) >> 2;
			break;
		}

		/* TX stats */
		dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
		dmae->opcode = opcode;
		dmae->src_addr_lo = tx_src_addr_lo;
		dmae->src_addr_hi = 0;
		dmae->len = tx_len;
		dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, mac_stats));
		dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, mac_stats));
		dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
		dmae->comp_addr_hi = 0;
		dmae->comp_val = 1;

		/* RX stats */
		dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
		dmae->opcode = opcode;
		dmae->src_addr_hi = 0;
		dmae->src_addr_lo = rx_src_addr_lo;
		dmae->dst_addr_lo =
			U64_LO(bnx2x_sp_mapping(bp, mac_stats) + (tx_len << 2));
		dmae->dst_addr_hi =
			U64_HI(bnx2x_sp_mapping(bp, mac_stats) + (tx_len << 2));
		dmae->len = rx_len;
		dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
		dmae->comp_addr_hi = 0;
		dmae->comp_val = 1;
	}

	/* NIG */
	if (!CHIP_IS_E3(bp)) {
		dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
		dmae->opcode = opcode;
		dmae->src_addr_lo = (port ? NIG_REG_STAT1_EGRESS_MAC_PKT0 :
					    NIG_REG_STAT0_EGRESS_MAC_PKT0) >> 2;
		dmae->src_addr_hi = 0;
		dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, nig_stats) +
				offsetof(struct nig_stats, egress_mac_pkt0_lo));
		dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, nig_stats) +
				offsetof(struct nig_stats, egress_mac_pkt0_lo));
		dmae->len = (2*sizeof(u32)) >> 2;
		dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
		dmae->comp_addr_hi = 0;
		dmae->comp_val = 1;

		dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
		dmae->opcode = opcode;
		dmae->src_addr_lo = (port ? NIG_REG_STAT1_EGRESS_MAC_PKT1 :
					    NIG_REG_STAT0_EGRESS_MAC_PKT1) >> 2;
		dmae->src_addr_hi = 0;
		dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, nig_stats) +
				offsetof(struct nig_stats, egress_mac_pkt1_lo));
		dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, nig_stats) +
				offsetof(struct nig_stats, egress_mac_pkt1_lo));
		dmae->len = (2*sizeof(u32)) >> 2;
		dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
		dmae->comp_addr_hi = 0;
		dmae->comp_val = 1;
	}

	dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
	dmae->opcode = bnx2x_dmae_opcode(bp, DMAE_SRC_GRC, DMAE_DST_PCI,
					 true, DMAE_COMP_PCI);
	dmae->src_addr_lo = (port ? NIG_REG_STAT1_BRB_DISCARD :
				    NIG_REG_STAT0_BRB_DISCARD) >> 2;
	dmae->src_addr_hi = 0;
	dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, nig_stats));
	dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, nig_stats));
	dmae->len = (sizeof(struct nig_stats) - 4*sizeof(u32)) >> 2;

	dmae->comp_addr_lo = U64_LO(bnx2x_sp_mapping(bp, stats_comp));
	dmae->comp_addr_hi = U64_HI(bnx2x_sp_mapping(bp, stats_comp));
	dmae->comp_val = DMAE_COMP_VAL;

	*stats_comp = 0;
}

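/* Non-PMF path (see bnx2x_stats_start()): only the function statistics
 * are published to the MCP, via a single DMAE with a PCI completion.
 */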
static void bnx2x_func_stats_init(struct bnx2x *bp)
{
	struct dmae_command *dmae = &bp->stats_dmae;
	u32 *stats_comp = bnx2x_sp(bp, stats_comp);

	/* sanity */
	if (!bp->func_stx) {
		BNX2X_ERR("BUG!\n");
		return;
	}

	bp->executer_idx = 0;
	memset(dmae, 0, sizeof(struct dmae_command));

	dmae->opcode = bnx2x_dmae_opcode(bp, DMAE_SRC_PCI, DMAE_DST_GRC,
					 true, DMAE_COMP_PCI);
	dmae->src_addr_lo = U64_LO(bnx2x_sp_mapping(bp, func_stats));
	dmae->src_addr_hi = U64_HI(bnx2x_sp_mapping(bp, func_stats));
	dmae->dst_addr_lo = bp->func_stx >> 2;
	dmae->dst_addr_hi = 0;
	dmae->len = sizeof(struct host_func_stats) >> 2;
	dmae->comp_addr_lo = U64_LO(bnx2x_sp_mapping(bp, stats_comp));
	dmae->comp_addr_hi = U64_HI(bnx2x_sp_mapping(bp, stats_comp));
	dmae->comp_val = DMAE_COMP_VAL;

	*stats_comp = 0;
}

/* should be called under stats_sema */
static void bnx2x_stats_start(struct bnx2x *bp)
{
	if (IS_PF(bp)) {
		if (bp->port.pmf)
			bnx2x_port_stats_init(bp);
		else if (bp->func_stx)
			bnx2x_func_stats_init(bp);

		bnx2x_hw_stats_post(bp);
		bnx2x_storm_stats_post(bp);
	}
}

static void bnx2x_stats_pmf_start(struct bnx2x *bp)
{
	bnx2x_stats_comp(bp);
	bnx2x_stats_pmf_update(bp);
	bnx2x_stats_start(bp);
}

static void bnx2x_stats_restart(struct bnx2x *bp)
{
	/* vfs travel through here as part of the statistics FSM, but no action
	 * is required
	 */
	if (IS_VF(bp))
		return;

	bnx2x_stats_comp(bp);
	bnx2x_stats_start(bp);
}

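/* Fold the BMAC1/BMAC2 counters DMAed into the slowpath buffer into the
 * port and driver stats. The UPDATE_STAT64() macros implicitly use the
 * local 'new' (current snapshot), 'pstats' and 'diff' variables, which
 * is why each chip branch declares its own typed 'new'.
 */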
static void bnx2x_bmac_stats_update(struct bnx2x *bp)
{
	struct host_port_stats *pstats = bnx2x_sp(bp, port_stats);
	struct bnx2x_eth_stats *estats = &bp->eth_stats;
	struct {
		u32 lo;
		u32 hi;
	} diff;

	if (CHIP_IS_E1x(bp)) {
		struct bmac1_stats *new = bnx2x_sp(bp, mac_stats.bmac1_stats);

		/* the macros below will use "bmac1_stats" type */
		UPDATE_STAT64(rx_stat_grerb, rx_stat_ifhcinbadoctets);
		UPDATE_STAT64(rx_stat_grfcs, rx_stat_dot3statsfcserrors);
		UPDATE_STAT64(rx_stat_grund, rx_stat_etherstatsundersizepkts);
		UPDATE_STAT64(rx_stat_grovr, rx_stat_dot3statsframestoolong);
		UPDATE_STAT64(rx_stat_grfrg, rx_stat_etherstatsfragments);
		UPDATE_STAT64(rx_stat_grjbr, rx_stat_etherstatsjabbers);
		UPDATE_STAT64(rx_stat_grxcf, rx_stat_maccontrolframesreceived);
		UPDATE_STAT64(rx_stat_grxpf, rx_stat_xoffstateentered);
		UPDATE_STAT64(rx_stat_grxpf, rx_stat_mac_xpf);

		UPDATE_STAT64(tx_stat_gtxpf, tx_stat_outxoffsent);
		UPDATE_STAT64(tx_stat_gtxpf, tx_stat_flowcontroldone);
		UPDATE_STAT64(tx_stat_gt64, tx_stat_etherstatspkts64octets);
		UPDATE_STAT64(tx_stat_gt127,
				tx_stat_etherstatspkts65octetsto127octets);
		UPDATE_STAT64(tx_stat_gt255,
				tx_stat_etherstatspkts128octetsto255octets);
		UPDATE_STAT64(tx_stat_gt511,
				tx_stat_etherstatspkts256octetsto511octets);
		UPDATE_STAT64(tx_stat_gt1023,
				tx_stat_etherstatspkts512octetsto1023octets);
		UPDATE_STAT64(tx_stat_gt1518,
				tx_stat_etherstatspkts1024octetsto1522octets);
		UPDATE_STAT64(tx_stat_gt2047, tx_stat_mac_2047);
		UPDATE_STAT64(tx_stat_gt4095, tx_stat_mac_4095);
		UPDATE_STAT64(tx_stat_gt9216, tx_stat_mac_9216);
		UPDATE_STAT64(tx_stat_gt16383, tx_stat_mac_16383);
		UPDATE_STAT64(tx_stat_gterr,
				tx_stat_dot3statsinternalmactransmiterrors);
		UPDATE_STAT64(tx_stat_gtufl, tx_stat_mac_ufl);

	} else {
		struct bmac2_stats *new = bnx2x_sp(bp, mac_stats.bmac2_stats);

		/* the macros below will use "bmac2_stats" type */
		UPDATE_STAT64(rx_stat_grerb, rx_stat_ifhcinbadoctets);
		UPDATE_STAT64(rx_stat_grfcs, rx_stat_dot3statsfcserrors);
		UPDATE_STAT64(rx_stat_grund, rx_stat_etherstatsundersizepkts);
		UPDATE_STAT64(rx_stat_grovr, rx_stat_dot3statsframestoolong);
		UPDATE_STAT64(rx_stat_grfrg, rx_stat_etherstatsfragments);
		UPDATE_STAT64(rx_stat_grjbr, rx_stat_etherstatsjabbers);
		UPDATE_STAT64(rx_stat_grxcf, rx_stat_maccontrolframesreceived);
		UPDATE_STAT64(rx_stat_grxpf, rx_stat_xoffstateentered);
		UPDATE_STAT64(rx_stat_grxpf, rx_stat_mac_xpf);
		UPDATE_STAT64(tx_stat_gtxpf, tx_stat_outxoffsent);
		UPDATE_STAT64(tx_stat_gtxpf, tx_stat_flowcontroldone);
		UPDATE_STAT64(tx_stat_gt64, tx_stat_etherstatspkts64octets);
		UPDATE_STAT64(tx_stat_gt127,
				tx_stat_etherstatspkts65octetsto127octets);
		UPDATE_STAT64(tx_stat_gt255,
				tx_stat_etherstatspkts128octetsto255octets);
		UPDATE_STAT64(tx_stat_gt511,
				tx_stat_etherstatspkts256octetsto511octets);
		UPDATE_STAT64(tx_stat_gt1023,
				tx_stat_etherstatspkts512octetsto1023octets);
		UPDATE_STAT64(tx_stat_gt1518,
				tx_stat_etherstatspkts1024octetsto1522octets);
		UPDATE_STAT64(tx_stat_gt2047, tx_stat_mac_2047);
		UPDATE_STAT64(tx_stat_gt4095, tx_stat_mac_4095);
		UPDATE_STAT64(tx_stat_gt9216, tx_stat_mac_9216);
		UPDATE_STAT64(tx_stat_gt16383, tx_stat_mac_16383);
		UPDATE_STAT64(tx_stat_gterr,
				tx_stat_dot3statsinternalmactransmiterrors);
		UPDATE_STAT64(tx_stat_gtufl, tx_stat_mac_ufl);

		/* collect PFC stats */
		pstats->pfc_frames_tx_hi = new->tx_stat_gtpp_hi;
		pstats->pfc_frames_tx_lo = new->tx_stat_gtpp_lo;

		pstats->pfc_frames_rx_hi = new->rx_stat_grpp_hi;
		pstats->pfc_frames_rx_lo = new->rx_stat_grpp_lo;
	}

	estats->pause_frames_received_hi =
				pstats->mac_stx[1].rx_stat_mac_xpf_hi;
	estats->pause_frames_received_lo =
				pstats->mac_stx[1].rx_stat_mac_xpf_lo;

	estats->pause_frames_sent_hi =
				pstats->mac_stx[1].tx_stat_outxoffsent_hi;
	estats->pause_frames_sent_lo =
				pstats->mac_stx[1].tx_stat_outxoffsent_lo;

	estats->pfc_frames_received_hi =
				pstats->pfc_frames_rx_hi;
	estats->pfc_frames_received_lo =
				pstats->pfc_frames_rx_lo;
	estats->pfc_frames_sent_hi =
				pstats->pfc_frames_tx_hi;
	estats->pfc_frames_sent_lo =
				pstats->pfc_frames_tx_lo;
}

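/* MSTAT (E3 UMAC/XMAC) variant of the MAC statistics fold. The
 * ADD_STAT64() macros accumulate the DMAed values instead of diffing
 * against a previous snapshot the way UPDATE_STAT64() does, presumably
 * because the MSTAT counters reset when read out.
 */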
static void bnx2x_mstat_stats_update(struct bnx2x *bp)
{
	struct host_port_stats *pstats = bnx2x_sp(bp, port_stats);
	struct bnx2x_eth_stats *estats = &bp->eth_stats;

	struct mstat_stats *new = bnx2x_sp(bp, mac_stats.mstat_stats);

	ADD_STAT64(stats_rx.rx_grerb, rx_stat_ifhcinbadoctets);
	ADD_STAT64(stats_rx.rx_grfcs, rx_stat_dot3statsfcserrors);
	ADD_STAT64(stats_rx.rx_grund, rx_stat_etherstatsundersizepkts);
	ADD_STAT64(stats_rx.rx_grovr, rx_stat_dot3statsframestoolong);
	ADD_STAT64(stats_rx.rx_grfrg, rx_stat_etherstatsfragments);
	ADD_STAT64(stats_rx.rx_grxcf, rx_stat_maccontrolframesreceived);
	ADD_STAT64(stats_rx.rx_grxpf, rx_stat_xoffstateentered);
	ADD_STAT64(stats_rx.rx_grxpf, rx_stat_mac_xpf);
	ADD_STAT64(stats_tx.tx_gtxpf, tx_stat_outxoffsent);
	ADD_STAT64(stats_tx.tx_gtxpf, tx_stat_flowcontroldone);

	/* collect pfc stats */
	ADD_64(pstats->pfc_frames_tx_hi, new->stats_tx.tx_gtxpp_hi,
	       pstats->pfc_frames_tx_lo, new->stats_tx.tx_gtxpp_lo);
	ADD_64(pstats->pfc_frames_rx_hi, new->stats_rx.rx_grxpp_hi,
	       pstats->pfc_frames_rx_lo, new->stats_rx.rx_grxpp_lo);

	ADD_STAT64(stats_tx.tx_gt64, tx_stat_etherstatspkts64octets);
	ADD_STAT64(stats_tx.tx_gt127,
			tx_stat_etherstatspkts65octetsto127octets);
	ADD_STAT64(stats_tx.tx_gt255,
			tx_stat_etherstatspkts128octetsto255octets);
	ADD_STAT64(stats_tx.tx_gt511,
			tx_stat_etherstatspkts256octetsto511octets);
	ADD_STAT64(stats_tx.tx_gt1023,
			tx_stat_etherstatspkts512octetsto1023octets);
	ADD_STAT64(stats_tx.tx_gt1518,
			tx_stat_etherstatspkts1024octetsto1522octets);
	ADD_STAT64(stats_tx.tx_gt2047, tx_stat_mac_2047);

	ADD_STAT64(stats_tx.tx_gt4095, tx_stat_mac_4095);
	ADD_STAT64(stats_tx.tx_gt9216, tx_stat_mac_9216);
	ADD_STAT64(stats_tx.tx_gt16383, tx_stat_mac_16383);

	ADD_STAT64(stats_tx.tx_gterr,
			tx_stat_dot3statsinternalmactransmiterrors);
	ADD_STAT64(stats_tx.tx_gtufl, tx_stat_mac_ufl);

	estats->etherstatspkts1024octetsto1522octets_hi =
	    pstats->mac_stx[1].tx_stat_etherstatspkts1024octetsto1522octets_hi;
	estats->etherstatspkts1024octetsto1522octets_lo =
	    pstats->mac_stx[1].tx_stat_etherstatspkts1024octetsto1522octets_lo;

	estats->etherstatspktsover1522octets_hi =
	    pstats->mac_stx[1].tx_stat_mac_2047_hi;
	estats->etherstatspktsover1522octets_lo =
	    pstats->mac_stx[1].tx_stat_mac_2047_lo;

	ADD_64(estats->etherstatspktsover1522octets_hi,
	       pstats->mac_stx[1].tx_stat_mac_4095_hi,
	       estats->etherstatspktsover1522octets_lo,
	       pstats->mac_stx[1].tx_stat_mac_4095_lo);

	ADD_64(estats->etherstatspktsover1522octets_hi,
	       pstats->mac_stx[1].tx_stat_mac_9216_hi,
	       estats->etherstatspktsover1522octets_lo,
	       pstats->mac_stx[1].tx_stat_mac_9216_lo);

	ADD_64(estats->etherstatspktsover1522octets_hi,
	       pstats->mac_stx[1].tx_stat_mac_16383_hi,
	       estats->etherstatspktsover1522octets_lo,
	       pstats->mac_stx[1].tx_stat_mac_16383_lo);

	estats->pause_frames_received_hi =
				pstats->mac_stx[1].rx_stat_mac_xpf_hi;
	estats->pause_frames_received_lo =
				pstats->mac_stx[1].rx_stat_mac_xpf_lo;

	estats->pause_frames_sent_hi =
				pstats->mac_stx[1].tx_stat_outxoffsent_hi;
	estats->pause_frames_sent_lo =
				pstats->mac_stx[1].tx_stat_outxoffsent_lo;

	estats->pfc_frames_received_hi =
				pstats->pfc_frames_rx_hi;
	estats->pfc_frames_received_lo =
				pstats->pfc_frames_rx_lo;
	estats->pfc_frames_sent_hi =
				pstats->pfc_frames_tx_hi;
	estats->pfc_frames_sent_lo =
				pstats->pfc_frames_tx_lo;
}

static void bnx2x_emac_stats_update(struct bnx2x *bp)
{
	struct emac_stats *new = bnx2x_sp(bp, mac_stats.emac_stats);
	struct host_port_stats *pstats = bnx2x_sp(bp, port_stats);
	struct bnx2x_eth_stats *estats = &bp->eth_stats;

	UPDATE_EXTEND_STAT(rx_stat_ifhcinbadoctets);
	UPDATE_EXTEND_STAT(tx_stat_ifhcoutbadoctets);
	UPDATE_EXTEND_STAT(rx_stat_dot3statsfcserrors);
	UPDATE_EXTEND_STAT(rx_stat_dot3statsalignmenterrors);
	UPDATE_EXTEND_STAT(rx_stat_dot3statscarriersenseerrors);
	UPDATE_EXTEND_STAT(rx_stat_falsecarriererrors);
	UPDATE_EXTEND_STAT(rx_stat_etherstatsundersizepkts);
	UPDATE_EXTEND_STAT(rx_stat_dot3statsframestoolong);
	UPDATE_EXTEND_STAT(rx_stat_etherstatsfragments);
	UPDATE_EXTEND_STAT(rx_stat_etherstatsjabbers);
	UPDATE_EXTEND_STAT(rx_stat_maccontrolframesreceived);
	UPDATE_EXTEND_STAT(rx_stat_xoffstateentered);
	UPDATE_EXTEND_STAT(rx_stat_xonpauseframesreceived);
	UPDATE_EXTEND_STAT(rx_stat_xoffpauseframesreceived);
	UPDATE_EXTEND_STAT(tx_stat_outxonsent);
	UPDATE_EXTEND_STAT(tx_stat_outxoffsent);
	UPDATE_EXTEND_STAT(tx_stat_flowcontroldone);
	UPDATE_EXTEND_STAT(tx_stat_etherstatscollisions);
	UPDATE_EXTEND_STAT(tx_stat_dot3statssinglecollisionframes);
	UPDATE_EXTEND_STAT(tx_stat_dot3statsmultiplecollisionframes);
	UPDATE_EXTEND_STAT(tx_stat_dot3statsdeferredtransmissions);
	UPDATE_EXTEND_STAT(tx_stat_dot3statsexcessivecollisions);
	UPDATE_EXTEND_STAT(tx_stat_dot3statslatecollisions);
	UPDATE_EXTEND_STAT(tx_stat_etherstatspkts64octets);
	UPDATE_EXTEND_STAT(tx_stat_etherstatspkts65octetsto127octets);
	UPDATE_EXTEND_STAT(tx_stat_etherstatspkts128octetsto255octets);
	UPDATE_EXTEND_STAT(tx_stat_etherstatspkts256octetsto511octets);
	UPDATE_EXTEND_STAT(tx_stat_etherstatspkts512octetsto1023octets);
	UPDATE_EXTEND_STAT(tx_stat_etherstatspkts1024octetsto1522octets);
	UPDATE_EXTEND_STAT(tx_stat_etherstatspktsover1522octets);
	UPDATE_EXTEND_STAT(tx_stat_dot3statsinternalmactransmiterrors);

	estats->pause_frames_received_hi =
			pstats->mac_stx[1].rx_stat_xonpauseframesreceived_hi;
	estats->pause_frames_received_lo =
			pstats->mac_stx[1].rx_stat_xonpauseframesreceived_lo;
	ADD_64(estats->pause_frames_received_hi,
	       pstats->mac_stx[1].rx_stat_xoffpauseframesreceived_hi,
	       estats->pause_frames_received_lo,
	       pstats->mac_stx[1].rx_stat_xoffpauseframesreceived_lo);

	estats->pause_frames_sent_hi =
			pstats->mac_stx[1].tx_stat_outxonsent_hi;
	estats->pause_frames_sent_lo =
			pstats->mac_stx[1].tx_stat_outxonsent_lo;
	ADD_64(estats->pause_frames_sent_hi,
	       pstats->mac_stx[1].tx_stat_outxoffsent_hi,
	       estats->pause_frames_sent_lo,
	       pstats->mac_stx[1].tx_stat_outxoffsent_lo);
}

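/* PMF-only: fold the freshly DMAed MAC statistics (per active MAC type)
 * and the NIG discard/truncate deltas into the driver stats, bump the
 * host_port_stats counter for the MFW, and pick up the E3 EEE LPI
 * entry count.
 */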
static int bnx2x_hw_stats_update(struct bnx2x *bp)
{
	struct nig_stats *new = bnx2x_sp(bp, nig_stats);
	struct nig_stats *old = &(bp->port.old_nig_stats);
	struct host_port_stats *pstats = bnx2x_sp(bp, port_stats);
	struct bnx2x_eth_stats *estats = &bp->eth_stats;
	struct {
		u32 lo;
		u32 hi;
	} diff;

	switch (bp->link_vars.mac_type) {
	case MAC_TYPE_BMAC:
		bnx2x_bmac_stats_update(bp);
		break;

	case MAC_TYPE_EMAC:
		bnx2x_emac_stats_update(bp);
		break;

	case MAC_TYPE_UMAC:
	case MAC_TYPE_XMAC:
		bnx2x_mstat_stats_update(bp);
		break;

	case MAC_TYPE_NONE: /* unreached */
		DP(BNX2X_MSG_STATS,
		   "stats updated by DMAE but no MAC active\n");
		return -1;

	default: /* unreached */
		BNX2X_ERR("Unknown MAC type\n");
	}

	ADD_EXTEND_64(pstats->brb_drop_hi, pstats->brb_drop_lo,
		      new->brb_discard - old->brb_discard);
	ADD_EXTEND_64(estats->brb_truncate_hi, estats->brb_truncate_lo,
		      new->brb_truncate - old->brb_truncate);

	if (!CHIP_IS_E3(bp)) {
		UPDATE_STAT64_NIG(egress_mac_pkt0,
					etherstatspkts1024octetsto1522octets);
		UPDATE_STAT64_NIG(egress_mac_pkt1,
					etherstatspktsover1522octets);
	}

	memcpy(old, new, sizeof(struct nig_stats));

	memcpy(&(estats->rx_stat_ifhcinbadoctets_hi), &(pstats->mac_stx[1]),
	       sizeof(struct mac_stx));
	estats->brb_drop_hi = pstats->brb_drop_hi;
	estats->brb_drop_lo = pstats->brb_drop_lo;

	pstats->host_port_stats_counter++;

	if (CHIP_IS_E3(bp)) {
		u32 lpi_reg = BP_PORT(bp) ? MISC_REG_CPMU_LP_SM_ENT_CNT_P1
					  : MISC_REG_CPMU_LP_SM_ENT_CNT_P0;
		estats->eee_tx_lpi += REG_RD(bp, lpi_reg);
	}

	if (!BP_NOMCP(bp)) {
		u32 nig_timer_max =
			SHMEM_RD(bp, port_mb[BP_PORT(bp)].stat_nig_timer);
		if (nig_timer_max != estats->nig_timer_max) {
			estats->nig_timer_max = nig_timer_max;
			BNX2X_ERR("NIG timer max (%u)\n",
				  estats->nig_timer_max);
		}
	}

	return 0;
}

static int bnx2x_storm_stats_validate_counters(struct bnx2x *bp)
{
	struct stats_counter *counters = &bp->fw_stats_data->storm_counters;
	u16 cur_stats_counter;
	/* Make sure we use the value of the counter
	 * used for sending the last stats ramrod.
	 */
	cur_stats_counter = bp->stats_counter - 1;

	/* are storm stats valid? */
	if (le16_to_cpu(counters->xstats_counter) != cur_stats_counter) {
		DP(BNX2X_MSG_STATS,
		   "stats not updated by xstorm  xstorm counter (0x%x) != stats_counter (0x%x)\n",
		   le16_to_cpu(counters->xstats_counter), bp->stats_counter);
		return -EAGAIN;
	}

	if (le16_to_cpu(counters->ustats_counter) != cur_stats_counter) {
		DP(BNX2X_MSG_STATS,
		   "stats not updated by ustorm  ustorm counter (0x%x) != stats_counter (0x%x)\n",
		   le16_to_cpu(counters->ustats_counter), bp->stats_counter);
		return -EAGAIN;
	}

	if (le16_to_cpu(counters->cstats_counter) != cur_stats_counter) {
		DP(BNX2X_MSG_STATS,
		   "stats not updated by cstorm  cstorm counter (0x%x) != stats_counter (0x%x)\n",
		   le16_to_cpu(counters->cstats_counter), bp->stats_counter);
		return -EAGAIN;
	}

	if (le16_to_cpu(counters->tstats_counter) != cur_stats_counter) {
		DP(BNX2X_MSG_STATS,
		   "stats not updated by tstorm  tstorm counter (0x%x) != stats_counter (0x%x)\n",
		   le16_to_cpu(counters->tstats_counter), bp->stats_counter);
		return -EAGAIN;
	}
	return 0;
}

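/* Fold the per-queue statistics written by the x/t/u-storm firmware
 * into the per-queue, per-function and global driver stats. Returns
 * -EAGAIN, leaving stats_pending set, when the firmware has not yet
 * processed the last statistics ramrod.
 */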
static int bnx2x_storm_stats_update(struct bnx2x *bp)
{
	struct tstorm_per_port_stats *tport =
		&bp->fw_stats_data->port.tstorm_port_statistics;
	struct tstorm_per_pf_stats *tfunc =
		&bp->fw_stats_data->pf.tstorm_pf_statistics;
	struct host_func_stats *fstats = &bp->func_stats;
	struct bnx2x_eth_stats *estats = &bp->eth_stats;
	struct bnx2x_eth_stats_old *estats_old = &bp->eth_stats_old;
	int i;

	/* vfs stat counter is managed by pf */
	if (IS_PF(bp) && bnx2x_storm_stats_validate_counters(bp))
		return -EAGAIN;

	estats->error_bytes_received_hi = 0;
	estats->error_bytes_received_lo = 0;

	for_each_eth_queue(bp, i) {
		struct bnx2x_fastpath *fp = &bp->fp[i];
		struct tstorm_per_queue_stats *tclient =
			&bp->fw_stats_data->queue_stats[i].
			tstorm_queue_statistics;
		struct tstorm_per_queue_stats *old_tclient =
			&bnx2x_fp_stats(bp, fp)->old_tclient;
		struct ustorm_per_queue_stats *uclient =
			&bp->fw_stats_data->queue_stats[i].
			ustorm_queue_statistics;
		struct ustorm_per_queue_stats *old_uclient =
			&bnx2x_fp_stats(bp, fp)->old_uclient;
		struct xstorm_per_queue_stats *xclient =
			&bp->fw_stats_data->queue_stats[i].
			xstorm_queue_statistics;
		struct xstorm_per_queue_stats *old_xclient =
			&bnx2x_fp_stats(bp, fp)->old_xclient;
		struct bnx2x_eth_q_stats *qstats =
			&bnx2x_fp_stats(bp, fp)->eth_q_stats;
		struct bnx2x_eth_q_stats_old *qstats_old =
			&bnx2x_fp_stats(bp, fp)->eth_q_stats_old;

		u32 diff;

		DP(BNX2X_MSG_STATS, "queue[%d]: ucast_sent 0x%x, bcast_sent 0x%x mcast_sent 0x%x\n",
		   i, xclient->ucast_pkts_sent,
		   xclient->bcast_pkts_sent, xclient->mcast_pkts_sent);

		DP(BNX2X_MSG_STATS, "---------------\n");

		UPDATE_QSTAT(tclient->rcv_bcast_bytes,
			     total_broadcast_bytes_received);
		UPDATE_QSTAT(tclient->rcv_mcast_bytes,
			     total_multicast_bytes_received);
		UPDATE_QSTAT(tclient->rcv_ucast_bytes,
			     total_unicast_bytes_received);

		/*
		 * sum to total_bytes_received all
		 * unicast/multicast/broadcast
		 */
		qstats->total_bytes_received_hi =
			qstats->total_broadcast_bytes_received_hi;
		qstats->total_bytes_received_lo =
			qstats->total_broadcast_bytes_received_lo;

		ADD_64(qstats->total_bytes_received_hi,
		       qstats->total_multicast_bytes_received_hi,
		       qstats->total_bytes_received_lo,
		       qstats->total_multicast_bytes_received_lo);

		ADD_64(qstats->total_bytes_received_hi,
		       qstats->total_unicast_bytes_received_hi,
		       qstats->total_bytes_received_lo,
		       qstats->total_unicast_bytes_received_lo);

		qstats->valid_bytes_received_hi =
			qstats->total_bytes_received_hi;
		qstats->valid_bytes_received_lo =
			qstats->total_bytes_received_lo;

		UPDATE_EXTEND_TSTAT(rcv_ucast_pkts,
				    total_unicast_packets_received);
		UPDATE_EXTEND_TSTAT(rcv_mcast_pkts,
				    total_multicast_packets_received);
		UPDATE_EXTEND_TSTAT(rcv_bcast_pkts,
				    total_broadcast_packets_received);
		UPDATE_EXTEND_E_TSTAT(pkts_too_big_discard,
				      etherstatsoverrsizepkts, 32);
		UPDATE_EXTEND_E_TSTAT(no_buff_discard, no_buff_discard, 16);

		SUB_EXTEND_USTAT(ucast_no_buff_pkts,
				 total_unicast_packets_received);
		SUB_EXTEND_USTAT(mcast_no_buff_pkts,
				 total_multicast_packets_received);
		SUB_EXTEND_USTAT(bcast_no_buff_pkts,
				 total_broadcast_packets_received);
		UPDATE_EXTEND_E_USTAT(ucast_no_buff_pkts, no_buff_discard);
		UPDATE_EXTEND_E_USTAT(mcast_no_buff_pkts, no_buff_discard);
		UPDATE_EXTEND_E_USTAT(bcast_no_buff_pkts, no_buff_discard);

		UPDATE_QSTAT(xclient->bcast_bytes_sent,
			     total_broadcast_bytes_transmitted);
		UPDATE_QSTAT(xclient->mcast_bytes_sent,
			     total_multicast_bytes_transmitted);
		UPDATE_QSTAT(xclient->ucast_bytes_sent,
			     total_unicast_bytes_transmitted);

		/*
		 * sum to total_bytes_transmitted all
		 * unicast/multicast/broadcast
		 */
		qstats->total_bytes_transmitted_hi =
			qstats->total_unicast_bytes_transmitted_hi;
		qstats->total_bytes_transmitted_lo =
			qstats->total_unicast_bytes_transmitted_lo;

		ADD_64(qstats->total_bytes_transmitted_hi,
		       qstats->total_broadcast_bytes_transmitted_hi,
		       qstats->total_bytes_transmitted_lo,
		       qstats->total_broadcast_bytes_transmitted_lo);

		ADD_64(qstats->total_bytes_transmitted_hi,
		       qstats->total_multicast_bytes_transmitted_hi,
		       qstats->total_bytes_transmitted_lo,
		       qstats->total_multicast_bytes_transmitted_lo);

		UPDATE_EXTEND_XSTAT(ucast_pkts_sent,
				    total_unicast_packets_transmitted);
		UPDATE_EXTEND_XSTAT(mcast_pkts_sent,
				    total_multicast_packets_transmitted);
		UPDATE_EXTEND_XSTAT(bcast_pkts_sent,
				    total_broadcast_packets_transmitted);

		UPDATE_EXTEND_TSTAT(checksum_discard,
				    total_packets_received_checksum_discarded);
		UPDATE_EXTEND_TSTAT(ttl0_discard,
				    total_packets_received_ttl0_discarded);

		UPDATE_EXTEND_XSTAT(error_drop_pkts,
				    total_transmitted_dropped_packets_error);

		/* TPA aggregations completed */
		UPDATE_EXTEND_E_USTAT(coalesced_events, total_tpa_aggregations);
		/* Number of network frames aggregated by TPA */
		UPDATE_EXTEND_E_USTAT(coalesced_pkts,
				      total_tpa_aggregated_frames);
		/* Total number of bytes in completed TPA aggregations */
		UPDATE_QSTAT(uclient->coalesced_bytes, total_tpa_bytes);

		UPDATE_ESTAT_QSTAT_64(total_tpa_bytes);

		UPDATE_FSTAT_QSTAT(total_bytes_received);
		UPDATE_FSTAT_QSTAT(total_bytes_transmitted);
		UPDATE_FSTAT_QSTAT(total_unicast_packets_received);
		UPDATE_FSTAT_QSTAT(total_multicast_packets_received);
		UPDATE_FSTAT_QSTAT(total_broadcast_packets_received);
		UPDATE_FSTAT_QSTAT(total_unicast_packets_transmitted);
		UPDATE_FSTAT_QSTAT(total_multicast_packets_transmitted);
		UPDATE_FSTAT_QSTAT(total_broadcast_packets_transmitted);
		UPDATE_FSTAT_QSTAT(valid_bytes_received);
	}

	ADD_64(estats->total_bytes_received_hi,
	       estats->rx_stat_ifhcinbadoctets_hi,
	       estats->total_bytes_received_lo,
	       estats->rx_stat_ifhcinbadoctets_lo);

	ADD_64_LE(estats->total_bytes_received_hi,
		  tfunc->rcv_error_bytes.hi,
		  estats->total_bytes_received_lo,
		  tfunc->rcv_error_bytes.lo);

	ADD_64_LE(estats->error_bytes_received_hi,
		  tfunc->rcv_error_bytes.hi,
		  estats->error_bytes_received_lo,
		  tfunc->rcv_error_bytes.lo);

	UPDATE_ESTAT(etherstatsoverrsizepkts, rx_stat_dot3statsframestoolong);

	ADD_64(estats->error_bytes_received_hi,
	       estats->rx_stat_ifhcinbadoctets_hi,
	       estats->error_bytes_received_lo,
	       estats->rx_stat_ifhcinbadoctets_lo);

	if (bp->port.pmf) {
		struct bnx2x_fw_port_stats_old *fwstats = &bp->fw_stats_old;
		UPDATE_FW_STAT(mac_filter_discard);
		UPDATE_FW_STAT(mf_tag_discard);
		UPDATE_FW_STAT(brb_truncate_discard);
		UPDATE_FW_STAT(mac_discard);
	}

	fstats->host_func_stats_start = ++fstats->host_func_stats_end;

	bp->stats_pending = 0;

	return 0;
}

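/* Translate the accumulated driver statistics into the generic
 * struct net_device_stats counters reported to the network stack.
 */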
static void bnx2x_net_stats_update(struct bnx2x *bp)
{
	struct bnx2x_eth_stats *estats = &bp->eth_stats;
	struct net_device_stats *nstats = &bp->dev->stats;
	unsigned long tmp;
	int i;

	nstats->rx_packets =
		bnx2x_hilo(&estats->total_unicast_packets_received_hi) +
		bnx2x_hilo(&estats->total_multicast_packets_received_hi) +
		bnx2x_hilo(&estats->total_broadcast_packets_received_hi);

	nstats->tx_packets =
		bnx2x_hilo(&estats->total_unicast_packets_transmitted_hi) +
		bnx2x_hilo(&estats->total_multicast_packets_transmitted_hi) +
		bnx2x_hilo(&estats->total_broadcast_packets_transmitted_hi);

	nstats->rx_bytes = bnx2x_hilo(&estats->total_bytes_received_hi);

	nstats->tx_bytes = bnx2x_hilo(&estats->total_bytes_transmitted_hi);

	tmp = estats->mac_discard;
	for_each_rx_queue(bp, i) {
		struct tstorm_per_queue_stats *old_tclient =
			&bp->fp_stats[i].old_tclient;
		tmp += le32_to_cpu(old_tclient->checksum_discard);
	}
	nstats->rx_dropped = tmp + bp->net_stats_old.rx_dropped;

	nstats->tx_dropped = 0;

	nstats->multicast =
		bnx2x_hilo(&estats->total_multicast_packets_received_hi);

	nstats->collisions =
		bnx2x_hilo(&estats->tx_stat_etherstatscollisions_hi);

	nstats->rx_length_errors =
		bnx2x_hilo(&estats->rx_stat_etherstatsundersizepkts_hi) +
		bnx2x_hilo(&estats->etherstatsoverrsizepkts_hi);
	nstats->rx_over_errors = bnx2x_hilo(&estats->brb_drop_hi) +
				 bnx2x_hilo(&estats->brb_truncate_hi);
	nstats->rx_crc_errors =
		bnx2x_hilo(&estats->rx_stat_dot3statsfcserrors_hi);
	nstats->rx_frame_errors =
		bnx2x_hilo(&estats->rx_stat_dot3statsalignmenterrors_hi);
	nstats->rx_fifo_errors = bnx2x_hilo(&estats->no_buff_discard_hi);
	nstats->rx_missed_errors = 0;

	nstats->rx_errors = nstats->rx_length_errors +
			    nstats->rx_over_errors +
			    nstats->rx_crc_errors +
			    nstats->rx_frame_errors +
			    nstats->rx_fifo_errors +
			    nstats->rx_missed_errors;

	nstats->tx_aborted_errors =
		bnx2x_hilo(&estats->tx_stat_dot3statslatecollisions_hi) +
		bnx2x_hilo(&estats->tx_stat_dot3statsexcessivecollisions_hi);
	nstats->tx_carrier_errors =
		bnx2x_hilo(&estats->rx_stat_dot3statscarriersenseerrors_hi);
	nstats->tx_fifo_errors = 0;
	nstats->tx_heartbeat_errors = 0;
	nstats->tx_window_errors = 0;

	nstats->tx_errors = nstats->tx_aborted_errors +
			    nstats->tx_carrier_errors +
	    bnx2x_hilo(&estats->tx_stat_dot3statsinternalmactransmiterrors_hi);
}

static void bnx2x_drv_stats_update(struct bnx2x *bp)
{
	struct bnx2x_eth_stats *estats = &bp->eth_stats;
	int i;

	for_each_queue(bp, i) {
		struct bnx2x_eth_q_stats *qstats = &bp->fp_stats[i].eth_q_stats;
		struct bnx2x_eth_q_stats_old *qstats_old =
			&bp->fp_stats[i].eth_q_stats_old;

		UPDATE_ESTAT_QSTAT(driver_xoff);
		UPDATE_ESTAT_QSTAT(rx_err_discard_pkt);
		UPDATE_ESTAT_QSTAT(rx_skb_alloc_failed);
		UPDATE_ESTAT_QSTAT(hw_csum_err);
		UPDATE_ESTAT_QSTAT(driver_filtered_tx_pkt);
	}
}

static bool bnx2x_edebug_stats_stopped(struct bnx2x *bp)
{
	u32 val;

	if (SHMEM2_HAS(bp, edebug_driver_if[1])) {
		val = SHMEM2_RD(bp, edebug_driver_if[1]);

		if (val == EDEBUG_DRIVER_IF_OP_CODE_DISABLE_STAT)
			return true;
	}

	return false;
}

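/* Periodic update path of the statistics state machine. A PF requires
 * the previous DMAE chain to have completed and panics if the storm
 * firmware misses three updates in a row; a VF only consumes the storm
 * statistics. On success both the hardware DMAE transfers and the FW
 * statistics ramrod are re-armed.
 */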
Dmitry Kravkov6c719d02010-07-27 12:36:15 +00001214static void bnx2x_stats_update(struct bnx2x *bp)
1215{
1216 u32 *stats_comp = bnx2x_sp(bp, stats_comp);
1217
Yuval Mintzdff173d2015-03-23 10:56:14 +02001218 if (bnx2x_edebug_stats_stopped(bp))
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03001219 return;
1220
Ariel Elior67c431a2013-01-01 05:22:36 +00001221 if (IS_PF(bp)) {
1222 if (*stats_comp != DMAE_COMP_VAL)
Yuval Mintzdff173d2015-03-23 10:56:14 +02001223 return;
Dmitry Kravkov6c719d02010-07-27 12:36:15 +00001224
Ariel Elior67c431a2013-01-01 05:22:36 +00001225 if (bp->port.pmf)
1226 bnx2x_hw_stats_update(bp);
Dmitry Kravkov6c719d02010-07-27 12:36:15 +00001227
Ariel Elior67c431a2013-01-01 05:22:36 +00001228 if (bnx2x_storm_stats_update(bp)) {
1229 if (bp->stats_pending++ == 3) {
1230 BNX2X_ERR("storm stats were not updated for 3 times\n");
1231 bnx2x_panic();
1232 }
Yuval Mintzdff173d2015-03-23 10:56:14 +02001233 return;
Dmitry Kravkovbef05402012-09-11 04:34:08 +00001234 }
Ariel Elior67c431a2013-01-01 05:22:36 +00001235 } else {
1236 /* vf doesn't collect HW statistics, and doesn't get completions
1237 * perform only update
1238 */
1239 bnx2x_storm_stats_update(bp);
Dmitry Kravkov6c719d02010-07-27 12:36:15 +00001240 }
1241
1242 bnx2x_net_stats_update(bp);
1243 bnx2x_drv_stats_update(bp);
1244
Ariel Elior67c431a2013-01-01 05:22:36 +00001245 /* vf is done */
1246 if (IS_VF(bp))
Yuval Mintzdff173d2015-03-23 10:56:14 +02001247 return;
Ariel Elior67c431a2013-01-01 05:22:36 +00001248
Dmitry Kravkov6c719d02010-07-27 12:36:15 +00001249 if (netif_msg_timer(bp)) {
1250 struct bnx2x_eth_stats *estats = &bp->eth_stats;
Dmitry Kravkov6c719d02010-07-27 12:36:15 +00001251
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03001252 netdev_dbg(bp->dev, "brb drops %u brb truncate %u\n",
Dmitry Kravkov6c719d02010-07-27 12:36:15 +00001253 estats->brb_drop_lo, estats->brb_truncate_lo);
Dmitry Kravkov6c719d02010-07-27 12:36:15 +00001254 }
1255
1256 bnx2x_hw_stats_post(bp);
1257 bnx2x_storm_stats_post(bp);
1258}
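
/* The PF path above relies on the DMAE completion handshake: the engine
 * writes DMAE_COMP_VAL into a driver-owned completion word when the copy
 * finishes. A sketch of the polling side of that handshake (illustrative
 * helper):
 */
static inline bool bnx2x_sketch_dmae_done(struct bnx2x *bp)
{
	u32 *stats_comp = bnx2x_sp(bp, stats_comp);

	return *stats_comp == DMAE_COMP_VAL;
}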
1259
1260static void bnx2x_port_stats_stop(struct bnx2x *bp)
1261{
1262 struct dmae_command *dmae;
1263 u32 opcode;
1264 int loader_idx = PMF_DMAE_C(bp);
1265 u32 *stats_comp = bnx2x_sp(bp, stats_comp);
1266
1267 bp->executer_idx = 0;
1268
Dmitry Kravkovf2e08992010-10-06 03:28:26 +00001269 opcode = bnx2x_dmae_opcode(bp, DMAE_SRC_PCI, DMAE_DST_GRC, false, 0);
Dmitry Kravkov6c719d02010-07-27 12:36:15 +00001270
1271 if (bp->port.port_stx) {
1272
1273 dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
1274 if (bp->func_stx)
Dmitry Kravkovf2e08992010-10-06 03:28:26 +00001275 dmae->opcode = bnx2x_dmae_opcode_add_comp(
1276 opcode, DMAE_COMP_GRC);
Dmitry Kravkov6c719d02010-07-27 12:36:15 +00001277 else
Dmitry Kravkovf2e08992010-10-06 03:28:26 +00001278 dmae->opcode = bnx2x_dmae_opcode_add_comp(
1279 opcode, DMAE_COMP_PCI);
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03001280
Dmitry Kravkov6c719d02010-07-27 12:36:15 +00001281 dmae->src_addr_lo = U64_LO(bnx2x_sp_mapping(bp, port_stats));
1282 dmae->src_addr_hi = U64_HI(bnx2x_sp_mapping(bp, port_stats));
1283 dmae->dst_addr_lo = bp->port.port_stx >> 2;
1284 dmae->dst_addr_hi = 0;
Barak Witkowski1d187b32011-12-05 22:41:50 +00001285 dmae->len = bnx2x_get_port_stats_dma_len(bp);
Dmitry Kravkov6c719d02010-07-27 12:36:15 +00001286 if (bp->func_stx) {
1287 dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
1288 dmae->comp_addr_hi = 0;
1289 dmae->comp_val = 1;
1290 } else {
1291 dmae->comp_addr_lo =
1292 U64_LO(bnx2x_sp_mapping(bp, stats_comp));
1293 dmae->comp_addr_hi =
1294 U64_HI(bnx2x_sp_mapping(bp, stats_comp));
1295 dmae->comp_val = DMAE_COMP_VAL;
1296
1297 *stats_comp = 0;
1298 }
1299 }
1300
1301 if (bp->func_stx) {
1302
1303 dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
Dmitry Kravkovf2e08992010-10-06 03:28:26 +00001304 dmae->opcode =
1305 bnx2x_dmae_opcode_add_comp(opcode, DMAE_COMP_PCI);
Dmitry Kravkov6c719d02010-07-27 12:36:15 +00001306 dmae->src_addr_lo = U64_LO(bnx2x_sp_mapping(bp, func_stats));
1307 dmae->src_addr_hi = U64_HI(bnx2x_sp_mapping(bp, func_stats));
1308 dmae->dst_addr_lo = bp->func_stx >> 2;
1309 dmae->dst_addr_hi = 0;
1310 dmae->len = sizeof(struct host_func_stats) >> 2;
1311 dmae->comp_addr_lo = U64_LO(bnx2x_sp_mapping(bp, stats_comp));
1312 dmae->comp_addr_hi = U64_HI(bnx2x_sp_mapping(bp, stats_comp));
1313 dmae->comp_val = DMAE_COMP_VAL;
1314
1315 *stats_comp = 0;
1316 }
1317}
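
/* When both port and function blocks are queued above, only the last DMAE
 * command completes to PCI; earlier ones complete to GRC so the loader
 * kicks the next command in the chain. A sketch of that choice of
 * completion mode (illustrative helper; the flag name is hypothetical):
 */
static inline u32 bnx2x_sketch_comp_mode(u32 opcode, bool more_cmds_follow)
{
	return bnx2x_dmae_opcode_add_comp(opcode,
					  more_cmds_follow ? DMAE_COMP_GRC :
							     DMAE_COMP_PCI);
}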
1318
1319static void bnx2x_stats_stop(struct bnx2x *bp)
1320{
Yuval Mintzdff173d2015-03-23 10:56:14 +02001321 bool update = false;
Dmitry Kravkov507393e2013-08-13 02:24:59 +03001322
Dmitry Kravkov6c719d02010-07-27 12:36:15 +00001323 bnx2x_stats_comp(bp);
1324
1325 if (bp->port.pmf)
1326 update = (bnx2x_hw_stats_update(bp) == 0);
1327
1328 update |= (bnx2x_storm_stats_update(bp) == 0);
1329
1330 if (update) {
1331 bnx2x_net_stats_update(bp);
1332
1333 if (bp->port.pmf)
1334 bnx2x_port_stats_stop(bp);
1335
1336 bnx2x_hw_stats_post(bp);
1337 bnx2x_stats_comp(bp);
1338 }
1339}
1340
1341static void bnx2x_stats_do_nothing(struct bnx2x *bp)
1342{
1343}
1344
1345static const struct {
1346 void (*action)(struct bnx2x *bp);
1347 enum bnx2x_stats_state next_state;
1348} bnx2x_stats_stm[STATS_STATE_MAX][STATS_EVENT_MAX] = {
1349/* state event */
1350{
1351/* DISABLED PMF */ {bnx2x_stats_pmf_update, STATS_STATE_DISABLED},
1352/* LINK_UP */ {bnx2x_stats_start, STATS_STATE_ENABLED},
1353/* UPDATE */ {bnx2x_stats_do_nothing, STATS_STATE_DISABLED},
1354/* STOP */ {bnx2x_stats_do_nothing, STATS_STATE_DISABLED}
1355},
1356{
1357/* ENABLED PMF */ {bnx2x_stats_pmf_start, STATS_STATE_ENABLED},
1358/* LINK_UP */ {bnx2x_stats_restart, STATS_STATE_ENABLED},
1359/* UPDATE */ {bnx2x_stats_update, STATS_STATE_ENABLED},
1360/* STOP */ {bnx2x_stats_stop, STATS_STATE_DISABLED}
1361}
1362};
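
/* The table above is indexed as [current state][event]; each cell names
 * the handler to run and the state to move to. A sketch of an event
 * source, assuming the periodic timer posts updates (illustrative helper;
 * bnx2x_stats_handle() below performs the locked version of this step):
 */
static inline void bnx2x_sketch_timer_tick(struct bnx2x *bp)
{
	/* timer context posts UPDATE; link code posts LINK_UP, etc. */
	bnx2x_stats_handle(bp, STATS_EVENT_UPDATE);
}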
1363
1364void bnx2x_stats_handle(struct bnx2x *bp, enum bnx2x_stats_event event)
1365{
Yuval Mintzdff173d2015-03-23 10:56:14 +02001366 enum bnx2x_stats_state state = bp->stats_state;
1367
Dmitry Kravkov6c719d02010-07-27 12:36:15 +00001368 if (unlikely(bp->panic))
1369 return;
Dmitry Kravkov4a025f42011-11-13 04:34:30 +00001370
Yuval Mintzdff173d2015-03-23 10:56:14 +02001371	/* Statistics updates run from timer context, and we don't want to block
1372	 * that context in case someone is in the middle of a transition.
1373	 * For other events, wait a bit until the lock is taken.
1374 */
Yuval Mintzc6e36d82015-06-01 15:08:18 +03001375 if (down_trylock(&bp->stats_lock)) {
Yuval Mintzdff173d2015-03-23 10:56:14 +02001376 if (event == STATS_EVENT_UPDATE)
1377 return;
Dmitry Kravkov6c719d02010-07-27 12:36:15 +00001378
Yuval Mintzdff173d2015-03-23 10:56:14 +02001379 DP(BNX2X_MSG_STATS,
1380		   "Unexpected stats lock contention [event %d]\n", event);
Yuval Mintzc6e36d82015-06-01 15:08:18 +03001381 if (unlikely(down_timeout(&bp->stats_lock, HZ / 10))) {
1382 BNX2X_ERR("Failed to take stats lock [event %d]\n",
1383 event);
1384 return;
1385 }
Yuval Mintzdff173d2015-03-23 10:56:14 +02001386 }
1387
1388 bnx2x_stats_stm[state][event].action(bp);
1389 bp->stats_state = bnx2x_stats_stm[state][event].next_state;
1390
Yuval Mintzc6e36d82015-06-01 15:08:18 +03001391 up(&bp->stats_lock);
Dmitry Kravkov4a025f42011-11-13 04:34:30 +00001392
Dmitry Kravkov6c719d02010-07-27 12:36:15 +00001393 if ((event != STATS_EVENT_UPDATE) || netif_msg_timer(bp))
1394 DP(BNX2X_MSG_STATS, "state %d -> event %d -> state %d\n",
1395 state, event, bp->stats_state);
1396}
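
/* Lock discipline used above: timer-context updates must not sleep, so
 * they give up on contention, while other events wait up to 100ms for the
 * semaphore. A condensed sketch of the pattern (illustrative helper):
 */
static inline bool bnx2x_sketch_stats_trylock(struct bnx2x *bp,
					      bool can_sleep)
{
	if (!down_trylock(&bp->stats_lock))
		return true;	/* got the semaphore uncontended */
	if (!can_sleep)
		return false;	/* timer context: skip this round */
	return down_timeout(&bp->stats_lock, HZ / 10) == 0;
}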
1397
1398static void bnx2x_port_stats_base_init(struct bnx2x *bp)
1399{
1400 struct dmae_command *dmae;
1401 u32 *stats_comp = bnx2x_sp(bp, stats_comp);
1402
1403 /* sanity */
1404 if (!bp->port.pmf || !bp->port.port_stx) {
1405 BNX2X_ERR("BUG!\n");
1406 return;
1407 }
1408
1409 bp->executer_idx = 0;
1410
1411 dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
Dmitry Kravkovf2e08992010-10-06 03:28:26 +00001412 dmae->opcode = bnx2x_dmae_opcode(bp, DMAE_SRC_PCI, DMAE_DST_GRC,
1413 true, DMAE_COMP_PCI);
Dmitry Kravkov6c719d02010-07-27 12:36:15 +00001414 dmae->src_addr_lo = U64_LO(bnx2x_sp_mapping(bp, port_stats));
1415 dmae->src_addr_hi = U64_HI(bnx2x_sp_mapping(bp, port_stats));
1416 dmae->dst_addr_lo = bp->port.port_stx >> 2;
1417 dmae->dst_addr_hi = 0;
Barak Witkowski1d187b32011-12-05 22:41:50 +00001418 dmae->len = bnx2x_get_port_stats_dma_len(bp);
Dmitry Kravkov6c719d02010-07-27 12:36:15 +00001419 dmae->comp_addr_lo = U64_LO(bnx2x_sp_mapping(bp, stats_comp));
1420 dmae->comp_addr_hi = U64_HI(bnx2x_sp_mapping(bp, stats_comp));
1421 dmae->comp_val = DMAE_COMP_VAL;
1422
1423 *stats_comp = 0;
1424 bnx2x_hw_stats_post(bp);
1425 bnx2x_stats_comp(bp);
1426}
1427
Ben Hutchings1aa8b472012-07-10 10:56:59 +00001428/* This function prepares the statistics ramrod data once, so that we
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03001429 * only have to increment the statistics counter and
1430 * send the ramrod each time statistics are needed.
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03001431 */
Eric Dumazet1191cb82012-04-27 21:39:21 +00001432static void bnx2x_prep_fw_stats_req(struct bnx2x *bp)
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03001433{
1434 int i;
Barak Witkowski50f0a562011-12-05 21:52:23 +00001435 int first_queue_query_index;
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03001436 struct stats_query_header *stats_hdr = &bp->fw_stats_req->hdr;
1437
1438 dma_addr_t cur_data_offset;
1439 struct stats_query_entry *cur_query_entry;
1440
1441 stats_hdr->cmd_num = bp->fw_stats_num;
1442 stats_hdr->drv_stats_counter = 0;
1443
1444 /* storm_counters struct contains the counters of completed
1445	 * statistics requests per storm which are incremented by the FW
1446	 * each time it completes handling a statistics ramrod. We
1447	 * check these counters in the timer handler and use them to
1448	 * discard stale (statistics) ramrod completions.
1449 */
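	/* e.g. the update path compares each per-storm counter against the
	 * drv_stats_counter sent in the request header; a mismatch means
	 * the DMA'd snapshot is stale and it is dropped.
	 */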
1450 cur_data_offset = bp->fw_stats_data_mapping +
1451 offsetof(struct bnx2x_fw_stats_data, storm_counters);
1452
1453 stats_hdr->stats_counters_addrs.hi =
1454 cpu_to_le32(U64_HI(cur_data_offset));
1455 stats_hdr->stats_counters_addrs.lo =
1456 cpu_to_le32(U64_LO(cur_data_offset));
1457
1458	/* prepare for the first stats ramrod (it will be completed with
1459	 * all counters equal to zero) - init the counters to something different.
1460	 */
1461 memset(&bp->fw_stats_data->storm_counters, 0xff,
1462 sizeof(struct stats_counter));
1463
1464 /**** Port FW statistics data ****/
1465 cur_data_offset = bp->fw_stats_data_mapping +
1466 offsetof(struct bnx2x_fw_stats_data, port);
1467
1468 cur_query_entry = &bp->fw_stats_req->query[BNX2X_PORT_QUERY_IDX];
1469
1470 cur_query_entry->kind = STATS_TYPE_PORT;
1471	/* For the port query, index is a DONT CARE */
1472 cur_query_entry->index = BP_PORT(bp);
1473	/* For the port query, funcID is a DONT CARE */
1474 cur_query_entry->funcID = cpu_to_le16(BP_FUNC(bp));
1475 cur_query_entry->address.hi = cpu_to_le32(U64_HI(cur_data_offset));
1476 cur_query_entry->address.lo = cpu_to_le32(U64_LO(cur_data_offset));
1477
1478 /**** PF FW statistics data ****/
1479 cur_data_offset = bp->fw_stats_data_mapping +
1480 offsetof(struct bnx2x_fw_stats_data, pf);
1481
1482 cur_query_entry = &bp->fw_stats_req->query[BNX2X_PF_QUERY_IDX];
1483
1484 cur_query_entry->kind = STATS_TYPE_PF;
1485	/* For the PF query, index is a DONT CARE */
1486 cur_query_entry->index = BP_PORT(bp);
1487 cur_query_entry->funcID = cpu_to_le16(BP_FUNC(bp));
1488 cur_query_entry->address.hi = cpu_to_le32(U64_HI(cur_data_offset));
1489 cur_query_entry->address.lo = cpu_to_le32(U64_LO(cur_data_offset));
1490
Barak Witkowski50f0a562011-12-05 21:52:23 +00001491 /**** FCoE FW statistics data ****/
1492 if (!NO_FCOE(bp)) {
1493 cur_data_offset = bp->fw_stats_data_mapping +
1494 offsetof(struct bnx2x_fw_stats_data, fcoe);
1495
1496 cur_query_entry =
1497 &bp->fw_stats_req->query[BNX2X_FCOE_QUERY_IDX];
1498
1499 cur_query_entry->kind = STATS_TYPE_FCOE;
1500		/* For the FCoE query, index is a DONT CARE */
1501 cur_query_entry->index = BP_PORT(bp);
1502 cur_query_entry->funcID = cpu_to_le16(BP_FUNC(bp));
1503 cur_query_entry->address.hi =
1504 cpu_to_le32(U64_HI(cur_data_offset));
1505 cur_query_entry->address.lo =
1506 cpu_to_le32(U64_LO(cur_data_offset));
1507 }
1508
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03001509 /**** Clients' queries ****/
1510 cur_data_offset = bp->fw_stats_data_mapping +
1511 offsetof(struct bnx2x_fw_stats_data, queue_stats);
1512
Barak Witkowski50f0a562011-12-05 21:52:23 +00001513	/* the first queue query index depends on whether an FCoE offload
1514	 * request is included in the ramrod
1515	 */
1516 if (!NO_FCOE(bp))
1517 first_queue_query_index = BNX2X_FIRST_QUEUE_QUERY_IDX;
1518 else
1519 first_queue_query_index = BNX2X_FIRST_QUEUE_QUERY_IDX - 1;
1520
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03001521 for_each_eth_queue(bp, i) {
1522 cur_query_entry =
1523 &bp->fw_stats_req->
Barak Witkowski50f0a562011-12-05 21:52:23 +00001524 query[first_queue_query_index + i];
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03001525
1526 cur_query_entry->kind = STATS_TYPE_QUEUE;
1527 cur_query_entry->index = bnx2x_stats_id(&bp->fp[i]);
1528 cur_query_entry->funcID = cpu_to_le16(BP_FUNC(bp));
1529 cur_query_entry->address.hi =
1530 cpu_to_le32(U64_HI(cur_data_offset));
1531 cur_query_entry->address.lo =
1532 cpu_to_le32(U64_LO(cur_data_offset));
1533
1534 cur_data_offset += sizeof(struct per_queue_stats);
1535 }
Barak Witkowski50f0a562011-12-05 21:52:23 +00001536
1537 /* add FCoE queue query if needed */
1538 if (!NO_FCOE(bp)) {
1539 cur_query_entry =
1540 &bp->fw_stats_req->
1541 query[first_queue_query_index + i];
1542
1543 cur_query_entry->kind = STATS_TYPE_QUEUE;
Merav Sicron65565882012-06-19 07:48:26 +00001544 cur_query_entry->index = bnx2x_stats_id(&bp->fp[FCOE_IDX(bp)]);
Barak Witkowski50f0a562011-12-05 21:52:23 +00001545 cur_query_entry->funcID = cpu_to_le16(BP_FUNC(bp));
1546 cur_query_entry->address.hi =
1547 cpu_to_le32(U64_HI(cur_data_offset));
1548 cur_query_entry->address.lo =
1549 cpu_to_le32(U64_LO(cur_data_offset));
1550 }
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03001551}
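
/* Resulting layout of the request's query[] array, assuming FCoE is
 * enabled (sketch):
 *
 *	query[BNX2X_PORT_QUERY_IDX]            - STATS_TYPE_PORT
 *	query[BNX2X_PF_QUERY_IDX]              - STATS_TYPE_PF
 *	query[BNX2X_FCOE_QUERY_IDX]            - STATS_TYPE_FCOE
 *	query[BNX2X_FIRST_QUEUE_QUERY_IDX + i] - STATS_TYPE_QUEUE, per queue
 *	...                                    - FCoE queue, appended last
 *
 * Each entry points the FW at its own slice of bnx2x_fw_stats_data;
 * queue slices advance by sizeof(struct per_queue_stats).
 */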
1552
Ariel Elior5b0752c2013-03-27 01:05:15 +00001553void bnx2x_memset_stats(struct bnx2x *bp)
1554{
1555 int i;
1556
1557 /* function stats */
1558 for_each_queue(bp, i) {
1559 struct bnx2x_fp_stats *fp_stats = &bp->fp_stats[i];
1560
1561 memset(&fp_stats->old_tclient, 0,
1562 sizeof(fp_stats->old_tclient));
1563 memset(&fp_stats->old_uclient, 0,
1564 sizeof(fp_stats->old_uclient));
1565 memset(&fp_stats->old_xclient, 0,
1566 sizeof(fp_stats->old_xclient));
1567 if (bp->stats_init) {
1568 memset(&fp_stats->eth_q_stats, 0,
1569 sizeof(fp_stats->eth_q_stats));
1570 memset(&fp_stats->eth_q_stats_old, 0,
1571 sizeof(fp_stats->eth_q_stats_old));
1572 }
1573 }
1574
1575 memset(&bp->dev->stats, 0, sizeof(bp->dev->stats));
1576
1577 if (bp->stats_init) {
1578 memset(&bp->net_stats_old, 0, sizeof(bp->net_stats_old));
1579 memset(&bp->fw_stats_old, 0, sizeof(bp->fw_stats_old));
1580 memset(&bp->eth_stats_old, 0, sizeof(bp->eth_stats_old));
1581 memset(&bp->eth_stats, 0, sizeof(bp->eth_stats));
1582 memset(&bp->func_stats, 0, sizeof(bp->func_stats));
1583 }
1584
1585 bp->stats_state = STATS_STATE_DISABLED;
1586
1587 if (bp->port.pmf && bp->port.port_stx)
1588 bnx2x_port_stats_base_init(bp);
1589
Joe Perchesdbedd442015-03-06 20:49:12 -08001590 /* mark the end of statistics initialization */
Ariel Elior5b0752c2013-03-27 01:05:15 +00001591 bp->stats_init = false;
1592}
1593
Dmitry Kravkov6c719d02010-07-27 12:36:15 +00001594void bnx2x_stats_init(struct bnx2x *bp)
1595{
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03001596	int port = BP_PORT(bp); /* absolute port */
Dmitry Kravkovf2e08992010-10-06 03:28:26 +00001597 int mb_idx = BP_FW_MB_IDX(bp);
Dmitry Kravkov6c719d02010-07-27 12:36:15 +00001598
Yuval Mintz14f806a2014-08-25 17:48:31 +03001599 if (IS_VF(bp)) {
1600 bnx2x_memset_stats(bp);
1601 return;
1602 }
1603
Dmitry Kravkov6c719d02010-07-27 12:36:15 +00001604 bp->stats_pending = 0;
1605 bp->executer_idx = 0;
1606 bp->stats_counter = 0;
1607
1608 /* port and func stats for management */
1609 if (!BP_NOMCP(bp)) {
1610 bp->port.port_stx = SHMEM_RD(bp, port_mb[port].port_stx);
Dmitry Kravkovf2e08992010-10-06 03:28:26 +00001611 bp->func_stx = SHMEM_RD(bp, func_mb[mb_idx].fw_mb_param);
Dmitry Kravkov6c719d02010-07-27 12:36:15 +00001612
1613 } else {
1614 bp->port.port_stx = 0;
1615 bp->func_stx = 0;
1616 }
1617 DP(BNX2X_MSG_STATS, "port_stx 0x%x func_stx 0x%x\n",
1618 bp->port.port_stx, bp->func_stx);
1619
Mintz Yuval1355b702012-02-15 02:10:22 +00001620	/* the PMF should retrieve port statistics from SP on a non-init run */
1621 if (!bp->stats_init && bp->port.pmf && bp->port.port_stx)
1622 bnx2x_stats_handle(bp, STATS_EVENT_PMF);
1623
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03001624 port = BP_PORT(bp);
Dmitry Kravkov6c719d02010-07-27 12:36:15 +00001625 /* port stats */
1626 memset(&(bp->port.old_nig_stats), 0, sizeof(struct nig_stats));
1627 bp->port.old_nig_stats.brb_discard =
1628 REG_RD(bp, NIG_REG_STAT0_BRB_DISCARD + port*0x38);
1629 bp->port.old_nig_stats.brb_truncate =
1630 REG_RD(bp, NIG_REG_STAT0_BRB_TRUNCATE + port*0x38);
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03001631 if (!CHIP_IS_E3(bp)) {
1632 REG_RD_DMAE(bp, NIG_REG_STAT0_EGRESS_MAC_PKT0 + port*0x50,
1633 &(bp->port.old_nig_stats.egress_mac_pkt0_lo), 2);
1634 REG_RD_DMAE(bp, NIG_REG_STAT0_EGRESS_MAC_PKT1 + port*0x50,
1635 &(bp->port.old_nig_stats.egress_mac_pkt1_lo), 2);
1636 }
Dmitry Kravkov6c719d02010-07-27 12:36:15 +00001637
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03001638 /* Prepare statistics ramrod data */
1639 bnx2x_prep_fw_stats_req(bp);
Dmitry Kravkov6fe8bce2010-10-06 03:35:03 +00001640
Ariel Elior5b0752c2013-03-27 01:05:15 +00001641 /* Clean SP from previous statistics */
Mintz Yuval1355b702012-02-15 02:10:22 +00001642 if (bp->stats_init) {
Mintz Yuval1355b702012-02-15 02:10:22 +00001643 if (bp->func_stx) {
1644 memset(bnx2x_sp(bp, func_stats), 0,
1645 sizeof(struct host_func_stats));
1646 bnx2x_func_stats_init(bp);
1647 bnx2x_hw_stats_post(bp);
1648 bnx2x_stats_comp(bp);
1649 }
1650 }
Dmitry Kravkov6c719d02010-07-27 12:36:15 +00001651
Ariel Elior5b0752c2013-03-27 01:05:15 +00001652 bnx2x_memset_stats(bp);
Mintz Yuval1355b702012-02-15 02:10:22 +00001653}
1654
1655void bnx2x_save_statistics(struct bnx2x *bp)
1656{
1657 int i;
1658 struct net_device_stats *nstats = &bp->dev->stats;
1659
1660 /* save queue statistics */
1661 for_each_eth_queue(bp, i) {
1662 struct bnx2x_fastpath *fp = &bp->fp[i];
Barak Witkowski15192a82012-06-19 07:48:28 +00001663 struct bnx2x_eth_q_stats *qstats =
1664 &bnx2x_fp_stats(bp, fp)->eth_q_stats;
1665 struct bnx2x_eth_q_stats_old *qstats_old =
1666 &bnx2x_fp_stats(bp, fp)->eth_q_stats_old;
Mintz Yuval1355b702012-02-15 02:10:22 +00001667
1668 UPDATE_QSTAT_OLD(total_unicast_bytes_received_hi);
1669 UPDATE_QSTAT_OLD(total_unicast_bytes_received_lo);
1670 UPDATE_QSTAT_OLD(total_broadcast_bytes_received_hi);
1671 UPDATE_QSTAT_OLD(total_broadcast_bytes_received_lo);
1672 UPDATE_QSTAT_OLD(total_multicast_bytes_received_hi);
1673 UPDATE_QSTAT_OLD(total_multicast_bytes_received_lo);
1674 UPDATE_QSTAT_OLD(total_unicast_bytes_transmitted_hi);
1675 UPDATE_QSTAT_OLD(total_unicast_bytes_transmitted_lo);
1676 UPDATE_QSTAT_OLD(total_broadcast_bytes_transmitted_hi);
1677 UPDATE_QSTAT_OLD(total_broadcast_bytes_transmitted_lo);
1678 UPDATE_QSTAT_OLD(total_multicast_bytes_transmitted_hi);
1679 UPDATE_QSTAT_OLD(total_multicast_bytes_transmitted_lo);
1680 UPDATE_QSTAT_OLD(total_tpa_bytes_hi);
1681 UPDATE_QSTAT_OLD(total_tpa_bytes_lo);
1682 }
1683
1684 /* save net_device_stats statistics */
1685 bp->net_stats_old.rx_dropped = nstats->rx_dropped;
1686
1687 /* store port firmware statistics */
1688 if (bp->port.pmf && IS_MF(bp)) {
1689 struct bnx2x_eth_stats *estats = &bp->eth_stats;
1690 struct bnx2x_fw_port_stats_old *fwstats = &bp->fw_stats_old;
1691 UPDATE_FW_STAT_OLD(mac_filter_discard);
1692 UPDATE_FW_STAT_OLD(mf_tag_discard);
1693 UPDATE_FW_STAT_OLD(brb_truncate_discard);
1694 UPDATE_FW_STAT_OLD(mac_discard);
1695 }
Dmitry Kravkov6c719d02010-07-27 12:36:15 +00001696}
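
/* A plausible expansion of one UPDATE_QSTAT_OLD() above, assuming the
 * macro snapshots the live counter into its _old shadow so the value
 * survives a queue teardown (the real macro is in bnx2x_stats.h; the
 * helper name is hypothetical):
 */
static inline void
bnx2x_sketch_save_qstat(struct bnx2x_eth_q_stats *qstats,
			struct bnx2x_eth_q_stats_old *qstats_old)
{
	qstats_old->total_unicast_bytes_received_hi =
		qstats->total_unicast_bytes_received_hi;
	qstats_old->total_unicast_bytes_received_lo =
		qstats->total_unicast_bytes_received_lo;
}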
Barak Witkowskia3348722012-04-23 03:04:46 +00001697
1698void bnx2x_afex_collect_stats(struct bnx2x *bp, void *void_afex_stats,
1699 u32 stats_type)
1700{
1701 int i;
1702 struct afex_stats *afex_stats = (struct afex_stats *)void_afex_stats;
1703 struct bnx2x_eth_stats *estats = &bp->eth_stats;
1704 struct per_queue_stats *fcoe_q_stats =
Merav Sicron65565882012-06-19 07:48:26 +00001705 &bp->fw_stats_data->queue_stats[FCOE_IDX(bp)];
Barak Witkowskia3348722012-04-23 03:04:46 +00001706
1707 struct tstorm_per_queue_stats *fcoe_q_tstorm_stats =
1708 &fcoe_q_stats->tstorm_queue_statistics;
1709
1710 struct ustorm_per_queue_stats *fcoe_q_ustorm_stats =
1711 &fcoe_q_stats->ustorm_queue_statistics;
1712
1713 struct xstorm_per_queue_stats *fcoe_q_xstorm_stats =
1714 &fcoe_q_stats->xstorm_queue_statistics;
1715
1716 struct fcoe_statistics_params *fw_fcoe_stat =
1717 &bp->fw_stats_data->fcoe;
1718
1719 memset(afex_stats, 0, sizeof(struct afex_stats));
1720
1721 for_each_eth_queue(bp, i) {
Barak Witkowski15192a82012-06-19 07:48:28 +00001722 struct bnx2x_eth_q_stats *qstats = &bp->fp_stats[i].eth_q_stats;
Barak Witkowskia3348722012-04-23 03:04:46 +00001723
1724 ADD_64(afex_stats->rx_unicast_bytes_hi,
1725 qstats->total_unicast_bytes_received_hi,
1726 afex_stats->rx_unicast_bytes_lo,
1727 qstats->total_unicast_bytes_received_lo);
1728
1729 ADD_64(afex_stats->rx_broadcast_bytes_hi,
1730 qstats->total_broadcast_bytes_received_hi,
1731 afex_stats->rx_broadcast_bytes_lo,
1732 qstats->total_broadcast_bytes_received_lo);
1733
1734 ADD_64(afex_stats->rx_multicast_bytes_hi,
1735 qstats->total_multicast_bytes_received_hi,
1736 afex_stats->rx_multicast_bytes_lo,
1737 qstats->total_multicast_bytes_received_lo);
1738
1739 ADD_64(afex_stats->rx_unicast_frames_hi,
1740 qstats->total_unicast_packets_received_hi,
1741 afex_stats->rx_unicast_frames_lo,
1742 qstats->total_unicast_packets_received_lo);
1743
1744 ADD_64(afex_stats->rx_broadcast_frames_hi,
1745 qstats->total_broadcast_packets_received_hi,
1746 afex_stats->rx_broadcast_frames_lo,
1747 qstats->total_broadcast_packets_received_lo);
1748
1749 ADD_64(afex_stats->rx_multicast_frames_hi,
1750 qstats->total_multicast_packets_received_hi,
1751 afex_stats->rx_multicast_frames_lo,
1752 qstats->total_multicast_packets_received_lo);
1753
1754		/* sum into rx_frames_discarded all packets discarded
1755		 * due to size, ttl0 and checksum
1756		 */
1757 ADD_64(afex_stats->rx_frames_discarded_hi,
1758 qstats->total_packets_received_checksum_discarded_hi,
1759 afex_stats->rx_frames_discarded_lo,
1760 qstats->total_packets_received_checksum_discarded_lo);
1761
1762 ADD_64(afex_stats->rx_frames_discarded_hi,
1763 qstats->total_packets_received_ttl0_discarded_hi,
1764 afex_stats->rx_frames_discarded_lo,
1765 qstats->total_packets_received_ttl0_discarded_lo);
1766
1767 ADD_64(afex_stats->rx_frames_discarded_hi,
1768 qstats->etherstatsoverrsizepkts_hi,
1769 afex_stats->rx_frames_discarded_lo,
1770 qstats->etherstatsoverrsizepkts_lo);
1771
1772 ADD_64(afex_stats->rx_frames_dropped_hi,
1773 qstats->no_buff_discard_hi,
1774 afex_stats->rx_frames_dropped_lo,
1775 qstats->no_buff_discard_lo);
1776
1777 ADD_64(afex_stats->tx_unicast_bytes_hi,
1778 qstats->total_unicast_bytes_transmitted_hi,
1779 afex_stats->tx_unicast_bytes_lo,
1780 qstats->total_unicast_bytes_transmitted_lo);
1781
1782 ADD_64(afex_stats->tx_broadcast_bytes_hi,
1783 qstats->total_broadcast_bytes_transmitted_hi,
1784 afex_stats->tx_broadcast_bytes_lo,
1785 qstats->total_broadcast_bytes_transmitted_lo);
1786
1787 ADD_64(afex_stats->tx_multicast_bytes_hi,
1788 qstats->total_multicast_bytes_transmitted_hi,
1789 afex_stats->tx_multicast_bytes_lo,
1790 qstats->total_multicast_bytes_transmitted_lo);
1791
1792 ADD_64(afex_stats->tx_unicast_frames_hi,
1793 qstats->total_unicast_packets_transmitted_hi,
1794 afex_stats->tx_unicast_frames_lo,
1795 qstats->total_unicast_packets_transmitted_lo);
1796
1797 ADD_64(afex_stats->tx_broadcast_frames_hi,
1798 qstats->total_broadcast_packets_transmitted_hi,
1799 afex_stats->tx_broadcast_frames_lo,
1800 qstats->total_broadcast_packets_transmitted_lo);
1801
1802 ADD_64(afex_stats->tx_multicast_frames_hi,
1803 qstats->total_multicast_packets_transmitted_hi,
1804 afex_stats->tx_multicast_frames_lo,
1805 qstats->total_multicast_packets_transmitted_lo);
1806
1807 ADD_64(afex_stats->tx_frames_dropped_hi,
1808 qstats->total_transmitted_dropped_packets_error_hi,
1809 afex_stats->tx_frames_dropped_lo,
1810 qstats->total_transmitted_dropped_packets_error_lo);
1811 }
1812
1813 /* now add FCoE statistics which are collected separately
1814	 * (both offloaded and non-offloaded)
1815 */
1816 if (!NO_FCOE(bp)) {
1817 ADD_64_LE(afex_stats->rx_unicast_bytes_hi,
1818 LE32_0,
1819 afex_stats->rx_unicast_bytes_lo,
1820 fw_fcoe_stat->rx_stat0.fcoe_rx_byte_cnt);
1821
1822 ADD_64_LE(afex_stats->rx_unicast_bytes_hi,
1823 fcoe_q_tstorm_stats->rcv_ucast_bytes.hi,
1824 afex_stats->rx_unicast_bytes_lo,
1825 fcoe_q_tstorm_stats->rcv_ucast_bytes.lo);
1826
1827 ADD_64_LE(afex_stats->rx_broadcast_bytes_hi,
1828 fcoe_q_tstorm_stats->rcv_bcast_bytes.hi,
1829 afex_stats->rx_broadcast_bytes_lo,
1830 fcoe_q_tstorm_stats->rcv_bcast_bytes.lo);
1831
1832 ADD_64_LE(afex_stats->rx_multicast_bytes_hi,
1833 fcoe_q_tstorm_stats->rcv_mcast_bytes.hi,
1834 afex_stats->rx_multicast_bytes_lo,
1835 fcoe_q_tstorm_stats->rcv_mcast_bytes.lo);
1836
1837 ADD_64_LE(afex_stats->rx_unicast_frames_hi,
1838 LE32_0,
1839 afex_stats->rx_unicast_frames_lo,
1840 fw_fcoe_stat->rx_stat0.fcoe_rx_pkt_cnt);
1841
1842 ADD_64_LE(afex_stats->rx_unicast_frames_hi,
1843 LE32_0,
1844 afex_stats->rx_unicast_frames_lo,
1845 fcoe_q_tstorm_stats->rcv_ucast_pkts);
1846
1847 ADD_64_LE(afex_stats->rx_broadcast_frames_hi,
1848 LE32_0,
1849 afex_stats->rx_broadcast_frames_lo,
1850 fcoe_q_tstorm_stats->rcv_bcast_pkts);
1851
1852 ADD_64_LE(afex_stats->rx_multicast_frames_hi,
1853 LE32_0,
1854 afex_stats->rx_multicast_frames_lo,
1855			  fcoe_q_tstorm_stats->rcv_mcast_pkts);
1856
1857 ADD_64_LE(afex_stats->rx_frames_discarded_hi,
1858 LE32_0,
1859 afex_stats->rx_frames_discarded_lo,
1860 fcoe_q_tstorm_stats->checksum_discard);
1861
1862 ADD_64_LE(afex_stats->rx_frames_discarded_hi,
1863 LE32_0,
1864 afex_stats->rx_frames_discarded_lo,
1865 fcoe_q_tstorm_stats->pkts_too_big_discard);
1866
1867 ADD_64_LE(afex_stats->rx_frames_discarded_hi,
1868 LE32_0,
1869 afex_stats->rx_frames_discarded_lo,
1870 fcoe_q_tstorm_stats->ttl0_discard);
1871
1872 ADD_64_LE16(afex_stats->rx_frames_dropped_hi,
1873 LE16_0,
1874 afex_stats->rx_frames_dropped_lo,
1875 fcoe_q_tstorm_stats->no_buff_discard);
1876
1877 ADD_64_LE(afex_stats->rx_frames_dropped_hi,
1878 LE32_0,
1879 afex_stats->rx_frames_dropped_lo,
1880 fcoe_q_ustorm_stats->ucast_no_buff_pkts);
1881
1882 ADD_64_LE(afex_stats->rx_frames_dropped_hi,
1883 LE32_0,
1884 afex_stats->rx_frames_dropped_lo,
1885 fcoe_q_ustorm_stats->mcast_no_buff_pkts);
1886
1887 ADD_64_LE(afex_stats->rx_frames_dropped_hi,
1888 LE32_0,
1889 afex_stats->rx_frames_dropped_lo,
1890 fcoe_q_ustorm_stats->bcast_no_buff_pkts);
1891
1892 ADD_64_LE(afex_stats->rx_frames_dropped_hi,
1893 LE32_0,
1894 afex_stats->rx_frames_dropped_lo,
1895 fw_fcoe_stat->rx_stat1.fcoe_rx_drop_pkt_cnt);
1896
1897 ADD_64_LE(afex_stats->rx_frames_dropped_hi,
1898 LE32_0,
1899 afex_stats->rx_frames_dropped_lo,
1900 fw_fcoe_stat->rx_stat2.fcoe_rx_drop_pkt_cnt);
1901
1902 ADD_64_LE(afex_stats->tx_unicast_bytes_hi,
1903 LE32_0,
1904 afex_stats->tx_unicast_bytes_lo,
1905 fw_fcoe_stat->tx_stat.fcoe_tx_byte_cnt);
1906
1907 ADD_64_LE(afex_stats->tx_unicast_bytes_hi,
1908 fcoe_q_xstorm_stats->ucast_bytes_sent.hi,
1909 afex_stats->tx_unicast_bytes_lo,
1910 fcoe_q_xstorm_stats->ucast_bytes_sent.lo);
1911
1912 ADD_64_LE(afex_stats->tx_broadcast_bytes_hi,
1913 fcoe_q_xstorm_stats->bcast_bytes_sent.hi,
1914 afex_stats->tx_broadcast_bytes_lo,
1915 fcoe_q_xstorm_stats->bcast_bytes_sent.lo);
1916
1917 ADD_64_LE(afex_stats->tx_multicast_bytes_hi,
1918 fcoe_q_xstorm_stats->mcast_bytes_sent.hi,
1919 afex_stats->tx_multicast_bytes_lo,
1920 fcoe_q_xstorm_stats->mcast_bytes_sent.lo);
1921
1922 ADD_64_LE(afex_stats->tx_unicast_frames_hi,
1923 LE32_0,
1924 afex_stats->tx_unicast_frames_lo,
1925 fw_fcoe_stat->tx_stat.fcoe_tx_pkt_cnt);
1926
1927 ADD_64_LE(afex_stats->tx_unicast_frames_hi,
1928 LE32_0,
1929 afex_stats->tx_unicast_frames_lo,
1930 fcoe_q_xstorm_stats->ucast_pkts_sent);
1931
1932 ADD_64_LE(afex_stats->tx_broadcast_frames_hi,
1933 LE32_0,
1934 afex_stats->tx_broadcast_frames_lo,
1935 fcoe_q_xstorm_stats->bcast_pkts_sent);
1936
1937 ADD_64_LE(afex_stats->tx_multicast_frames_hi,
1938 LE32_0,
1939 afex_stats->tx_multicast_frames_lo,
1940 fcoe_q_xstorm_stats->mcast_pkts_sent);
1941
1942 ADD_64_LE(afex_stats->tx_frames_dropped_hi,
1943 LE32_0,
1944 afex_stats->tx_frames_dropped_lo,
1945 fcoe_q_xstorm_stats->error_drop_pkts);
1946 }
1947
1948	/* if port stats are requested, add them to the PMF
1949	 * stats, since they will be accumulated by the
1950	 * MCP anyway before being sent to the switch
1951	 */
1952 if ((bp->port.pmf) && (stats_type == VICSTATST_UIF_INDEX)) {
1953 ADD_64(afex_stats->rx_frames_dropped_hi,
1954 0,
1955 afex_stats->rx_frames_dropped_lo,
1956 estats->mac_filter_discard);
1957 ADD_64(afex_stats->rx_frames_dropped_hi,
1958 0,
1959 afex_stats->rx_frames_dropped_lo,
1960 estats->brb_truncate_discard);
1961 ADD_64(afex_stats->rx_frames_discarded_hi,
1962 0,
1963 afex_stats->rx_frames_discarded_lo,
1964 estats->mac_discard);
1965 }
1966}
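
/* ADD_64(s_hi, a_hi, s_lo, a_lo) above accumulates 64-bit quantities kept
 * as split 32-bit halves: the low words are added first and a carry is
 * propagated into the high words on wrap-around. A standalone sketch of
 * the same arithmetic (illustrative helper, not the driver macro):
 */
static inline void bnx2x_sketch_add64(u32 *s_hi, u32 a_hi,
				      u32 *s_lo, u32 a_lo)
{
	u32 lo = *s_lo + a_lo;

	*s_hi += a_hi + (lo < a_lo);	/* carry out of the low word */
	*s_lo = lo;
}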
Ariel Eliora3097bd2013-08-28 01:13:04 +03001967
Yuval Mintzdff173d2015-03-23 10:56:14 +02001968int bnx2x_stats_safe_exec(struct bnx2x *bp,
1969 void (func_to_exec)(void *cookie),
1970 void *cookie)
1971{
1972 int cnt = 10, rc = 0;
1973
1974	/* Wait for outstanding statistics to complete [while blocking further
1975	 * requests], then run the supplied function 'safely'.
1976	 */
Yuval Mintzc6e36d82015-06-01 15:08:18 +03001977 rc = down_timeout(&bp->stats_lock, HZ / 10);
1978 if (unlikely(rc)) {
1979 BNX2X_ERR("Failed to take statistics lock for safe execution\n");
1980 goto out_no_lock;
1981 }
Yuval Mintzdff173d2015-03-23 10:56:14 +02001982
Ariel Eliora3097bd2013-08-28 01:13:04 +03001983 bnx2x_stats_comp(bp);
Yuval Mintzdff173d2015-03-23 10:56:14 +02001984 while (bp->stats_pending && cnt--)
1985 if (bnx2x_storm_stats_update(bp))
1986 usleep_range(1000, 2000);
1987 if (bp->stats_pending) {
1988 BNX2X_ERR("Failed to wait for stats pending to clear [possibly FW is stuck]\n");
1989 rc = -EBUSY;
1990 goto out;
1991 }
1992
Ariel Eliora3097bd2013-08-28 01:13:04 +03001993 func_to_exec(cookie);
Yuval Mintzdff173d2015-03-23 10:56:14 +02001994
1995out:
1996	/* No need to restart statistics - if they're enabled, the timer
1997	 * will restart them.
1998 */
Yuval Mintzc6e36d82015-06-01 15:08:18 +03001999 up(&bp->stats_lock);
2000out_no_lock:
Yuval Mintzdff173d2015-03-23 10:56:14 +02002001 return rc;
Ariel Eliora3097bd2013-08-28 01:13:04 +03002002}
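
/* Typical usage: run a callback while statistics traffic is quiesced.
 * A sketch (the callback and helper names are hypothetical):
 */
static void __maybe_unused bnx2x_sketch_zero_stats_cb(void *cookie)
{
	struct bnx2x *bp = cookie;

	/* no stats DMAE or ramrod is in flight here */
	memset(&bp->eth_stats, 0, sizeof(bp->eth_stats));
}

static inline int bnx2x_sketch_zero_stats(struct bnx2x *bp)
{
	return bnx2x_stats_safe_exec(bp, bnx2x_sketch_zero_stats_cb, bp);
}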