/* bnx2x_stats.c: Broadcom Everest network driver.
 *
 * Copyright (c) 2007-2013 Broadcom Corporation
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation.
 *
 * Maintained by: Eilon Greenstein <eilong@broadcom.com>
 * Written by: Eliezer Tamir
 * Based on code from Michael Chan's bnx2 driver
 * UDP CSUM errata workaround by Arik Gendelman
 * Slowpath and fastpath rework by Vladislav Zolotarov
 * Statistics and Link management by Yitchak Gertner
 *
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include "bnx2x_stats.h"
#include "bnx2x_cmn.h"
#include "bnx2x_sriov.h"

/* Statistics */

/*
 * General service functions
 */

static inline long bnx2x_hilo(u32 *hiref)
{
	u32 lo = *(hiref + 1);
#if (BITS_PER_LONG == 64)
	u32 hi = *hiref;

	return HILO_U64(hi, lo);
#else
	return lo;
#endif
}
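
/* Usage sketch (illustrative only, not called by the driver): counters are
 * stored as consecutive {hi, lo} u32 pairs, so bnx2x_hilo() takes a pointer
 * to the hi word. Assuming HILO_U64(hi, lo) composes ((u64)hi << 32) + lo,
 * then for the hypothetical pair
 *
 *	u32 pair[2] = { 0x1, 0x2 };
 *	long val = bnx2x_hilo(pair);
 *
 * val is 0x100000002 on a 64-bit kernel, and just the low word (0x2) when
 * BITS_PER_LONG is 32.
 */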

static inline u16 bnx2x_get_port_stats_dma_len(struct bnx2x *bp)
{
	u16 res = 0;

	/* 'newest' convention - shmem2 contains the size of the port stats */
	if (SHMEM2_HAS(bp, sizeof_port_stats)) {
		u32 size = SHMEM2_RD(bp, sizeof_port_stats);
		if (size)
			res = size;

		/* prevent newer BC from causing buffer overflow */
		if (res > sizeof(struct host_port_stats))
			res = sizeof(struct host_port_stats);
	}

	/* Older convention - all BCs support the port stats' fields up until
	 * the 'not_used' field
	 */
	if (!res) {
		res = offsetof(struct host_port_stats, not_used) + 4;

		/* if PFC stats are supported by the MFW, DMA them as well */
		if (bp->flags & BC_SUPPORTS_PFC_STATS) {
			res += offsetof(struct host_port_stats,
					pfc_frames_rx_lo) -
			       offsetof(struct host_port_stats,
					pfc_frames_tx_hi) + 4;
		}
	}

	res >>= 2;

	WARN_ON(res > 2 * DMAE_LEN32_RD_MAX);
	return res;
}
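
/* Worked example (illustrative, with hypothetical offsets): under the older
 * convention without PFC support, if offsetof(struct host_port_stats,
 * not_used) were 1020, the function would compute
 *
 *	res = 1020 + 4;		(1024 bytes)
 *	res >>= 2;		(256 u32 words)
 *
 * i.e. the value handed back is a length in 32-bit words, which is why it is
 * sanity-checked against 2 * DMAE_LEN32_RD_MAX rather than a byte count.
 */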

/*
 * Init service functions
 */

static void bnx2x_dp_stats(struct bnx2x *bp)
{
	int i;

	DP(BNX2X_MSG_STATS, "dumping stats:\n"
	   "fw_stats_req\n"
	   "    hdr\n"
	   "        cmd_num %d\n"
	   "        reserved0 %d\n"
	   "        drv_stats_counter %d\n"
	   "        reserved1 %d\n"
	   "        stats_counters_addrs %x %x\n",
	   bp->fw_stats_req->hdr.cmd_num,
	   bp->fw_stats_req->hdr.reserved0,
	   bp->fw_stats_req->hdr.drv_stats_counter,
	   bp->fw_stats_req->hdr.reserved1,
	   bp->fw_stats_req->hdr.stats_counters_addrs.hi,
	   bp->fw_stats_req->hdr.stats_counters_addrs.lo);

	for (i = 0; i < bp->fw_stats_req->hdr.cmd_num; i++) {
		DP(BNX2X_MSG_STATS,
		   "query[%d]\n"
		   "    kind %d\n"
		   "    index %d\n"
		   "    funcID %d\n"
		   "    reserved %d\n"
		   "    address %x %x\n",
		   i, bp->fw_stats_req->query[i].kind,
		   bp->fw_stats_req->query[i].index,
		   bp->fw_stats_req->query[i].funcID,
		   bp->fw_stats_req->query[i].reserved,
		   bp->fw_stats_req->query[i].address.hi,
		   bp->fw_stats_req->query[i].address.lo);
	}
}
/* Post the next statistics ramrod. Protect it with the spin lock in
 * order to ensure the strict order between statistics ramrods
 * (each ramrod has a sequence number passed in a
 * bp->fw_stats_req->hdr.drv_stats_counter and ramrods must be
 * sent in order).
 */
static void bnx2x_storm_stats_post(struct bnx2x *bp)
{
	if (!bp->stats_pending) {
		int rc;

		spin_lock_bh(&bp->stats_lock);

		if (bp->stats_pending) {
			spin_unlock_bh(&bp->stats_lock);
			return;
		}

		bp->fw_stats_req->hdr.drv_stats_counter =
			cpu_to_le16(bp->stats_counter++);

		DP(BNX2X_MSG_STATS, "Sending statistics ramrod %d\n",
		   bp->fw_stats_req->hdr.drv_stats_counter);

		/* adjust the ramrod to include VF queues statistics */
		bnx2x_iov_adjust_stats_req(bp);
		bnx2x_dp_stats(bp);

		/* send FW stats ramrod */
		rc = bnx2x_sp_post(bp, RAMROD_CMD_ID_COMMON_STAT_QUERY, 0,
				   U64_HI(bp->fw_stats_req_mapping),
				   U64_LO(bp->fw_stats_req_mapping),
				   NONE_CONNECTION_TYPE);
		if (rc == 0)
			bp->stats_pending = 1;

		spin_unlock_bh(&bp->stats_lock);
	}
}
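
/* Illustrative flow of the sequence counter handled above: if the ramrod
 * just posted carried drv_stats_counter == N, then bp->stats_counter is now
 * N + 1, and bnx2x_storm_stats_validate_counters() below will accept the
 * storm counters only once they read back as N (bp->stats_counter - 1).
 * Posting ramrods out of order would break that equality, which is why the
 * counter increment and the post happen under bp->stats_lock.
 */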

static void bnx2x_hw_stats_post(struct bnx2x *bp)
{
	struct dmae_command *dmae = &bp->stats_dmae;
	u32 *stats_comp = bnx2x_sp(bp, stats_comp);

	*stats_comp = DMAE_COMP_VAL;
	if (CHIP_REV_IS_SLOW(bp))
		return;

	/* Update MCP's statistics if possible */
	if (bp->func_stx)
		memcpy(bnx2x_sp(bp, func_stats), &bp->func_stats,
		       sizeof(bp->func_stats));

	/* loader */
	if (bp->executer_idx) {
		int loader_idx = PMF_DMAE_C(bp);
		u32 opcode = bnx2x_dmae_opcode(bp, DMAE_SRC_PCI, DMAE_DST_GRC,
					       true, DMAE_COMP_GRC);
		opcode = bnx2x_dmae_opcode_clr_src_reset(opcode);

		memset(dmae, 0, sizeof(struct dmae_command));
		dmae->opcode = opcode;
		dmae->src_addr_lo = U64_LO(bnx2x_sp_mapping(bp, dmae[0]));
		dmae->src_addr_hi = U64_HI(bnx2x_sp_mapping(bp, dmae[0]));
		dmae->dst_addr_lo = (DMAE_REG_CMD_MEM +
				     sizeof(struct dmae_command) *
				     (loader_idx + 1)) >> 2;
		dmae->dst_addr_hi = 0;
		dmae->len = sizeof(struct dmae_command) >> 2;
		if (CHIP_IS_E1(bp))
			dmae->len--;
		dmae->comp_addr_lo = dmae_reg_go_c[loader_idx + 1] >> 2;
		dmae->comp_addr_hi = 0;
		dmae->comp_val = 1;

		*stats_comp = 0;
		bnx2x_post_dmae(bp, dmae, loader_idx);

	} else if (bp->func_stx) {
		*stats_comp = 0;
		bnx2x_post_dmae(bp, dmae, INIT_DMAE_C(bp));
	}
}

static int bnx2x_stats_comp(struct bnx2x *bp)
{
	u32 *stats_comp = bnx2x_sp(bp, stats_comp);
	int cnt = 10;

	might_sleep();
	while (*stats_comp != DMAE_COMP_VAL) {
		if (!cnt) {
			BNX2X_ERR("timeout waiting for stats finished\n");
			break;
		}
		cnt--;
		usleep_range(1000, 2000);
	}
	return 1;
}
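
/* Typical pairing (sketch): callers arm the completion word, post the DMAE
 * commands and then poll, e.g. as bnx2x_stats_pmf_update() below does:
 *
 *	*stats_comp = 0;
 *	bnx2x_hw_stats_post(bp);
 *	bnx2x_stats_comp(bp);
 *
 * Note that bnx2x_stats_comp() returns 1 even on timeout, so a slow DMAE
 * completion is logged but treated as best-effort rather than fatal.
 */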

/*
 * Statistics service functions
 */

static void bnx2x_stats_pmf_update(struct bnx2x *bp)
{
	struct dmae_command *dmae;
	u32 opcode;
	int loader_idx = PMF_DMAE_C(bp);
	u32 *stats_comp = bnx2x_sp(bp, stats_comp);

	/* sanity */
	if (!bp->port.pmf || !bp->port.port_stx) {
		BNX2X_ERR("BUG!\n");
		return;
	}

	bp->executer_idx = 0;

	opcode = bnx2x_dmae_opcode(bp, DMAE_SRC_GRC, DMAE_DST_PCI, false, 0);

	dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
	dmae->opcode = bnx2x_dmae_opcode_add_comp(opcode, DMAE_COMP_GRC);
	dmae->src_addr_lo = bp->port.port_stx >> 2;
	dmae->src_addr_hi = 0;
	dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, port_stats));
	dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, port_stats));
	dmae->len = DMAE_LEN32_RD_MAX;
	dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
	dmae->comp_addr_hi = 0;
	dmae->comp_val = 1;

	dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
	dmae->opcode = bnx2x_dmae_opcode_add_comp(opcode, DMAE_COMP_PCI);
	dmae->src_addr_lo = (bp->port.port_stx >> 2) + DMAE_LEN32_RD_MAX;
	dmae->src_addr_hi = 0;
	dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, port_stats) +
				   DMAE_LEN32_RD_MAX * 4);
	dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, port_stats) +
				   DMAE_LEN32_RD_MAX * 4);
	dmae->len = bnx2x_get_port_stats_dma_len(bp) - DMAE_LEN32_RD_MAX;

	dmae->comp_addr_lo = U64_LO(bnx2x_sp_mapping(bp, stats_comp));
	dmae->comp_addr_hi = U64_HI(bnx2x_sp_mapping(bp, stats_comp));
	dmae->comp_val = DMAE_COMP_VAL;

	*stats_comp = 0;
	bnx2x_hw_stats_post(bp);
	bnx2x_stats_comp(bp);
}

static void bnx2x_port_stats_init(struct bnx2x *bp)
{
	struct dmae_command *dmae;
	int port = BP_PORT(bp);
	u32 opcode;
	int loader_idx = PMF_DMAE_C(bp);
	u32 mac_addr;
	u32 *stats_comp = bnx2x_sp(bp, stats_comp);

	/* sanity */
	if (!bp->link_vars.link_up || !bp->port.pmf) {
		BNX2X_ERR("BUG!\n");
		return;
	}

	bp->executer_idx = 0;

	/* MCP */
	opcode = bnx2x_dmae_opcode(bp, DMAE_SRC_PCI, DMAE_DST_GRC,
				   true, DMAE_COMP_GRC);

	if (bp->port.port_stx) {

		dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
		dmae->opcode = opcode;
		dmae->src_addr_lo = U64_LO(bnx2x_sp_mapping(bp, port_stats));
		dmae->src_addr_hi = U64_HI(bnx2x_sp_mapping(bp, port_stats));
		dmae->dst_addr_lo = bp->port.port_stx >> 2;
		dmae->dst_addr_hi = 0;
		dmae->len = bnx2x_get_port_stats_dma_len(bp);
		dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
		dmae->comp_addr_hi = 0;
		dmae->comp_val = 1;
	}

	if (bp->func_stx) {

		dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
		dmae->opcode = opcode;
		dmae->src_addr_lo = U64_LO(bnx2x_sp_mapping(bp, func_stats));
		dmae->src_addr_hi = U64_HI(bnx2x_sp_mapping(bp, func_stats));
		dmae->dst_addr_lo = bp->func_stx >> 2;
		dmae->dst_addr_hi = 0;
		dmae->len = sizeof(struct host_func_stats) >> 2;
		dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
		dmae->comp_addr_hi = 0;
		dmae->comp_val = 1;
	}

	/* MAC */
	opcode = bnx2x_dmae_opcode(bp, DMAE_SRC_GRC, DMAE_DST_PCI,
				   true, DMAE_COMP_GRC);

	/* EMAC is special */
	if (bp->link_vars.mac_type == MAC_TYPE_EMAC) {
		mac_addr = (port ? GRCBASE_EMAC1 : GRCBASE_EMAC0);

		/* EMAC_REG_EMAC_RX_STAT_AC (EMAC_REG_EMAC_RX_STAT_AC_COUNT)*/
		dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
		dmae->opcode = opcode;
		dmae->src_addr_lo = (mac_addr +
				     EMAC_REG_EMAC_RX_STAT_AC) >> 2;
		dmae->src_addr_hi = 0;
		dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, mac_stats));
		dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, mac_stats));
		dmae->len = EMAC_REG_EMAC_RX_STAT_AC_COUNT;
		dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
		dmae->comp_addr_hi = 0;
		dmae->comp_val = 1;

		/* EMAC_REG_EMAC_RX_STAT_AC_28 */
		dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
		dmae->opcode = opcode;
		dmae->src_addr_lo = (mac_addr +
				     EMAC_REG_EMAC_RX_STAT_AC_28) >> 2;
		dmae->src_addr_hi = 0;
		dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, mac_stats) +
		     offsetof(struct emac_stats, rx_stat_falsecarriererrors));
		dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, mac_stats) +
		     offsetof(struct emac_stats, rx_stat_falsecarriererrors));
		dmae->len = 1;
		dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
		dmae->comp_addr_hi = 0;
		dmae->comp_val = 1;

		/* EMAC_REG_EMAC_TX_STAT_AC (EMAC_REG_EMAC_TX_STAT_AC_COUNT)*/
		dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
		dmae->opcode = opcode;
		dmae->src_addr_lo = (mac_addr +
				     EMAC_REG_EMAC_TX_STAT_AC) >> 2;
		dmae->src_addr_hi = 0;
		dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, mac_stats) +
			offsetof(struct emac_stats, tx_stat_ifhcoutoctets));
		dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, mac_stats) +
			offsetof(struct emac_stats, tx_stat_ifhcoutoctets));
		dmae->len = EMAC_REG_EMAC_TX_STAT_AC_COUNT;
		dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
		dmae->comp_addr_hi = 0;
		dmae->comp_val = 1;
	} else {
		u32 tx_src_addr_lo, rx_src_addr_lo;
		u16 rx_len, tx_len;

		/* configure the params according to MAC type */
		switch (bp->link_vars.mac_type) {
		case MAC_TYPE_BMAC:
			mac_addr = (port ? NIG_REG_INGRESS_BMAC1_MEM :
					   NIG_REG_INGRESS_BMAC0_MEM);

			/* BIGMAC_REGISTER_TX_STAT_GTPKT ..
			   BIGMAC_REGISTER_TX_STAT_GTBYT */
			if (CHIP_IS_E1x(bp)) {
				tx_src_addr_lo = (mac_addr +
					BIGMAC_REGISTER_TX_STAT_GTPKT) >> 2;
				tx_len = (8 + BIGMAC_REGISTER_TX_STAT_GTBYT -
					  BIGMAC_REGISTER_TX_STAT_GTPKT) >> 2;
				rx_src_addr_lo = (mac_addr +
					BIGMAC_REGISTER_RX_STAT_GR64) >> 2;
				rx_len = (8 + BIGMAC_REGISTER_RX_STAT_GRIPJ -
					  BIGMAC_REGISTER_RX_STAT_GR64) >> 2;
			} else {
				tx_src_addr_lo = (mac_addr +
					BIGMAC2_REGISTER_TX_STAT_GTPOK) >> 2;
				tx_len = (8 + BIGMAC2_REGISTER_TX_STAT_GTBYT -
					  BIGMAC2_REGISTER_TX_STAT_GTPOK) >> 2;
				rx_src_addr_lo = (mac_addr +
					BIGMAC2_REGISTER_RX_STAT_GR64) >> 2;
				rx_len = (8 + BIGMAC2_REGISTER_RX_STAT_GRIPJ -
					  BIGMAC2_REGISTER_RX_STAT_GR64) >> 2;
			}
			break;

		case MAC_TYPE_UMAC: /* handled by MSTAT */
		case MAC_TYPE_XMAC: /* handled by MSTAT */
		default:
			mac_addr = port ? GRCBASE_MSTAT1 : GRCBASE_MSTAT0;
			tx_src_addr_lo = (mac_addr +
					  MSTAT_REG_TX_STAT_GTXPOK_LO) >> 2;
			rx_src_addr_lo = (mac_addr +
					  MSTAT_REG_RX_STAT_GR64_LO) >> 2;
			tx_len = sizeof(bp->slowpath->
					mac_stats.mstat_stats.stats_tx) >> 2;
			rx_len = sizeof(bp->slowpath->
					mac_stats.mstat_stats.stats_rx) >> 2;
			break;
		}

		/* TX stats */
		dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
		dmae->opcode = opcode;
		dmae->src_addr_lo = tx_src_addr_lo;
		dmae->src_addr_hi = 0;
		dmae->len = tx_len;
		dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, mac_stats));
		dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, mac_stats));
		dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
		dmae->comp_addr_hi = 0;
		dmae->comp_val = 1;

		/* RX stats */
		dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
		dmae->opcode = opcode;
		dmae->src_addr_hi = 0;
		dmae->src_addr_lo = rx_src_addr_lo;
		dmae->dst_addr_lo =
			U64_LO(bnx2x_sp_mapping(bp, mac_stats) + (tx_len << 2));
		dmae->dst_addr_hi =
			U64_HI(bnx2x_sp_mapping(bp, mac_stats) + (tx_len << 2));
		dmae->len = rx_len;
		dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
		dmae->comp_addr_hi = 0;
		dmae->comp_val = 1;
	}

	/* NIG */
	if (!CHIP_IS_E3(bp)) {
		dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
		dmae->opcode = opcode;
		dmae->src_addr_lo = (port ? NIG_REG_STAT1_EGRESS_MAC_PKT0 :
					    NIG_REG_STAT0_EGRESS_MAC_PKT0) >> 2;
		dmae->src_addr_hi = 0;
		dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, nig_stats) +
				offsetof(struct nig_stats, egress_mac_pkt0_lo));
		dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, nig_stats) +
				offsetof(struct nig_stats, egress_mac_pkt0_lo));
		dmae->len = (2*sizeof(u32)) >> 2;
		dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
		dmae->comp_addr_hi = 0;
		dmae->comp_val = 1;

		dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
		dmae->opcode = opcode;
		dmae->src_addr_lo = (port ? NIG_REG_STAT1_EGRESS_MAC_PKT1 :
					    NIG_REG_STAT0_EGRESS_MAC_PKT1) >> 2;
		dmae->src_addr_hi = 0;
		dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, nig_stats) +
				offsetof(struct nig_stats, egress_mac_pkt1_lo));
		dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, nig_stats) +
				offsetof(struct nig_stats, egress_mac_pkt1_lo));
		dmae->len = (2*sizeof(u32)) >> 2;
		dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
		dmae->comp_addr_hi = 0;
		dmae->comp_val = 1;
	}

	dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
	dmae->opcode = bnx2x_dmae_opcode(bp, DMAE_SRC_GRC, DMAE_DST_PCI,
					 true, DMAE_COMP_PCI);
	dmae->src_addr_lo = (port ? NIG_REG_STAT1_BRB_DISCARD :
				    NIG_REG_STAT0_BRB_DISCARD) >> 2;
	dmae->src_addr_hi = 0;
	dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, nig_stats));
	dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, nig_stats));
	dmae->len = (sizeof(struct nig_stats) - 4*sizeof(u32)) >> 2;

	dmae->comp_addr_lo = U64_LO(bnx2x_sp_mapping(bp, stats_comp));
	dmae->comp_addr_hi = U64_HI(bnx2x_sp_mapping(bp, stats_comp));
	dmae->comp_val = DMAE_COMP_VAL;

	*stats_comp = 0;
}

static void bnx2x_func_stats_init(struct bnx2x *bp)
{
	struct dmae_command *dmae = &bp->stats_dmae;
	u32 *stats_comp = bnx2x_sp(bp, stats_comp);

	/* sanity */
	if (!bp->func_stx) {
		BNX2X_ERR("BUG!\n");
		return;
	}

	bp->executer_idx = 0;
	memset(dmae, 0, sizeof(struct dmae_command));

	dmae->opcode = bnx2x_dmae_opcode(bp, DMAE_SRC_PCI, DMAE_DST_GRC,
					 true, DMAE_COMP_PCI);
	dmae->src_addr_lo = U64_LO(bnx2x_sp_mapping(bp, func_stats));
	dmae->src_addr_hi = U64_HI(bnx2x_sp_mapping(bp, func_stats));
	dmae->dst_addr_lo = bp->func_stx >> 2;
	dmae->dst_addr_hi = 0;
	dmae->len = sizeof(struct host_func_stats) >> 2;
	dmae->comp_addr_lo = U64_LO(bnx2x_sp_mapping(bp, stats_comp));
	dmae->comp_addr_hi = U64_HI(bnx2x_sp_mapping(bp, stats_comp));
	dmae->comp_val = DMAE_COMP_VAL;

	*stats_comp = 0;
}

static void bnx2x_stats_start(struct bnx2x *bp)
{
	/* VFs travel through here as part of the statistics FSM, but no
	 * action is required
	 */
	if (IS_VF(bp))
		return;

	if (bp->port.pmf)
		bnx2x_port_stats_init(bp);

	else if (bp->func_stx)
		bnx2x_func_stats_init(bp);

	bnx2x_hw_stats_post(bp);
	bnx2x_storm_stats_post(bp);
}

static void bnx2x_stats_pmf_start(struct bnx2x *bp)
{
	bnx2x_stats_comp(bp);
	bnx2x_stats_pmf_update(bp);
	bnx2x_stats_start(bp);
}

static void bnx2x_stats_restart(struct bnx2x *bp)
{
	/* VFs travel through here as part of the statistics FSM, but no
	 * action is required
	 */
	if (IS_VF(bp))
		return;
	bnx2x_stats_comp(bp);
	bnx2x_stats_start(bp);
}

static void bnx2x_bmac_stats_update(struct bnx2x *bp)
{
	struct host_port_stats *pstats = bnx2x_sp(bp, port_stats);
	struct bnx2x_eth_stats *estats = &bp->eth_stats;
	struct {
		u32 lo;
		u32 hi;
	} diff;

	if (CHIP_IS_E1x(bp)) {
		struct bmac1_stats *new = bnx2x_sp(bp, mac_stats.bmac1_stats);

		/* the macros below will use "bmac1_stats" type */
		UPDATE_STAT64(rx_stat_grerb, rx_stat_ifhcinbadoctets);
		UPDATE_STAT64(rx_stat_grfcs, rx_stat_dot3statsfcserrors);
		UPDATE_STAT64(rx_stat_grund, rx_stat_etherstatsundersizepkts);
		UPDATE_STAT64(rx_stat_grovr, rx_stat_dot3statsframestoolong);
		UPDATE_STAT64(rx_stat_grfrg, rx_stat_etherstatsfragments);
		UPDATE_STAT64(rx_stat_grjbr, rx_stat_etherstatsjabbers);
		UPDATE_STAT64(rx_stat_grxcf, rx_stat_maccontrolframesreceived);
		UPDATE_STAT64(rx_stat_grxpf, rx_stat_xoffstateentered);
		UPDATE_STAT64(rx_stat_grxpf, rx_stat_mac_xpf);

		UPDATE_STAT64(tx_stat_gtxpf, tx_stat_outxoffsent);
		UPDATE_STAT64(tx_stat_gtxpf, tx_stat_flowcontroldone);
		UPDATE_STAT64(tx_stat_gt64, tx_stat_etherstatspkts64octets);
		UPDATE_STAT64(tx_stat_gt127,
				tx_stat_etherstatspkts65octetsto127octets);
		UPDATE_STAT64(tx_stat_gt255,
				tx_stat_etherstatspkts128octetsto255octets);
		UPDATE_STAT64(tx_stat_gt511,
				tx_stat_etherstatspkts256octetsto511octets);
		UPDATE_STAT64(tx_stat_gt1023,
				tx_stat_etherstatspkts512octetsto1023octets);
		UPDATE_STAT64(tx_stat_gt1518,
				tx_stat_etherstatspkts1024octetsto1522octets);
		UPDATE_STAT64(tx_stat_gt2047, tx_stat_mac_2047);
		UPDATE_STAT64(tx_stat_gt4095, tx_stat_mac_4095);
		UPDATE_STAT64(tx_stat_gt9216, tx_stat_mac_9216);
		UPDATE_STAT64(tx_stat_gt16383, tx_stat_mac_16383);
		UPDATE_STAT64(tx_stat_gterr,
				tx_stat_dot3statsinternalmactransmiterrors);
		UPDATE_STAT64(tx_stat_gtufl, tx_stat_mac_ufl);

	} else {
		struct bmac2_stats *new = bnx2x_sp(bp, mac_stats.bmac2_stats);

		/* the macros below will use "bmac2_stats" type */
		UPDATE_STAT64(rx_stat_grerb, rx_stat_ifhcinbadoctets);
		UPDATE_STAT64(rx_stat_grfcs, rx_stat_dot3statsfcserrors);
		UPDATE_STAT64(rx_stat_grund, rx_stat_etherstatsundersizepkts);
		UPDATE_STAT64(rx_stat_grovr, rx_stat_dot3statsframestoolong);
		UPDATE_STAT64(rx_stat_grfrg, rx_stat_etherstatsfragments);
		UPDATE_STAT64(rx_stat_grjbr, rx_stat_etherstatsjabbers);
		UPDATE_STAT64(rx_stat_grxcf, rx_stat_maccontrolframesreceived);
		UPDATE_STAT64(rx_stat_grxpf, rx_stat_xoffstateentered);
		UPDATE_STAT64(rx_stat_grxpf, rx_stat_mac_xpf);
		UPDATE_STAT64(tx_stat_gtxpf, tx_stat_outxoffsent);
		UPDATE_STAT64(tx_stat_gtxpf, tx_stat_flowcontroldone);
		UPDATE_STAT64(tx_stat_gt64, tx_stat_etherstatspkts64octets);
		UPDATE_STAT64(tx_stat_gt127,
				tx_stat_etherstatspkts65octetsto127octets);
		UPDATE_STAT64(tx_stat_gt255,
				tx_stat_etherstatspkts128octetsto255octets);
		UPDATE_STAT64(tx_stat_gt511,
				tx_stat_etherstatspkts256octetsto511octets);
		UPDATE_STAT64(tx_stat_gt1023,
				tx_stat_etherstatspkts512octetsto1023octets);
		UPDATE_STAT64(tx_stat_gt1518,
				tx_stat_etherstatspkts1024octetsto1522octets);
		UPDATE_STAT64(tx_stat_gt2047, tx_stat_mac_2047);
		UPDATE_STAT64(tx_stat_gt4095, tx_stat_mac_4095);
		UPDATE_STAT64(tx_stat_gt9216, tx_stat_mac_9216);
		UPDATE_STAT64(tx_stat_gt16383, tx_stat_mac_16383);
		UPDATE_STAT64(tx_stat_gterr,
				tx_stat_dot3statsinternalmactransmiterrors);
		UPDATE_STAT64(tx_stat_gtufl, tx_stat_mac_ufl);

		/* collect PFC stats */
		pstats->pfc_frames_tx_hi = new->tx_stat_gtpp_hi;
		pstats->pfc_frames_tx_lo = new->tx_stat_gtpp_lo;

		pstats->pfc_frames_rx_hi = new->rx_stat_grpp_hi;
		pstats->pfc_frames_rx_lo = new->rx_stat_grpp_lo;
	}

	estats->pause_frames_received_hi =
				pstats->mac_stx[1].rx_stat_mac_xpf_hi;
	estats->pause_frames_received_lo =
				pstats->mac_stx[1].rx_stat_mac_xpf_lo;

	estats->pause_frames_sent_hi =
				pstats->mac_stx[1].tx_stat_outxoffsent_hi;
	estats->pause_frames_sent_lo =
				pstats->mac_stx[1].tx_stat_outxoffsent_lo;

	estats->pfc_frames_received_hi =
				pstats->pfc_frames_rx_hi;
	estats->pfc_frames_received_lo =
				pstats->pfc_frames_rx_lo;
	estats->pfc_frames_sent_hi =
				pstats->pfc_frames_tx_hi;
	estats->pfc_frames_sent_lo =
				pstats->pfc_frames_tx_lo;
}
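
/* A note on the helpers used above (a sketch of their expected semantics;
 * the macros themselves live in the bnx2x headers): UPDATE_STAT64() diffs
 * the freshly DMAed MAC counter against the previously latched value and
 * accumulates the delta into the matching mac_stx field, while ADD_64()
 * performs a 64-bit addition over split {hi, lo} u32 words, carrying from
 * lo into hi, roughly:
 *
 *	s_lo += a_lo;
 *	s_hi += a_hi + ((s_lo < a_lo) ? 1 : 0);
 */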

static void bnx2x_mstat_stats_update(struct bnx2x *bp)
{
	struct host_port_stats *pstats = bnx2x_sp(bp, port_stats);
	struct bnx2x_eth_stats *estats = &bp->eth_stats;

	struct mstat_stats *new = bnx2x_sp(bp, mac_stats.mstat_stats);

	ADD_STAT64(stats_rx.rx_grerb, rx_stat_ifhcinbadoctets);
	ADD_STAT64(stats_rx.rx_grfcs, rx_stat_dot3statsfcserrors);
	ADD_STAT64(stats_rx.rx_grund, rx_stat_etherstatsundersizepkts);
	ADD_STAT64(stats_rx.rx_grovr, rx_stat_dot3statsframestoolong);
	ADD_STAT64(stats_rx.rx_grfrg, rx_stat_etherstatsfragments);
	ADD_STAT64(stats_rx.rx_grxcf, rx_stat_maccontrolframesreceived);
	ADD_STAT64(stats_rx.rx_grxpf, rx_stat_xoffstateentered);
	ADD_STAT64(stats_rx.rx_grxpf, rx_stat_mac_xpf);
	ADD_STAT64(stats_tx.tx_gtxpf, tx_stat_outxoffsent);
	ADD_STAT64(stats_tx.tx_gtxpf, tx_stat_flowcontroldone);

	/* collect pfc stats */
	ADD_64(pstats->pfc_frames_tx_hi, new->stats_tx.tx_gtxpp_hi,
	       pstats->pfc_frames_tx_lo, new->stats_tx.tx_gtxpp_lo);
	ADD_64(pstats->pfc_frames_rx_hi, new->stats_rx.rx_grxpp_hi,
	       pstats->pfc_frames_rx_lo, new->stats_rx.rx_grxpp_lo);

	ADD_STAT64(stats_tx.tx_gt64, tx_stat_etherstatspkts64octets);
	ADD_STAT64(stats_tx.tx_gt127,
			tx_stat_etherstatspkts65octetsto127octets);
	ADD_STAT64(stats_tx.tx_gt255,
			tx_stat_etherstatspkts128octetsto255octets);
	ADD_STAT64(stats_tx.tx_gt511,
			tx_stat_etherstatspkts256octetsto511octets);
	ADD_STAT64(stats_tx.tx_gt1023,
			tx_stat_etherstatspkts512octetsto1023octets);
	ADD_STAT64(stats_tx.tx_gt1518,
			tx_stat_etherstatspkts1024octetsto1522octets);
	ADD_STAT64(stats_tx.tx_gt2047, tx_stat_mac_2047);

	ADD_STAT64(stats_tx.tx_gt4095, tx_stat_mac_4095);
	ADD_STAT64(stats_tx.tx_gt9216, tx_stat_mac_9216);
	ADD_STAT64(stats_tx.tx_gt16383, tx_stat_mac_16383);

	ADD_STAT64(stats_tx.tx_gterr,
			tx_stat_dot3statsinternalmactransmiterrors);
	ADD_STAT64(stats_tx.tx_gtufl, tx_stat_mac_ufl);

	estats->etherstatspkts1024octetsto1522octets_hi =
	    pstats->mac_stx[1].tx_stat_etherstatspkts1024octetsto1522octets_hi;
	estats->etherstatspkts1024octetsto1522octets_lo =
	    pstats->mac_stx[1].tx_stat_etherstatspkts1024octetsto1522octets_lo;

	estats->etherstatspktsover1522octets_hi =
	    pstats->mac_stx[1].tx_stat_mac_2047_hi;
	estats->etherstatspktsover1522octets_lo =
	    pstats->mac_stx[1].tx_stat_mac_2047_lo;

	ADD_64(estats->etherstatspktsover1522octets_hi,
	       pstats->mac_stx[1].tx_stat_mac_4095_hi,
	       estats->etherstatspktsover1522octets_lo,
	       pstats->mac_stx[1].tx_stat_mac_4095_lo);

	ADD_64(estats->etherstatspktsover1522octets_hi,
	       pstats->mac_stx[1].tx_stat_mac_9216_hi,
	       estats->etherstatspktsover1522octets_lo,
	       pstats->mac_stx[1].tx_stat_mac_9216_lo);

	ADD_64(estats->etherstatspktsover1522octets_hi,
	       pstats->mac_stx[1].tx_stat_mac_16383_hi,
	       estats->etherstatspktsover1522octets_lo,
	       pstats->mac_stx[1].tx_stat_mac_16383_lo);

	estats->pause_frames_received_hi =
				pstats->mac_stx[1].rx_stat_mac_xpf_hi;
	estats->pause_frames_received_lo =
				pstats->mac_stx[1].rx_stat_mac_xpf_lo;

	estats->pause_frames_sent_hi =
				pstats->mac_stx[1].tx_stat_outxoffsent_hi;
	estats->pause_frames_sent_lo =
				pstats->mac_stx[1].tx_stat_outxoffsent_lo;

	estats->pfc_frames_received_hi =
				pstats->pfc_frames_rx_hi;
	estats->pfc_frames_received_lo =
				pstats->pfc_frames_rx_lo;
	estats->pfc_frames_sent_hi =
				pstats->pfc_frames_tx_hi;
	estats->pfc_frames_sent_lo =
				pstats->pfc_frames_tx_lo;
}

static void bnx2x_emac_stats_update(struct bnx2x *bp)
{
	struct emac_stats *new = bnx2x_sp(bp, mac_stats.emac_stats);
	struct host_port_stats *pstats = bnx2x_sp(bp, port_stats);
	struct bnx2x_eth_stats *estats = &bp->eth_stats;

	UPDATE_EXTEND_STAT(rx_stat_ifhcinbadoctets);
	UPDATE_EXTEND_STAT(tx_stat_ifhcoutbadoctets);
	UPDATE_EXTEND_STAT(rx_stat_dot3statsfcserrors);
	UPDATE_EXTEND_STAT(rx_stat_dot3statsalignmenterrors);
	UPDATE_EXTEND_STAT(rx_stat_dot3statscarriersenseerrors);
	UPDATE_EXTEND_STAT(rx_stat_falsecarriererrors);
	UPDATE_EXTEND_STAT(rx_stat_etherstatsundersizepkts);
	UPDATE_EXTEND_STAT(rx_stat_dot3statsframestoolong);
	UPDATE_EXTEND_STAT(rx_stat_etherstatsfragments);
	UPDATE_EXTEND_STAT(rx_stat_etherstatsjabbers);
	UPDATE_EXTEND_STAT(rx_stat_maccontrolframesreceived);
	UPDATE_EXTEND_STAT(rx_stat_xoffstateentered);
	UPDATE_EXTEND_STAT(rx_stat_xonpauseframesreceived);
	UPDATE_EXTEND_STAT(rx_stat_xoffpauseframesreceived);
	UPDATE_EXTEND_STAT(tx_stat_outxonsent);
	UPDATE_EXTEND_STAT(tx_stat_outxoffsent);
	UPDATE_EXTEND_STAT(tx_stat_flowcontroldone);
	UPDATE_EXTEND_STAT(tx_stat_etherstatscollisions);
	UPDATE_EXTEND_STAT(tx_stat_dot3statssinglecollisionframes);
	UPDATE_EXTEND_STAT(tx_stat_dot3statsmultiplecollisionframes);
	UPDATE_EXTEND_STAT(tx_stat_dot3statsdeferredtransmissions);
	UPDATE_EXTEND_STAT(tx_stat_dot3statsexcessivecollisions);
	UPDATE_EXTEND_STAT(tx_stat_dot3statslatecollisions);
	UPDATE_EXTEND_STAT(tx_stat_etherstatspkts64octets);
	UPDATE_EXTEND_STAT(tx_stat_etherstatspkts65octetsto127octets);
	UPDATE_EXTEND_STAT(tx_stat_etherstatspkts128octetsto255octets);
	UPDATE_EXTEND_STAT(tx_stat_etherstatspkts256octetsto511octets);
	UPDATE_EXTEND_STAT(tx_stat_etherstatspkts512octetsto1023octets);
	UPDATE_EXTEND_STAT(tx_stat_etherstatspkts1024octetsto1522octets);
	UPDATE_EXTEND_STAT(tx_stat_etherstatspktsover1522octets);
	UPDATE_EXTEND_STAT(tx_stat_dot3statsinternalmactransmiterrors);

	estats->pause_frames_received_hi =
			pstats->mac_stx[1].rx_stat_xonpauseframesreceived_hi;
	estats->pause_frames_received_lo =
			pstats->mac_stx[1].rx_stat_xonpauseframesreceived_lo;
	ADD_64(estats->pause_frames_received_hi,
	       pstats->mac_stx[1].rx_stat_xoffpauseframesreceived_hi,
	       estats->pause_frames_received_lo,
	       pstats->mac_stx[1].rx_stat_xoffpauseframesreceived_lo);

	estats->pause_frames_sent_hi =
			pstats->mac_stx[1].tx_stat_outxonsent_hi;
	estats->pause_frames_sent_lo =
			pstats->mac_stx[1].tx_stat_outxonsent_lo;
	ADD_64(estats->pause_frames_sent_hi,
	       pstats->mac_stx[1].tx_stat_outxoffsent_hi,
	       estats->pause_frames_sent_lo,
	       pstats->mac_stx[1].tx_stat_outxoffsent_lo);
}

static int bnx2x_hw_stats_update(struct bnx2x *bp)
{
	struct nig_stats *new = bnx2x_sp(bp, nig_stats);
	struct nig_stats *old = &(bp->port.old_nig_stats);
	struct host_port_stats *pstats = bnx2x_sp(bp, port_stats);
	struct bnx2x_eth_stats *estats = &bp->eth_stats;
	struct {
		u32 lo;
		u32 hi;
	} diff;

	switch (bp->link_vars.mac_type) {
	case MAC_TYPE_BMAC:
		bnx2x_bmac_stats_update(bp);
		break;

	case MAC_TYPE_EMAC:
		bnx2x_emac_stats_update(bp);
		break;

	case MAC_TYPE_UMAC:
	case MAC_TYPE_XMAC:
		bnx2x_mstat_stats_update(bp);
		break;

	case MAC_TYPE_NONE: /* unreached */
		DP(BNX2X_MSG_STATS,
		   "stats updated by DMAE but no MAC active\n");
		return -1;

	default: /* unreached */
		BNX2X_ERR("Unknown MAC type\n");
	}

	ADD_EXTEND_64(pstats->brb_drop_hi, pstats->brb_drop_lo,
		      new->brb_discard - old->brb_discard);
	ADD_EXTEND_64(estats->brb_truncate_hi, estats->brb_truncate_lo,
		      new->brb_truncate - old->brb_truncate);

	if (!CHIP_IS_E3(bp)) {
		UPDATE_STAT64_NIG(egress_mac_pkt0,
					etherstatspkts1024octetsto1522octets);
		UPDATE_STAT64_NIG(egress_mac_pkt1,
					etherstatspktsover1522octets);
	}

	memcpy(old, new, sizeof(struct nig_stats));

	memcpy(&(estats->rx_stat_ifhcinbadoctets_hi), &(pstats->mac_stx[1]),
	       sizeof(struct mac_stx));
	estats->brb_drop_hi = pstats->brb_drop_hi;
	estats->brb_drop_lo = pstats->brb_drop_lo;

	pstats->host_port_stats_counter++;

	if (CHIP_IS_E3(bp)) {
		u32 lpi_reg = BP_PORT(bp) ? MISC_REG_CPMU_LP_SM_ENT_CNT_P1
					  : MISC_REG_CPMU_LP_SM_ENT_CNT_P0;
		estats->eee_tx_lpi += REG_RD(bp, lpi_reg);
	}

	if (!BP_NOMCP(bp)) {
		u32 nig_timer_max =
			SHMEM_RD(bp, port_mb[BP_PORT(bp)].stat_nig_timer);
		if (nig_timer_max != estats->nig_timer_max) {
			estats->nig_timer_max = nig_timer_max;
			BNX2X_ERR("NIG timer max (%u)\n",
				  estats->nig_timer_max);
		}
	}

	return 0;
}

static int bnx2x_storm_stats_validate_counters(struct bnx2x *bp)
{
	struct stats_counter *counters = &bp->fw_stats_data->storm_counters;
	u16 cur_stats_counter;
	/* Make sure we use the value of the counter
	 * used for sending the last stats ramrod.
	 */
	spin_lock_bh(&bp->stats_lock);
	cur_stats_counter = bp->stats_counter - 1;
	spin_unlock_bh(&bp->stats_lock);

	/* are storm stats valid? */
	if (le16_to_cpu(counters->xstats_counter) != cur_stats_counter) {
		DP(BNX2X_MSG_STATS,
		   "stats not updated by xstorm, xstorm counter (0x%x) != stats_counter (0x%x)\n",
		   le16_to_cpu(counters->xstats_counter), bp->stats_counter);
		return -EAGAIN;
	}

	if (le16_to_cpu(counters->ustats_counter) != cur_stats_counter) {
		DP(BNX2X_MSG_STATS,
		   "stats not updated by ustorm, ustorm counter (0x%x) != stats_counter (0x%x)\n",
		   le16_to_cpu(counters->ustats_counter), bp->stats_counter);
		return -EAGAIN;
	}

	if (le16_to_cpu(counters->cstats_counter) != cur_stats_counter) {
		DP(BNX2X_MSG_STATS,
		   "stats not updated by cstorm, cstorm counter (0x%x) != stats_counter (0x%x)\n",
		   le16_to_cpu(counters->cstats_counter), bp->stats_counter);
		return -EAGAIN;
	}

	if (le16_to_cpu(counters->tstats_counter) != cur_stats_counter) {
		DP(BNX2X_MSG_STATS,
		   "stats not updated by tstorm, tstorm counter (0x%x) != stats_counter (0x%x)\n",
		   le16_to_cpu(counters->tstats_counter), bp->stats_counter);
		return -EAGAIN;
	}
	return 0;
}

static int bnx2x_storm_stats_update(struct bnx2x *bp)
{
	struct tstorm_per_port_stats *tport =
		&bp->fw_stats_data->port.tstorm_port_statistics;
	struct tstorm_per_pf_stats *tfunc =
		&bp->fw_stats_data->pf.tstorm_pf_statistics;
	struct host_func_stats *fstats = &bp->func_stats;
	struct bnx2x_eth_stats *estats = &bp->eth_stats;
	struct bnx2x_eth_stats_old *estats_old = &bp->eth_stats_old;
	int i;

	/* vfs stat counter is managed by pf */
	if (IS_PF(bp) && bnx2x_storm_stats_validate_counters(bp))
		return -EAGAIN;

	estats->error_bytes_received_hi = 0;
	estats->error_bytes_received_lo = 0;

	for_each_eth_queue(bp, i) {
		struct bnx2x_fastpath *fp = &bp->fp[i];
		struct tstorm_per_queue_stats *tclient =
			&bp->fw_stats_data->queue_stats[i].
			tstorm_queue_statistics;
		struct tstorm_per_queue_stats *old_tclient =
			&bnx2x_fp_stats(bp, fp)->old_tclient;
		struct ustorm_per_queue_stats *uclient =
			&bp->fw_stats_data->queue_stats[i].
			ustorm_queue_statistics;
		struct ustorm_per_queue_stats *old_uclient =
			&bnx2x_fp_stats(bp, fp)->old_uclient;
		struct xstorm_per_queue_stats *xclient =
			&bp->fw_stats_data->queue_stats[i].
			xstorm_queue_statistics;
		struct xstorm_per_queue_stats *old_xclient =
			&bnx2x_fp_stats(bp, fp)->old_xclient;
		struct bnx2x_eth_q_stats *qstats =
			&bnx2x_fp_stats(bp, fp)->eth_q_stats;
		struct bnx2x_eth_q_stats_old *qstats_old =
			&bnx2x_fp_stats(bp, fp)->eth_q_stats_old;

		u32 diff;

		DP(BNX2X_MSG_STATS, "queue[%d]: ucast_sent 0x%x, bcast_sent 0x%x mcast_sent 0x%x\n",
		   i, xclient->ucast_pkts_sent,
		   xclient->bcast_pkts_sent, xclient->mcast_pkts_sent);

		DP(BNX2X_MSG_STATS, "---------------\n");

		UPDATE_QSTAT(tclient->rcv_bcast_bytes,
			     total_broadcast_bytes_received);
		UPDATE_QSTAT(tclient->rcv_mcast_bytes,
			     total_multicast_bytes_received);
		UPDATE_QSTAT(tclient->rcv_ucast_bytes,
			     total_unicast_bytes_received);

		/* sum unicast/multicast/broadcast into
		 * total_bytes_received
		 */
		qstats->total_bytes_received_hi =
			qstats->total_broadcast_bytes_received_hi;
		qstats->total_bytes_received_lo =
			qstats->total_broadcast_bytes_received_lo;

		ADD_64(qstats->total_bytes_received_hi,
		       qstats->total_multicast_bytes_received_hi,
		       qstats->total_bytes_received_lo,
		       qstats->total_multicast_bytes_received_lo);

		ADD_64(qstats->total_bytes_received_hi,
		       qstats->total_unicast_bytes_received_hi,
		       qstats->total_bytes_received_lo,
		       qstats->total_unicast_bytes_received_lo);

		qstats->valid_bytes_received_hi =
					qstats->total_bytes_received_hi;
		qstats->valid_bytes_received_lo =
					qstats->total_bytes_received_lo;

		UPDATE_EXTEND_TSTAT(rcv_ucast_pkts,
					total_unicast_packets_received);
		UPDATE_EXTEND_TSTAT(rcv_mcast_pkts,
					total_multicast_packets_received);
		UPDATE_EXTEND_TSTAT(rcv_bcast_pkts,
					total_broadcast_packets_received);
		UPDATE_EXTEND_E_TSTAT(pkts_too_big_discard,
				      etherstatsoverrsizepkts, 32);
		UPDATE_EXTEND_E_TSTAT(no_buff_discard, no_buff_discard, 16);

		SUB_EXTEND_USTAT(ucast_no_buff_pkts,
					total_unicast_packets_received);
		SUB_EXTEND_USTAT(mcast_no_buff_pkts,
					total_multicast_packets_received);
		SUB_EXTEND_USTAT(bcast_no_buff_pkts,
					total_broadcast_packets_received);
		UPDATE_EXTEND_E_USTAT(ucast_no_buff_pkts, no_buff_discard);
		UPDATE_EXTEND_E_USTAT(mcast_no_buff_pkts, no_buff_discard);
		UPDATE_EXTEND_E_USTAT(bcast_no_buff_pkts, no_buff_discard);

		UPDATE_QSTAT(xclient->bcast_bytes_sent,
			     total_broadcast_bytes_transmitted);
		UPDATE_QSTAT(xclient->mcast_bytes_sent,
			     total_multicast_bytes_transmitted);
		UPDATE_QSTAT(xclient->ucast_bytes_sent,
			     total_unicast_bytes_transmitted);

		/* sum unicast/multicast/broadcast into
		 * total_bytes_transmitted
		 */
		qstats->total_bytes_transmitted_hi =
				qstats->total_unicast_bytes_transmitted_hi;
		qstats->total_bytes_transmitted_lo =
				qstats->total_unicast_bytes_transmitted_lo;

		ADD_64(qstats->total_bytes_transmitted_hi,
		       qstats->total_broadcast_bytes_transmitted_hi,
		       qstats->total_bytes_transmitted_lo,
		       qstats->total_broadcast_bytes_transmitted_lo);

		ADD_64(qstats->total_bytes_transmitted_hi,
		       qstats->total_multicast_bytes_transmitted_hi,
		       qstats->total_bytes_transmitted_lo,
		       qstats->total_multicast_bytes_transmitted_lo);

		UPDATE_EXTEND_XSTAT(ucast_pkts_sent,
					total_unicast_packets_transmitted);
		UPDATE_EXTEND_XSTAT(mcast_pkts_sent,
					total_multicast_packets_transmitted);
		UPDATE_EXTEND_XSTAT(bcast_pkts_sent,
					total_broadcast_packets_transmitted);

		UPDATE_EXTEND_TSTAT(checksum_discard,
				total_packets_received_checksum_discarded);
		UPDATE_EXTEND_TSTAT(ttl0_discard,
				total_packets_received_ttl0_discarded);

		UPDATE_EXTEND_XSTAT(error_drop_pkts,
				total_transmitted_dropped_packets_error);

		/* TPA aggregations completed */
		UPDATE_EXTEND_E_USTAT(coalesced_events, total_tpa_aggregations);
		/* Number of network frames aggregated by TPA */
		UPDATE_EXTEND_E_USTAT(coalesced_pkts,
				      total_tpa_aggregated_frames);
		/* Total number of bytes in completed TPA aggregations */
		UPDATE_QSTAT(uclient->coalesced_bytes, total_tpa_bytes);

		UPDATE_ESTAT_QSTAT_64(total_tpa_bytes);

		UPDATE_FSTAT_QSTAT(total_bytes_received);
		UPDATE_FSTAT_QSTAT(total_bytes_transmitted);
		UPDATE_FSTAT_QSTAT(total_unicast_packets_received);
		UPDATE_FSTAT_QSTAT(total_multicast_packets_received);
		UPDATE_FSTAT_QSTAT(total_broadcast_packets_received);
		UPDATE_FSTAT_QSTAT(total_unicast_packets_transmitted);
		UPDATE_FSTAT_QSTAT(total_multicast_packets_transmitted);
		UPDATE_FSTAT_QSTAT(total_broadcast_packets_transmitted);
		UPDATE_FSTAT_QSTAT(valid_bytes_received);
	}

	ADD_64(estats->total_bytes_received_hi,
	       estats->rx_stat_ifhcinbadoctets_hi,
	       estats->total_bytes_received_lo,
	       estats->rx_stat_ifhcinbadoctets_lo);

	ADD_64_LE(estats->total_bytes_received_hi,
		  tfunc->rcv_error_bytes.hi,
		  estats->total_bytes_received_lo,
		  tfunc->rcv_error_bytes.lo);

	ADD_64_LE(estats->error_bytes_received_hi,
		  tfunc->rcv_error_bytes.hi,
		  estats->error_bytes_received_lo,
		  tfunc->rcv_error_bytes.lo);

	UPDATE_ESTAT(etherstatsoverrsizepkts, rx_stat_dot3statsframestoolong);

	ADD_64(estats->error_bytes_received_hi,
	       estats->rx_stat_ifhcinbadoctets_hi,
	       estats->error_bytes_received_lo,
	       estats->rx_stat_ifhcinbadoctets_lo);

	if (bp->port.pmf) {
		struct bnx2x_fw_port_stats_old *fwstats = &bp->fw_stats_old;
		UPDATE_FW_STAT(mac_filter_discard);
		UPDATE_FW_STAT(mf_tag_discard);
		UPDATE_FW_STAT(brb_truncate_discard);
		UPDATE_FW_STAT(mac_discard);
	}

	fstats->host_func_stats_start = ++fstats->host_func_stats_end;

	bp->stats_pending = 0;

	return 0;
}

static void bnx2x_net_stats_update(struct bnx2x *bp)
{
	struct bnx2x_eth_stats *estats = &bp->eth_stats;
	struct net_device_stats *nstats = &bp->dev->stats;
	unsigned long tmp;
	int i;

	nstats->rx_packets =
		bnx2x_hilo(&estats->total_unicast_packets_received_hi) +
		bnx2x_hilo(&estats->total_multicast_packets_received_hi) +
		bnx2x_hilo(&estats->total_broadcast_packets_received_hi);

	nstats->tx_packets =
		bnx2x_hilo(&estats->total_unicast_packets_transmitted_hi) +
		bnx2x_hilo(&estats->total_multicast_packets_transmitted_hi) +
		bnx2x_hilo(&estats->total_broadcast_packets_transmitted_hi);

	nstats->rx_bytes = bnx2x_hilo(&estats->total_bytes_received_hi);

	nstats->tx_bytes = bnx2x_hilo(&estats->total_bytes_transmitted_hi);

	tmp = estats->mac_discard;
	for_each_rx_queue(bp, i) {
		struct tstorm_per_queue_stats *old_tclient =
			&bp->fp_stats[i].old_tclient;
		tmp += le32_to_cpu(old_tclient->checksum_discard);
	}
	nstats->rx_dropped = tmp + bp->net_stats_old.rx_dropped;

	nstats->tx_dropped = 0;

	nstats->multicast =
		bnx2x_hilo(&estats->total_multicast_packets_received_hi);

	nstats->collisions =
		bnx2x_hilo(&estats->tx_stat_etherstatscollisions_hi);

	nstats->rx_length_errors =
		bnx2x_hilo(&estats->rx_stat_etherstatsundersizepkts_hi) +
		bnx2x_hilo(&estats->etherstatsoverrsizepkts_hi);
	nstats->rx_over_errors = bnx2x_hilo(&estats->brb_drop_hi) +
				 bnx2x_hilo(&estats->brb_truncate_hi);
	nstats->rx_crc_errors =
		bnx2x_hilo(&estats->rx_stat_dot3statsfcserrors_hi);
	nstats->rx_frame_errors =
		bnx2x_hilo(&estats->rx_stat_dot3statsalignmenterrors_hi);
	nstats->rx_fifo_errors = bnx2x_hilo(&estats->no_buff_discard_hi);
	nstats->rx_missed_errors = 0;

	nstats->rx_errors = nstats->rx_length_errors +
			    nstats->rx_over_errors +
			    nstats->rx_crc_errors +
			    nstats->rx_frame_errors +
			    nstats->rx_fifo_errors +
			    nstats->rx_missed_errors;

	nstats->tx_aborted_errors =
		bnx2x_hilo(&estats->tx_stat_dot3statslatecollisions_hi) +
		bnx2x_hilo(&estats->tx_stat_dot3statsexcessivecollisions_hi);
	nstats->tx_carrier_errors =
		bnx2x_hilo(&estats->rx_stat_dot3statscarriersenseerrors_hi);
	nstats->tx_fifo_errors = 0;
	nstats->tx_heartbeat_errors = 0;
	nstats->tx_window_errors = 0;

	nstats->tx_errors = nstats->tx_aborted_errors +
			    nstats->tx_carrier_errors +
	     bnx2x_hilo(&estats->tx_stat_dot3statsinternalmactransmiterrors_hi);
}

static void bnx2x_drv_stats_update(struct bnx2x *bp)
{
	struct bnx2x_eth_stats *estats = &bp->eth_stats;
	int i;

	for_each_queue(bp, i) {
		struct bnx2x_eth_q_stats *qstats = &bp->fp_stats[i].eth_q_stats;
		struct bnx2x_eth_q_stats_old *qstats_old =
			&bp->fp_stats[i].eth_q_stats_old;

		UPDATE_ESTAT_QSTAT(driver_xoff);
		UPDATE_ESTAT_QSTAT(rx_err_discard_pkt);
		UPDATE_ESTAT_QSTAT(rx_skb_alloc_failed);
		UPDATE_ESTAT_QSTAT(hw_csum_err);
		UPDATE_ESTAT_QSTAT(driver_filtered_tx_pkt);
	}
}

static bool bnx2x_edebug_stats_stopped(struct bnx2x *bp)
{
	u32 val;

	if (SHMEM2_HAS(bp, edebug_driver_if[1])) {
		val = SHMEM2_RD(bp, edebug_driver_if[1]);

		if (val == EDEBUG_DRIVER_IF_OP_CODE_DISABLE_STAT)
			return true;
	}

	return false;
}

static void bnx2x_stats_update(struct bnx2x *bp)
{
	u32 *stats_comp = bnx2x_sp(bp, stats_comp);

	if (bnx2x_edebug_stats_stopped(bp))
		return;

	if (IS_PF(bp)) {
		if (*stats_comp != DMAE_COMP_VAL)
			return;

		if (bp->port.pmf)
			bnx2x_hw_stats_update(bp);

		if (bnx2x_storm_stats_update(bp)) {
			if (bp->stats_pending++ == 3) {
				BNX2X_ERR("storm stats were not updated for 3 times\n");
				bnx2x_panic();
			}
			return;
		}
	} else {
		/* VF doesn't collect HW statistics and doesn't get
		 * completions; perform only the storm statistics update
		 */
		bnx2x_storm_stats_update(bp);
	}

	bnx2x_net_stats_update(bp);
	bnx2x_drv_stats_update(bp);

	/* VF is done */
	if (IS_VF(bp))
		return;

	if (netif_msg_timer(bp)) {
		struct bnx2x_eth_stats *estats = &bp->eth_stats;

		netdev_dbg(bp->dev, "brb drops %u brb truncate %u\n",
			   estats->brb_drop_lo, estats->brb_truncate_lo);
	}

	bnx2x_hw_stats_post(bp);
	bnx2x_storm_stats_post(bp);
}

static void bnx2x_port_stats_stop(struct bnx2x *bp)
{
	struct dmae_command *dmae;
	u32 opcode;
	int loader_idx = PMF_DMAE_C(bp);
	u32 *stats_comp = bnx2x_sp(bp, stats_comp);

	bp->executer_idx = 0;

	opcode = bnx2x_dmae_opcode(bp, DMAE_SRC_PCI, DMAE_DST_GRC, false, 0);

	if (bp->port.port_stx) {

		dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
		if (bp->func_stx)
			dmae->opcode = bnx2x_dmae_opcode_add_comp(
						opcode, DMAE_COMP_GRC);
		else
			dmae->opcode = bnx2x_dmae_opcode_add_comp(
						opcode, DMAE_COMP_PCI);

		dmae->src_addr_lo = U64_LO(bnx2x_sp_mapping(bp, port_stats));
		dmae->src_addr_hi = U64_HI(bnx2x_sp_mapping(bp, port_stats));
		dmae->dst_addr_lo = bp->port.port_stx >> 2;
		dmae->dst_addr_hi = 0;
		dmae->len = bnx2x_get_port_stats_dma_len(bp);
		if (bp->func_stx) {
			dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
			dmae->comp_addr_hi = 0;
			dmae->comp_val = 1;
		} else {
			dmae->comp_addr_lo =
				U64_LO(bnx2x_sp_mapping(bp, stats_comp));
			dmae->comp_addr_hi =
				U64_HI(bnx2x_sp_mapping(bp, stats_comp));
			dmae->comp_val = DMAE_COMP_VAL;

			*stats_comp = 0;
		}
	}

	if (bp->func_stx) {

		dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
		dmae->opcode =
			bnx2x_dmae_opcode_add_comp(opcode, DMAE_COMP_PCI);
		dmae->src_addr_lo = U64_LO(bnx2x_sp_mapping(bp, func_stats));
		dmae->src_addr_hi = U64_HI(bnx2x_sp_mapping(bp, func_stats));
		dmae->dst_addr_lo = bp->func_stx >> 2;
		dmae->dst_addr_hi = 0;
		dmae->len = sizeof(struct host_func_stats) >> 2;
		dmae->comp_addr_lo = U64_LO(bnx2x_sp_mapping(bp, stats_comp));
		dmae->comp_addr_hi = U64_HI(bnx2x_sp_mapping(bp, stats_comp));
		dmae->comp_val = DMAE_COMP_VAL;

		*stats_comp = 0;
	}
}
1331
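/* Stop statistics collection: wait for the pending DMAE to complete, take
 * one final HW/storm snapshot, publish it to the net_device counters and,
 * on the PMF, flush the port statistics to the MFW.
 */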
static void bnx2x_stats_stop(struct bnx2x *bp)
{
	int update = 0;

	bnx2x_stats_comp(bp);

	if (bp->port.pmf)
		update = (bnx2x_hw_stats_update(bp) == 0);

	update |= (bnx2x_storm_stats_update(bp) == 0);

	if (update) {
		bnx2x_net_stats_update(bp);

		if (bp->port.pmf)
			bnx2x_port_stats_stop(bp);

		bnx2x_hw_stats_post(bp);
		bnx2x_stats_comp(bp);
	}
}

static void bnx2x_stats_do_nothing(struct bnx2x *bp)
{
}

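/* The statistics state machine, indexed by [current state][event]: each
 * entry names the action to run and the state to move to. For example, a
 * STATS_EVENT_STOP in STATS_STATE_ENABLED runs bnx2x_stats_stop() and
 * moves to STATS_STATE_DISABLED, while an UPDATE event in DISABLED does
 * nothing and stays put.
 */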
static const struct {
	void (*action)(struct bnx2x *bp);
	enum bnx2x_stats_state next_state;
} bnx2x_stats_stm[STATS_STATE_MAX][STATS_EVENT_MAX] = {
/* state	event	*/
{
/* DISABLED	PMF	*/ {bnx2x_stats_pmf_update, STATS_STATE_DISABLED},
/*		LINK_UP	*/ {bnx2x_stats_start,      STATS_STATE_ENABLED},
/*		UPDATE	*/ {bnx2x_stats_do_nothing, STATS_STATE_DISABLED},
/*		STOP	*/ {bnx2x_stats_do_nothing, STATS_STATE_DISABLED}
},
{
/* ENABLED	PMF	*/ {bnx2x_stats_pmf_start,  STATS_STATE_ENABLED},
/*		LINK_UP	*/ {bnx2x_stats_restart,    STATS_STATE_ENABLED},
/*		UPDATE	*/ {bnx2x_stats_update,     STATS_STATE_ENABLED},
/*		STOP	*/ {bnx2x_stats_stop,       STATS_STATE_DISABLED}
}
};

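/* Single entry point for feeding events into the state machine above. The
 * state transition is made under bp->stats_lock; the action itself runs
 * outside the lock. A sketch of the expected call, assuming the periodic
 * timer elsewhere in the driver drives the UPDATE event:
 *
 *	bnx2x_stats_handle(bp, STATS_EVENT_UPDATE);
 */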
void bnx2x_stats_handle(struct bnx2x *bp, enum bnx2x_stats_event event)
{
	enum bnx2x_stats_state state;

	if (unlikely(bp->panic))
		return;

	spin_lock_bh(&bp->stats_lock);
	state = bp->stats_state;
	bp->stats_state = bnx2x_stats_stm[state][event].next_state;
	spin_unlock_bh(&bp->stats_lock);

	bnx2x_stats_stm[state][event].action(bp);

	if ((event != STATS_EVENT_UPDATE) || netif_msg_timer(bp))
		DP(BNX2X_MSG_STATS, "state %d -> event %d -> state %d\n",
		   state, event, bp->stats_state);
}

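/* DMAE the driver's current port statistics buffer out to its shmem
 * mailbox as a baseline. Only meaningful on a PMF with a configured
 * port_stx address, hence the sanity check below.
 */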
static void bnx2x_port_stats_base_init(struct bnx2x *bp)
{
	struct dmae_command *dmae;
	u32 *stats_comp = bnx2x_sp(bp, stats_comp);

	/* sanity */
	if (!bp->port.pmf || !bp->port.port_stx) {
		BNX2X_ERR("BUG!\n");
		return;
	}

	bp->executer_idx = 0;

	dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
	dmae->opcode = bnx2x_dmae_opcode(bp, DMAE_SRC_PCI, DMAE_DST_GRC,
					 true, DMAE_COMP_PCI);
	dmae->src_addr_lo = U64_LO(bnx2x_sp_mapping(bp, port_stats));
	dmae->src_addr_hi = U64_HI(bnx2x_sp_mapping(bp, port_stats));
	dmae->dst_addr_lo = bp->port.port_stx >> 2;
	dmae->dst_addr_hi = 0;
	dmae->len = bnx2x_get_port_stats_dma_len(bp);
	dmae->comp_addr_lo = U64_LO(bnx2x_sp_mapping(bp, stats_comp));
	dmae->comp_addr_hi = U64_HI(bnx2x_sp_mapping(bp, stats_comp));
	dmae->comp_val = DMAE_COMP_VAL;

	*stats_comp = 0;
	bnx2x_hw_stats_post(bp);
	bnx2x_stats_comp(bp);
}

/* This function prepares the statistics ramrod data once, so that later
 * we only have to increment the statistics counter and send the ramrod
 * each time we have to.
 */
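/* Layout of the request built below: the port query, the PF query, the
 * FCoE query (when FCoE is enabled), then one STATS_TYPE_QUEUE entry per
 * ethernet queue and, last, the FCoE queue entry when applicable.
 */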
static void bnx2x_prep_fw_stats_req(struct bnx2x *bp)
{
	int i;
	int first_queue_query_index;
	struct stats_query_header *stats_hdr = &bp->fw_stats_req->hdr;

	dma_addr_t cur_data_offset;
	struct stats_query_entry *cur_query_entry;

	stats_hdr->cmd_num = bp->fw_stats_num;
	stats_hdr->drv_stats_counter = 0;

	/* storm_counters struct contains the counters of completed
	 * statistics requests per storm which are incremented by FW
	 * each time it completes handling a statistics ramrod. We will
	 * check these counters in the timer handler and discard stale
	 * (statistics) ramrod completions.
	 */
	cur_data_offset = bp->fw_stats_data_mapping +
		offsetof(struct bnx2x_fw_stats_data, storm_counters);

	stats_hdr->stats_counters_addrs.hi =
		cpu_to_le32(U64_HI(cur_data_offset));
	stats_hdr->stats_counters_addrs.lo =
		cpu_to_le32(U64_LO(cur_data_offset));

	/* prepare for the first stats ramrod (it will be completed with
	 * the counters equal to zero) - init the counters to something
	 * different.
	 */
	memset(&bp->fw_stats_data->storm_counters, 0xff,
	       sizeof(struct stats_counter));

	/**** Port FW statistics data ****/
	cur_data_offset = bp->fw_stats_data_mapping +
		offsetof(struct bnx2x_fw_stats_data, port);

	cur_query_entry = &bp->fw_stats_req->query[BNX2X_PORT_QUERY_IDX];

	cur_query_entry->kind = STATS_TYPE_PORT;
	/* For the port query, index is a DONT CARE */
	cur_query_entry->index = BP_PORT(bp);
	/* For the port query, funcID is a DONT CARE */
	cur_query_entry->funcID = cpu_to_le16(BP_FUNC(bp));
	cur_query_entry->address.hi = cpu_to_le32(U64_HI(cur_data_offset));
	cur_query_entry->address.lo = cpu_to_le32(U64_LO(cur_data_offset));

	/**** PF FW statistics data ****/
	cur_data_offset = bp->fw_stats_data_mapping +
		offsetof(struct bnx2x_fw_stats_data, pf);

	cur_query_entry = &bp->fw_stats_req->query[BNX2X_PF_QUERY_IDX];

	cur_query_entry->kind = STATS_TYPE_PF;
	/* For the PF query, index is a DONT CARE */
	cur_query_entry->index = BP_PORT(bp);
	cur_query_entry->funcID = cpu_to_le16(BP_FUNC(bp));
	cur_query_entry->address.hi = cpu_to_le32(U64_HI(cur_data_offset));
	cur_query_entry->address.lo = cpu_to_le32(U64_LO(cur_data_offset));

	/**** FCoE FW statistics data ****/
	if (!NO_FCOE(bp)) {
		cur_data_offset = bp->fw_stats_data_mapping +
			offsetof(struct bnx2x_fw_stats_data, fcoe);

		cur_query_entry =
			&bp->fw_stats_req->query[BNX2X_FCOE_QUERY_IDX];

		cur_query_entry->kind = STATS_TYPE_FCOE;
		/* For the FCoE query, index is a DONT CARE */
		cur_query_entry->index = BP_PORT(bp);
		cur_query_entry->funcID = cpu_to_le16(BP_FUNC(bp));
		cur_query_entry->address.hi =
			cpu_to_le32(U64_HI(cur_data_offset));
		cur_query_entry->address.lo =
			cpu_to_le32(U64_LO(cur_data_offset));
	}

	/**** Clients' queries ****/
	cur_data_offset = bp->fw_stats_data_mapping +
		offsetof(struct bnx2x_fw_stats_data, queue_stats);

	/* the first queue query index depends on whether the FCoE offloaded
	 * request is included in the ramrod
	 */
	if (!NO_FCOE(bp))
		first_queue_query_index = BNX2X_FIRST_QUEUE_QUERY_IDX;
	else
		first_queue_query_index = BNX2X_FIRST_QUEUE_QUERY_IDX - 1;

	for_each_eth_queue(bp, i) {
		cur_query_entry =
			&bp->fw_stats_req->
					query[first_queue_query_index + i];

		cur_query_entry->kind = STATS_TYPE_QUEUE;
		cur_query_entry->index = bnx2x_stats_id(&bp->fp[i]);
		cur_query_entry->funcID = cpu_to_le16(BP_FUNC(bp));
		cur_query_entry->address.hi =
			cpu_to_le32(U64_HI(cur_data_offset));
		cur_query_entry->address.lo =
			cpu_to_le32(U64_LO(cur_data_offset));

		cur_data_offset += sizeof(struct per_queue_stats);
	}

	/* add the FCoE queue query if needed */
	if (!NO_FCOE(bp)) {
		cur_query_entry =
			&bp->fw_stats_req->
					query[first_queue_query_index + i];

		cur_query_entry->kind = STATS_TYPE_QUEUE;
		cur_query_entry->index = bnx2x_stats_id(&bp->fp[FCOE_IDX(bp)]);
		cur_query_entry->funcID = cpu_to_le16(BP_FUNC(bp));
		cur_query_entry->address.hi =
			cpu_to_le32(U64_HI(cur_data_offset));
		cur_query_entry->address.lo =
			cpu_to_le32(U64_LO(cur_data_offset));
	}
}

void bnx2x_memset_stats(struct bnx2x *bp)
{
	int i;

	/* function stats */
	for_each_queue(bp, i) {
		struct bnx2x_fp_stats *fp_stats = &bp->fp_stats[i];

		memset(&fp_stats->old_tclient, 0,
		       sizeof(fp_stats->old_tclient));
		memset(&fp_stats->old_uclient, 0,
		       sizeof(fp_stats->old_uclient));
		memset(&fp_stats->old_xclient, 0,
		       sizeof(fp_stats->old_xclient));
		if (bp->stats_init) {
			memset(&fp_stats->eth_q_stats, 0,
			       sizeof(fp_stats->eth_q_stats));
			memset(&fp_stats->eth_q_stats_old, 0,
			       sizeof(fp_stats->eth_q_stats_old));
		}
	}

	memset(&bp->dev->stats, 0, sizeof(bp->dev->stats));

	if (bp->stats_init) {
		memset(&bp->net_stats_old, 0, sizeof(bp->net_stats_old));
		memset(&bp->fw_stats_old, 0, sizeof(bp->fw_stats_old));
		memset(&bp->eth_stats_old, 0, sizeof(bp->eth_stats_old));
		memset(&bp->eth_stats, 0, sizeof(bp->eth_stats));
		memset(&bp->func_stats, 0, sizeof(bp->func_stats));
	}

	bp->stats_state = STATS_STATE_DISABLED;

	if (bp->port.pmf && bp->port.port_stx)
		bnx2x_port_stats_base_init(bp);

	/* mark the end of statistics initialization */
	bp->stats_init = false;
}

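/* Statistics (re)initialization: read the port and function stats mailbox
 * addresses from shmem, snapshot the NIG counters, build the FW statistics
 * request and clear the driver-side accumulators via bnx2x_memset_stats().
 */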
void bnx2x_stats_init(struct bnx2x *bp)
{
	int /*abs*/port = BP_PORT(bp);
	int mb_idx = BP_FW_MB_IDX(bp);

	bp->stats_pending = 0;
	bp->executer_idx = 0;
	bp->stats_counter = 0;

	/* port and func stats for management */
	if (!BP_NOMCP(bp)) {
		bp->port.port_stx = SHMEM_RD(bp, port_mb[port].port_stx);
		bp->func_stx = SHMEM_RD(bp, func_mb[mb_idx].fw_mb_param);

	} else {
		bp->port.port_stx = 0;
		bp->func_stx = 0;
	}
	DP(BNX2X_MSG_STATS, "port_stx 0x%x func_stx 0x%x\n",
	   bp->port.port_stx, bp->func_stx);

	/* the pmf should retrieve port statistics from the SP on a non-init run */
	if (!bp->stats_init && bp->port.pmf && bp->port.port_stx)
		bnx2x_stats_handle(bp, STATS_EVENT_PMF);

	port = BP_PORT(bp);
	/* port stats */
	memset(&(bp->port.old_nig_stats), 0, sizeof(struct nig_stats));
	bp->port.old_nig_stats.brb_discard =
			REG_RD(bp, NIG_REG_STAT0_BRB_DISCARD + port*0x38);
	bp->port.old_nig_stats.brb_truncate =
			REG_RD(bp, NIG_REG_STAT0_BRB_TRUNCATE + port*0x38);
	if (!CHIP_IS_E3(bp)) {
		REG_RD_DMAE(bp, NIG_REG_STAT0_EGRESS_MAC_PKT0 + port*0x50,
			    &(bp->port.old_nig_stats.egress_mac_pkt0_lo), 2);
		REG_RD_DMAE(bp, NIG_REG_STAT0_EGRESS_MAC_PKT1 + port*0x50,
			    &(bp->port.old_nig_stats.egress_mac_pkt1_lo), 2);
	}

	/* Prepare the statistics ramrod data */
	bnx2x_prep_fw_stats_req(bp);

	/* Clean SP from previous statistics */
	if (bp->stats_init) {
		if (bp->func_stx) {
			memset(bnx2x_sp(bp, func_stats), 0,
			       sizeof(struct host_func_stats));
			bnx2x_func_stats_init(bp);
			bnx2x_hw_stats_post(bp);
			bnx2x_stats_comp(bp);
		}
	}

	bnx2x_memset_stats(bp);
}

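/* Snapshot the counters that must survive an internal driver reload (this
 * is assumed to run on the unload path): the per-queue byte counters, the
 * net_device rx_dropped count and, for a PMF in multi-function mode, the
 * port firmware discard counters.
 */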
void bnx2x_save_statistics(struct bnx2x *bp)
{
	int i;
	struct net_device_stats *nstats = &bp->dev->stats;

	/* save queue statistics */
	for_each_eth_queue(bp, i) {
		struct bnx2x_fastpath *fp = &bp->fp[i];
		struct bnx2x_eth_q_stats *qstats =
			&bnx2x_fp_stats(bp, fp)->eth_q_stats;
		struct bnx2x_eth_q_stats_old *qstats_old =
			&bnx2x_fp_stats(bp, fp)->eth_q_stats_old;

		UPDATE_QSTAT_OLD(total_unicast_bytes_received_hi);
		UPDATE_QSTAT_OLD(total_unicast_bytes_received_lo);
		UPDATE_QSTAT_OLD(total_broadcast_bytes_received_hi);
		UPDATE_QSTAT_OLD(total_broadcast_bytes_received_lo);
		UPDATE_QSTAT_OLD(total_multicast_bytes_received_hi);
		UPDATE_QSTAT_OLD(total_multicast_bytes_received_lo);
		UPDATE_QSTAT_OLD(total_unicast_bytes_transmitted_hi);
		UPDATE_QSTAT_OLD(total_unicast_bytes_transmitted_lo);
		UPDATE_QSTAT_OLD(total_broadcast_bytes_transmitted_hi);
		UPDATE_QSTAT_OLD(total_broadcast_bytes_transmitted_lo);
		UPDATE_QSTAT_OLD(total_multicast_bytes_transmitted_hi);
		UPDATE_QSTAT_OLD(total_multicast_bytes_transmitted_lo);
		UPDATE_QSTAT_OLD(total_tpa_bytes_hi);
		UPDATE_QSTAT_OLD(total_tpa_bytes_lo);
	}

	/* save net_device_stats statistics */
	bp->net_stats_old.rx_dropped = nstats->rx_dropped;

	/* store port firmware statistics */
	if (bp->port.pmf && IS_MF(bp)) {
		struct bnx2x_eth_stats *estats = &bp->eth_stats;
		struct bnx2x_fw_port_stats_old *fwstats = &bp->fw_stats_old;
		UPDATE_FW_STAT_OLD(mac_filter_discard);
		UPDATE_FW_STAT_OLD(mf_tag_discard);
		UPDATE_FW_STAT_OLD(brb_truncate_discard);
		UPDATE_FW_STAT_OLD(mac_discard);
	}
}

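/* Collect AFEX statistics into the caller-supplied buffer: accumulate the
 * per-queue storm counters, then the separately collected FCoE counters
 * (both offloaded and non-offloaded). Counters are kept as split hi/lo
 * 32-bit halves; ADD_64(d_hi, a_hi, d_lo, a_lo) adds the (a_hi, a_lo)
 * pair into the (d_hi, d_lo) pair with carry propagation, and the _LE
 * variants convert little-endian FW values first.
 */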
void bnx2x_afex_collect_stats(struct bnx2x *bp, void *void_afex_stats,
			      u32 stats_type)
{
	int i;
	struct afex_stats *afex_stats = (struct afex_stats *)void_afex_stats;
	struct bnx2x_eth_stats *estats = &bp->eth_stats;
	struct per_queue_stats *fcoe_q_stats =
		&bp->fw_stats_data->queue_stats[FCOE_IDX(bp)];

	struct tstorm_per_queue_stats *fcoe_q_tstorm_stats =
		&fcoe_q_stats->tstorm_queue_statistics;

	struct ustorm_per_queue_stats *fcoe_q_ustorm_stats =
		&fcoe_q_stats->ustorm_queue_statistics;

	struct xstorm_per_queue_stats *fcoe_q_xstorm_stats =
		&fcoe_q_stats->xstorm_queue_statistics;

	struct fcoe_statistics_params *fw_fcoe_stat =
		&bp->fw_stats_data->fcoe;

	memset(afex_stats, 0, sizeof(struct afex_stats));

	for_each_eth_queue(bp, i) {
		struct bnx2x_eth_q_stats *qstats = &bp->fp_stats[i].eth_q_stats;

		ADD_64(afex_stats->rx_unicast_bytes_hi,
		       qstats->total_unicast_bytes_received_hi,
		       afex_stats->rx_unicast_bytes_lo,
		       qstats->total_unicast_bytes_received_lo);

		ADD_64(afex_stats->rx_broadcast_bytes_hi,
		       qstats->total_broadcast_bytes_received_hi,
		       afex_stats->rx_broadcast_bytes_lo,
		       qstats->total_broadcast_bytes_received_lo);

		ADD_64(afex_stats->rx_multicast_bytes_hi,
		       qstats->total_multicast_bytes_received_hi,
		       afex_stats->rx_multicast_bytes_lo,
		       qstats->total_multicast_bytes_received_lo);

		ADD_64(afex_stats->rx_unicast_frames_hi,
		       qstats->total_unicast_packets_received_hi,
		       afex_stats->rx_unicast_frames_lo,
		       qstats->total_unicast_packets_received_lo);

		ADD_64(afex_stats->rx_broadcast_frames_hi,
		       qstats->total_broadcast_packets_received_hi,
		       afex_stats->rx_broadcast_frames_lo,
		       qstats->total_broadcast_packets_received_lo);

		ADD_64(afex_stats->rx_multicast_frames_hi,
		       qstats->total_multicast_packets_received_hi,
		       afex_stats->rx_multicast_frames_lo,
		       qstats->total_multicast_packets_received_lo);

		/* sum into rx_frames_discarded all packets discarded
		 * due to size, ttl0 and checksum
		 */
		ADD_64(afex_stats->rx_frames_discarded_hi,
		       qstats->total_packets_received_checksum_discarded_hi,
		       afex_stats->rx_frames_discarded_lo,
		       qstats->total_packets_received_checksum_discarded_lo);

		ADD_64(afex_stats->rx_frames_discarded_hi,
		       qstats->total_packets_received_ttl0_discarded_hi,
		       afex_stats->rx_frames_discarded_lo,
		       qstats->total_packets_received_ttl0_discarded_lo);

		ADD_64(afex_stats->rx_frames_discarded_hi,
		       qstats->etherstatsoverrsizepkts_hi,
		       afex_stats->rx_frames_discarded_lo,
		       qstats->etherstatsoverrsizepkts_lo);

		ADD_64(afex_stats->rx_frames_dropped_hi,
		       qstats->no_buff_discard_hi,
		       afex_stats->rx_frames_dropped_lo,
		       qstats->no_buff_discard_lo);

		ADD_64(afex_stats->tx_unicast_bytes_hi,
		       qstats->total_unicast_bytes_transmitted_hi,
		       afex_stats->tx_unicast_bytes_lo,
		       qstats->total_unicast_bytes_transmitted_lo);

		ADD_64(afex_stats->tx_broadcast_bytes_hi,
		       qstats->total_broadcast_bytes_transmitted_hi,
		       afex_stats->tx_broadcast_bytes_lo,
		       qstats->total_broadcast_bytes_transmitted_lo);

		ADD_64(afex_stats->tx_multicast_bytes_hi,
		       qstats->total_multicast_bytes_transmitted_hi,
		       afex_stats->tx_multicast_bytes_lo,
		       qstats->total_multicast_bytes_transmitted_lo);

		ADD_64(afex_stats->tx_unicast_frames_hi,
		       qstats->total_unicast_packets_transmitted_hi,
		       afex_stats->tx_unicast_frames_lo,
		       qstats->total_unicast_packets_transmitted_lo);

		ADD_64(afex_stats->tx_broadcast_frames_hi,
		       qstats->total_broadcast_packets_transmitted_hi,
		       afex_stats->tx_broadcast_frames_lo,
		       qstats->total_broadcast_packets_transmitted_lo);

		ADD_64(afex_stats->tx_multicast_frames_hi,
		       qstats->total_multicast_packets_transmitted_hi,
		       afex_stats->tx_multicast_frames_lo,
		       qstats->total_multicast_packets_transmitted_lo);

		ADD_64(afex_stats->tx_frames_dropped_hi,
		       qstats->total_transmitted_dropped_packets_error_hi,
		       afex_stats->tx_frames_dropped_lo,
		       qstats->total_transmitted_dropped_packets_error_lo);
	}

	/* now add FCoE statistics which are collected separately
	 * (both offloaded and non offloaded)
	 */
	if (!NO_FCOE(bp)) {
		ADD_64_LE(afex_stats->rx_unicast_bytes_hi,
			  LE32_0,
			  afex_stats->rx_unicast_bytes_lo,
			  fw_fcoe_stat->rx_stat0.fcoe_rx_byte_cnt);

		ADD_64_LE(afex_stats->rx_unicast_bytes_hi,
			  fcoe_q_tstorm_stats->rcv_ucast_bytes.hi,
			  afex_stats->rx_unicast_bytes_lo,
			  fcoe_q_tstorm_stats->rcv_ucast_bytes.lo);

		ADD_64_LE(afex_stats->rx_broadcast_bytes_hi,
			  fcoe_q_tstorm_stats->rcv_bcast_bytes.hi,
			  afex_stats->rx_broadcast_bytes_lo,
			  fcoe_q_tstorm_stats->rcv_bcast_bytes.lo);

		ADD_64_LE(afex_stats->rx_multicast_bytes_hi,
			  fcoe_q_tstorm_stats->rcv_mcast_bytes.hi,
			  afex_stats->rx_multicast_bytes_lo,
			  fcoe_q_tstorm_stats->rcv_mcast_bytes.lo);

		ADD_64_LE(afex_stats->rx_unicast_frames_hi,
			  LE32_0,
			  afex_stats->rx_unicast_frames_lo,
			  fw_fcoe_stat->rx_stat0.fcoe_rx_pkt_cnt);

		ADD_64_LE(afex_stats->rx_unicast_frames_hi,
			  LE32_0,
			  afex_stats->rx_unicast_frames_lo,
			  fcoe_q_tstorm_stats->rcv_ucast_pkts);

		ADD_64_LE(afex_stats->rx_broadcast_frames_hi,
			  LE32_0,
			  afex_stats->rx_broadcast_frames_lo,
			  fcoe_q_tstorm_stats->rcv_bcast_pkts);

		ADD_64_LE(afex_stats->rx_multicast_frames_hi,
			  LE32_0,
			  afex_stats->rx_multicast_frames_lo,
			  fcoe_q_tstorm_stats->rcv_mcast_pkts);

		ADD_64_LE(afex_stats->rx_frames_discarded_hi,
			  LE32_0,
			  afex_stats->rx_frames_discarded_lo,
			  fcoe_q_tstorm_stats->checksum_discard);

		ADD_64_LE(afex_stats->rx_frames_discarded_hi,
			  LE32_0,
			  afex_stats->rx_frames_discarded_lo,
			  fcoe_q_tstorm_stats->pkts_too_big_discard);

		ADD_64_LE(afex_stats->rx_frames_discarded_hi,
			  LE32_0,
			  afex_stats->rx_frames_discarded_lo,
			  fcoe_q_tstorm_stats->ttl0_discard);

		ADD_64_LE16(afex_stats->rx_frames_dropped_hi,
			    LE16_0,
			    afex_stats->rx_frames_dropped_lo,
			    fcoe_q_tstorm_stats->no_buff_discard);

		ADD_64_LE(afex_stats->rx_frames_dropped_hi,
			  LE32_0,
			  afex_stats->rx_frames_dropped_lo,
			  fcoe_q_ustorm_stats->ucast_no_buff_pkts);

		ADD_64_LE(afex_stats->rx_frames_dropped_hi,
			  LE32_0,
			  afex_stats->rx_frames_dropped_lo,
			  fcoe_q_ustorm_stats->mcast_no_buff_pkts);

		ADD_64_LE(afex_stats->rx_frames_dropped_hi,
			  LE32_0,
			  afex_stats->rx_frames_dropped_lo,
			  fcoe_q_ustorm_stats->bcast_no_buff_pkts);

		ADD_64_LE(afex_stats->rx_frames_dropped_hi,
			  LE32_0,
			  afex_stats->rx_frames_dropped_lo,
			  fw_fcoe_stat->rx_stat1.fcoe_rx_drop_pkt_cnt);

		ADD_64_LE(afex_stats->rx_frames_dropped_hi,
			  LE32_0,
			  afex_stats->rx_frames_dropped_lo,
			  fw_fcoe_stat->rx_stat2.fcoe_rx_drop_pkt_cnt);

		ADD_64_LE(afex_stats->tx_unicast_bytes_hi,
			  LE32_0,
			  afex_stats->tx_unicast_bytes_lo,
			  fw_fcoe_stat->tx_stat.fcoe_tx_byte_cnt);

		ADD_64_LE(afex_stats->tx_unicast_bytes_hi,
			  fcoe_q_xstorm_stats->ucast_bytes_sent.hi,
			  afex_stats->tx_unicast_bytes_lo,
			  fcoe_q_xstorm_stats->ucast_bytes_sent.lo);

		ADD_64_LE(afex_stats->tx_broadcast_bytes_hi,
			  fcoe_q_xstorm_stats->bcast_bytes_sent.hi,
			  afex_stats->tx_broadcast_bytes_lo,
			  fcoe_q_xstorm_stats->bcast_bytes_sent.lo);

		ADD_64_LE(afex_stats->tx_multicast_bytes_hi,
			  fcoe_q_xstorm_stats->mcast_bytes_sent.hi,
			  afex_stats->tx_multicast_bytes_lo,
			  fcoe_q_xstorm_stats->mcast_bytes_sent.lo);

		ADD_64_LE(afex_stats->tx_unicast_frames_hi,
			  LE32_0,
			  afex_stats->tx_unicast_frames_lo,
			  fw_fcoe_stat->tx_stat.fcoe_tx_pkt_cnt);

		ADD_64_LE(afex_stats->tx_unicast_frames_hi,
			  LE32_0,
			  afex_stats->tx_unicast_frames_lo,
			  fcoe_q_xstorm_stats->ucast_pkts_sent);

		ADD_64_LE(afex_stats->tx_broadcast_frames_hi,
			  LE32_0,
			  afex_stats->tx_broadcast_frames_lo,
			  fcoe_q_xstorm_stats->bcast_pkts_sent);

		ADD_64_LE(afex_stats->tx_multicast_frames_hi,
			  LE32_0,
			  afex_stats->tx_multicast_frames_lo,
			  fcoe_q_xstorm_stats->mcast_pkts_sent);

		ADD_64_LE(afex_stats->tx_frames_dropped_hi,
			  LE32_0,
			  afex_stats->tx_frames_dropped_lo,
			  fcoe_q_xstorm_stats->error_drop_pkts);
	}

	/* if port stats are requested, add them to the PMF stats, since the
	 * MCP will accumulate them anyway before sending them to the switch
	 */
	if ((bp->port.pmf) && (stats_type == VICSTATST_UIF_INDEX)) {
		ADD_64(afex_stats->rx_frames_dropped_hi,
		       0,
		       afex_stats->rx_frames_dropped_lo,
		       estats->mac_filter_discard);
		ADD_64(afex_stats->rx_frames_dropped_hi,
		       0,
		       afex_stats->rx_frames_dropped_lo,
		       estats->brb_truncate_discard);
		ADD_64(afex_stats->rx_frames_discarded_hi,
		       0,
		       afex_stats->rx_frames_discarded_lo,
		       estats->mac_discard);
	}
}
1958}