/* bnx2x_stats.c: Broadcom Everest network driver.
 *
 * Copyright (c) 2007-2012 Broadcom Corporation
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation.
 *
 * Maintained by: Eilon Greenstein <eilong@broadcom.com>
 * Written by: Eliezer Tamir
 * Based on code from Michael Chan's bnx2 driver
 * UDP CSUM errata workaround by Arik Gendelman
 * Slowpath and fastpath rework by Vladislav Zolotarov
 * Statistics and Link management by Yitchak Gertner
 *
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include "bnx2x_stats.h"
#include "bnx2x_cmn.h"


/* Statistics */

/*
 * General service functions
 */

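/* Read a 64-bit statistic kept as an adjacent {hi, lo} pair of u32s.
 * On 32-bit platforms only the low 32 bits are returned.
 */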
static inline long bnx2x_hilo(u32 *hiref)
{
	u32 lo = *(hiref + 1);
#if (BITS_PER_LONG == 64)
	u32 hi = *hiref;

	return HILO_U64(hi, lo);
#else
	return lo;
#endif
}

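/* Number of 32-bit words of struct host_port_stats to DMA to/from the
 * MCP. Newer bootcodes advertise the exact size via shmem2; older ones
 * get the legacy layout up to the 'not_used' field (plus the PFC
 * counters when the MFW supports them).
 */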
static inline u16 bnx2x_get_port_stats_dma_len(struct bnx2x *bp)
{
	u16 res = 0;

	/* 'newest' convention - shmem2 contains the size of the port stats */
	if (SHMEM2_HAS(bp, sizeof_port_stats)) {
		u32 size = SHMEM2_RD(bp, sizeof_port_stats);
		if (size)
			res = size;

		/* prevent newer BC from causing buffer overflow */
		if (res > sizeof(struct host_port_stats))
			res = sizeof(struct host_port_stats);
	}

	/* Older convention - all BCs support the port stats' fields up until
	 * the 'not_used' field
	 */
	if (!res) {
		res = offsetof(struct host_port_stats, not_used) + 4;

		/* if PFC stats are supported by the MFW, DMA them as well */
		if (bp->flags & BC_SUPPORTS_PFC_STATS) {
			res += offsetof(struct host_port_stats,
					pfc_frames_rx_lo) -
			       offsetof(struct host_port_stats,
					pfc_frames_tx_hi) + 4;
		}
	}

	res >>= 2;

	WARN_ON(res > 2 * DMAE_LEN32_RD_MAX);
	return res;
}

/*
 * Init service functions
 */

/* Post the next statistics ramrod. Protect it with the spinlock in
 * order to ensure the strict order between statistics ramrods
 * (each ramrod has a sequence number passed in a
 * bp->fw_stats_req->hdr.drv_stats_counter and ramrods must be
 * sent in order).
 */
static void bnx2x_storm_stats_post(struct bnx2x *bp)
{
	if (!bp->stats_pending) {
		int rc;

		spin_lock_bh(&bp->stats_lock);

		if (bp->stats_pending) {
			spin_unlock_bh(&bp->stats_lock);
			return;
		}

		bp->fw_stats_req->hdr.drv_stats_counter =
			cpu_to_le16(bp->stats_counter++);

		DP(BNX2X_MSG_STATS, "Sending statistics ramrod %d\n",
		   bp->fw_stats_req->hdr.drv_stats_counter);

		/* send FW stats ramrod */
		rc = bnx2x_sp_post(bp, RAMROD_CMD_ID_COMMON_STAT_QUERY, 0,
				   U64_HI(bp->fw_stats_req_mapping),
				   U64_LO(bp->fw_stats_req_mapping),
				   NONE_CONNECTION_TYPE);
		if (rc == 0)
			bp->stats_pending = 1;

		spin_unlock_bh(&bp->stats_lock);
	}
}

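/* Kick off the DMAE transfers prepared in the slowpath dmae[] array.
 * With more than one command queued (bp->executer_idx != 0) a "loader"
 * command is posted first; completions then ripple through the GRC
 * "go" registers until the last command writes DMAE_COMP_VAL to the
 * stats_comp word in host memory.
 */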
static void bnx2x_hw_stats_post(struct bnx2x *bp)
{
	struct dmae_command *dmae = &bp->stats_dmae;
	u32 *stats_comp = bnx2x_sp(bp, stats_comp);

	*stats_comp = DMAE_COMP_VAL;
	if (CHIP_REV_IS_SLOW(bp))
		return;

	/* loader */
	if (bp->executer_idx) {
		int loader_idx = PMF_DMAE_C(bp);
		u32 opcode = bnx2x_dmae_opcode(bp, DMAE_SRC_PCI, DMAE_DST_GRC,
					       true, DMAE_COMP_GRC);
		opcode = bnx2x_dmae_opcode_clr_src_reset(opcode);

		memset(dmae, 0, sizeof(struct dmae_command));
		dmae->opcode = opcode;
		dmae->src_addr_lo = U64_LO(bnx2x_sp_mapping(bp, dmae[0]));
		dmae->src_addr_hi = U64_HI(bnx2x_sp_mapping(bp, dmae[0]));
		dmae->dst_addr_lo = (DMAE_REG_CMD_MEM +
				     sizeof(struct dmae_command) *
				     (loader_idx + 1)) >> 2;
		dmae->dst_addr_hi = 0;
		dmae->len = sizeof(struct dmae_command) >> 2;
		if (CHIP_IS_E1(bp))
			dmae->len--;
		dmae->comp_addr_lo = dmae_reg_go_c[loader_idx + 1] >> 2;
		dmae->comp_addr_hi = 0;
		dmae->comp_val = 1;

		*stats_comp = 0;
		bnx2x_post_dmae(bp, dmae, loader_idx);

	} else if (bp->func_stx) {
		*stats_comp = 0;
		memcpy(bnx2x_sp(bp, func_stats), &bp->func_stats,
		       sizeof(bp->func_stats));
		bnx2x_post_dmae(bp, dmae, INIT_DMAE_C(bp));
	}
}

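/* Wait (sleeping, up to roughly 10ms) for the posted DMAE chain to
 * write DMAE_COMP_VAL into the stats_comp word.
 */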
static int bnx2x_stats_comp(struct bnx2x *bp)
{
	u32 *stats_comp = bnx2x_sp(bp, stats_comp);
	int cnt = 10;

	might_sleep();
	while (*stats_comp != DMAE_COMP_VAL) {
		if (!cnt) {
			BNX2X_ERR("timeout waiting for stats finished\n");
			break;
		}
		cnt--;
		usleep_range(1000, 1000);
	}
	return 1;
}

/*
 * Statistics service functions
 */

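/* On becoming the PMF, pull the port statistics accumulated so far
 * from the MCP's area (port_stx) into the driver's port_stats buffer.
 * Two DMAE commands are used because a single read is capped at
 * DMAE_LEN32_RD_MAX words.
 */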
static void bnx2x_stats_pmf_update(struct bnx2x *bp)
{
	struct dmae_command *dmae;
	u32 opcode;
	int loader_idx = PMF_DMAE_C(bp);
	u32 *stats_comp = bnx2x_sp(bp, stats_comp);

	/* sanity */
	if (!bp->port.pmf || !bp->port.port_stx) {
		BNX2X_ERR("BUG!\n");
		return;
	}

	bp->executer_idx = 0;

	opcode = bnx2x_dmae_opcode(bp, DMAE_SRC_GRC, DMAE_DST_PCI, false, 0);

	dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
	dmae->opcode = bnx2x_dmae_opcode_add_comp(opcode, DMAE_COMP_GRC);
	dmae->src_addr_lo = bp->port.port_stx >> 2;
	dmae->src_addr_hi = 0;
	dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, port_stats));
	dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, port_stats));
	dmae->len = DMAE_LEN32_RD_MAX;
	dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
	dmae->comp_addr_hi = 0;
	dmae->comp_val = 1;

	dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
	dmae->opcode = bnx2x_dmae_opcode_add_comp(opcode, DMAE_COMP_PCI);
	dmae->src_addr_lo = (bp->port.port_stx >> 2) + DMAE_LEN32_RD_MAX;
	dmae->src_addr_hi = 0;
	dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, port_stats) +
				   DMAE_LEN32_RD_MAX * 4);
	dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, port_stats) +
				   DMAE_LEN32_RD_MAX * 4);
	dmae->len = bnx2x_get_port_stats_dma_len(bp) - DMAE_LEN32_RD_MAX;

	dmae->comp_addr_lo = U64_LO(bnx2x_sp_mapping(bp, stats_comp));
	dmae->comp_addr_hi = U64_HI(bnx2x_sp_mapping(bp, stats_comp));
	dmae->comp_val = DMAE_COMP_VAL;

	*stats_comp = 0;
	bnx2x_hw_stats_post(bp);
	bnx2x_stats_comp(bp);
}

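/* Build the per-cycle DMAE chain for a PMF: write the accumulated
 * port/function stats back to the MCP, then read fresh counters from
 * the active MAC block (EMAC, BMAC or MSTAT) and from the NIG.
 */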
static void bnx2x_port_stats_init(struct bnx2x *bp)
{
	struct dmae_command *dmae;
	int port = BP_PORT(bp);
	u32 opcode;
	int loader_idx = PMF_DMAE_C(bp);
	u32 mac_addr;
	u32 *stats_comp = bnx2x_sp(bp, stats_comp);

	/* sanity */
	if (!bp->link_vars.link_up || !bp->port.pmf) {
		BNX2X_ERR("BUG!\n");
		return;
	}

	bp->executer_idx = 0;

	/* MCP */
	opcode = bnx2x_dmae_opcode(bp, DMAE_SRC_PCI, DMAE_DST_GRC,
				   true, DMAE_COMP_GRC);

	if (bp->port.port_stx) {

		dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
		dmae->opcode = opcode;
		dmae->src_addr_lo = U64_LO(bnx2x_sp_mapping(bp, port_stats));
		dmae->src_addr_hi = U64_HI(bnx2x_sp_mapping(bp, port_stats));
		dmae->dst_addr_lo = bp->port.port_stx >> 2;
		dmae->dst_addr_hi = 0;
		dmae->len = bnx2x_get_port_stats_dma_len(bp);
		dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
		dmae->comp_addr_hi = 0;
		dmae->comp_val = 1;
	}

	if (bp->func_stx) {

		dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
		dmae->opcode = opcode;
		dmae->src_addr_lo = U64_LO(bnx2x_sp_mapping(bp, func_stats));
		dmae->src_addr_hi = U64_HI(bnx2x_sp_mapping(bp, func_stats));
		dmae->dst_addr_lo = bp->func_stx >> 2;
		dmae->dst_addr_hi = 0;
		dmae->len = sizeof(struct host_func_stats) >> 2;
		dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
		dmae->comp_addr_hi = 0;
		dmae->comp_val = 1;
	}

	/* MAC */
	opcode = bnx2x_dmae_opcode(bp, DMAE_SRC_GRC, DMAE_DST_PCI,
				   true, DMAE_COMP_GRC);

	/* EMAC is special */
	if (bp->link_vars.mac_type == MAC_TYPE_EMAC) {
		mac_addr = (port ? GRCBASE_EMAC1 : GRCBASE_EMAC0);

		/* EMAC_REG_EMAC_RX_STAT_AC (EMAC_REG_EMAC_RX_STAT_AC_COUNT)*/
		dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
		dmae->opcode = opcode;
		dmae->src_addr_lo = (mac_addr +
				     EMAC_REG_EMAC_RX_STAT_AC) >> 2;
		dmae->src_addr_hi = 0;
		dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, mac_stats));
		dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, mac_stats));
		dmae->len = EMAC_REG_EMAC_RX_STAT_AC_COUNT;
		dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
		dmae->comp_addr_hi = 0;
		dmae->comp_val = 1;

		/* EMAC_REG_EMAC_RX_STAT_AC_28 */
		dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
		dmae->opcode = opcode;
		dmae->src_addr_lo = (mac_addr +
				     EMAC_REG_EMAC_RX_STAT_AC_28) >> 2;
		dmae->src_addr_hi = 0;
		dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, mac_stats) +
		     offsetof(struct emac_stats, rx_stat_falsecarriererrors));
		dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, mac_stats) +
		     offsetof(struct emac_stats, rx_stat_falsecarriererrors));
		dmae->len = 1;
		dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
		dmae->comp_addr_hi = 0;
		dmae->comp_val = 1;

		/* EMAC_REG_EMAC_TX_STAT_AC (EMAC_REG_EMAC_TX_STAT_AC_COUNT)*/
		dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
		dmae->opcode = opcode;
		dmae->src_addr_lo = (mac_addr +
				     EMAC_REG_EMAC_TX_STAT_AC) >> 2;
		dmae->src_addr_hi = 0;
		dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, mac_stats) +
			offsetof(struct emac_stats, tx_stat_ifhcoutoctets));
		dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, mac_stats) +
			offsetof(struct emac_stats, tx_stat_ifhcoutoctets));
		dmae->len = EMAC_REG_EMAC_TX_STAT_AC_COUNT;
		dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
		dmae->comp_addr_hi = 0;
		dmae->comp_val = 1;
	} else {
		u32 tx_src_addr_lo, rx_src_addr_lo;
		u16 rx_len, tx_len;

		/* configure the params according to MAC type */
		switch (bp->link_vars.mac_type) {
		case MAC_TYPE_BMAC:
			mac_addr = (port ? NIG_REG_INGRESS_BMAC1_MEM :
					   NIG_REG_INGRESS_BMAC0_MEM);

			/* BIGMAC_REGISTER_TX_STAT_GTPKT ..
			   BIGMAC_REGISTER_TX_STAT_GTBYT */
			if (CHIP_IS_E1x(bp)) {
				tx_src_addr_lo = (mac_addr +
					BIGMAC_REGISTER_TX_STAT_GTPKT) >> 2;
				tx_len = (8 + BIGMAC_REGISTER_TX_STAT_GTBYT -
					  BIGMAC_REGISTER_TX_STAT_GTPKT) >> 2;
				rx_src_addr_lo = (mac_addr +
					BIGMAC_REGISTER_RX_STAT_GR64) >> 2;
				rx_len = (8 + BIGMAC_REGISTER_RX_STAT_GRIPJ -
					  BIGMAC_REGISTER_RX_STAT_GR64) >> 2;
			} else {
				tx_src_addr_lo = (mac_addr +
					BIGMAC2_REGISTER_TX_STAT_GTPOK) >> 2;
				tx_len = (8 + BIGMAC2_REGISTER_TX_STAT_GTBYT -
					  BIGMAC2_REGISTER_TX_STAT_GTPOK) >> 2;
				rx_src_addr_lo = (mac_addr +
					BIGMAC2_REGISTER_RX_STAT_GR64) >> 2;
				rx_len = (8 + BIGMAC2_REGISTER_RX_STAT_GRIPJ -
					  BIGMAC2_REGISTER_RX_STAT_GR64) >> 2;
			}
			break;

		case MAC_TYPE_UMAC: /* handled by MSTAT */
		case MAC_TYPE_XMAC: /* handled by MSTAT */
		default:
			mac_addr = port ? GRCBASE_MSTAT1 : GRCBASE_MSTAT0;
			tx_src_addr_lo = (mac_addr +
					  MSTAT_REG_TX_STAT_GTXPOK_LO) >> 2;
			rx_src_addr_lo = (mac_addr +
					  MSTAT_REG_RX_STAT_GR64_LO) >> 2;
			tx_len = sizeof(bp->slowpath->
					mac_stats.mstat_stats.stats_tx) >> 2;
			rx_len = sizeof(bp->slowpath->
					mac_stats.mstat_stats.stats_rx) >> 2;
			break;
		}

		/* TX stats */
		dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
		dmae->opcode = opcode;
		dmae->src_addr_lo = tx_src_addr_lo;
		dmae->src_addr_hi = 0;
		dmae->len = tx_len;
		dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, mac_stats));
		dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, mac_stats));
		dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
		dmae->comp_addr_hi = 0;
		dmae->comp_val = 1;

		/* RX stats */
		dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
		dmae->opcode = opcode;
		dmae->src_addr_hi = 0;
		dmae->src_addr_lo = rx_src_addr_lo;
		dmae->dst_addr_lo =
			U64_LO(bnx2x_sp_mapping(bp, mac_stats) + (tx_len << 2));
		dmae->dst_addr_hi =
			U64_HI(bnx2x_sp_mapping(bp, mac_stats) + (tx_len << 2));
		dmae->len = rx_len;
		dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
		dmae->comp_addr_hi = 0;
		dmae->comp_val = 1;
	}

	/* NIG */
	if (!CHIP_IS_E3(bp)) {
		dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
		dmae->opcode = opcode;
		dmae->src_addr_lo = (port ? NIG_REG_STAT1_EGRESS_MAC_PKT0 :
					    NIG_REG_STAT0_EGRESS_MAC_PKT0) >> 2;
		dmae->src_addr_hi = 0;
		dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, nig_stats) +
				offsetof(struct nig_stats, egress_mac_pkt0_lo));
		dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, nig_stats) +
				offsetof(struct nig_stats, egress_mac_pkt0_lo));
		dmae->len = (2*sizeof(u32)) >> 2;
		dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
		dmae->comp_addr_hi = 0;
		dmae->comp_val = 1;

		dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
		dmae->opcode = opcode;
		dmae->src_addr_lo = (port ? NIG_REG_STAT1_EGRESS_MAC_PKT1 :
					    NIG_REG_STAT0_EGRESS_MAC_PKT1) >> 2;
		dmae->src_addr_hi = 0;
		dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, nig_stats) +
				offsetof(struct nig_stats, egress_mac_pkt1_lo));
		dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, nig_stats) +
				offsetof(struct nig_stats, egress_mac_pkt1_lo));
		dmae->len = (2*sizeof(u32)) >> 2;
		dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
		dmae->comp_addr_hi = 0;
		dmae->comp_val = 1;
	}

	dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
	dmae->opcode = bnx2x_dmae_opcode(bp, DMAE_SRC_GRC, DMAE_DST_PCI,
					 true, DMAE_COMP_PCI);
	dmae->src_addr_lo = (port ? NIG_REG_STAT1_BRB_DISCARD :
				    NIG_REG_STAT0_BRB_DISCARD) >> 2;
	dmae->src_addr_hi = 0;
	dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, nig_stats));
	dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, nig_stats));
	dmae->len = (sizeof(struct nig_stats) - 4*sizeof(u32)) >> 2;

	dmae->comp_addr_lo = U64_LO(bnx2x_sp_mapping(bp, stats_comp));
	dmae->comp_addr_hi = U64_HI(bnx2x_sp_mapping(bp, stats_comp));
	dmae->comp_val = DMAE_COMP_VAL;

	*stats_comp = 0;
}

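/* Prepare the single DMAE command that writes this function's
 * statistics block to the MCP-supplied address (func_stx).
 */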
static void bnx2x_func_stats_init(struct bnx2x *bp)
{
	struct dmae_command *dmae = &bp->stats_dmae;
	u32 *stats_comp = bnx2x_sp(bp, stats_comp);

	/* sanity */
	if (!bp->func_stx) {
		BNX2X_ERR("BUG!\n");
		return;
	}

	bp->executer_idx = 0;
	memset(dmae, 0, sizeof(struct dmae_command));

	dmae->opcode = bnx2x_dmae_opcode(bp, DMAE_SRC_PCI, DMAE_DST_GRC,
					 true, DMAE_COMP_PCI);
	dmae->src_addr_lo = U64_LO(bnx2x_sp_mapping(bp, func_stats));
	dmae->src_addr_hi = U64_HI(bnx2x_sp_mapping(bp, func_stats));
	dmae->dst_addr_lo = bp->func_stx >> 2;
	dmae->dst_addr_hi = 0;
	dmae->len = sizeof(struct host_func_stats) >> 2;
	dmae->comp_addr_lo = U64_LO(bnx2x_sp_mapping(bp, stats_comp));
	dmae->comp_addr_hi = U64_HI(bnx2x_sp_mapping(bp, stats_comp));
	dmae->comp_val = DMAE_COMP_VAL;

	*stats_comp = 0;
}

static void bnx2x_stats_start(struct bnx2x *bp)
{
	if (bp->port.pmf)
		bnx2x_port_stats_init(bp);

	else if (bp->func_stx)
		bnx2x_func_stats_init(bp);

	bnx2x_hw_stats_post(bp);
	bnx2x_storm_stats_post(bp);
}

static void bnx2x_stats_pmf_start(struct bnx2x *bp)
{
	bnx2x_stats_comp(bp);
	bnx2x_stats_pmf_update(bp);
	bnx2x_stats_start(bp);
}

static void bnx2x_stats_restart(struct bnx2x *bp)
{
	bnx2x_stats_comp(bp);
	bnx2x_stats_start(bp);
}

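/* Fold the freshly DMAed BigMAC counters into the accumulated port
 * stats. BMAC1 (E1x) and BMAC2 expose different register layouts, so
 * the same UPDATE_STAT64 sequence is instantiated once per type.
 */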
static void bnx2x_bmac_stats_update(struct bnx2x *bp)
{
	struct host_port_stats *pstats = bnx2x_sp(bp, port_stats);
	struct bnx2x_eth_stats *estats = &bp->eth_stats;
	struct {
		u32 lo;
		u32 hi;
	} diff;

	if (CHIP_IS_E1x(bp)) {
		struct bmac1_stats *new = bnx2x_sp(bp, mac_stats.bmac1_stats);

		/* the macros below will use "bmac1_stats" type */
		UPDATE_STAT64(rx_stat_grerb, rx_stat_ifhcinbadoctets);
		UPDATE_STAT64(rx_stat_grfcs, rx_stat_dot3statsfcserrors);
		UPDATE_STAT64(rx_stat_grund, rx_stat_etherstatsundersizepkts);
		UPDATE_STAT64(rx_stat_grovr, rx_stat_dot3statsframestoolong);
		UPDATE_STAT64(rx_stat_grfrg, rx_stat_etherstatsfragments);
		UPDATE_STAT64(rx_stat_grjbr, rx_stat_etherstatsjabbers);
		UPDATE_STAT64(rx_stat_grxcf, rx_stat_maccontrolframesreceived);
		UPDATE_STAT64(rx_stat_grxpf, rx_stat_xoffstateentered);
		UPDATE_STAT64(rx_stat_grxpf, rx_stat_mac_xpf);

		UPDATE_STAT64(tx_stat_gtxpf, tx_stat_outxoffsent);
		UPDATE_STAT64(tx_stat_gtxpf, tx_stat_flowcontroldone);
		UPDATE_STAT64(tx_stat_gt64, tx_stat_etherstatspkts64octets);
		UPDATE_STAT64(tx_stat_gt127,
				tx_stat_etherstatspkts65octetsto127octets);
		UPDATE_STAT64(tx_stat_gt255,
				tx_stat_etherstatspkts128octetsto255octets);
		UPDATE_STAT64(tx_stat_gt511,
				tx_stat_etherstatspkts256octetsto511octets);
		UPDATE_STAT64(tx_stat_gt1023,
				tx_stat_etherstatspkts512octetsto1023octets);
		UPDATE_STAT64(tx_stat_gt1518,
				tx_stat_etherstatspkts1024octetsto1522octets);
		UPDATE_STAT64(tx_stat_gt2047, tx_stat_mac_2047);
		UPDATE_STAT64(tx_stat_gt4095, tx_stat_mac_4095);
		UPDATE_STAT64(tx_stat_gt9216, tx_stat_mac_9216);
		UPDATE_STAT64(tx_stat_gt16383, tx_stat_mac_16383);
		UPDATE_STAT64(tx_stat_gterr,
				tx_stat_dot3statsinternalmactransmiterrors);
		UPDATE_STAT64(tx_stat_gtufl, tx_stat_mac_ufl);

	} else {
		struct bmac2_stats *new = bnx2x_sp(bp, mac_stats.bmac2_stats);

		/* the macros below will use "bmac2_stats" type */
		UPDATE_STAT64(rx_stat_grerb, rx_stat_ifhcinbadoctets);
		UPDATE_STAT64(rx_stat_grfcs, rx_stat_dot3statsfcserrors);
		UPDATE_STAT64(rx_stat_grund, rx_stat_etherstatsundersizepkts);
		UPDATE_STAT64(rx_stat_grovr, rx_stat_dot3statsframestoolong);
		UPDATE_STAT64(rx_stat_grfrg, rx_stat_etherstatsfragments);
		UPDATE_STAT64(rx_stat_grjbr, rx_stat_etherstatsjabbers);
		UPDATE_STAT64(rx_stat_grxcf, rx_stat_maccontrolframesreceived);
		UPDATE_STAT64(rx_stat_grxpf, rx_stat_xoffstateentered);
		UPDATE_STAT64(rx_stat_grxpf, rx_stat_mac_xpf);
		UPDATE_STAT64(tx_stat_gtxpf, tx_stat_outxoffsent);
		UPDATE_STAT64(tx_stat_gtxpf, tx_stat_flowcontroldone);
		UPDATE_STAT64(tx_stat_gt64, tx_stat_etherstatspkts64octets);
		UPDATE_STAT64(tx_stat_gt127,
				tx_stat_etherstatspkts65octetsto127octets);
		UPDATE_STAT64(tx_stat_gt255,
				tx_stat_etherstatspkts128octetsto255octets);
		UPDATE_STAT64(tx_stat_gt511,
				tx_stat_etherstatspkts256octetsto511octets);
		UPDATE_STAT64(tx_stat_gt1023,
				tx_stat_etherstatspkts512octetsto1023octets);
		UPDATE_STAT64(tx_stat_gt1518,
				tx_stat_etherstatspkts1024octetsto1522octets);
		UPDATE_STAT64(tx_stat_gt2047, tx_stat_mac_2047);
		UPDATE_STAT64(tx_stat_gt4095, tx_stat_mac_4095);
		UPDATE_STAT64(tx_stat_gt9216, tx_stat_mac_9216);
		UPDATE_STAT64(tx_stat_gt16383, tx_stat_mac_16383);
		UPDATE_STAT64(tx_stat_gterr,
				tx_stat_dot3statsinternalmactransmiterrors);
		UPDATE_STAT64(tx_stat_gtufl, tx_stat_mac_ufl);

		/* collect PFC stats */
		pstats->pfc_frames_tx_hi = new->tx_stat_gtpp_hi;
		pstats->pfc_frames_tx_lo = new->tx_stat_gtpp_lo;

		pstats->pfc_frames_rx_hi = new->rx_stat_grpp_hi;
		pstats->pfc_frames_rx_lo = new->rx_stat_grpp_lo;
	}

	estats->pause_frames_received_hi =
				pstats->mac_stx[1].rx_stat_mac_xpf_hi;
	estats->pause_frames_received_lo =
				pstats->mac_stx[1].rx_stat_mac_xpf_lo;

	estats->pause_frames_sent_hi =
				pstats->mac_stx[1].tx_stat_outxoffsent_hi;
	estats->pause_frames_sent_lo =
				pstats->mac_stx[1].tx_stat_outxoffsent_lo;

	estats->pfc_frames_received_hi =
				pstats->pfc_frames_rx_hi;
	estats->pfc_frames_received_lo =
				pstats->pfc_frames_rx_lo;
	estats->pfc_frames_sent_hi =
				pstats->pfc_frames_tx_hi;
	estats->pfc_frames_sent_lo =
				pstats->pfc_frames_tx_lo;
}

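/* Same as above for the E3 MSTAT block. Note the use of ADD_STAT64
 * (accumulate the value read) rather than the snapshot-diffing
 * UPDATE_STAT64, which suggests the MSTAT counters reset on read.
 */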
static void bnx2x_mstat_stats_update(struct bnx2x *bp)
{
	struct host_port_stats *pstats = bnx2x_sp(bp, port_stats);
	struct bnx2x_eth_stats *estats = &bp->eth_stats;

	struct mstat_stats *new = bnx2x_sp(bp, mac_stats.mstat_stats);

	ADD_STAT64(stats_rx.rx_grerb, rx_stat_ifhcinbadoctets);
	ADD_STAT64(stats_rx.rx_grfcs, rx_stat_dot3statsfcserrors);
	ADD_STAT64(stats_rx.rx_grund, rx_stat_etherstatsundersizepkts);
	ADD_STAT64(stats_rx.rx_grovr, rx_stat_dot3statsframestoolong);
	ADD_STAT64(stats_rx.rx_grfrg, rx_stat_etherstatsfragments);
	ADD_STAT64(stats_rx.rx_grxcf, rx_stat_maccontrolframesreceived);
	ADD_STAT64(stats_rx.rx_grxpf, rx_stat_xoffstateentered);
	ADD_STAT64(stats_rx.rx_grxpf, rx_stat_mac_xpf);
	ADD_STAT64(stats_tx.tx_gtxpf, tx_stat_outxoffsent);
	ADD_STAT64(stats_tx.tx_gtxpf, tx_stat_flowcontroldone);

	/* collect pfc stats */
	ADD_64(pstats->pfc_frames_tx_hi, new->stats_tx.tx_gtxpp_hi,
	       pstats->pfc_frames_tx_lo, new->stats_tx.tx_gtxpp_lo);
	ADD_64(pstats->pfc_frames_rx_hi, new->stats_rx.rx_grxpp_hi,
	       pstats->pfc_frames_rx_lo, new->stats_rx.rx_grxpp_lo);

	ADD_STAT64(stats_tx.tx_gt64, tx_stat_etherstatspkts64octets);
	ADD_STAT64(stats_tx.tx_gt127,
			tx_stat_etherstatspkts65octetsto127octets);
	ADD_STAT64(stats_tx.tx_gt255,
			tx_stat_etherstatspkts128octetsto255octets);
	ADD_STAT64(stats_tx.tx_gt511,
			tx_stat_etherstatspkts256octetsto511octets);
	ADD_STAT64(stats_tx.tx_gt1023,
			tx_stat_etherstatspkts512octetsto1023octets);
	ADD_STAT64(stats_tx.tx_gt1518,
			tx_stat_etherstatspkts1024octetsto1522octets);
	ADD_STAT64(stats_tx.tx_gt2047, tx_stat_mac_2047);

	ADD_STAT64(stats_tx.tx_gt4095, tx_stat_mac_4095);
	ADD_STAT64(stats_tx.tx_gt9216, tx_stat_mac_9216);
	ADD_STAT64(stats_tx.tx_gt16383, tx_stat_mac_16383);

	ADD_STAT64(stats_tx.tx_gterr,
			tx_stat_dot3statsinternalmactransmiterrors);
	ADD_STAT64(stats_tx.tx_gtufl, tx_stat_mac_ufl);

	estats->etherstatspkts1024octetsto1522octets_hi =
	    pstats->mac_stx[1].tx_stat_etherstatspkts1024octetsto1522octets_hi;
	estats->etherstatspkts1024octetsto1522octets_lo =
	    pstats->mac_stx[1].tx_stat_etherstatspkts1024octetsto1522octets_lo;

	estats->etherstatspktsover1522octets_hi =
	    pstats->mac_stx[1].tx_stat_mac_2047_hi;
	estats->etherstatspktsover1522octets_lo =
	    pstats->mac_stx[1].tx_stat_mac_2047_lo;

	ADD_64(estats->etherstatspktsover1522octets_hi,
	       pstats->mac_stx[1].tx_stat_mac_4095_hi,
	       estats->etherstatspktsover1522octets_lo,
	       pstats->mac_stx[1].tx_stat_mac_4095_lo);

	ADD_64(estats->etherstatspktsover1522octets_hi,
	       pstats->mac_stx[1].tx_stat_mac_9216_hi,
	       estats->etherstatspktsover1522octets_lo,
	       pstats->mac_stx[1].tx_stat_mac_9216_lo);

	ADD_64(estats->etherstatspktsover1522octets_hi,
	       pstats->mac_stx[1].tx_stat_mac_16383_hi,
	       estats->etherstatspktsover1522octets_lo,
	       pstats->mac_stx[1].tx_stat_mac_16383_lo);

	estats->pause_frames_received_hi =
				pstats->mac_stx[1].rx_stat_mac_xpf_hi;
	estats->pause_frames_received_lo =
				pstats->mac_stx[1].rx_stat_mac_xpf_lo;

	estats->pause_frames_sent_hi =
				pstats->mac_stx[1].tx_stat_outxoffsent_hi;
	estats->pause_frames_sent_lo =
				pstats->mac_stx[1].tx_stat_outxoffsent_lo;

	estats->pfc_frames_received_hi =
				pstats->pfc_frames_rx_hi;
	estats->pfc_frames_received_lo =
				pstats->pfc_frames_rx_lo;
	estats->pfc_frames_sent_hi =
				pstats->pfc_frames_tx_hi;
	estats->pfc_frames_sent_lo =
				pstats->pfc_frames_tx_lo;
}

static void bnx2x_emac_stats_update(struct bnx2x *bp)
{
	struct emac_stats *new = bnx2x_sp(bp, mac_stats.emac_stats);
	struct host_port_stats *pstats = bnx2x_sp(bp, port_stats);
	struct bnx2x_eth_stats *estats = &bp->eth_stats;

	UPDATE_EXTEND_STAT(rx_stat_ifhcinbadoctets);
	UPDATE_EXTEND_STAT(tx_stat_ifhcoutbadoctets);
	UPDATE_EXTEND_STAT(rx_stat_dot3statsfcserrors);
	UPDATE_EXTEND_STAT(rx_stat_dot3statsalignmenterrors);
	UPDATE_EXTEND_STAT(rx_stat_dot3statscarriersenseerrors);
	UPDATE_EXTEND_STAT(rx_stat_falsecarriererrors);
	UPDATE_EXTEND_STAT(rx_stat_etherstatsundersizepkts);
	UPDATE_EXTEND_STAT(rx_stat_dot3statsframestoolong);
	UPDATE_EXTEND_STAT(rx_stat_etherstatsfragments);
	UPDATE_EXTEND_STAT(rx_stat_etherstatsjabbers);
	UPDATE_EXTEND_STAT(rx_stat_maccontrolframesreceived);
	UPDATE_EXTEND_STAT(rx_stat_xoffstateentered);
	UPDATE_EXTEND_STAT(rx_stat_xonpauseframesreceived);
	UPDATE_EXTEND_STAT(rx_stat_xoffpauseframesreceived);
	UPDATE_EXTEND_STAT(tx_stat_outxonsent);
	UPDATE_EXTEND_STAT(tx_stat_outxoffsent);
	UPDATE_EXTEND_STAT(tx_stat_flowcontroldone);
	UPDATE_EXTEND_STAT(tx_stat_etherstatscollisions);
	UPDATE_EXTEND_STAT(tx_stat_dot3statssinglecollisionframes);
	UPDATE_EXTEND_STAT(tx_stat_dot3statsmultiplecollisionframes);
	UPDATE_EXTEND_STAT(tx_stat_dot3statsdeferredtransmissions);
	UPDATE_EXTEND_STAT(tx_stat_dot3statsexcessivecollisions);
	UPDATE_EXTEND_STAT(tx_stat_dot3statslatecollisions);
	UPDATE_EXTEND_STAT(tx_stat_etherstatspkts64octets);
	UPDATE_EXTEND_STAT(tx_stat_etherstatspkts65octetsto127octets);
	UPDATE_EXTEND_STAT(tx_stat_etherstatspkts128octetsto255octets);
	UPDATE_EXTEND_STAT(tx_stat_etherstatspkts256octetsto511octets);
	UPDATE_EXTEND_STAT(tx_stat_etherstatspkts512octetsto1023octets);
	UPDATE_EXTEND_STAT(tx_stat_etherstatspkts1024octetsto1522octets);
	UPDATE_EXTEND_STAT(tx_stat_etherstatspktsover1522octets);
	UPDATE_EXTEND_STAT(tx_stat_dot3statsinternalmactransmiterrors);

	estats->pause_frames_received_hi =
			pstats->mac_stx[1].rx_stat_xonpauseframesreceived_hi;
	estats->pause_frames_received_lo =
			pstats->mac_stx[1].rx_stat_xonpauseframesreceived_lo;
	ADD_64(estats->pause_frames_received_hi,
	       pstats->mac_stx[1].rx_stat_xoffpauseframesreceived_hi,
	       estats->pause_frames_received_lo,
	       pstats->mac_stx[1].rx_stat_xoffpauseframesreceived_lo);

	estats->pause_frames_sent_hi =
			pstats->mac_stx[1].tx_stat_outxonsent_hi;
	estats->pause_frames_sent_lo =
			pstats->mac_stx[1].tx_stat_outxonsent_lo;
	ADD_64(estats->pause_frames_sent_hi,
	       pstats->mac_stx[1].tx_stat_outxoffsent_hi,
	       estats->pause_frames_sent_lo,
	       pstats->mac_stx[1].tx_stat_outxoffsent_lo);
}

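/* PMF only: merge the MAC and NIG counters gathered by the last DMAE
 * cycle into bp->eth_stats, dispatching on the active MAC type.
 */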
static int bnx2x_hw_stats_update(struct bnx2x *bp)
{
	struct nig_stats *new = bnx2x_sp(bp, nig_stats);
	struct nig_stats *old = &(bp->port.old_nig_stats);
	struct host_port_stats *pstats = bnx2x_sp(bp, port_stats);
	struct bnx2x_eth_stats *estats = &bp->eth_stats;
	struct {
		u32 lo;
		u32 hi;
	} diff;

	switch (bp->link_vars.mac_type) {
	case MAC_TYPE_BMAC:
		bnx2x_bmac_stats_update(bp);
		break;

	case MAC_TYPE_EMAC:
		bnx2x_emac_stats_update(bp);
		break;

	case MAC_TYPE_UMAC:
	case MAC_TYPE_XMAC:
		bnx2x_mstat_stats_update(bp);
		break;

	case MAC_TYPE_NONE: /* unreached */
		DP(BNX2X_MSG_STATS,
		   "stats updated by DMAE but no MAC active\n");
		return -1;

	default: /* unreached */
		BNX2X_ERR("Unknown MAC type\n");
	}

	ADD_EXTEND_64(pstats->brb_drop_hi, pstats->brb_drop_lo,
		      new->brb_discard - old->brb_discard);
	ADD_EXTEND_64(estats->brb_truncate_hi, estats->brb_truncate_lo,
		      new->brb_truncate - old->brb_truncate);

	if (!CHIP_IS_E3(bp)) {
		UPDATE_STAT64_NIG(egress_mac_pkt0,
					etherstatspkts1024octetsto1522octets);
		UPDATE_STAT64_NIG(egress_mac_pkt1,
					etherstatspktsover1522octets);
	}

	memcpy(old, new, sizeof(struct nig_stats));

	memcpy(&(estats->rx_stat_ifhcinbadoctets_hi), &(pstats->mac_stx[1]),
	       sizeof(struct mac_stx));
	estats->brb_drop_hi = pstats->brb_drop_hi;
	estats->brb_drop_lo = pstats->brb_drop_lo;

	pstats->host_port_stats_counter++;

	if (CHIP_IS_E3(bp)) {
		u32 lpi_reg = BP_PORT(bp) ? MISC_REG_CPMU_LP_SM_ENT_CNT_P1
					  : MISC_REG_CPMU_LP_SM_ENT_CNT_P0;
		estats->eee_tx_lpi += REG_RD(bp, lpi_reg);
	}

	if (!BP_NOMCP(bp)) {
		u32 nig_timer_max =
			SHMEM_RD(bp, port_mb[BP_PORT(bp)].stat_nig_timer);
		if (nig_timer_max != estats->nig_timer_max) {
			estats->nig_timer_max = nig_timer_max;
			BNX2X_ERR("NIG timer max (%u)\n",
				  estats->nig_timer_max);
		}
	}

	return 0;
}

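/* Consume the per-queue statistics reported by the firmware storms.
 * The data is taken as valid only if every storm has stamped it with
 * the sequence number of the last stats ramrod; otherwise -EAGAIN is
 * returned and the caller retries on the next statistics cycle.
 */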
static int bnx2x_storm_stats_update(struct bnx2x *bp)
{
	struct tstorm_per_port_stats *tport =
			&bp->fw_stats_data->port.tstorm_port_statistics;
	struct tstorm_per_pf_stats *tfunc =
			&bp->fw_stats_data->pf.tstorm_pf_statistics;
	struct host_func_stats *fstats = &bp->func_stats;
	struct bnx2x_eth_stats *estats = &bp->eth_stats;
	struct bnx2x_eth_stats_old *estats_old = &bp->eth_stats_old;
	struct stats_counter *counters = &bp->fw_stats_data->storm_counters;
	int i;
	u16 cur_stats_counter;

	/* Make sure we use the value of the counter
	 * used for sending the last stats ramrod.
	 */
	spin_lock_bh(&bp->stats_lock);
	cur_stats_counter = bp->stats_counter - 1;
	spin_unlock_bh(&bp->stats_lock);

	/* are storm stats valid? */
	if (le16_to_cpu(counters->xstats_counter) != cur_stats_counter) {
		DP(BNX2X_MSG_STATS,
		   "stats not updated by xstorm xstorm counter (0x%x) != stats_counter (0x%x)\n",
		   le16_to_cpu(counters->xstats_counter), bp->stats_counter);
		return -EAGAIN;
	}

	if (le16_to_cpu(counters->ustats_counter) != cur_stats_counter) {
		DP(BNX2X_MSG_STATS,
		   "stats not updated by ustorm ustorm counter (0x%x) != stats_counter (0x%x)\n",
		   le16_to_cpu(counters->ustats_counter), bp->stats_counter);
		return -EAGAIN;
	}

	if (le16_to_cpu(counters->cstats_counter) != cur_stats_counter) {
		DP(BNX2X_MSG_STATS,
		   "stats not updated by cstorm cstorm counter (0x%x) != stats_counter (0x%x)\n",
		   le16_to_cpu(counters->cstats_counter), bp->stats_counter);
		return -EAGAIN;
	}

	if (le16_to_cpu(counters->tstats_counter) != cur_stats_counter) {
		DP(BNX2X_MSG_STATS,
		   "stats not updated by tstorm tstorm counter (0x%x) != stats_counter (0x%x)\n",
		   le16_to_cpu(counters->tstats_counter), bp->stats_counter);
		return -EAGAIN;
	}

	estats->error_bytes_received_hi = 0;
	estats->error_bytes_received_lo = 0;

	for_each_eth_queue(bp, i) {
		struct bnx2x_fastpath *fp = &bp->fp[i];
		struct tstorm_per_queue_stats *tclient =
			&bp->fw_stats_data->queue_stats[i].
			tstorm_queue_statistics;
		struct tstorm_per_queue_stats *old_tclient =
			&bnx2x_fp_stats(bp, fp)->old_tclient;
		struct ustorm_per_queue_stats *uclient =
			&bp->fw_stats_data->queue_stats[i].
			ustorm_queue_statistics;
		struct ustorm_per_queue_stats *old_uclient =
			&bnx2x_fp_stats(bp, fp)->old_uclient;
		struct xstorm_per_queue_stats *xclient =
			&bp->fw_stats_data->queue_stats[i].
			xstorm_queue_statistics;
		struct xstorm_per_queue_stats *old_xclient =
			&bnx2x_fp_stats(bp, fp)->old_xclient;
		struct bnx2x_eth_q_stats *qstats =
			&bnx2x_fp_stats(bp, fp)->eth_q_stats;
		struct bnx2x_eth_q_stats_old *qstats_old =
			&bnx2x_fp_stats(bp, fp)->eth_q_stats_old;

		u32 diff;

		DP(BNX2X_MSG_STATS, "queue[%d]: ucast_sent 0x%x, bcast_sent 0x%x mcast_sent 0x%x\n",
		   i, xclient->ucast_pkts_sent,
		   xclient->bcast_pkts_sent, xclient->mcast_pkts_sent);

		DP(BNX2X_MSG_STATS, "---------------\n");

		UPDATE_QSTAT(tclient->rcv_bcast_bytes,
			     total_broadcast_bytes_received);
		UPDATE_QSTAT(tclient->rcv_mcast_bytes,
			     total_multicast_bytes_received);
		UPDATE_QSTAT(tclient->rcv_ucast_bytes,
			     total_unicast_bytes_received);

		/*
		 * sum to total_bytes_received all
		 * unicast/multicast/broadcast
		 */
		qstats->total_bytes_received_hi =
			qstats->total_broadcast_bytes_received_hi;
		qstats->total_bytes_received_lo =
			qstats->total_broadcast_bytes_received_lo;

		ADD_64(qstats->total_bytes_received_hi,
		       qstats->total_multicast_bytes_received_hi,
		       qstats->total_bytes_received_lo,
		       qstats->total_multicast_bytes_received_lo);

		ADD_64(qstats->total_bytes_received_hi,
		       qstats->total_unicast_bytes_received_hi,
		       qstats->total_bytes_received_lo,
		       qstats->total_unicast_bytes_received_lo);

		qstats->valid_bytes_received_hi =
					qstats->total_bytes_received_hi;
		qstats->valid_bytes_received_lo =
					qstats->total_bytes_received_lo;

		UPDATE_EXTEND_TSTAT(rcv_ucast_pkts,
					total_unicast_packets_received);
		UPDATE_EXTEND_TSTAT(rcv_mcast_pkts,
					total_multicast_packets_received);
		UPDATE_EXTEND_TSTAT(rcv_bcast_pkts,
					total_broadcast_packets_received);
		UPDATE_EXTEND_E_TSTAT(pkts_too_big_discard,
				      etherstatsoverrsizepkts);
		UPDATE_EXTEND_E_TSTAT(no_buff_discard, no_buff_discard);

		SUB_EXTEND_USTAT(ucast_no_buff_pkts,
					total_unicast_packets_received);
		SUB_EXTEND_USTAT(mcast_no_buff_pkts,
					total_multicast_packets_received);
		SUB_EXTEND_USTAT(bcast_no_buff_pkts,
					total_broadcast_packets_received);
		UPDATE_EXTEND_E_USTAT(ucast_no_buff_pkts, no_buff_discard);
		UPDATE_EXTEND_E_USTAT(mcast_no_buff_pkts, no_buff_discard);
		UPDATE_EXTEND_E_USTAT(bcast_no_buff_pkts, no_buff_discard);

		UPDATE_QSTAT(xclient->bcast_bytes_sent,
			     total_broadcast_bytes_transmitted);
		UPDATE_QSTAT(xclient->mcast_bytes_sent,
			     total_multicast_bytes_transmitted);
		UPDATE_QSTAT(xclient->ucast_bytes_sent,
			     total_unicast_bytes_transmitted);

		/*
		 * sum to total_bytes_transmitted all
		 * unicast/multicast/broadcast
		 */
		qstats->total_bytes_transmitted_hi =
				qstats->total_unicast_bytes_transmitted_hi;
		qstats->total_bytes_transmitted_lo =
				qstats->total_unicast_bytes_transmitted_lo;

		ADD_64(qstats->total_bytes_transmitted_hi,
		       qstats->total_broadcast_bytes_transmitted_hi,
		       qstats->total_bytes_transmitted_lo,
		       qstats->total_broadcast_bytes_transmitted_lo);

		ADD_64(qstats->total_bytes_transmitted_hi,
		       qstats->total_multicast_bytes_transmitted_hi,
		       qstats->total_bytes_transmitted_lo,
		       qstats->total_multicast_bytes_transmitted_lo);

		UPDATE_EXTEND_XSTAT(ucast_pkts_sent,
					total_unicast_packets_transmitted);
		UPDATE_EXTEND_XSTAT(mcast_pkts_sent,
					total_multicast_packets_transmitted);
		UPDATE_EXTEND_XSTAT(bcast_pkts_sent,
					total_broadcast_packets_transmitted);

		UPDATE_EXTEND_TSTAT(checksum_discard,
				    total_packets_received_checksum_discarded);
		UPDATE_EXTEND_TSTAT(ttl0_discard,
				    total_packets_received_ttl0_discarded);

		UPDATE_EXTEND_XSTAT(error_drop_pkts,
				    total_transmitted_dropped_packets_error);

		/* TPA aggregations completed */
		UPDATE_EXTEND_E_USTAT(coalesced_events, total_tpa_aggregations);
		/* Number of network frames aggregated by TPA */
		UPDATE_EXTEND_E_USTAT(coalesced_pkts,
				      total_tpa_aggregated_frames);
		/* Total number of bytes in completed TPA aggregations */
		UPDATE_QSTAT(uclient->coalesced_bytes, total_tpa_bytes);

		UPDATE_ESTAT_QSTAT_64(total_tpa_bytes);

		UPDATE_FSTAT_QSTAT(total_bytes_received);
		UPDATE_FSTAT_QSTAT(total_bytes_transmitted);
		UPDATE_FSTAT_QSTAT(total_unicast_packets_received);
		UPDATE_FSTAT_QSTAT(total_multicast_packets_received);
		UPDATE_FSTAT_QSTAT(total_broadcast_packets_received);
		UPDATE_FSTAT_QSTAT(total_unicast_packets_transmitted);
		UPDATE_FSTAT_QSTAT(total_multicast_packets_transmitted);
		UPDATE_FSTAT_QSTAT(total_broadcast_packets_transmitted);
		UPDATE_FSTAT_QSTAT(valid_bytes_received);
	}

	ADD_64(estats->total_bytes_received_hi,
	       estats->rx_stat_ifhcinbadoctets_hi,
	       estats->total_bytes_received_lo,
	       estats->rx_stat_ifhcinbadoctets_lo);

	ADD_64(estats->total_bytes_received_hi,
	       le32_to_cpu(tfunc->rcv_error_bytes.hi),
	       estats->total_bytes_received_lo,
	       le32_to_cpu(tfunc->rcv_error_bytes.lo));

	ADD_64(estats->error_bytes_received_hi,
	       le32_to_cpu(tfunc->rcv_error_bytes.hi),
	       estats->error_bytes_received_lo,
	       le32_to_cpu(tfunc->rcv_error_bytes.lo));

	UPDATE_ESTAT(etherstatsoverrsizepkts, rx_stat_dot3statsframestoolong);

	ADD_64(estats->error_bytes_received_hi,
	       estats->rx_stat_ifhcinbadoctets_hi,
	       estats->error_bytes_received_lo,
	       estats->rx_stat_ifhcinbadoctets_lo);

	if (bp->port.pmf) {
		struct bnx2x_fw_port_stats_old *fwstats = &bp->fw_stats_old;
		UPDATE_FW_STAT(mac_filter_discard);
		UPDATE_FW_STAT(mf_tag_discard);
		UPDATE_FW_STAT(brb_truncate_discard);
		UPDATE_FW_STAT(mac_discard);
	}

	fstats->host_func_stats_start = ++fstats->host_func_stats_end;

	bp->stats_pending = 0;

	return 0;
}

static void bnx2x_net_stats_update(struct bnx2x *bp)
{
	struct bnx2x_eth_stats *estats = &bp->eth_stats;
	struct net_device_stats *nstats = &bp->dev->stats;
	unsigned long tmp;
	int i;

	nstats->rx_packets =
		bnx2x_hilo(&estats->total_unicast_packets_received_hi) +
		bnx2x_hilo(&estats->total_multicast_packets_received_hi) +
		bnx2x_hilo(&estats->total_broadcast_packets_received_hi);

	nstats->tx_packets =
		bnx2x_hilo(&estats->total_unicast_packets_transmitted_hi) +
		bnx2x_hilo(&estats->total_multicast_packets_transmitted_hi) +
		bnx2x_hilo(&estats->total_broadcast_packets_transmitted_hi);

	nstats->rx_bytes = bnx2x_hilo(&estats->total_bytes_received_hi);

	nstats->tx_bytes = bnx2x_hilo(&estats->total_bytes_transmitted_hi);

	tmp = estats->mac_discard;
	for_each_rx_queue(bp, i) {
		struct tstorm_per_queue_stats *old_tclient =
			&bp->fp_stats[i].old_tclient;
		tmp += le32_to_cpu(old_tclient->checksum_discard);
	}
	nstats->rx_dropped = tmp + bp->net_stats_old.rx_dropped;

	nstats->tx_dropped = 0;

	nstats->multicast =
		bnx2x_hilo(&estats->total_multicast_packets_received_hi);

	nstats->collisions =
		bnx2x_hilo(&estats->tx_stat_etherstatscollisions_hi);

	nstats->rx_length_errors =
		bnx2x_hilo(&estats->rx_stat_etherstatsundersizepkts_hi) +
		bnx2x_hilo(&estats->etherstatsoverrsizepkts_hi);
	nstats->rx_over_errors = bnx2x_hilo(&estats->brb_drop_hi) +
				 bnx2x_hilo(&estats->brb_truncate_hi);
	nstats->rx_crc_errors =
		bnx2x_hilo(&estats->rx_stat_dot3statsfcserrors_hi);
	nstats->rx_frame_errors =
		bnx2x_hilo(&estats->rx_stat_dot3statsalignmenterrors_hi);
	nstats->rx_fifo_errors = bnx2x_hilo(&estats->no_buff_discard_hi);
	nstats->rx_missed_errors = 0;

	nstats->rx_errors = nstats->rx_length_errors +
			    nstats->rx_over_errors +
			    nstats->rx_crc_errors +
			    nstats->rx_frame_errors +
			    nstats->rx_fifo_errors +
			    nstats->rx_missed_errors;

	nstats->tx_aborted_errors =
		bnx2x_hilo(&estats->tx_stat_dot3statslatecollisions_hi) +
		bnx2x_hilo(&estats->tx_stat_dot3statsexcessivecollisions_hi);
	nstats->tx_carrier_errors =
		bnx2x_hilo(&estats->rx_stat_dot3statscarriersenseerrors_hi);
	nstats->tx_fifo_errors = 0;
	nstats->tx_heartbeat_errors = 0;
	nstats->tx_window_errors = 0;

	nstats->tx_errors = nstats->tx_aborted_errors +
			    nstats->tx_carrier_errors +
	     bnx2x_hilo(&estats->tx_stat_dot3statsinternalmactransmiterrors_hi);
}

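/* Fold the driver-maintained per-queue counters (xoff events, RX
 * discards, SKB allocation and checksum failures) into the global
 * ethtool statistics.
 */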
static void bnx2x_drv_stats_update(struct bnx2x *bp)
{
	struct bnx2x_eth_stats *estats = &bp->eth_stats;
	int i;

	for_each_queue(bp, i) {
		struct bnx2x_eth_q_stats *qstats = &bp->fp_stats[i].eth_q_stats;
		struct bnx2x_eth_q_stats_old *qstats_old =
			&bp->fp_stats[i].eth_q_stats_old;

		UPDATE_ESTAT_QSTAT(driver_xoff);
		UPDATE_ESTAT_QSTAT(rx_err_discard_pkt);
		UPDATE_ESTAT_QSTAT(rx_skb_alloc_failed);
		UPDATE_ESTAT_QSTAT(hw_csum_err);
	}
}

static bool bnx2x_edebug_stats_stopped(struct bnx2x *bp)
{
	u32 val;

	if (SHMEM2_HAS(bp, edebug_driver_if[1])) {
		val = SHMEM2_RD(bp, edebug_driver_if[1]);

		if (val == EDEBUG_DRIVER_IF_OP_CODE_DISABLE_STAT)
			return true;
	}

	return false;
}

static void bnx2x_stats_update(struct bnx2x *bp)
{
	u32 *stats_comp = bnx2x_sp(bp, stats_comp);

	if (bnx2x_edebug_stats_stopped(bp))
		return;

	if (*stats_comp != DMAE_COMP_VAL)
		return;

	if (bp->port.pmf)
		bnx2x_hw_stats_update(bp);

	if (bnx2x_storm_stats_update(bp) && (bp->stats_pending++ == 3)) {
		BNX2X_ERR("storm stats were not updated for 3 times\n");
		bnx2x_panic();
		return;
	}

	bnx2x_net_stats_update(bp);
	bnx2x_drv_stats_update(bp);

	if (netif_msg_timer(bp)) {
		struct bnx2x_eth_stats *estats = &bp->eth_stats;

		netdev_dbg(bp->dev, "brb drops %u brb truncate %u\n",
			   estats->brb_drop_lo, estats->brb_truncate_lo);
	}

	bnx2x_hw_stats_post(bp);
	bnx2x_storm_stats_post(bp);
}

static void bnx2x_port_stats_stop(struct bnx2x *bp)
{
	struct dmae_command *dmae;
	u32 opcode;
	int loader_idx = PMF_DMAE_C(bp);
	u32 *stats_comp = bnx2x_sp(bp, stats_comp);

	bp->executer_idx = 0;

	opcode = bnx2x_dmae_opcode(bp, DMAE_SRC_PCI, DMAE_DST_GRC, false, 0);

	if (bp->port.port_stx) {

		dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
		if (bp->func_stx)
			dmae->opcode = bnx2x_dmae_opcode_add_comp(
						opcode, DMAE_COMP_GRC);
		else
			dmae->opcode = bnx2x_dmae_opcode_add_comp(
						opcode, DMAE_COMP_PCI);

		dmae->src_addr_lo = U64_LO(bnx2x_sp_mapping(bp, port_stats));
		dmae->src_addr_hi = U64_HI(bnx2x_sp_mapping(bp, port_stats));
		dmae->dst_addr_lo = bp->port.port_stx >> 2;
		dmae->dst_addr_hi = 0;
		dmae->len = bnx2x_get_port_stats_dma_len(bp);
		if (bp->func_stx) {
			dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
			dmae->comp_addr_hi = 0;
			dmae->comp_val = 1;
		} else {
			dmae->comp_addr_lo =
				U64_LO(bnx2x_sp_mapping(bp, stats_comp));
			dmae->comp_addr_hi =
				U64_HI(bnx2x_sp_mapping(bp, stats_comp));
			dmae->comp_val = DMAE_COMP_VAL;

			*stats_comp = 0;
		}
	}

	if (bp->func_stx) {

		dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
		dmae->opcode =
			bnx2x_dmae_opcode_add_comp(opcode, DMAE_COMP_PCI);
		dmae->src_addr_lo = U64_LO(bnx2x_sp_mapping(bp, func_stats));
		dmae->src_addr_hi = U64_HI(bnx2x_sp_mapping(bp, func_stats));
		dmae->dst_addr_lo = bp->func_stx >> 2;
		dmae->dst_addr_hi = 0;
		dmae->len = sizeof(struct host_func_stats) >> 2;
		dmae->comp_addr_lo = U64_LO(bnx2x_sp_mapping(bp, stats_comp));
		dmae->comp_addr_hi = U64_HI(bnx2x_sp_mapping(bp, stats_comp));
		dmae->comp_val = DMAE_COMP_VAL;

		*stats_comp = 0;
	}
}

static void bnx2x_stats_stop(struct bnx2x *bp)
{
	int update = 0;

	bnx2x_stats_comp(bp);

	if (bp->port.pmf)
		update = (bnx2x_hw_stats_update(bp) == 0);

	update |= (bnx2x_storm_stats_update(bp) == 0);

	if (update) {
		bnx2x_net_stats_update(bp);

		if (bp->port.pmf)
			bnx2x_port_stats_stop(bp);

		bnx2x_hw_stats_post(bp);
		bnx2x_stats_comp(bp);
	}
}

static void bnx2x_stats_do_nothing(struct bnx2x *bp)
{
}

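/* Statistics state machine: for every (state, event) pair, the action
 * to run and the next state to enter. The events are PMF change, link
 * up, periodic update and stop; bnx2x_stats_handle() below performs
 * the transition and then runs the action.
 */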
static const struct {
	void (*action)(struct bnx2x *bp);
	enum bnx2x_stats_state next_state;
} bnx2x_stats_stm[STATS_STATE_MAX][STATS_EVENT_MAX] = {
/* state	event	*/
{
/* DISABLED	PMF	*/ {bnx2x_stats_pmf_update, STATS_STATE_DISABLED},
/*		LINK_UP	*/ {bnx2x_stats_start,      STATS_STATE_ENABLED},
/*		UPDATE	*/ {bnx2x_stats_do_nothing, STATS_STATE_DISABLED},
/*		STOP	*/ {bnx2x_stats_do_nothing, STATS_STATE_DISABLED}
},
{
/* ENABLED	PMF	*/ {bnx2x_stats_pmf_start,  STATS_STATE_ENABLED},
/*		LINK_UP	*/ {bnx2x_stats_restart,    STATS_STATE_ENABLED},
/*		UPDATE	*/ {bnx2x_stats_update,     STATS_STATE_ENABLED},
/*		STOP	*/ {bnx2x_stats_stop,       STATS_STATE_DISABLED}
}
};

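/* Feed an event into the state machine.  The state read and transition
 * are done under stats_lock so that concurrent events serialize; the
 * action itself is invoked outside the lock.
 */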
void bnx2x_stats_handle(struct bnx2x *bp, enum bnx2x_stats_event event)
{
	enum bnx2x_stats_state state;

	if (unlikely(bp->panic))
		return;

	spin_lock_bh(&bp->stats_lock);
	state = bp->stats_state;
	bp->stats_state = bnx2x_stats_stm[state][event].next_state;
	spin_unlock_bh(&bp->stats_lock);

	bnx2x_stats_stm[state][event].action(bp);

	if ((event != STATS_EVENT_UPDATE) || netif_msg_timer(bp))
		DP(BNX2X_MSG_STATS, "state %d -> event %d -> state %d\n",
		   state, event, bp->stats_state);
}

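/* DMA the port statistics buffer out to its shmem location as the base
 * snapshot; only meaningful on the PMF with a valid port_stx address,
 * which the sanity check below enforces.
 */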
static void bnx2x_port_stats_base_init(struct bnx2x *bp)
{
	struct dmae_command *dmae;
	u32 *stats_comp = bnx2x_sp(bp, stats_comp);

	/* sanity */
	if (!bp->port.pmf || !bp->port.port_stx) {
		BNX2X_ERR("BUG!\n");
		return;
	}

	bp->executer_idx = 0;

	dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
	dmae->opcode = bnx2x_dmae_opcode(bp, DMAE_SRC_PCI, DMAE_DST_GRC,
					 true, DMAE_COMP_PCI);
	dmae->src_addr_lo = U64_LO(bnx2x_sp_mapping(bp, port_stats));
	dmae->src_addr_hi = U64_HI(bnx2x_sp_mapping(bp, port_stats));
	dmae->dst_addr_lo = bp->port.port_stx >> 2;
	dmae->dst_addr_hi = 0;
	dmae->len = bnx2x_get_port_stats_dma_len(bp);
	dmae->comp_addr_lo = U64_LO(bnx2x_sp_mapping(bp, stats_comp));
	dmae->comp_addr_hi = U64_HI(bnx2x_sp_mapping(bp, stats_comp));
	dmae->comp_val = DMAE_COMP_VAL;

	*stats_comp = 0;
	bnx2x_hw_stats_post(bp);
	bnx2x_stats_comp(bp);
}

/* This function prepares the statistics ramrod data once, so that
 * afterwards we only have to increment the statistics counter and
 * send the ramrod each time we have to.
 */
static void bnx2x_prep_fw_stats_req(struct bnx2x *bp)
{
	int i;
	int first_queue_query_index;
	struct stats_query_header *stats_hdr = &bp->fw_stats_req->hdr;

	dma_addr_t cur_data_offset;
	struct stats_query_entry *cur_query_entry;

	stats_hdr->cmd_num = bp->fw_stats_num;
	stats_hdr->drv_stats_counter = 0;

	/* The storm_counters struct contains the counters of completed
	 * statistics requests per storm, which the FW increments each
	 * time it completes handling a statistics ramrod.  We check
	 * these counters in the timer handler and discard a stale
	 * (statistics) ramrod completion.
	 */
	cur_data_offset = bp->fw_stats_data_mapping +
		offsetof(struct bnx2x_fw_stats_data, storm_counters);

	stats_hdr->stats_counters_addrs.hi =
		cpu_to_le32(U64_HI(cur_data_offset));
	stats_hdr->stats_counters_addrs.lo =
		cpu_to_le32(U64_LO(cur_data_offset));

	/* prepare for the first stats ramrod (it will be completed with
	 * the counters equal to zero) - init the counters to something
	 * different.
	 */
	memset(&bp->fw_stats_data->storm_counters, 0xff,
	       sizeof(struct stats_counter));

	/**** Port FW statistics data ****/
	cur_data_offset = bp->fw_stats_data_mapping +
		offsetof(struct bnx2x_fw_stats_data, port);

	cur_query_entry = &bp->fw_stats_req->query[BNX2X_PORT_QUERY_IDX];

	cur_query_entry->kind = STATS_TYPE_PORT;
	/* For port query index is a DONT CARE */
	cur_query_entry->index = BP_PORT(bp);
	/* For port query funcID is a DONT CARE */
	cur_query_entry->funcID = cpu_to_le16(BP_FUNC(bp));
	cur_query_entry->address.hi = cpu_to_le32(U64_HI(cur_data_offset));
	cur_query_entry->address.lo = cpu_to_le32(U64_LO(cur_data_offset));

	/**** PF FW statistics data ****/
	cur_data_offset = bp->fw_stats_data_mapping +
		offsetof(struct bnx2x_fw_stats_data, pf);

	cur_query_entry = &bp->fw_stats_req->query[BNX2X_PF_QUERY_IDX];

	cur_query_entry->kind = STATS_TYPE_PF;
	/* For PF query index is a DONT CARE */
	cur_query_entry->index = BP_PORT(bp);
	cur_query_entry->funcID = cpu_to_le16(BP_FUNC(bp));
	cur_query_entry->address.hi = cpu_to_le32(U64_HI(cur_data_offset));
	cur_query_entry->address.lo = cpu_to_le32(U64_LO(cur_data_offset));

	/**** FCoE FW statistics data ****/
	if (!NO_FCOE(bp)) {
		cur_data_offset = bp->fw_stats_data_mapping +
			offsetof(struct bnx2x_fw_stats_data, fcoe);

		cur_query_entry =
			&bp->fw_stats_req->query[BNX2X_FCOE_QUERY_IDX];

		cur_query_entry->kind = STATS_TYPE_FCOE;
		/* For FCoE query index is a DONT CARE */
		cur_query_entry->index = BP_PORT(bp);
		cur_query_entry->funcID = cpu_to_le16(BP_FUNC(bp));
		cur_query_entry->address.hi =
			cpu_to_le32(U64_HI(cur_data_offset));
		cur_query_entry->address.lo =
			cpu_to_le32(U64_LO(cur_data_offset));
	}

	/**** Clients' queries ****/
	cur_data_offset = bp->fw_stats_data_mapping +
		offsetof(struct bnx2x_fw_stats_data, queue_stats);

	/* the first queue query index depends on whether the FCoE offloaded
	 * request is included in the ramrod
	 */
	if (!NO_FCOE(bp))
		first_queue_query_index = BNX2X_FIRST_QUEUE_QUERY_IDX;
	else
		first_queue_query_index = BNX2X_FIRST_QUEUE_QUERY_IDX - 1;

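	/* one query entry per eth queue follows, starting at
	 * first_queue_query_index; each entry points at its own
	 * per_queue_stats slot inside the DMA-able fw_stats_data buffer
	 */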
	for_each_eth_queue(bp, i) {
		cur_query_entry =
			&bp->fw_stats_req->
					query[first_queue_query_index + i];

		cur_query_entry->kind = STATS_TYPE_QUEUE;
		cur_query_entry->index = bnx2x_stats_id(&bp->fp[i]);
		cur_query_entry->funcID = cpu_to_le16(BP_FUNC(bp));
		cur_query_entry->address.hi =
			cpu_to_le32(U64_HI(cur_data_offset));
		cur_query_entry->address.lo =
			cpu_to_le32(U64_LO(cur_data_offset));

		cur_data_offset += sizeof(struct per_queue_stats);
	}

	/* add FCoE queue query if needed */
	if (!NO_FCOE(bp)) {
		cur_query_entry =
			&bp->fw_stats_req->
					query[first_queue_query_index + i];

		cur_query_entry->kind = STATS_TYPE_QUEUE;
		cur_query_entry->index = bnx2x_stats_id(&bp->fp[FCOE_IDX(bp)]);
		cur_query_entry->funcID = cpu_to_le16(BP_FUNC(bp));
		cur_query_entry->address.hi =
			cpu_to_le32(U64_HI(cur_data_offset));
		cur_query_entry->address.lo =
			cpu_to_le32(U64_LO(cur_data_offset));
	}
}

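/* (Re)initialize the statistics bookkeeping: read the port/function
 * statistics mailbox addresses from shmem, reset the old/baseline
 * snapshots (fully only on a cold init), prepare the FW statistics
 * ramrod data and leave the state machine in STATS_STATE_DISABLED.
 */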
void bnx2x_stats_init(struct bnx2x *bp)
{
	int /*abs*/port = BP_PORT(bp);
	int mb_idx = BP_FW_MB_IDX(bp);
	int i;

	bp->stats_pending = 0;
	bp->executer_idx = 0;
	bp->stats_counter = 0;

	/* port and func stats for management */
	if (!BP_NOMCP(bp)) {
		bp->port.port_stx = SHMEM_RD(bp, port_mb[port].port_stx);
		bp->func_stx = SHMEM_RD(bp, func_mb[mb_idx].fw_mb_param);

	} else {
		bp->port.port_stx = 0;
		bp->func_stx = 0;
	}
	DP(BNX2X_MSG_STATS, "port_stx 0x%x func_stx 0x%x\n",
	   bp->port.port_stx, bp->func_stx);

	/* the pmf should retrieve port statistics from the SP on a non-init run */
	if (!bp->stats_init && bp->port.pmf && bp->port.port_stx)
		bnx2x_stats_handle(bp, STATS_EVENT_PMF);

	port = BP_PORT(bp);
	/* port stats */
	memset(&(bp->port.old_nig_stats), 0, sizeof(struct nig_stats));
	bp->port.old_nig_stats.brb_discard =
			REG_RD(bp, NIG_REG_STAT0_BRB_DISCARD + port*0x38);
	bp->port.old_nig_stats.brb_truncate =
			REG_RD(bp, NIG_REG_STAT0_BRB_TRUNCATE + port*0x38);
	if (!CHIP_IS_E3(bp)) {
		REG_RD_DMAE(bp, NIG_REG_STAT0_EGRESS_MAC_PKT0 + port*0x50,
			    &(bp->port.old_nig_stats.egress_mac_pkt0_lo), 2);
		REG_RD_DMAE(bp, NIG_REG_STAT0_EGRESS_MAC_PKT1 + port*0x50,
			    &(bp->port.old_nig_stats.egress_mac_pkt1_lo), 2);
	}

	/* function stats */
	for_each_queue(bp, i) {
		struct bnx2x_fp_stats *fp_stats = &bp->fp_stats[i];

		memset(&fp_stats->old_tclient, 0,
		       sizeof(fp_stats->old_tclient));
		memset(&fp_stats->old_uclient, 0,
		       sizeof(fp_stats->old_uclient));
		memset(&fp_stats->old_xclient, 0,
		       sizeof(fp_stats->old_xclient));
		if (bp->stats_init) {
			memset(&fp_stats->eth_q_stats, 0,
			       sizeof(fp_stats->eth_q_stats));
			memset(&fp_stats->eth_q_stats_old, 0,
			       sizeof(fp_stats->eth_q_stats_old));
		}
	}

	/* Prepare statistics ramrod data */
	bnx2x_prep_fw_stats_req(bp);

	memset(&bp->dev->stats, 0, sizeof(bp->dev->stats));
	if (bp->stats_init) {
		memset(&bp->net_stats_old, 0, sizeof(bp->net_stats_old));
		memset(&bp->fw_stats_old, 0, sizeof(bp->fw_stats_old));
		memset(&bp->eth_stats_old, 0, sizeof(bp->eth_stats_old));
		memset(&bp->eth_stats, 0, sizeof(bp->eth_stats));
		memset(&bp->func_stats, 0, sizeof(bp->func_stats));

		/* Clean SP from previous statistics */
		if (bp->func_stx) {
			memset(bnx2x_sp(bp, func_stats), 0,
			       sizeof(struct host_func_stats));
			bnx2x_func_stats_init(bp);
			bnx2x_hw_stats_post(bp);
			bnx2x_stats_comp(bp);
		}
	}

	bp->stats_state = STATS_STATE_DISABLED;

	if (bp->port.pmf && bp->port.port_stx)
		bnx2x_port_stats_base_init(bp);

	/* mark the end of statistics initialization */
	bp->stats_init = false;
}

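/* Snapshot the counters that must survive an unload/load cycle: the
 * per-queue byte and TPA counters are copied into their *_old
 * counterparts, along with rx_dropped and (for a multi-function PMF)
 * the port firmware discard counters.
 */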
void bnx2x_save_statistics(struct bnx2x *bp)
{
	int i;
	struct net_device_stats *nstats = &bp->dev->stats;

	/* save queue statistics */
	for_each_eth_queue(bp, i) {
		struct bnx2x_fastpath *fp = &bp->fp[i];
		struct bnx2x_eth_q_stats *qstats =
			&bnx2x_fp_stats(bp, fp)->eth_q_stats;
		struct bnx2x_eth_q_stats_old *qstats_old =
			&bnx2x_fp_stats(bp, fp)->eth_q_stats_old;

		UPDATE_QSTAT_OLD(total_unicast_bytes_received_hi);
		UPDATE_QSTAT_OLD(total_unicast_bytes_received_lo);
		UPDATE_QSTAT_OLD(total_broadcast_bytes_received_hi);
		UPDATE_QSTAT_OLD(total_broadcast_bytes_received_lo);
		UPDATE_QSTAT_OLD(total_multicast_bytes_received_hi);
		UPDATE_QSTAT_OLD(total_multicast_bytes_received_lo);
		UPDATE_QSTAT_OLD(total_unicast_bytes_transmitted_hi);
		UPDATE_QSTAT_OLD(total_unicast_bytes_transmitted_lo);
		UPDATE_QSTAT_OLD(total_broadcast_bytes_transmitted_hi);
		UPDATE_QSTAT_OLD(total_broadcast_bytes_transmitted_lo);
		UPDATE_QSTAT_OLD(total_multicast_bytes_transmitted_hi);
		UPDATE_QSTAT_OLD(total_multicast_bytes_transmitted_lo);
		UPDATE_QSTAT_OLD(total_tpa_bytes_hi);
		UPDATE_QSTAT_OLD(total_tpa_bytes_lo);
	}

	/* save net_device_stats statistics */
	bp->net_stats_old.rx_dropped = nstats->rx_dropped;

	/* store port firmware statistics */
	if (bp->port.pmf && IS_MF(bp)) {
		struct bnx2x_eth_stats *estats = &bp->eth_stats;
		struct bnx2x_fw_port_stats_old *fwstats = &bp->fw_stats_old;
		UPDATE_FW_STAT_OLD(mac_filter_discard);
		UPDATE_FW_STAT_OLD(mf_tag_discard);
		UPDATE_FW_STAT_OLD(brb_truncate_discard);
		UPDATE_FW_STAT_OLD(mac_discard);
	}
}

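/* Build the AFEX view of the statistics: sum the per-queue eth counters
 * into the zeroed afex_stats buffer, fold in the FCoE counters
 * (collected separately, both offloaded and non-offloaded) and, for the
 * PMF, optionally add the port-level discards.  ADD_64() accumulates a
 * 64-bit value kept as a hi/lo pair of 32-bit words, propagating the
 * carry from lo to hi.
 */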
void bnx2x_afex_collect_stats(struct bnx2x *bp, void *void_afex_stats,
			      u32 stats_type)
{
	int i;
	struct afex_stats *afex_stats = (struct afex_stats *)void_afex_stats;
	struct bnx2x_eth_stats *estats = &bp->eth_stats;
	struct per_queue_stats *fcoe_q_stats =
		&bp->fw_stats_data->queue_stats[FCOE_IDX(bp)];

	struct tstorm_per_queue_stats *fcoe_q_tstorm_stats =
		&fcoe_q_stats->tstorm_queue_statistics;

	struct ustorm_per_queue_stats *fcoe_q_ustorm_stats =
		&fcoe_q_stats->ustorm_queue_statistics;

	struct xstorm_per_queue_stats *fcoe_q_xstorm_stats =
		&fcoe_q_stats->xstorm_queue_statistics;

	struct fcoe_statistics_params *fw_fcoe_stat =
		&bp->fw_stats_data->fcoe;

	memset(afex_stats, 0, sizeof(struct afex_stats));

	for_each_eth_queue(bp, i) {
		struct bnx2x_eth_q_stats *qstats = &bp->fp_stats[i].eth_q_stats;

		ADD_64(afex_stats->rx_unicast_bytes_hi,
		       qstats->total_unicast_bytes_received_hi,
		       afex_stats->rx_unicast_bytes_lo,
		       qstats->total_unicast_bytes_received_lo);

		ADD_64(afex_stats->rx_broadcast_bytes_hi,
		       qstats->total_broadcast_bytes_received_hi,
		       afex_stats->rx_broadcast_bytes_lo,
		       qstats->total_broadcast_bytes_received_lo);

		ADD_64(afex_stats->rx_multicast_bytes_hi,
		       qstats->total_multicast_bytes_received_hi,
		       afex_stats->rx_multicast_bytes_lo,
		       qstats->total_multicast_bytes_received_lo);

		ADD_64(afex_stats->rx_unicast_frames_hi,
		       qstats->total_unicast_packets_received_hi,
		       afex_stats->rx_unicast_frames_lo,
		       qstats->total_unicast_packets_received_lo);

		ADD_64(afex_stats->rx_broadcast_frames_hi,
		       qstats->total_broadcast_packets_received_hi,
		       afex_stats->rx_broadcast_frames_lo,
		       qstats->total_broadcast_packets_received_lo);

		ADD_64(afex_stats->rx_multicast_frames_hi,
		       qstats->total_multicast_packets_received_hi,
		       afex_stats->rx_multicast_frames_lo,
		       qstats->total_multicast_packets_received_lo);

		/* sum into rx_frames_discarded all packets discarded
		 * due to size, ttl0 and checksum
		 */
		ADD_64(afex_stats->rx_frames_discarded_hi,
		       qstats->total_packets_received_checksum_discarded_hi,
		       afex_stats->rx_frames_discarded_lo,
		       qstats->total_packets_received_checksum_discarded_lo);

		ADD_64(afex_stats->rx_frames_discarded_hi,
		       qstats->total_packets_received_ttl0_discarded_hi,
		       afex_stats->rx_frames_discarded_lo,
		       qstats->total_packets_received_ttl0_discarded_lo);

		ADD_64(afex_stats->rx_frames_discarded_hi,
		       qstats->etherstatsoverrsizepkts_hi,
		       afex_stats->rx_frames_discarded_lo,
		       qstats->etherstatsoverrsizepkts_lo);

		ADD_64(afex_stats->rx_frames_dropped_hi,
		       qstats->no_buff_discard_hi,
		       afex_stats->rx_frames_dropped_lo,
		       qstats->no_buff_discard_lo);

		ADD_64(afex_stats->tx_unicast_bytes_hi,
		       qstats->total_unicast_bytes_transmitted_hi,
		       afex_stats->tx_unicast_bytes_lo,
		       qstats->total_unicast_bytes_transmitted_lo);

		ADD_64(afex_stats->tx_broadcast_bytes_hi,
		       qstats->total_broadcast_bytes_transmitted_hi,
		       afex_stats->tx_broadcast_bytes_lo,
		       qstats->total_broadcast_bytes_transmitted_lo);

		ADD_64(afex_stats->tx_multicast_bytes_hi,
		       qstats->total_multicast_bytes_transmitted_hi,
		       afex_stats->tx_multicast_bytes_lo,
		       qstats->total_multicast_bytes_transmitted_lo);

		ADD_64(afex_stats->tx_unicast_frames_hi,
		       qstats->total_unicast_packets_transmitted_hi,
		       afex_stats->tx_unicast_frames_lo,
		       qstats->total_unicast_packets_transmitted_lo);

		ADD_64(afex_stats->tx_broadcast_frames_hi,
		       qstats->total_broadcast_packets_transmitted_hi,
		       afex_stats->tx_broadcast_frames_lo,
		       qstats->total_broadcast_packets_transmitted_lo);

		ADD_64(afex_stats->tx_multicast_frames_hi,
		       qstats->total_multicast_packets_transmitted_hi,
		       afex_stats->tx_multicast_frames_lo,
		       qstats->total_multicast_packets_transmitted_lo);

		ADD_64(afex_stats->tx_frames_dropped_hi,
		       qstats->total_transmitted_dropped_packets_error_hi,
		       afex_stats->tx_frames_dropped_lo,
		       qstats->total_transmitted_dropped_packets_error_lo);
	}

	/* now add the FCoE statistics, which are collected separately
	 * (both offloaded and non-offloaded)
	 */
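	/* ADD_64_LE()/ADD_64_LE16() behave like ADD_64() but take the
	 * addends as little-endian firmware values (see their definitions
	 * in bnx2x_stats.h); LE32_0/LE16_0 supply a zero high word for
	 * counters that firmware keeps only 32/16 bits wide.
	 */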
	if (!NO_FCOE(bp)) {
		ADD_64_LE(afex_stats->rx_unicast_bytes_hi,
			  LE32_0,
			  afex_stats->rx_unicast_bytes_lo,
			  fw_fcoe_stat->rx_stat0.fcoe_rx_byte_cnt);

		ADD_64_LE(afex_stats->rx_unicast_bytes_hi,
			  fcoe_q_tstorm_stats->rcv_ucast_bytes.hi,
			  afex_stats->rx_unicast_bytes_lo,
			  fcoe_q_tstorm_stats->rcv_ucast_bytes.lo);

		ADD_64_LE(afex_stats->rx_broadcast_bytes_hi,
			  fcoe_q_tstorm_stats->rcv_bcast_bytes.hi,
			  afex_stats->rx_broadcast_bytes_lo,
			  fcoe_q_tstorm_stats->rcv_bcast_bytes.lo);

		ADD_64_LE(afex_stats->rx_multicast_bytes_hi,
			  fcoe_q_tstorm_stats->rcv_mcast_bytes.hi,
			  afex_stats->rx_multicast_bytes_lo,
			  fcoe_q_tstorm_stats->rcv_mcast_bytes.lo);

		ADD_64_LE(afex_stats->rx_unicast_frames_hi,
			  LE32_0,
			  afex_stats->rx_unicast_frames_lo,
			  fw_fcoe_stat->rx_stat0.fcoe_rx_pkt_cnt);

		ADD_64_LE(afex_stats->rx_unicast_frames_hi,
			  LE32_0,
			  afex_stats->rx_unicast_frames_lo,
			  fcoe_q_tstorm_stats->rcv_ucast_pkts);

		ADD_64_LE(afex_stats->rx_broadcast_frames_hi,
			  LE32_0,
			  afex_stats->rx_broadcast_frames_lo,
			  fcoe_q_tstorm_stats->rcv_bcast_pkts);

		ADD_64_LE(afex_stats->rx_multicast_frames_hi,
			  LE32_0,
			  afex_stats->rx_multicast_frames_lo,
			  fcoe_q_tstorm_stats->rcv_mcast_pkts);

		ADD_64_LE(afex_stats->rx_frames_discarded_hi,
			  LE32_0,
			  afex_stats->rx_frames_discarded_lo,
			  fcoe_q_tstorm_stats->checksum_discard);

		ADD_64_LE(afex_stats->rx_frames_discarded_hi,
			  LE32_0,
			  afex_stats->rx_frames_discarded_lo,
			  fcoe_q_tstorm_stats->pkts_too_big_discard);

		ADD_64_LE(afex_stats->rx_frames_discarded_hi,
			  LE32_0,
			  afex_stats->rx_frames_discarded_lo,
			  fcoe_q_tstorm_stats->ttl0_discard);

		ADD_64_LE16(afex_stats->rx_frames_dropped_hi,
			    LE16_0,
			    afex_stats->rx_frames_dropped_lo,
			    fcoe_q_tstorm_stats->no_buff_discard);

		ADD_64_LE(afex_stats->rx_frames_dropped_hi,
			  LE32_0,
			  afex_stats->rx_frames_dropped_lo,
			  fcoe_q_ustorm_stats->ucast_no_buff_pkts);

		ADD_64_LE(afex_stats->rx_frames_dropped_hi,
			  LE32_0,
			  afex_stats->rx_frames_dropped_lo,
			  fcoe_q_ustorm_stats->mcast_no_buff_pkts);

		ADD_64_LE(afex_stats->rx_frames_dropped_hi,
			  LE32_0,
			  afex_stats->rx_frames_dropped_lo,
			  fcoe_q_ustorm_stats->bcast_no_buff_pkts);

		ADD_64_LE(afex_stats->rx_frames_dropped_hi,
			  LE32_0,
			  afex_stats->rx_frames_dropped_lo,
			  fw_fcoe_stat->rx_stat1.fcoe_rx_drop_pkt_cnt);

		ADD_64_LE(afex_stats->rx_frames_dropped_hi,
			  LE32_0,
			  afex_stats->rx_frames_dropped_lo,
			  fw_fcoe_stat->rx_stat2.fcoe_rx_drop_pkt_cnt);

		ADD_64_LE(afex_stats->tx_unicast_bytes_hi,
			  LE32_0,
			  afex_stats->tx_unicast_bytes_lo,
			  fw_fcoe_stat->tx_stat.fcoe_tx_byte_cnt);

		ADD_64_LE(afex_stats->tx_unicast_bytes_hi,
			  fcoe_q_xstorm_stats->ucast_bytes_sent.hi,
			  afex_stats->tx_unicast_bytes_lo,
			  fcoe_q_xstorm_stats->ucast_bytes_sent.lo);

		ADD_64_LE(afex_stats->tx_broadcast_bytes_hi,
			  fcoe_q_xstorm_stats->bcast_bytes_sent.hi,
			  afex_stats->tx_broadcast_bytes_lo,
			  fcoe_q_xstorm_stats->bcast_bytes_sent.lo);

		ADD_64_LE(afex_stats->tx_multicast_bytes_hi,
			  fcoe_q_xstorm_stats->mcast_bytes_sent.hi,
			  afex_stats->tx_multicast_bytes_lo,
			  fcoe_q_xstorm_stats->mcast_bytes_sent.lo);

		ADD_64_LE(afex_stats->tx_unicast_frames_hi,
			  LE32_0,
			  afex_stats->tx_unicast_frames_lo,
			  fw_fcoe_stat->tx_stat.fcoe_tx_pkt_cnt);

		ADD_64_LE(afex_stats->tx_unicast_frames_hi,
			  LE32_0,
			  afex_stats->tx_unicast_frames_lo,
			  fcoe_q_xstorm_stats->ucast_pkts_sent);

		ADD_64_LE(afex_stats->tx_broadcast_frames_hi,
			  LE32_0,
			  afex_stats->tx_broadcast_frames_lo,
			  fcoe_q_xstorm_stats->bcast_pkts_sent);

		ADD_64_LE(afex_stats->tx_multicast_frames_hi,
			  LE32_0,
			  afex_stats->tx_multicast_frames_lo,
			  fcoe_q_xstorm_stats->mcast_pkts_sent);

		ADD_64_LE(afex_stats->tx_frames_dropped_hi,
			  LE32_0,
			  afex_stats->tx_frames_dropped_lo,
			  fcoe_q_xstorm_stats->error_drop_pkts);
	}

	/* if port stats are requested, add them to the PMF stats, since
	 * they will be accumulated by the MCP anyway before being sent
	 * to the switch
	 */
	if ((bp->port.pmf) && (stats_type == VICSTATST_UIF_INDEX)) {
		ADD_64(afex_stats->rx_frames_dropped_hi,
		       0,
		       afex_stats->rx_frames_dropped_lo,
		       estats->mac_filter_discard);
		ADD_64(afex_stats->rx_frames_dropped_hi,
		       0,
		       afex_stats->rx_frames_dropped_lo,
		       estats->brb_truncate_discard);
		ADD_64(afex_stats->rx_frames_discarded_hi,
		       0,
		       afex_stats->rx_frames_discarded_lo,
		       estats->mac_discard);
	}
}