/* bnx2x_stats.c: Broadcom Everest network driver.
 *
 * Copyright (c) 2007-2012 Broadcom Corporation
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation.
 *
 * Maintained by: Eilon Greenstein <eilong@broadcom.com>
 * Written by: Eliezer Tamir
 * Based on code from Michael Chan's bnx2 driver
 * UDP CSUM errata workaround by Arik Gendelman
 * Slowpath and fastpath rework by Vladislav Zolotarov
 * Statistics and Link management by Yitchak Gertner
 *
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include "bnx2x_stats.h"
#include "bnx2x_cmn.h"

/* Statistics */

/*
 * General service functions
 */

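/* Fold a 64-bit statistics counter, stored as two consecutive u32s
 * (hi word first), into a long; 32-bit kernels report only the low
 * 32 bits.
 */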
static inline long bnx2x_hilo(u32 *hiref)
{
	u32 lo = *(hiref + 1);
#if (BITS_PER_LONG == 64)
	u32 hi = *hiref;

	return HILO_U64(hi, lo);
#else
	return lo;
#endif
}

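/* Length, in u32 words, of the port statistics block to DMA to the
 * management firmware; the four trailing PFC counters are skipped
 * when the bootcode does not support them.
 */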
static u16 bnx2x_get_port_stats_dma_len(struct bnx2x *bp)
{
	u16 res = sizeof(struct host_port_stats) >> 2;

	/* if PFC stats are not supported by the MFW, don't DMA them */
	if (!(bp->flags & BC_SUPPORTS_PFC_STATS))
		res -= (sizeof(u32)*4) >> 2;

	return res;
}

/*
 * Init service functions
 */

/* Post the next statistics ramrod. Protect it with the spin lock in
 * order to ensure the strict ordering between statistics ramrods
 * (each ramrod has a sequence number passed in a
 * bp->fw_stats_req->hdr.drv_stats_counter and ramrods must be
 * sent in order).
 */
static void bnx2x_storm_stats_post(struct bnx2x *bp)
{
	if (!bp->stats_pending) {
		int rc;

		spin_lock_bh(&bp->stats_lock);

		if (bp->stats_pending) {
			spin_unlock_bh(&bp->stats_lock);
			return;
		}

		bp->fw_stats_req->hdr.drv_stats_counter =
			cpu_to_le16(bp->stats_counter++);

		DP(BNX2X_MSG_STATS, "Sending statistics ramrod %d\n",
		   bp->fw_stats_req->hdr.drv_stats_counter);

		/* send FW stats ramrod */
		rc = bnx2x_sp_post(bp, RAMROD_CMD_ID_COMMON_STAT_QUERY, 0,
				   U64_HI(bp->fw_stats_req_mapping),
				   U64_LO(bp->fw_stats_req_mapping),
				   NONE_CONNECTION_TYPE);
		if (rc == 0)
			bp->stats_pending = 1;

		spin_unlock_bh(&bp->stats_lock);
	}
}

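/* Fire off the DMAE commands assembled in the slowpath dmae[] array.
 * When several commands are queued, the first "loader" command copies
 * the next one into the DMAE command registers and the commands chain
 * through GRC completions; a lone function-stats command is posted
 * directly.
 */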
static void bnx2x_hw_stats_post(struct bnx2x *bp)
{
	struct dmae_command *dmae = &bp->stats_dmae;
	u32 *stats_comp = bnx2x_sp(bp, stats_comp);

	*stats_comp = DMAE_COMP_VAL;
	if (CHIP_REV_IS_SLOW(bp))
		return;

	/* loader */
	if (bp->executer_idx) {
		int loader_idx = PMF_DMAE_C(bp);
		u32 opcode = bnx2x_dmae_opcode(bp, DMAE_SRC_PCI, DMAE_DST_GRC,
					       true, DMAE_COMP_GRC);
		opcode = bnx2x_dmae_opcode_clr_src_reset(opcode);

		memset(dmae, 0, sizeof(struct dmae_command));
		dmae->opcode = opcode;
		dmae->src_addr_lo = U64_LO(bnx2x_sp_mapping(bp, dmae[0]));
		dmae->src_addr_hi = U64_HI(bnx2x_sp_mapping(bp, dmae[0]));
		dmae->dst_addr_lo = (DMAE_REG_CMD_MEM +
				     sizeof(struct dmae_command) *
				     (loader_idx + 1)) >> 2;
		dmae->dst_addr_hi = 0;
		dmae->len = sizeof(struct dmae_command) >> 2;
		if (CHIP_IS_E1(bp))
			dmae->len--;
		dmae->comp_addr_lo = dmae_reg_go_c[loader_idx + 1] >> 2;
		dmae->comp_addr_hi = 0;
		dmae->comp_val = 1;

		*stats_comp = 0;
		bnx2x_post_dmae(bp, dmae, loader_idx);

	} else if (bp->func_stx) {
		*stats_comp = 0;
		bnx2x_post_dmae(bp, dmae, INIT_DMAE_C(bp));
	}
}

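/* Poll for the DMAE completion marker, sleeping up to 1ms per
 * iteration and giving up after ~10ms.
 */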
static int bnx2x_stats_comp(struct bnx2x *bp)
{
	u32 *stats_comp = bnx2x_sp(bp, stats_comp);
	int cnt = 10;

	might_sleep();
	while (*stats_comp != DMAE_COMP_VAL) {
		if (!cnt) {
			BNX2X_ERR("timeout waiting for stats finished\n");
			break;
		}
		cnt--;
		usleep_range(1000, 1000);
	}
	return 1;
}

/*
 * Statistics service functions
 */

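/* Called when this driver instance becomes the PMF: fetch the port
 * statistics accumulated so far from the management firmware in two
 * DMAE reads, since the block exceeds a single read's maximum length.
 */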
static void bnx2x_stats_pmf_update(struct bnx2x *bp)
{
	struct dmae_command *dmae;
	u32 opcode;
	int loader_idx = PMF_DMAE_C(bp);
	u32 *stats_comp = bnx2x_sp(bp, stats_comp);

	/* sanity */
	if (!bp->port.pmf || !bp->port.port_stx) {
		BNX2X_ERR("BUG!\n");
		return;
	}

	bp->executer_idx = 0;

	opcode = bnx2x_dmae_opcode(bp, DMAE_SRC_GRC, DMAE_DST_PCI, false, 0);

	dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
	dmae->opcode = bnx2x_dmae_opcode_add_comp(opcode, DMAE_COMP_GRC);
	dmae->src_addr_lo = bp->port.port_stx >> 2;
	dmae->src_addr_hi = 0;
	dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, port_stats));
	dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, port_stats));
	dmae->len = DMAE_LEN32_RD_MAX;
	dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
	dmae->comp_addr_hi = 0;
	dmae->comp_val = 1;

	dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
	dmae->opcode = bnx2x_dmae_opcode_add_comp(opcode, DMAE_COMP_PCI);
	dmae->src_addr_lo = (bp->port.port_stx >> 2) + DMAE_LEN32_RD_MAX;
	dmae->src_addr_hi = 0;
	dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, port_stats) +
				   DMAE_LEN32_RD_MAX * 4);
	dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, port_stats) +
				   DMAE_LEN32_RD_MAX * 4);
	dmae->len = bnx2x_get_port_stats_dma_len(bp) - DMAE_LEN32_RD_MAX;
	dmae->comp_addr_lo = U64_LO(bnx2x_sp_mapping(bp, stats_comp));
	dmae->comp_addr_hi = U64_HI(bnx2x_sp_mapping(bp, stats_comp));
	dmae->comp_val = DMAE_COMP_VAL;

	*stats_comp = 0;
	bnx2x_hw_stats_post(bp);
	bnx2x_stats_comp(bp);
}

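/* Build the PMF's per-cycle DMAE chain: push the host copies of the
 * port and function statistics out to the management firmware, then
 * pull the MAC counters (EMAC, BMAC1/2 or MSTAT, depending on chip
 * and link type) and the NIG counters into the slowpath buffers.
 */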
static void bnx2x_port_stats_init(struct bnx2x *bp)
{
	struct dmae_command *dmae;
	int port = BP_PORT(bp);
	u32 opcode;
	int loader_idx = PMF_DMAE_C(bp);
	u32 mac_addr;
	u32 *stats_comp = bnx2x_sp(bp, stats_comp);

	/* sanity */
	if (!bp->link_vars.link_up || !bp->port.pmf) {
		BNX2X_ERR("BUG!\n");
		return;
	}

	bp->executer_idx = 0;

	/* MCP */
	opcode = bnx2x_dmae_opcode(bp, DMAE_SRC_PCI, DMAE_DST_GRC,
				   true, DMAE_COMP_GRC);

	if (bp->port.port_stx) {

		dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
		dmae->opcode = opcode;
		dmae->src_addr_lo = U64_LO(bnx2x_sp_mapping(bp, port_stats));
		dmae->src_addr_hi = U64_HI(bnx2x_sp_mapping(bp, port_stats));
		dmae->dst_addr_lo = bp->port.port_stx >> 2;
		dmae->dst_addr_hi = 0;
		dmae->len = bnx2x_get_port_stats_dma_len(bp);
		dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
		dmae->comp_addr_hi = 0;
		dmae->comp_val = 1;
	}

	if (bp->func_stx) {

		dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
		dmae->opcode = opcode;
		dmae->src_addr_lo = U64_LO(bnx2x_sp_mapping(bp, func_stats));
		dmae->src_addr_hi = U64_HI(bnx2x_sp_mapping(bp, func_stats));
		dmae->dst_addr_lo = bp->func_stx >> 2;
		dmae->dst_addr_hi = 0;
		dmae->len = sizeof(struct host_func_stats) >> 2;
		dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
		dmae->comp_addr_hi = 0;
		dmae->comp_val = 1;
	}

	/* MAC */
	opcode = bnx2x_dmae_opcode(bp, DMAE_SRC_GRC, DMAE_DST_PCI,
				   true, DMAE_COMP_GRC);

	/* EMAC is special */
	if (bp->link_vars.mac_type == MAC_TYPE_EMAC) {
		mac_addr = (port ? GRCBASE_EMAC1 : GRCBASE_EMAC0);

		/* EMAC_REG_EMAC_RX_STAT_AC (EMAC_REG_EMAC_RX_STAT_AC_COUNT)*/
		dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
		dmae->opcode = opcode;
		dmae->src_addr_lo = (mac_addr +
				     EMAC_REG_EMAC_RX_STAT_AC) >> 2;
		dmae->src_addr_hi = 0;
		dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, mac_stats));
		dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, mac_stats));
		dmae->len = EMAC_REG_EMAC_RX_STAT_AC_COUNT;
		dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
		dmae->comp_addr_hi = 0;
		dmae->comp_val = 1;

		/* EMAC_REG_EMAC_RX_STAT_AC_28 */
		dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
		dmae->opcode = opcode;
		dmae->src_addr_lo = (mac_addr +
				     EMAC_REG_EMAC_RX_STAT_AC_28) >> 2;
		dmae->src_addr_hi = 0;
		dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, mac_stats) +
		     offsetof(struct emac_stats, rx_stat_falsecarriererrors));
		dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, mac_stats) +
		     offsetof(struct emac_stats, rx_stat_falsecarriererrors));
		dmae->len = 1;
		dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
		dmae->comp_addr_hi = 0;
		dmae->comp_val = 1;

		/* EMAC_REG_EMAC_TX_STAT_AC (EMAC_REG_EMAC_TX_STAT_AC_COUNT)*/
		dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
		dmae->opcode = opcode;
		dmae->src_addr_lo = (mac_addr +
				     EMAC_REG_EMAC_TX_STAT_AC) >> 2;
		dmae->src_addr_hi = 0;
		dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, mac_stats) +
			offsetof(struct emac_stats, tx_stat_ifhcoutoctets));
		dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, mac_stats) +
			offsetof(struct emac_stats, tx_stat_ifhcoutoctets));
		dmae->len = EMAC_REG_EMAC_TX_STAT_AC_COUNT;
		dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
		dmae->comp_addr_hi = 0;
		dmae->comp_val = 1;
	} else {
		u32 tx_src_addr_lo, rx_src_addr_lo;
		u16 rx_len, tx_len;

		/* configure the params according to MAC type */
		switch (bp->link_vars.mac_type) {
		case MAC_TYPE_BMAC:
			mac_addr = (port ? NIG_REG_INGRESS_BMAC1_MEM :
					   NIG_REG_INGRESS_BMAC0_MEM);

			/* BIGMAC_REGISTER_TX_STAT_GTPKT ..
			   BIGMAC_REGISTER_TX_STAT_GTBYT */
			if (CHIP_IS_E1x(bp)) {
				tx_src_addr_lo = (mac_addr +
					BIGMAC_REGISTER_TX_STAT_GTPKT) >> 2;
				tx_len = (8 + BIGMAC_REGISTER_TX_STAT_GTBYT -
					  BIGMAC_REGISTER_TX_STAT_GTPKT) >> 2;
				rx_src_addr_lo = (mac_addr +
					BIGMAC_REGISTER_RX_STAT_GR64) >> 2;
				rx_len = (8 + BIGMAC_REGISTER_RX_STAT_GRIPJ -
					  BIGMAC_REGISTER_RX_STAT_GR64) >> 2;
			} else {
				tx_src_addr_lo = (mac_addr +
					BIGMAC2_REGISTER_TX_STAT_GTPOK) >> 2;
				tx_len = (8 + BIGMAC2_REGISTER_TX_STAT_GTBYT -
					  BIGMAC2_REGISTER_TX_STAT_GTPOK) >> 2;
				rx_src_addr_lo = (mac_addr +
					BIGMAC2_REGISTER_RX_STAT_GR64) >> 2;
				rx_len = (8 + BIGMAC2_REGISTER_RX_STAT_GRIPJ -
					  BIGMAC2_REGISTER_RX_STAT_GR64) >> 2;
			}
			break;

		case MAC_TYPE_UMAC: /* handled by MSTAT */
		case MAC_TYPE_XMAC: /* handled by MSTAT */
		default:
			mac_addr = port ? GRCBASE_MSTAT1 : GRCBASE_MSTAT0;
			tx_src_addr_lo = (mac_addr +
					  MSTAT_REG_TX_STAT_GTXPOK_LO) >> 2;
			rx_src_addr_lo = (mac_addr +
					  MSTAT_REG_RX_STAT_GR64_LO) >> 2;
			tx_len = sizeof(bp->slowpath->
					mac_stats.mstat_stats.stats_tx) >> 2;
			rx_len = sizeof(bp->slowpath->
					mac_stats.mstat_stats.stats_rx) >> 2;
			break;
		}

		/* TX stats */
		dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
		dmae->opcode = opcode;
		dmae->src_addr_lo = tx_src_addr_lo;
		dmae->src_addr_hi = 0;
		dmae->len = tx_len;
		dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, mac_stats));
		dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, mac_stats));
		dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
		dmae->comp_addr_hi = 0;
		dmae->comp_val = 1;

		/* RX stats */
		dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
		dmae->opcode = opcode;
		dmae->src_addr_hi = 0;
		dmae->src_addr_lo = rx_src_addr_lo;
		dmae->dst_addr_lo =
			U64_LO(bnx2x_sp_mapping(bp, mac_stats) + (tx_len << 2));
		dmae->dst_addr_hi =
			U64_HI(bnx2x_sp_mapping(bp, mac_stats) + (tx_len << 2));
		dmae->len = rx_len;
		dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
		dmae->comp_addr_hi = 0;
		dmae->comp_val = 1;
	}

	/* NIG */
	if (!CHIP_IS_E3(bp)) {
		dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
		dmae->opcode = opcode;
		dmae->src_addr_lo = (port ? NIG_REG_STAT1_EGRESS_MAC_PKT0 :
					    NIG_REG_STAT0_EGRESS_MAC_PKT0) >> 2;
		dmae->src_addr_hi = 0;
		dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, nig_stats) +
			    offsetof(struct nig_stats, egress_mac_pkt0_lo));
		dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, nig_stats) +
			    offsetof(struct nig_stats, egress_mac_pkt0_lo));
		dmae->len = (2*sizeof(u32)) >> 2;
		dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
		dmae->comp_addr_hi = 0;
		dmae->comp_val = 1;

		dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
		dmae->opcode = opcode;
		dmae->src_addr_lo = (port ? NIG_REG_STAT1_EGRESS_MAC_PKT1 :
					    NIG_REG_STAT0_EGRESS_MAC_PKT1) >> 2;
		dmae->src_addr_hi = 0;
		dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, nig_stats) +
			    offsetof(struct nig_stats, egress_mac_pkt1_lo));
		dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, nig_stats) +
			    offsetof(struct nig_stats, egress_mac_pkt1_lo));
		dmae->len = (2*sizeof(u32)) >> 2;
		dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
		dmae->comp_addr_hi = 0;
		dmae->comp_val = 1;
	}

	dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
	dmae->opcode = bnx2x_dmae_opcode(bp, DMAE_SRC_GRC, DMAE_DST_PCI,
					 true, DMAE_COMP_PCI);
	dmae->src_addr_lo = (port ? NIG_REG_STAT1_BRB_DISCARD :
				    NIG_REG_STAT0_BRB_DISCARD) >> 2;
	dmae->src_addr_hi = 0;
	dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, nig_stats));
	dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, nig_stats));
	dmae->len = (sizeof(struct nig_stats) - 4*sizeof(u32)) >> 2;

	dmae->comp_addr_lo = U64_LO(bnx2x_sp_mapping(bp, stats_comp));
	dmae->comp_addr_hi = U64_HI(bnx2x_sp_mapping(bp, stats_comp));
	dmae->comp_val = DMAE_COMP_VAL;

	*stats_comp = 0;
}

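/* Prepare the single DMAE command that writes the function statistics
 * back to the management firmware.
 */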
static void bnx2x_func_stats_init(struct bnx2x *bp)
{
	struct dmae_command *dmae = &bp->stats_dmae;
	u32 *stats_comp = bnx2x_sp(bp, stats_comp);

	/* sanity */
	if (!bp->func_stx) {
		BNX2X_ERR("BUG!\n");
		return;
	}

	bp->executer_idx = 0;
	memset(dmae, 0, sizeof(struct dmae_command));

	dmae->opcode = bnx2x_dmae_opcode(bp, DMAE_SRC_PCI, DMAE_DST_GRC,
					 true, DMAE_COMP_PCI);
	dmae->src_addr_lo = U64_LO(bnx2x_sp_mapping(bp, func_stats));
	dmae->src_addr_hi = U64_HI(bnx2x_sp_mapping(bp, func_stats));
	dmae->dst_addr_lo = bp->func_stx >> 2;
	dmae->dst_addr_hi = 0;
	dmae->len = sizeof(struct host_func_stats) >> 2;
	dmae->comp_addr_lo = U64_LO(bnx2x_sp_mapping(bp, stats_comp));
	dmae->comp_addr_hi = U64_HI(bnx2x_sp_mapping(bp, stats_comp));
	dmae->comp_val = DMAE_COMP_VAL;

	*stats_comp = 0;
}

static void bnx2x_stats_start(struct bnx2x *bp)
{
	if (bp->port.pmf)
		bnx2x_port_stats_init(bp);

	else if (bp->func_stx)
		bnx2x_func_stats_init(bp);

	bnx2x_hw_stats_post(bp);
	bnx2x_storm_stats_post(bp);
}

static void bnx2x_stats_pmf_start(struct bnx2x *bp)
{
	bnx2x_stats_comp(bp);
	bnx2x_stats_pmf_update(bp);
	bnx2x_stats_start(bp);
}

static void bnx2x_stats_restart(struct bnx2x *bp)
{
	bnx2x_stats_comp(bp);
	bnx2x_stats_start(bp);
}

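/* Fold the freshly DMAEd BigMAC counters into the port statistics;
 * BMAC1 (E1x) and BMAC2 have layouts close enough that the same macro
 * sequence is repeated per type, with the PFC counters existing only
 * on BMAC2.
 */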
static void bnx2x_bmac_stats_update(struct bnx2x *bp)
{
	struct host_port_stats *pstats = bnx2x_sp(bp, port_stats);
	struct bnx2x_eth_stats *estats = &bp->eth_stats;
	struct {
		u32 lo;
		u32 hi;
	} diff;

	if (CHIP_IS_E1x(bp)) {
		struct bmac1_stats *new = bnx2x_sp(bp, mac_stats.bmac1_stats);

		/* the macros below will use "bmac1_stats" type */
		UPDATE_STAT64(rx_stat_grerb, rx_stat_ifhcinbadoctets);
		UPDATE_STAT64(rx_stat_grfcs, rx_stat_dot3statsfcserrors);
		UPDATE_STAT64(rx_stat_grund, rx_stat_etherstatsundersizepkts);
		UPDATE_STAT64(rx_stat_grovr, rx_stat_dot3statsframestoolong);
		UPDATE_STAT64(rx_stat_grfrg, rx_stat_etherstatsfragments);
		UPDATE_STAT64(rx_stat_grjbr, rx_stat_etherstatsjabbers);
		UPDATE_STAT64(rx_stat_grxcf, rx_stat_maccontrolframesreceived);
		UPDATE_STAT64(rx_stat_grxpf, rx_stat_xoffstateentered);
		UPDATE_STAT64(rx_stat_grxpf, rx_stat_mac_xpf);

		UPDATE_STAT64(tx_stat_gtxpf, tx_stat_outxoffsent);
		UPDATE_STAT64(tx_stat_gtxpf, tx_stat_flowcontroldone);
		UPDATE_STAT64(tx_stat_gt64, tx_stat_etherstatspkts64octets);
		UPDATE_STAT64(tx_stat_gt127,
				tx_stat_etherstatspkts65octetsto127octets);
		UPDATE_STAT64(tx_stat_gt255,
				tx_stat_etherstatspkts128octetsto255octets);
		UPDATE_STAT64(tx_stat_gt511,
				tx_stat_etherstatspkts256octetsto511octets);
		UPDATE_STAT64(tx_stat_gt1023,
				tx_stat_etherstatspkts512octetsto1023octets);
		UPDATE_STAT64(tx_stat_gt1518,
				tx_stat_etherstatspkts1024octetsto1522octets);
		UPDATE_STAT64(tx_stat_gt2047, tx_stat_mac_2047);
		UPDATE_STAT64(tx_stat_gt4095, tx_stat_mac_4095);
		UPDATE_STAT64(tx_stat_gt9216, tx_stat_mac_9216);
		UPDATE_STAT64(tx_stat_gt16383, tx_stat_mac_16383);
		UPDATE_STAT64(tx_stat_gterr,
				tx_stat_dot3statsinternalmactransmiterrors);
		UPDATE_STAT64(tx_stat_gtufl, tx_stat_mac_ufl);

	} else {
		struct bmac2_stats *new = bnx2x_sp(bp, mac_stats.bmac2_stats);

		/* the macros below will use "bmac2_stats" type */
		UPDATE_STAT64(rx_stat_grerb, rx_stat_ifhcinbadoctets);
		UPDATE_STAT64(rx_stat_grfcs, rx_stat_dot3statsfcserrors);
		UPDATE_STAT64(rx_stat_grund, rx_stat_etherstatsundersizepkts);
		UPDATE_STAT64(rx_stat_grovr, rx_stat_dot3statsframestoolong);
		UPDATE_STAT64(rx_stat_grfrg, rx_stat_etherstatsfragments);
		UPDATE_STAT64(rx_stat_grjbr, rx_stat_etherstatsjabbers);
		UPDATE_STAT64(rx_stat_grxcf, rx_stat_maccontrolframesreceived);
		UPDATE_STAT64(rx_stat_grxpf, rx_stat_xoffstateentered);
		UPDATE_STAT64(rx_stat_grxpf, rx_stat_mac_xpf);
		UPDATE_STAT64(tx_stat_gtxpf, tx_stat_outxoffsent);
		UPDATE_STAT64(tx_stat_gtxpf, tx_stat_flowcontroldone);
		UPDATE_STAT64(tx_stat_gt64, tx_stat_etherstatspkts64octets);
		UPDATE_STAT64(tx_stat_gt127,
				tx_stat_etherstatspkts65octetsto127octets);
		UPDATE_STAT64(tx_stat_gt255,
				tx_stat_etherstatspkts128octetsto255octets);
		UPDATE_STAT64(tx_stat_gt511,
				tx_stat_etherstatspkts256octetsto511octets);
		UPDATE_STAT64(tx_stat_gt1023,
				tx_stat_etherstatspkts512octetsto1023octets);
		UPDATE_STAT64(tx_stat_gt1518,
				tx_stat_etherstatspkts1024octetsto1522octets);
		UPDATE_STAT64(tx_stat_gt2047, tx_stat_mac_2047);
		UPDATE_STAT64(tx_stat_gt4095, tx_stat_mac_4095);
		UPDATE_STAT64(tx_stat_gt9216, tx_stat_mac_9216);
		UPDATE_STAT64(tx_stat_gt16383, tx_stat_mac_16383);
		UPDATE_STAT64(tx_stat_gterr,
				tx_stat_dot3statsinternalmactransmiterrors);
		UPDATE_STAT64(tx_stat_gtufl, tx_stat_mac_ufl);

		/* collect PFC stats */
		pstats->pfc_frames_tx_hi = new->tx_stat_gtpp_hi;
		pstats->pfc_frames_tx_lo = new->tx_stat_gtpp_lo;

		pstats->pfc_frames_rx_hi = new->rx_stat_grpp_hi;
		pstats->pfc_frames_rx_lo = new->rx_stat_grpp_lo;
	}

	estats->pause_frames_received_hi =
				pstats->mac_stx[1].rx_stat_mac_xpf_hi;
	estats->pause_frames_received_lo =
				pstats->mac_stx[1].rx_stat_mac_xpf_lo;

	estats->pause_frames_sent_hi =
				pstats->mac_stx[1].tx_stat_outxoffsent_hi;
	estats->pause_frames_sent_lo =
				pstats->mac_stx[1].tx_stat_outxoffsent_lo;

	estats->pfc_frames_received_hi =
				pstats->pfc_frames_rx_hi;
	estats->pfc_frames_received_lo =
				pstats->pfc_frames_rx_lo;
	estats->pfc_frames_sent_hi =
				pstats->pfc_frames_tx_hi;
	estats->pfc_frames_sent_lo =
				pstats->pfc_frames_tx_lo;
}

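/* Fold the MSTAT counters (E3 chips, covering both UMAC and XMAC)
 * into the port statistics; note these are accumulated with
 * ADD_STAT64 rather than extended from a previous snapshot as in the
 * BMAC path.
 */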
static void bnx2x_mstat_stats_update(struct bnx2x *bp)
{
	struct host_port_stats *pstats = bnx2x_sp(bp, port_stats);
	struct bnx2x_eth_stats *estats = &bp->eth_stats;

	struct mstat_stats *new = bnx2x_sp(bp, mac_stats.mstat_stats);

	ADD_STAT64(stats_rx.rx_grerb, rx_stat_ifhcinbadoctets);
	ADD_STAT64(stats_rx.rx_grfcs, rx_stat_dot3statsfcserrors);
	ADD_STAT64(stats_rx.rx_grund, rx_stat_etherstatsundersizepkts);
	ADD_STAT64(stats_rx.rx_grovr, rx_stat_dot3statsframestoolong);
	ADD_STAT64(stats_rx.rx_grfrg, rx_stat_etherstatsfragments);
	ADD_STAT64(stats_rx.rx_grxcf, rx_stat_maccontrolframesreceived);
	ADD_STAT64(stats_rx.rx_grxpf, rx_stat_xoffstateentered);
	ADD_STAT64(stats_rx.rx_grxpf, rx_stat_mac_xpf);
	ADD_STAT64(stats_tx.tx_gtxpf, tx_stat_outxoffsent);
	ADD_STAT64(stats_tx.tx_gtxpf, tx_stat_flowcontroldone);

	/* collect pfc stats */
	ADD_64(pstats->pfc_frames_tx_hi, new->stats_tx.tx_gtxpp_hi,
	       pstats->pfc_frames_tx_lo, new->stats_tx.tx_gtxpp_lo);
	ADD_64(pstats->pfc_frames_rx_hi, new->stats_rx.rx_grxpp_hi,
	       pstats->pfc_frames_rx_lo, new->stats_rx.rx_grxpp_lo);

	ADD_STAT64(stats_tx.tx_gt64, tx_stat_etherstatspkts64octets);
	ADD_STAT64(stats_tx.tx_gt127,
			tx_stat_etherstatspkts65octetsto127octets);
	ADD_STAT64(stats_tx.tx_gt255,
			tx_stat_etherstatspkts128octetsto255octets);
	ADD_STAT64(stats_tx.tx_gt511,
			tx_stat_etherstatspkts256octetsto511octets);
	ADD_STAT64(stats_tx.tx_gt1023,
			tx_stat_etherstatspkts512octetsto1023octets);
	ADD_STAT64(stats_tx.tx_gt1518,
			tx_stat_etherstatspkts1024octetsto1522octets);
	ADD_STAT64(stats_tx.tx_gt2047, tx_stat_mac_2047);

	ADD_STAT64(stats_tx.tx_gt4095, tx_stat_mac_4095);
	ADD_STAT64(stats_tx.tx_gt9216, tx_stat_mac_9216);
	ADD_STAT64(stats_tx.tx_gt16383, tx_stat_mac_16383);

	ADD_STAT64(stats_tx.tx_gterr,
			tx_stat_dot3statsinternalmactransmiterrors);
	ADD_STAT64(stats_tx.tx_gtufl, tx_stat_mac_ufl);

	estats->etherstatspkts1024octetsto1522octets_hi =
	    pstats->mac_stx[1].tx_stat_etherstatspkts1024octetsto1522octets_hi;
	estats->etherstatspkts1024octetsto1522octets_lo =
	    pstats->mac_stx[1].tx_stat_etherstatspkts1024octetsto1522octets_lo;

	estats->etherstatspktsover1522octets_hi =
	    pstats->mac_stx[1].tx_stat_mac_2047_hi;
	estats->etherstatspktsover1522octets_lo =
	    pstats->mac_stx[1].tx_stat_mac_2047_lo;

	ADD_64(estats->etherstatspktsover1522octets_hi,
	       pstats->mac_stx[1].tx_stat_mac_4095_hi,
	       estats->etherstatspktsover1522octets_lo,
	       pstats->mac_stx[1].tx_stat_mac_4095_lo);

	ADD_64(estats->etherstatspktsover1522octets_hi,
	       pstats->mac_stx[1].tx_stat_mac_9216_hi,
	       estats->etherstatspktsover1522octets_lo,
	       pstats->mac_stx[1].tx_stat_mac_9216_lo);

	ADD_64(estats->etherstatspktsover1522octets_hi,
	       pstats->mac_stx[1].tx_stat_mac_16383_hi,
	       estats->etherstatspktsover1522octets_lo,
	       pstats->mac_stx[1].tx_stat_mac_16383_lo);

	estats->pause_frames_received_hi =
				pstats->mac_stx[1].rx_stat_mac_xpf_hi;
	estats->pause_frames_received_lo =
				pstats->mac_stx[1].rx_stat_mac_xpf_lo;

	estats->pause_frames_sent_hi =
				pstats->mac_stx[1].tx_stat_outxoffsent_hi;
	estats->pause_frames_sent_lo =
				pstats->mac_stx[1].tx_stat_outxoffsent_lo;

	estats->pfc_frames_received_hi =
				pstats->pfc_frames_rx_hi;
	estats->pfc_frames_received_lo =
				pstats->pfc_frames_rx_lo;
	estats->pfc_frames_sent_hi =
				pstats->pfc_frames_tx_hi;
	estats->pfc_frames_sent_lo =
				pstats->pfc_frames_tx_lo;
}

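/* Fold the EMAC counters into the port statistics; each hardware
 * counter is a single u32 extended into the 64-bit mac_stx entries.
 */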
static void bnx2x_emac_stats_update(struct bnx2x *bp)
{
	struct emac_stats *new = bnx2x_sp(bp, mac_stats.emac_stats);
	struct host_port_stats *pstats = bnx2x_sp(bp, port_stats);
	struct bnx2x_eth_stats *estats = &bp->eth_stats;

	UPDATE_EXTEND_STAT(rx_stat_ifhcinbadoctets);
	UPDATE_EXTEND_STAT(tx_stat_ifhcoutbadoctets);
	UPDATE_EXTEND_STAT(rx_stat_dot3statsfcserrors);
	UPDATE_EXTEND_STAT(rx_stat_dot3statsalignmenterrors);
	UPDATE_EXTEND_STAT(rx_stat_dot3statscarriersenseerrors);
	UPDATE_EXTEND_STAT(rx_stat_falsecarriererrors);
	UPDATE_EXTEND_STAT(rx_stat_etherstatsundersizepkts);
	UPDATE_EXTEND_STAT(rx_stat_dot3statsframestoolong);
	UPDATE_EXTEND_STAT(rx_stat_etherstatsfragments);
	UPDATE_EXTEND_STAT(rx_stat_etherstatsjabbers);
	UPDATE_EXTEND_STAT(rx_stat_maccontrolframesreceived);
	UPDATE_EXTEND_STAT(rx_stat_xoffstateentered);
	UPDATE_EXTEND_STAT(rx_stat_xonpauseframesreceived);
	UPDATE_EXTEND_STAT(rx_stat_xoffpauseframesreceived);
	UPDATE_EXTEND_STAT(tx_stat_outxonsent);
	UPDATE_EXTEND_STAT(tx_stat_outxoffsent);
	UPDATE_EXTEND_STAT(tx_stat_flowcontroldone);
	UPDATE_EXTEND_STAT(tx_stat_etherstatscollisions);
	UPDATE_EXTEND_STAT(tx_stat_dot3statssinglecollisionframes);
	UPDATE_EXTEND_STAT(tx_stat_dot3statsmultiplecollisionframes);
	UPDATE_EXTEND_STAT(tx_stat_dot3statsdeferredtransmissions);
	UPDATE_EXTEND_STAT(tx_stat_dot3statsexcessivecollisions);
	UPDATE_EXTEND_STAT(tx_stat_dot3statslatecollisions);
	UPDATE_EXTEND_STAT(tx_stat_etherstatspkts64octets);
	UPDATE_EXTEND_STAT(tx_stat_etherstatspkts65octetsto127octets);
	UPDATE_EXTEND_STAT(tx_stat_etherstatspkts128octetsto255octets);
	UPDATE_EXTEND_STAT(tx_stat_etherstatspkts256octetsto511octets);
	UPDATE_EXTEND_STAT(tx_stat_etherstatspkts512octetsto1023octets);
	UPDATE_EXTEND_STAT(tx_stat_etherstatspkts1024octetsto1522octets);
	UPDATE_EXTEND_STAT(tx_stat_etherstatspktsover1522octets);
	UPDATE_EXTEND_STAT(tx_stat_dot3statsinternalmactransmiterrors);

	estats->pause_frames_received_hi =
			pstats->mac_stx[1].rx_stat_xonpauseframesreceived_hi;
	estats->pause_frames_received_lo =
			pstats->mac_stx[1].rx_stat_xonpauseframesreceived_lo;
	ADD_64(estats->pause_frames_received_hi,
	       pstats->mac_stx[1].rx_stat_xoffpauseframesreceived_hi,
	       estats->pause_frames_received_lo,
	       pstats->mac_stx[1].rx_stat_xoffpauseframesreceived_lo);

	estats->pause_frames_sent_hi =
			pstats->mac_stx[1].tx_stat_outxonsent_hi;
	estats->pause_frames_sent_lo =
			pstats->mac_stx[1].tx_stat_outxonsent_lo;
	ADD_64(estats->pause_frames_sent_hi,
	       pstats->mac_stx[1].tx_stat_outxoffsent_hi,
	       estats->pause_frames_sent_lo,
	       pstats->mac_stx[1].tx_stat_outxoffsent_lo);
}

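/* Consume the results of the hardware DMAE pass: dispatch to the
 * MAC-specific update routine, extend the NIG discard/truncate
 * counters, and mirror everything into the ethtool statistics block.
 */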
static int bnx2x_hw_stats_update(struct bnx2x *bp)
{
	struct nig_stats *new = bnx2x_sp(bp, nig_stats);
	struct nig_stats *old = &(bp->port.old_nig_stats);
	struct host_port_stats *pstats = bnx2x_sp(bp, port_stats);
	struct bnx2x_eth_stats *estats = &bp->eth_stats;
	struct {
		u32 lo;
		u32 hi;
	} diff;

	switch (bp->link_vars.mac_type) {
	case MAC_TYPE_BMAC:
		bnx2x_bmac_stats_update(bp);
		break;

	case MAC_TYPE_EMAC:
		bnx2x_emac_stats_update(bp);
		break;

	case MAC_TYPE_UMAC:
	case MAC_TYPE_XMAC:
		bnx2x_mstat_stats_update(bp);
		break;

	case MAC_TYPE_NONE: /* unreached */
		DP(BNX2X_MSG_STATS,
		   "stats updated by DMAE but no MAC active\n");
		return -1;

	default: /* unreached */
		BNX2X_ERR("Unknown MAC type\n");
	}

	ADD_EXTEND_64(pstats->brb_drop_hi, pstats->brb_drop_lo,
		      new->brb_discard - old->brb_discard);
	ADD_EXTEND_64(estats->brb_truncate_hi, estats->brb_truncate_lo,
		      new->brb_truncate - old->brb_truncate);

	if (!CHIP_IS_E3(bp)) {
		UPDATE_STAT64_NIG(egress_mac_pkt0,
					etherstatspkts1024octetsto1522octets);
		UPDATE_STAT64_NIG(egress_mac_pkt1,
					etherstatspktsover1522octets);
	}

	memcpy(old, new, sizeof(struct nig_stats));

	memcpy(&(estats->rx_stat_ifhcinbadoctets_hi), &(pstats->mac_stx[1]),
	       sizeof(struct mac_stx));
	estats->brb_drop_hi = pstats->brb_drop_hi;
	estats->brb_drop_lo = pstats->brb_drop_lo;

	pstats->host_port_stats_counter++;

	if (!BP_NOMCP(bp)) {
		u32 nig_timer_max =
			SHMEM_RD(bp, port_mb[BP_PORT(bp)].stat_nig_timer);
		if (nig_timer_max != estats->nig_timer_max) {
			estats->nig_timer_max = nig_timer_max;
			BNX2X_ERR("NIG timer max (%u)\n",
				  estats->nig_timer_max);
		}
	}

	return 0;
}

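/* Fold the per-queue statistics reported by the storm firmware into
 * the driver's queue, function and port counters. Returns -EAGAIN
 * (leaving stats_pending set) if any storm has not yet processed the
 * ramrod matching the last request sent.
 */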
static int bnx2x_storm_stats_update(struct bnx2x *bp)
{
	struct tstorm_per_port_stats *tport =
		&bp->fw_stats_data->port.tstorm_port_statistics;
	struct tstorm_per_pf_stats *tfunc =
		&bp->fw_stats_data->pf.tstorm_pf_statistics;
	struct host_func_stats *fstats = bnx2x_sp(bp, func_stats);
	struct bnx2x_eth_stats *estats = &bp->eth_stats;
	struct bnx2x_eth_stats_old *estats_old = &bp->eth_stats_old;
	struct stats_counter *counters = &bp->fw_stats_data->storm_counters;
	int i;
	u16 cur_stats_counter;

	/* Make sure we use the value of the counter
	 * used for sending the last stats ramrod.
	 */
	spin_lock_bh(&bp->stats_lock);
	cur_stats_counter = bp->stats_counter - 1;
	spin_unlock_bh(&bp->stats_lock);

	/* are storm stats valid? */
	if (le16_to_cpu(counters->xstats_counter) != cur_stats_counter) {
		DP(BNX2X_MSG_STATS,
		   "stats not updated by xstorm  xstorm counter (0x%x) != stats_counter (0x%x)\n",
		   le16_to_cpu(counters->xstats_counter), bp->stats_counter);
		return -EAGAIN;
	}

	if (le16_to_cpu(counters->ustats_counter) != cur_stats_counter) {
		DP(BNX2X_MSG_STATS,
		   "stats not updated by ustorm  ustorm counter (0x%x) != stats_counter (0x%x)\n",
		   le16_to_cpu(counters->ustats_counter), bp->stats_counter);
		return -EAGAIN;
	}

	if (le16_to_cpu(counters->cstats_counter) != cur_stats_counter) {
		DP(BNX2X_MSG_STATS,
		   "stats not updated by cstorm  cstorm counter (0x%x) != stats_counter (0x%x)\n",
		   le16_to_cpu(counters->cstats_counter), bp->stats_counter);
		return -EAGAIN;
	}

	if (le16_to_cpu(counters->tstats_counter) != cur_stats_counter) {
		DP(BNX2X_MSG_STATS,
		   "stats not updated by tstorm  tstorm counter (0x%x) != stats_counter (0x%x)\n",
		   le16_to_cpu(counters->tstats_counter), bp->stats_counter);
		return -EAGAIN;
	}

	estats->error_bytes_received_hi = 0;
	estats->error_bytes_received_lo = 0;

	for_each_eth_queue(bp, i) {
		struct bnx2x_fastpath *fp = &bp->fp[i];
		struct tstorm_per_queue_stats *tclient =
			&bp->fw_stats_data->queue_stats[i].
			tstorm_queue_statistics;
		struct tstorm_per_queue_stats *old_tclient = &fp->old_tclient;
		struct ustorm_per_queue_stats *uclient =
			&bp->fw_stats_data->queue_stats[i].
			ustorm_queue_statistics;
		struct ustorm_per_queue_stats *old_uclient = &fp->old_uclient;
		struct xstorm_per_queue_stats *xclient =
			&bp->fw_stats_data->queue_stats[i].
			xstorm_queue_statistics;
		struct xstorm_per_queue_stats *old_xclient = &fp->old_xclient;
		struct bnx2x_eth_q_stats *qstats = &fp->eth_q_stats;
		struct bnx2x_eth_q_stats_old *qstats_old = &fp->eth_q_stats_old;

		u32 diff;

		DP(BNX2X_MSG_STATS, "queue[%d]: ucast_sent 0x%x, bcast_sent 0x%x mcast_sent 0x%x\n",
		   i, xclient->ucast_pkts_sent,
		   xclient->bcast_pkts_sent, xclient->mcast_pkts_sent);

		DP(BNX2X_MSG_STATS, "---------------\n");

		UPDATE_QSTAT(tclient->rcv_bcast_bytes,
			     total_broadcast_bytes_received);
		UPDATE_QSTAT(tclient->rcv_mcast_bytes,
			     total_multicast_bytes_received);
		UPDATE_QSTAT(tclient->rcv_ucast_bytes,
			     total_unicast_bytes_received);

		/*
		 * sum to total_bytes_received all
		 * unicast/multicast/broadcast
		 */
		qstats->total_bytes_received_hi =
			qstats->total_broadcast_bytes_received_hi;
		qstats->total_bytes_received_lo =
			qstats->total_broadcast_bytes_received_lo;

		ADD_64(qstats->total_bytes_received_hi,
		       qstats->total_multicast_bytes_received_hi,
		       qstats->total_bytes_received_lo,
		       qstats->total_multicast_bytes_received_lo);

		ADD_64(qstats->total_bytes_received_hi,
		       qstats->total_unicast_bytes_received_hi,
		       qstats->total_bytes_received_lo,
		       qstats->total_unicast_bytes_received_lo);

		qstats->valid_bytes_received_hi =
					qstats->total_bytes_received_hi;
		qstats->valid_bytes_received_lo =
					qstats->total_bytes_received_lo;

		UPDATE_EXTEND_TSTAT(rcv_ucast_pkts,
					total_unicast_packets_received);
		UPDATE_EXTEND_TSTAT(rcv_mcast_pkts,
					total_multicast_packets_received);
		UPDATE_EXTEND_TSTAT(rcv_bcast_pkts,
					total_broadcast_packets_received);
		UPDATE_EXTEND_E_TSTAT(pkts_too_big_discard,
				      etherstatsoverrsizepkts);
		UPDATE_EXTEND_E_TSTAT(no_buff_discard, no_buff_discard);

		SUB_EXTEND_USTAT(ucast_no_buff_pkts,
					total_unicast_packets_received);
		SUB_EXTEND_USTAT(mcast_no_buff_pkts,
					total_multicast_packets_received);
		SUB_EXTEND_USTAT(bcast_no_buff_pkts,
					total_broadcast_packets_received);
		UPDATE_EXTEND_E_USTAT(ucast_no_buff_pkts, no_buff_discard);
		UPDATE_EXTEND_E_USTAT(mcast_no_buff_pkts, no_buff_discard);
		UPDATE_EXTEND_E_USTAT(bcast_no_buff_pkts, no_buff_discard);

		UPDATE_QSTAT(xclient->bcast_bytes_sent,
			     total_broadcast_bytes_transmitted);
		UPDATE_QSTAT(xclient->mcast_bytes_sent,
			     total_multicast_bytes_transmitted);
		UPDATE_QSTAT(xclient->ucast_bytes_sent,
			     total_unicast_bytes_transmitted);

		/*
		 * sum to total_bytes_transmitted all
		 * unicast/multicast/broadcast
		 */
		qstats->total_bytes_transmitted_hi =
				qstats->total_unicast_bytes_transmitted_hi;
		qstats->total_bytes_transmitted_lo =
				qstats->total_unicast_bytes_transmitted_lo;

		ADD_64(qstats->total_bytes_transmitted_hi,
		       qstats->total_broadcast_bytes_transmitted_hi,
		       qstats->total_bytes_transmitted_lo,
		       qstats->total_broadcast_bytes_transmitted_lo);

		ADD_64(qstats->total_bytes_transmitted_hi,
		       qstats->total_multicast_bytes_transmitted_hi,
		       qstats->total_bytes_transmitted_lo,
		       qstats->total_multicast_bytes_transmitted_lo);

		UPDATE_EXTEND_XSTAT(ucast_pkts_sent,
					total_unicast_packets_transmitted);
		UPDATE_EXTEND_XSTAT(mcast_pkts_sent,
					total_multicast_packets_transmitted);
		UPDATE_EXTEND_XSTAT(bcast_pkts_sent,
					total_broadcast_packets_transmitted);

		UPDATE_EXTEND_TSTAT(checksum_discard,
				total_packets_received_checksum_discarded);
		UPDATE_EXTEND_TSTAT(ttl0_discard,
				total_packets_received_ttl0_discarded);

		UPDATE_EXTEND_XSTAT(error_drop_pkts,
				total_transmitted_dropped_packets_error);

		/* TPA aggregations completed */
		UPDATE_EXTEND_E_USTAT(coalesced_events, total_tpa_aggregations);
		/* Number of network frames aggregated by TPA */
		UPDATE_EXTEND_E_USTAT(coalesced_pkts,
				      total_tpa_aggregated_frames);
		/* Total number of bytes in completed TPA aggregations */
		UPDATE_QSTAT(uclient->coalesced_bytes, total_tpa_bytes);

		UPDATE_ESTAT_QSTAT_64(total_tpa_bytes);

		UPDATE_FSTAT_QSTAT(total_bytes_received);
		UPDATE_FSTAT_QSTAT(total_bytes_transmitted);
		UPDATE_FSTAT_QSTAT(total_unicast_packets_received);
		UPDATE_FSTAT_QSTAT(total_multicast_packets_received);
		UPDATE_FSTAT_QSTAT(total_broadcast_packets_received);
		UPDATE_FSTAT_QSTAT(total_unicast_packets_transmitted);
		UPDATE_FSTAT_QSTAT(total_multicast_packets_transmitted);
		UPDATE_FSTAT_QSTAT(total_broadcast_packets_transmitted);
		UPDATE_FSTAT_QSTAT(valid_bytes_received);
	}

	ADD_64(estats->total_bytes_received_hi,
	       estats->rx_stat_ifhcinbadoctets_hi,
	       estats->total_bytes_received_lo,
	       estats->rx_stat_ifhcinbadoctets_lo);

	ADD_64(estats->total_bytes_received_hi,
	       le32_to_cpu(tfunc->rcv_error_bytes.hi),
	       estats->total_bytes_received_lo,
	       le32_to_cpu(tfunc->rcv_error_bytes.lo));

	ADD_64(estats->error_bytes_received_hi,
	       le32_to_cpu(tfunc->rcv_error_bytes.hi),
	       estats->error_bytes_received_lo,
	       le32_to_cpu(tfunc->rcv_error_bytes.lo));

	UPDATE_ESTAT(etherstatsoverrsizepkts, rx_stat_dot3statsframestoolong);

	ADD_64(estats->error_bytes_received_hi,
	       estats->rx_stat_ifhcinbadoctets_hi,
	       estats->error_bytes_received_lo,
	       estats->rx_stat_ifhcinbadoctets_lo);

	if (bp->port.pmf) {
		struct bnx2x_fw_port_stats_old *fwstats = &bp->fw_stats_old;
		UPDATE_FW_STAT(mac_filter_discard);
		UPDATE_FW_STAT(mf_tag_discard);
		UPDATE_FW_STAT(brb_truncate_discard);
		UPDATE_FW_STAT(mac_discard);
	}

	fstats->host_func_stats_start = ++fstats->host_func_stats_end;

	bp->stats_pending = 0;

	return 0;
}

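/* Derive the standard struct net_device_stats counters from the
 * driver's internal 64-bit statistics.
 */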
static void bnx2x_net_stats_update(struct bnx2x *bp)
{
	struct bnx2x_eth_stats *estats = &bp->eth_stats;
	struct net_device_stats *nstats = &bp->dev->stats;
	unsigned long tmp;
	int i;

	nstats->rx_packets =
		bnx2x_hilo(&estats->total_unicast_packets_received_hi) +
		bnx2x_hilo(&estats->total_multicast_packets_received_hi) +
		bnx2x_hilo(&estats->total_broadcast_packets_received_hi);

	nstats->tx_packets =
		bnx2x_hilo(&estats->total_unicast_packets_transmitted_hi) +
		bnx2x_hilo(&estats->total_multicast_packets_transmitted_hi) +
		bnx2x_hilo(&estats->total_broadcast_packets_transmitted_hi);

	nstats->rx_bytes = bnx2x_hilo(&estats->total_bytes_received_hi);

	nstats->tx_bytes = bnx2x_hilo(&estats->total_bytes_transmitted_hi);

	tmp = estats->mac_discard;
	for_each_rx_queue(bp, i)
		tmp += le32_to_cpu(bp->fp[i].old_tclient.checksum_discard);
	nstats->rx_dropped = tmp + bp->net_stats_old.rx_dropped;

	nstats->tx_dropped = 0;

	nstats->multicast =
		bnx2x_hilo(&estats->total_multicast_packets_received_hi);

	nstats->collisions =
		bnx2x_hilo(&estats->tx_stat_etherstatscollisions_hi);

	nstats->rx_length_errors =
		bnx2x_hilo(&estats->rx_stat_etherstatsundersizepkts_hi) +
		bnx2x_hilo(&estats->etherstatsoverrsizepkts_hi);
	nstats->rx_over_errors = bnx2x_hilo(&estats->brb_drop_hi) +
				 bnx2x_hilo(&estats->brb_truncate_hi);
	nstats->rx_crc_errors =
		bnx2x_hilo(&estats->rx_stat_dot3statsfcserrors_hi);
	nstats->rx_frame_errors =
		bnx2x_hilo(&estats->rx_stat_dot3statsalignmenterrors_hi);
	nstats->rx_fifo_errors = bnx2x_hilo(&estats->no_buff_discard_hi);
	nstats->rx_missed_errors = 0;

	nstats->rx_errors = nstats->rx_length_errors +
			    nstats->rx_over_errors +
			    nstats->rx_crc_errors +
			    nstats->rx_frame_errors +
			    nstats->rx_fifo_errors +
			    nstats->rx_missed_errors;

	nstats->tx_aborted_errors =
		bnx2x_hilo(&estats->tx_stat_dot3statslatecollisions_hi) +
		bnx2x_hilo(&estats->tx_stat_dot3statsexcessivecollisions_hi);
	nstats->tx_carrier_errors =
		bnx2x_hilo(&estats->rx_stat_dot3statscarriersenseerrors_hi);
	nstats->tx_fifo_errors = 0;
	nstats->tx_heartbeat_errors = 0;
	nstats->tx_window_errors = 0;

	nstats->tx_errors = nstats->tx_aborted_errors +
			    nstats->tx_carrier_errors +
	     bnx2x_hilo(&estats->tx_stat_dot3statsinternalmactransmiterrors_hi);
}

static void bnx2x_drv_stats_update(struct bnx2x *bp)
{
	struct bnx2x_eth_stats *estats = &bp->eth_stats;
	int i;

	for_each_queue(bp, i) {
		struct bnx2x_eth_q_stats *qstats = &bp->fp[i].eth_q_stats;
		struct bnx2x_eth_q_stats_old *qstats_old =
						&bp->fp[i].eth_q_stats_old;

		UPDATE_ESTAT_QSTAT(driver_xoff);
		UPDATE_ESTAT_QSTAT(rx_err_discard_pkt);
		UPDATE_ESTAT_QSTAT(rx_skb_alloc_failed);
		UPDATE_ESTAT_QSTAT(hw_csum_err);
	}
}

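/* Statistics collection can be suspended through the management
 * firmware's debug interface; check shared memory for such a request.
 */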
static bool bnx2x_edebug_stats_stopped(struct bnx2x *bp)
{
	u32 val;

	if (SHMEM2_HAS(bp, edebug_driver_if[1])) {
		val = SHMEM2_RD(bp, edebug_driver_if[1]);

		if (val == EDEBUG_DRIVER_IF_OP_CODE_DISABLE_STAT)
			return true;
	}

	return false;
}

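/* Periodic (timer-driven) statistics update: fold in the hardware and
 * storm firmware results of the previous cycle, refresh the netdev
 * and driver counters, then post the next round of requests.
 */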
static void bnx2x_stats_update(struct bnx2x *bp)
{
	u32 *stats_comp = bnx2x_sp(bp, stats_comp);

	if (bnx2x_edebug_stats_stopped(bp))
		return;

	if (*stats_comp != DMAE_COMP_VAL)
		return;

	if (bp->port.pmf)
		bnx2x_hw_stats_update(bp);

	if (bnx2x_storm_stats_update(bp) && (bp->stats_pending++ == 3)) {
		BNX2X_ERR("storm stats were not updated for 3 times\n");
		bnx2x_panic();
		return;
	}

	bnx2x_net_stats_update(bp);
	bnx2x_drv_stats_update(bp);

	if (netif_msg_timer(bp)) {
		struct bnx2x_eth_stats *estats = &bp->eth_stats;

		netdev_dbg(bp->dev, "brb drops %u  brb truncate %u\n",
			   estats->brb_drop_lo, estats->brb_truncate_lo);
	}

	bnx2x_hw_stats_post(bp);
	bnx2x_storm_stats_post(bp);
}

static void bnx2x_port_stats_stop(struct bnx2x *bp)
{
	struct dmae_command *dmae;
	u32 opcode;
	int loader_idx = PMF_DMAE_C(bp);
	u32 *stats_comp = bnx2x_sp(bp, stats_comp);

	bp->executer_idx = 0;

	opcode = bnx2x_dmae_opcode(bp, DMAE_SRC_PCI, DMAE_DST_GRC, false, 0);

	if (bp->port.port_stx) {

		dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
		if (bp->func_stx)
			dmae->opcode = bnx2x_dmae_opcode_add_comp(
						opcode, DMAE_COMP_GRC);
		else
			dmae->opcode = bnx2x_dmae_opcode_add_comp(
						opcode, DMAE_COMP_PCI);

		dmae->src_addr_lo = U64_LO(bnx2x_sp_mapping(bp, port_stats));
		dmae->src_addr_hi = U64_HI(bnx2x_sp_mapping(bp, port_stats));
		dmae->dst_addr_lo = bp->port.port_stx >> 2;
		dmae->dst_addr_hi = 0;
		dmae->len = bnx2x_get_port_stats_dma_len(bp);
		if (bp->func_stx) {
			dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
			dmae->comp_addr_hi = 0;
			dmae->comp_val = 1;
		} else {
			dmae->comp_addr_lo =
				U64_LO(bnx2x_sp_mapping(bp, stats_comp));
			dmae->comp_addr_hi =
				U64_HI(bnx2x_sp_mapping(bp, stats_comp));
			dmae->comp_val = DMAE_COMP_VAL;

			*stats_comp = 0;
		}
	}

	if (bp->func_stx) {

		dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
		dmae->opcode =
			bnx2x_dmae_opcode_add_comp(opcode, DMAE_COMP_PCI);
		dmae->src_addr_lo = U64_LO(bnx2x_sp_mapping(bp, func_stats));
		dmae->src_addr_hi = U64_HI(bnx2x_sp_mapping(bp, func_stats));
		dmae->dst_addr_lo = bp->func_stx >> 2;
		dmae->dst_addr_hi = 0;
		dmae->len = sizeof(struct host_func_stats) >> 2;
		dmae->comp_addr_lo = U64_LO(bnx2x_sp_mapping(bp, stats_comp));
		dmae->comp_addr_hi = U64_HI(bnx2x_sp_mapping(bp, stats_comp));
		dmae->comp_val = DMAE_COMP_VAL;

		*stats_comp = 0;
	}
}

static void bnx2x_stats_stop(struct bnx2x *bp)
{
	int update = 0;

	bnx2x_stats_comp(bp);

	if (bp->port.pmf)
		update = (bnx2x_hw_stats_update(bp) == 0);

	update |= (bnx2x_storm_stats_update(bp) == 0);

	if (update) {
		bnx2x_net_stats_update(bp);

		if (bp->port.pmf)
			bnx2x_port_stats_stop(bp);

		bnx2x_hw_stats_post(bp);
		bnx2x_stats_comp(bp);
	}
}

static void bnx2x_stats_do_nothing(struct bnx2x *bp)
{
}

static const struct {
	void (*action)(struct bnx2x *bp);
	enum bnx2x_stats_state next_state;
} bnx2x_stats_stm[STATS_STATE_MAX][STATS_EVENT_MAX] = {
/* state	event	*/
{
/* DISABLED	PMF	*/ {bnx2x_stats_pmf_update, STATS_STATE_DISABLED},
/*		LINK_UP	*/ {bnx2x_stats_start,      STATS_STATE_ENABLED},
/*		UPDATE	*/ {bnx2x_stats_do_nothing, STATS_STATE_DISABLED},
/*		STOP	*/ {bnx2x_stats_do_nothing, STATS_STATE_DISABLED}
},
{
/* ENABLED	PMF	*/ {bnx2x_stats_pmf_start,  STATS_STATE_ENABLED},
/*		LINK_UP	*/ {bnx2x_stats_restart,    STATS_STATE_ENABLED},
/*		UPDATE	*/ {bnx2x_stats_update,     STATS_STATE_ENABLED},
/*		STOP	*/ {bnx2x_stats_stop,       STATS_STATE_DISABLED}
}
};

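/* Single entry point of the statistics state machine: atomically
 * advance bp->stats_state according to the (state, event) table above
 * and run the corresponding action outside the lock.
 */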
1262void bnx2x_stats_handle(struct bnx2x *bp, enum bnx2x_stats_event event)
1263{
David S. Millerbb7e95c2010-07-27 21:01:35 -07001264 enum bnx2x_stats_state state;
Dmitry Kravkov6c719d02010-07-27 12:36:15 +00001265 if (unlikely(bp->panic))
1266 return;
Dmitry Kravkov4a025f42011-11-13 04:34:30 +00001267
David S. Millerbb7e95c2010-07-27 21:01:35 -07001268 spin_lock_bh(&bp->stats_lock);
1269 state = bp->stats_state;
Dmitry Kravkov6c719d02010-07-27 12:36:15 +00001270 bp->stats_state = bnx2x_stats_stm[state][event].next_state;
David S. Millerbb7e95c2010-07-27 21:01:35 -07001271 spin_unlock_bh(&bp->stats_lock);
Dmitry Kravkov6c719d02010-07-27 12:36:15 +00001272
Dmitry Kravkov4a025f42011-11-13 04:34:30 +00001273 bnx2x_stats_stm[state][event].action(bp);
1274
Dmitry Kravkov6c719d02010-07-27 12:36:15 +00001275 if ((event != STATS_EVENT_UPDATE) || netif_msg_timer(bp))
1276 DP(BNX2X_MSG_STATS, "state %d -> event %d -> state %d\n",
1277 state, event, bp->stats_state);
1278}
1279
static void bnx2x_port_stats_base_init(struct bnx2x *bp)
{
	struct dmae_command *dmae;
	u32 *stats_comp = bnx2x_sp(bp, stats_comp);

	/* sanity */
	if (!bp->port.pmf || !bp->port.port_stx) {
		BNX2X_ERR("BUG!\n");
		return;
	}

	bp->executer_idx = 0;

	dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
	dmae->opcode = bnx2x_dmae_opcode(bp, DMAE_SRC_PCI, DMAE_DST_GRC,
					 true, DMAE_COMP_PCI);
	dmae->src_addr_lo = U64_LO(bnx2x_sp_mapping(bp, port_stats));
	dmae->src_addr_hi = U64_HI(bnx2x_sp_mapping(bp, port_stats));
	dmae->dst_addr_lo = bp->port.port_stx >> 2;
	dmae->dst_addr_hi = 0;
	dmae->len = bnx2x_get_port_stats_dma_len(bp);
	dmae->comp_addr_lo = U64_LO(bnx2x_sp_mapping(bp, stats_comp));
	dmae->comp_addr_hi = U64_HI(bnx2x_sp_mapping(bp, stats_comp));
	dmae->comp_val = DMAE_COMP_VAL;

	*stats_comp = 0;
	bnx2x_hw_stats_post(bp);
	bnx2x_stats_comp(bp);
}

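/* The counterpart of the port routine above for the non-init path:
 * DMA the function statistics preserved in shared memory (func_stx)
 * back into the driver's SP buffer, restoring counters accumulated
 * before the re-initialization.
 */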
static void bnx2x_func_stats_base_update(struct bnx2x *bp)
{
	struct dmae_command *dmae = &bp->stats_dmae;
	u32 *stats_comp = bnx2x_sp(bp, stats_comp);

	/* sanity */
	if (!bp->func_stx) {
		BNX2X_ERR("BUG!\n");
		return;
	}

	bp->executer_idx = 0;
	memset(dmae, 0, sizeof(struct dmae_command));

	dmae->opcode = bnx2x_dmae_opcode(bp, DMAE_SRC_GRC, DMAE_DST_PCI,
					 true, DMAE_COMP_PCI);
	dmae->src_addr_lo = bp->func_stx >> 2;
	dmae->src_addr_hi = 0;
	dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, func_stats));
	dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, func_stats));
	dmae->len = sizeof(struct host_func_stats) >> 2;
	dmae->comp_addr_lo = U64_LO(bnx2x_sp_mapping(bp, stats_comp));
	dmae->comp_addr_hi = U64_HI(bnx2x_sp_mapping(bp, stats_comp));
	dmae->comp_val = DMAE_COMP_VAL;

	*stats_comp = 0;
	bnx2x_hw_stats_post(bp);
	bnx2x_stats_comp(bp);
}

/**
 * bnx2x_prep_fw_stats_req - prepare the FW statistics ramrod data
 *
 * @bp:		driver handle
 *
 * Fill in the statistics request header and query entries once, so
 * that afterwards we only have to increment the statistics counter
 * (drv_stats_counter) and re-send the same ramrod.
 */
static inline void bnx2x_prep_fw_stats_req(struct bnx2x *bp)
{
	int i;
	int first_queue_query_index;
	struct stats_query_header *stats_hdr = &bp->fw_stats_req->hdr;

	dma_addr_t cur_data_offset;
	struct stats_query_entry *cur_query_entry;

	stats_hdr->cmd_num = bp->fw_stats_num;
	stats_hdr->drv_stats_counter = 0;

	/* storm_counters struct contains the counters of completed
	 * statistics requests per storm, which are incremented by the FW
	 * each time it completes handling a statistics ramrod. We will
	 * check these counters in the timer handler and discard a stale
	 * (statistics) ramrod completion.
	 */
	cur_data_offset = bp->fw_stats_data_mapping +
		offsetof(struct bnx2x_fw_stats_data, storm_counters);

	stats_hdr->stats_counters_addrs.hi =
		cpu_to_le32(U64_HI(cur_data_offset));
	stats_hdr->stats_counters_addrs.lo =
		cpu_to_le32(U64_LO(cur_data_offset));

	/* prepare for the first stats ramrod (which will be completed
	 * with counters equal to zero) - initialize the counters to
	 * something different.
	 */
	memset(&bp->fw_stats_data->storm_counters, 0xff,
	       sizeof(struct stats_counter));

	/**** Port FW statistics data ****/
	cur_data_offset = bp->fw_stats_data_mapping +
		offsetof(struct bnx2x_fw_stats_data, port);

	cur_query_entry = &bp->fw_stats_req->query[BNX2X_PORT_QUERY_IDX];

	cur_query_entry->kind = STATS_TYPE_PORT;
	/* For port query index is a DONT CARE */
	cur_query_entry->index = BP_PORT(bp);
	/* For port query funcID is a DONT CARE */
	cur_query_entry->funcID = cpu_to_le16(BP_FUNC(bp));
	cur_query_entry->address.hi = cpu_to_le32(U64_HI(cur_data_offset));
	cur_query_entry->address.lo = cpu_to_le32(U64_LO(cur_data_offset));

	/**** PF FW statistics data ****/
	cur_data_offset = bp->fw_stats_data_mapping +
		offsetof(struct bnx2x_fw_stats_data, pf);

	cur_query_entry = &bp->fw_stats_req->query[BNX2X_PF_QUERY_IDX];

	cur_query_entry->kind = STATS_TYPE_PF;
	/* For PF query index is a DONT CARE */
	cur_query_entry->index = BP_PORT(bp);
	cur_query_entry->funcID = cpu_to_le16(BP_FUNC(bp));
	cur_query_entry->address.hi = cpu_to_le32(U64_HI(cur_data_offset));
	cur_query_entry->address.lo = cpu_to_le32(U64_LO(cur_data_offset));

	/**** FCoE FW statistics data ****/
	if (!NO_FCOE(bp)) {
		cur_data_offset = bp->fw_stats_data_mapping +
			offsetof(struct bnx2x_fw_stats_data, fcoe);

		cur_query_entry =
			&bp->fw_stats_req->query[BNX2X_FCOE_QUERY_IDX];

		cur_query_entry->kind = STATS_TYPE_FCOE;
		/* For FCoE query index is a DONT CARE */
		cur_query_entry->index = BP_PORT(bp);
		cur_query_entry->funcID = cpu_to_le16(BP_FUNC(bp));
		cur_query_entry->address.hi =
			cpu_to_le32(U64_HI(cur_data_offset));
		cur_query_entry->address.lo =
			cpu_to_le32(U64_LO(cur_data_offset));
	}

	/**** Clients' queries ****/
	cur_data_offset = bp->fw_stats_data_mapping +
		offsetof(struct bnx2x_fw_stats_data, queue_stats);

	/* the first queue query index depends on whether an FCoE offload
	 * request will be included in the ramrod
	 */
	if (!NO_FCOE(bp))
		first_queue_query_index = BNX2X_FIRST_QUEUE_QUERY_IDX;
	else
		first_queue_query_index = BNX2X_FIRST_QUEUE_QUERY_IDX - 1;

	for_each_eth_queue(bp, i) {
		cur_query_entry =
			&bp->fw_stats_req->
			query[first_queue_query_index + i];

		cur_query_entry->kind = STATS_TYPE_QUEUE;
		cur_query_entry->index = bnx2x_stats_id(&bp->fp[i]);
		cur_query_entry->funcID = cpu_to_le16(BP_FUNC(bp));
		cur_query_entry->address.hi =
			cpu_to_le32(U64_HI(cur_data_offset));
		cur_query_entry->address.lo =
			cpu_to_le32(U64_LO(cur_data_offset));

		cur_data_offset += sizeof(struct per_queue_stats);
	}

	/* add FCoE queue query if needed */
	if (!NO_FCOE(bp)) {
		cur_query_entry =
			&bp->fw_stats_req->
			query[first_queue_query_index + i];

		cur_query_entry->kind = STATS_TYPE_QUEUE;
		cur_query_entry->index = bnx2x_stats_id(&bp->fp[FCOE_IDX]);
		cur_query_entry->funcID = cpu_to_le16(BP_FUNC(bp));
		cur_query_entry->address.hi =
			cpu_to_le32(U64_HI(cur_data_offset));
		cur_query_entry->address.lo =
			cpu_to_le32(U64_LO(cur_data_offset));
	}
}

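/* Entry point for (re)initializing the whole statistics machinery:
 * resolve the port/function stats mailboxes in shared memory, snapshot
 * the NIG baseline counters, zero the driver-side copies on a cold
 * init, prepare the FW statistics ramrod, and on a warm re-init
 * restore the function counters preserved in shared memory.
 */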
void bnx2x_stats_init(struct bnx2x *bp)
{
	int /*abs*/port = BP_PORT(bp);
	int mb_idx = BP_FW_MB_IDX(bp);
	int i;

	bp->stats_pending = 0;
	bp->executer_idx = 0;
	bp->stats_counter = 0;

	/* port and func stats for management */
	if (!BP_NOMCP(bp)) {
		bp->port.port_stx = SHMEM_RD(bp, port_mb[port].port_stx);
		bp->func_stx = SHMEM_RD(bp, func_mb[mb_idx].fw_mb_param);

	} else {
		bp->port.port_stx = 0;
		bp->func_stx = 0;
	}
	DP(BNX2X_MSG_STATS, "port_stx 0x%x func_stx 0x%x\n",
	   bp->port.port_stx, bp->func_stx);

	/* the PMF should retrieve port statistics from SP on a non-init run */
	if (!bp->stats_init && bp->port.pmf && bp->port.port_stx)
		bnx2x_stats_handle(bp, STATS_EVENT_PMF);

	port = BP_PORT(bp);
	/* port stats */
	memset(&(bp->port.old_nig_stats), 0, sizeof(struct nig_stats));
	bp->port.old_nig_stats.brb_discard =
			REG_RD(bp, NIG_REG_STAT0_BRB_DISCARD + port*0x38);
	bp->port.old_nig_stats.brb_truncate =
			REG_RD(bp, NIG_REG_STAT0_BRB_TRUNCATE + port*0x38);
	if (!CHIP_IS_E3(bp)) {
		REG_RD_DMAE(bp, NIG_REG_STAT0_EGRESS_MAC_PKT0 + port*0x50,
			    &(bp->port.old_nig_stats.egress_mac_pkt0_lo), 2);
		REG_RD_DMAE(bp, NIG_REG_STAT0_EGRESS_MAC_PKT1 + port*0x50,
			    &(bp->port.old_nig_stats.egress_mac_pkt1_lo), 2);
	}

	/* function stats */
	for_each_queue(bp, i) {
		struct bnx2x_fastpath *fp = &bp->fp[i];

		memset(&fp->old_tclient, 0, sizeof(fp->old_tclient));
		memset(&fp->old_uclient, 0, sizeof(fp->old_uclient));
		memset(&fp->old_xclient, 0, sizeof(fp->old_xclient));
		if (bp->stats_init) {
			memset(&fp->eth_q_stats, 0, sizeof(fp->eth_q_stats));
			memset(&fp->eth_q_stats_old, 0,
			       sizeof(fp->eth_q_stats_old));
		}
	}

	/* Prepare statistics ramrod data */
	bnx2x_prep_fw_stats_req(bp);

	memset(&bp->dev->stats, 0, sizeof(bp->dev->stats));
	if (bp->stats_init) {
		memset(&bp->net_stats_old, 0, sizeof(bp->net_stats_old));
		memset(&bp->fw_stats_old, 0, sizeof(bp->fw_stats_old));
		memset(&bp->eth_stats_old, 0, sizeof(bp->eth_stats_old));
		memset(&bp->eth_stats, 0, sizeof(bp->eth_stats));

		/* Clean SP from previous statistics */
		if (bp->func_stx) {
			memset(bnx2x_sp(bp, func_stats), 0,
			       sizeof(struct host_func_stats));
			bnx2x_func_stats_init(bp);
			bnx2x_hw_stats_post(bp);
			bnx2x_stats_comp(bp);
		}
	}

	bp->stats_state = STATS_STATE_DISABLED;

	if (bp->port.pmf && bp->port.port_stx)
		bnx2x_port_stats_base_init(bp);

	/* On a non-init run, retrieve previous statistics from SP */
	if (!bp->stats_init && bp->func_stx)
		bnx2x_func_stats_base_update(bp);

	/* mark the end of statistics initialization */
	bp->stats_init = false;
}

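/* Preserve the statistics that must survive an unload or re-init:
 * per-queue byte counters, the net_device rx_dropped count and, for a
 * PMF in multi-function mode, the port-level FW discard counters.  The
 * UPDATE_*_OLD macros accumulate the current values into the *_old
 * shadow structures.
 */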
void bnx2x_save_statistics(struct bnx2x *bp)
{
	int i;
	struct net_device_stats *nstats = &bp->dev->stats;

	/* save queue statistics */
	for_each_eth_queue(bp, i) {
		struct bnx2x_fastpath *fp = &bp->fp[i];
		struct bnx2x_eth_q_stats *qstats = &fp->eth_q_stats;
		struct bnx2x_eth_q_stats_old *qstats_old =
			&fp->eth_q_stats_old;

		UPDATE_QSTAT_OLD(total_unicast_bytes_received_hi);
		UPDATE_QSTAT_OLD(total_unicast_bytes_received_lo);
		UPDATE_QSTAT_OLD(total_broadcast_bytes_received_hi);
		UPDATE_QSTAT_OLD(total_broadcast_bytes_received_lo);
		UPDATE_QSTAT_OLD(total_multicast_bytes_received_hi);
		UPDATE_QSTAT_OLD(total_multicast_bytes_received_lo);
		UPDATE_QSTAT_OLD(total_unicast_bytes_transmitted_hi);
		UPDATE_QSTAT_OLD(total_unicast_bytes_transmitted_lo);
		UPDATE_QSTAT_OLD(total_broadcast_bytes_transmitted_hi);
		UPDATE_QSTAT_OLD(total_broadcast_bytes_transmitted_lo);
		UPDATE_QSTAT_OLD(total_multicast_bytes_transmitted_hi);
		UPDATE_QSTAT_OLD(total_multicast_bytes_transmitted_lo);
		UPDATE_QSTAT_OLD(total_tpa_bytes_hi);
		UPDATE_QSTAT_OLD(total_tpa_bytes_lo);
	}

	/* save net_device_stats statistics */
	bp->net_stats_old.rx_dropped = nstats->rx_dropped;

	/* store port firmware statistics */
	if (bp->port.pmf && IS_MF(bp)) {
		struct bnx2x_eth_stats *estats = &bp->eth_stats;
		struct bnx2x_fw_port_stats_old *fwstats = &bp->fw_stats_old;

		UPDATE_FW_STAT_OLD(mac_filter_discard);
		UPDATE_FW_STAT_OLD(mf_tag_discard);
		UPDATE_FW_STAT_OLD(brb_truncate_discard);
		UPDATE_FW_STAT_OLD(mac_discard);
	}
}