/* bnx2x_stats.c: Broadcom Everest network driver.
 *
 * Copyright (c) 2007-2012 Broadcom Corporation
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation.
 *
 * Maintained by: Eilon Greenstein <eilong@broadcom.com>
 * Written by: Eliezer Tamir
 * Based on code from Michael Chan's bnx2 driver
 * UDP CSUM errata workaround by Arik Gendelman
 * Slowpath and fastpath rework by Vladislav Zolotarov
 * Statistics and Link management by Yitchak Gertner
 *
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include "bnx2x_stats.h"
#include "bnx2x_cmn.h"

/* Statistics */

/*
 * General service functions
 */

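/* Combine a {hi, lo} pair of u32 counters into one value; 32-bit hosts
 * only report the low 32 bits.
 */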
static inline long bnx2x_hilo(u32 *hiref)
{
	u32 lo = *(hiref + 1);
#if (BITS_PER_LONG == 64)
	u32 hi = *hiref;

	return HILO_U64(hi, lo);
#else
	return lo;
#endif
}

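/* Number of u32s of struct host_port_stats to transfer by DMAE; the
 * trailing PFC counters are skipped when the MFW cannot supply them.
 */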
static u16 bnx2x_get_port_stats_dma_len(struct bnx2x *bp)
{
	u16 res = sizeof(struct host_port_stats) >> 2;

	/* if PFC stats are not supported by the MFW, don't DMA them */
	if (!(bp->flags & BC_SUPPORTS_PFC_STATS))
		res -= (sizeof(u32)*4) >> 2;

	return res;
}

/*
 * Init service functions
 */

/* Post the next statistics ramrod. Protect it with the spin lock in
 * order to ensure the strict order between statistics ramrods
 * (each ramrod has a sequence number passed in a
 * bp->fw_stats_req->hdr.drv_stats_counter and ramrods must be
 * sent in order).
 */
static void bnx2x_storm_stats_post(struct bnx2x *bp)
{
	if (!bp->stats_pending) {
		int rc;

		spin_lock_bh(&bp->stats_lock);

		if (bp->stats_pending) {
			spin_unlock_bh(&bp->stats_lock);
			return;
		}

		bp->fw_stats_req->hdr.drv_stats_counter =
			cpu_to_le16(bp->stats_counter++);

		DP(BNX2X_MSG_STATS, "Sending statistics ramrod %d\n",
		   bp->fw_stats_req->hdr.drv_stats_counter);

		/* send FW stats ramrod */
		rc = bnx2x_sp_post(bp, RAMROD_CMD_ID_COMMON_STAT_QUERY, 0,
				   U64_HI(bp->fw_stats_req_mapping),
				   U64_LO(bp->fw_stats_req_mapping),
				   NONE_CONNECTION_TYPE);
		if (rc == 0)
			bp->stats_pending = 1;

		spin_unlock_bh(&bp->stats_lock);
	}
}

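/* Kick off the DMAE transfers prepared in the slowpath dmae[] array:
 * chain them through the DMAE loader when several commands are queued,
 * or copy the function stats and post a single command otherwise.
 */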
static void bnx2x_hw_stats_post(struct bnx2x *bp)
{
	struct dmae_command *dmae = &bp->stats_dmae;
	u32 *stats_comp = bnx2x_sp(bp, stats_comp);

	*stats_comp = DMAE_COMP_VAL;
	if (CHIP_REV_IS_SLOW(bp))
		return;

	/* loader */
	if (bp->executer_idx) {
		int loader_idx = PMF_DMAE_C(bp);
		u32 opcode = bnx2x_dmae_opcode(bp, DMAE_SRC_PCI, DMAE_DST_GRC,
					       true, DMAE_COMP_GRC);
		opcode = bnx2x_dmae_opcode_clr_src_reset(opcode);

		memset(dmae, 0, sizeof(struct dmae_command));
		dmae->opcode = opcode;
		dmae->src_addr_lo = U64_LO(bnx2x_sp_mapping(bp, dmae[0]));
		dmae->src_addr_hi = U64_HI(bnx2x_sp_mapping(bp, dmae[0]));
		dmae->dst_addr_lo = (DMAE_REG_CMD_MEM +
				     sizeof(struct dmae_command) *
				     (loader_idx + 1)) >> 2;
		dmae->dst_addr_hi = 0;
		dmae->len = sizeof(struct dmae_command) >> 2;
		if (CHIP_IS_E1(bp))
			dmae->len--;
		dmae->comp_addr_lo = dmae_reg_go_c[loader_idx + 1] >> 2;
		dmae->comp_addr_hi = 0;
		dmae->comp_val = 1;

		*stats_comp = 0;
		bnx2x_post_dmae(bp, dmae, loader_idx);

	} else if (bp->func_stx) {
		*stats_comp = 0;
		memcpy(bnx2x_sp(bp, func_stats), &bp->func_stats,
		       sizeof(bp->func_stats));
		bnx2x_post_dmae(bp, dmae, INIT_DMAE_C(bp));
	}
}

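/* Poll for DMAE completion; gives up after ~10ms. Always returns 1 so
 * callers proceed even on timeout.
 */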
static int bnx2x_stats_comp(struct bnx2x *bp)
{
	u32 *stats_comp = bnx2x_sp(bp, stats_comp);
	int cnt = 10;

	might_sleep();
	while (*stats_comp != DMAE_COMP_VAL) {
		if (!cnt) {
			BNX2X_ERR("timeout waiting for stats finished\n");
			break;
		}
		cnt--;
		usleep_range(1000, 1000);
	}
	return 1;
}

/*
 * Statistics service functions
 */

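/* DMAE the port statistics block from the MCP scratchpad into host
 * memory; run when this driver becomes PMF so it carries on from the
 * counters accumulated by the previous PMF.
 */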
static void bnx2x_stats_pmf_update(struct bnx2x *bp)
{
	struct dmae_command *dmae;
	u32 opcode;
	int loader_idx = PMF_DMAE_C(bp);
	u32 *stats_comp = bnx2x_sp(bp, stats_comp);

	/* sanity */
	if (!bp->port.pmf || !bp->port.port_stx) {
		BNX2X_ERR("BUG!\n");
		return;
	}

	bp->executer_idx = 0;

	opcode = bnx2x_dmae_opcode(bp, DMAE_SRC_GRC, DMAE_DST_PCI, false, 0);

	dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
	dmae->opcode = bnx2x_dmae_opcode_add_comp(opcode, DMAE_COMP_GRC);
	dmae->src_addr_lo = bp->port.port_stx >> 2;
	dmae->src_addr_hi = 0;
	dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, port_stats));
	dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, port_stats));
	dmae->len = DMAE_LEN32_RD_MAX;
	dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
	dmae->comp_addr_hi = 0;
	dmae->comp_val = 1;

	dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
	dmae->opcode = bnx2x_dmae_opcode_add_comp(opcode, DMAE_COMP_PCI);
	dmae->src_addr_lo = (bp->port.port_stx >> 2) + DMAE_LEN32_RD_MAX;
	dmae->src_addr_hi = 0;
	dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, port_stats) +
				   DMAE_LEN32_RD_MAX * 4);
	dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, port_stats) +
				   DMAE_LEN32_RD_MAX * 4);
	dmae->len = bnx2x_get_port_stats_dma_len(bp) - DMAE_LEN32_RD_MAX;

	dmae->comp_addr_lo = U64_LO(bnx2x_sp_mapping(bp, stats_comp));
	dmae->comp_addr_hi = U64_HI(bnx2x_sp_mapping(bp, stats_comp));
	dmae->comp_val = DMAE_COMP_VAL;

	*stats_comp = 0;
	bnx2x_hw_stats_post(bp);
	bnx2x_stats_comp(bp);
}

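/* Build the chain of DMAE commands a PMF posts on every stats cycle:
 * host port/function stats out to the MCP scratchpad, then MAC (EMAC,
 * BMAC or MSTAT, depending on chip and link) and NIG hardware counters
 * into host memory.
 */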
static void bnx2x_port_stats_init(struct bnx2x *bp)
{
	struct dmae_command *dmae;
	int port = BP_PORT(bp);
	u32 opcode;
	int loader_idx = PMF_DMAE_C(bp);
	u32 mac_addr;
	u32 *stats_comp = bnx2x_sp(bp, stats_comp);

	/* sanity */
	if (!bp->link_vars.link_up || !bp->port.pmf) {
		BNX2X_ERR("BUG!\n");
		return;
	}

	bp->executer_idx = 0;

	/* MCP */
	opcode = bnx2x_dmae_opcode(bp, DMAE_SRC_PCI, DMAE_DST_GRC,
				   true, DMAE_COMP_GRC);

	if (bp->port.port_stx) {

		dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
		dmae->opcode = opcode;
		dmae->src_addr_lo = U64_LO(bnx2x_sp_mapping(bp, port_stats));
		dmae->src_addr_hi = U64_HI(bnx2x_sp_mapping(bp, port_stats));
		dmae->dst_addr_lo = bp->port.port_stx >> 2;
		dmae->dst_addr_hi = 0;
		dmae->len = bnx2x_get_port_stats_dma_len(bp);
		dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
		dmae->comp_addr_hi = 0;
		dmae->comp_val = 1;
	}

	if (bp->func_stx) {

		dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
		dmae->opcode = opcode;
		dmae->src_addr_lo = U64_LO(bnx2x_sp_mapping(bp, func_stats));
		dmae->src_addr_hi = U64_HI(bnx2x_sp_mapping(bp, func_stats));
		dmae->dst_addr_lo = bp->func_stx >> 2;
		dmae->dst_addr_hi = 0;
		dmae->len = sizeof(struct host_func_stats) >> 2;
		dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
		dmae->comp_addr_hi = 0;
		dmae->comp_val = 1;
	}

	/* MAC */
	opcode = bnx2x_dmae_opcode(bp, DMAE_SRC_GRC, DMAE_DST_PCI,
				   true, DMAE_COMP_GRC);

	/* EMAC is special */
	if (bp->link_vars.mac_type == MAC_TYPE_EMAC) {
		mac_addr = (port ? GRCBASE_EMAC1 : GRCBASE_EMAC0);

		/* EMAC_REG_EMAC_RX_STAT_AC (EMAC_REG_EMAC_RX_STAT_AC_COUNT)*/
		dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
		dmae->opcode = opcode;
		dmae->src_addr_lo = (mac_addr +
				     EMAC_REG_EMAC_RX_STAT_AC) >> 2;
		dmae->src_addr_hi = 0;
		dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, mac_stats));
		dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, mac_stats));
		dmae->len = EMAC_REG_EMAC_RX_STAT_AC_COUNT;
		dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
		dmae->comp_addr_hi = 0;
		dmae->comp_val = 1;

		/* EMAC_REG_EMAC_RX_STAT_AC_28 */
		dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
		dmae->opcode = opcode;
		dmae->src_addr_lo = (mac_addr +
				     EMAC_REG_EMAC_RX_STAT_AC_28) >> 2;
		dmae->src_addr_hi = 0;
		dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, mac_stats) +
		     offsetof(struct emac_stats, rx_stat_falsecarriererrors));
		dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, mac_stats) +
		     offsetof(struct emac_stats, rx_stat_falsecarriererrors));
		dmae->len = 1;
		dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
		dmae->comp_addr_hi = 0;
		dmae->comp_val = 1;

		/* EMAC_REG_EMAC_TX_STAT_AC (EMAC_REG_EMAC_TX_STAT_AC_COUNT)*/
		dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
		dmae->opcode = opcode;
		dmae->src_addr_lo = (mac_addr +
				     EMAC_REG_EMAC_TX_STAT_AC) >> 2;
		dmae->src_addr_hi = 0;
		dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, mac_stats) +
			offsetof(struct emac_stats, tx_stat_ifhcoutoctets));
		dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, mac_stats) +
			offsetof(struct emac_stats, tx_stat_ifhcoutoctets));
		dmae->len = EMAC_REG_EMAC_TX_STAT_AC_COUNT;
		dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
		dmae->comp_addr_hi = 0;
		dmae->comp_val = 1;
	} else {
		u32 tx_src_addr_lo, rx_src_addr_lo;
		u16 rx_len, tx_len;

		/* configure the params according to MAC type */
		switch (bp->link_vars.mac_type) {
		case MAC_TYPE_BMAC:
			mac_addr = (port ? NIG_REG_INGRESS_BMAC1_MEM :
					   NIG_REG_INGRESS_BMAC0_MEM);

			/* BIGMAC_REGISTER_TX_STAT_GTPKT ..
			   BIGMAC_REGISTER_TX_STAT_GTBYT */
			if (CHIP_IS_E1x(bp)) {
				tx_src_addr_lo = (mac_addr +
					BIGMAC_REGISTER_TX_STAT_GTPKT) >> 2;
				tx_len = (8 + BIGMAC_REGISTER_TX_STAT_GTBYT -
					  BIGMAC_REGISTER_TX_STAT_GTPKT) >> 2;
				rx_src_addr_lo = (mac_addr +
					BIGMAC_REGISTER_RX_STAT_GR64) >> 2;
				rx_len = (8 + BIGMAC_REGISTER_RX_STAT_GRIPJ -
					  BIGMAC_REGISTER_RX_STAT_GR64) >> 2;
			} else {
				tx_src_addr_lo = (mac_addr +
					BIGMAC2_REGISTER_TX_STAT_GTPOK) >> 2;
				tx_len = (8 + BIGMAC2_REGISTER_TX_STAT_GTBYT -
					  BIGMAC2_REGISTER_TX_STAT_GTPOK) >> 2;
				rx_src_addr_lo = (mac_addr +
					BIGMAC2_REGISTER_RX_STAT_GR64) >> 2;
				rx_len = (8 + BIGMAC2_REGISTER_RX_STAT_GRIPJ -
					  BIGMAC2_REGISTER_RX_STAT_GR64) >> 2;
			}
			break;

		case MAC_TYPE_UMAC: /* handled by MSTAT */
		case MAC_TYPE_XMAC: /* handled by MSTAT */
		default:
			mac_addr = port ? GRCBASE_MSTAT1 : GRCBASE_MSTAT0;
			tx_src_addr_lo = (mac_addr +
					  MSTAT_REG_TX_STAT_GTXPOK_LO) >> 2;
			rx_src_addr_lo = (mac_addr +
					  MSTAT_REG_RX_STAT_GR64_LO) >> 2;
			tx_len = sizeof(bp->slowpath->
					mac_stats.mstat_stats.stats_tx) >> 2;
			rx_len = sizeof(bp->slowpath->
					mac_stats.mstat_stats.stats_rx) >> 2;
			break;
		}

		/* TX stats */
		dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
		dmae->opcode = opcode;
		dmae->src_addr_lo = tx_src_addr_lo;
		dmae->src_addr_hi = 0;
		dmae->len = tx_len;
		dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, mac_stats));
		dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, mac_stats));
		dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
		dmae->comp_addr_hi = 0;
		dmae->comp_val = 1;

		/* RX stats */
		dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
		dmae->opcode = opcode;
		dmae->src_addr_hi = 0;
		dmae->src_addr_lo = rx_src_addr_lo;
		dmae->dst_addr_lo =
			U64_LO(bnx2x_sp_mapping(bp, mac_stats) + (tx_len << 2));
		dmae->dst_addr_hi =
			U64_HI(bnx2x_sp_mapping(bp, mac_stats) + (tx_len << 2));
		dmae->len = rx_len;
		dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
		dmae->comp_addr_hi = 0;
		dmae->comp_val = 1;
	}

	/* NIG */
	if (!CHIP_IS_E3(bp)) {
		dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
		dmae->opcode = opcode;
		dmae->src_addr_lo = (port ? NIG_REG_STAT1_EGRESS_MAC_PKT0 :
					    NIG_REG_STAT0_EGRESS_MAC_PKT0) >> 2;
		dmae->src_addr_hi = 0;
		dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, nig_stats) +
			offsetof(struct nig_stats, egress_mac_pkt0_lo));
		dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, nig_stats) +
			offsetof(struct nig_stats, egress_mac_pkt0_lo));
		dmae->len = (2*sizeof(u32)) >> 2;
		dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
		dmae->comp_addr_hi = 0;
		dmae->comp_val = 1;

		dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
		dmae->opcode = opcode;
		dmae->src_addr_lo = (port ? NIG_REG_STAT1_EGRESS_MAC_PKT1 :
					    NIG_REG_STAT0_EGRESS_MAC_PKT1) >> 2;
		dmae->src_addr_hi = 0;
		dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, nig_stats) +
			offsetof(struct nig_stats, egress_mac_pkt1_lo));
		dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, nig_stats) +
			offsetof(struct nig_stats, egress_mac_pkt1_lo));
		dmae->len = (2*sizeof(u32)) >> 2;
		dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
		dmae->comp_addr_hi = 0;
		dmae->comp_val = 1;
	}

	dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
	dmae->opcode = bnx2x_dmae_opcode(bp, DMAE_SRC_GRC, DMAE_DST_PCI,
					 true, DMAE_COMP_PCI);
	dmae->src_addr_lo = (port ? NIG_REG_STAT1_BRB_DISCARD :
				    NIG_REG_STAT0_BRB_DISCARD) >> 2;
	dmae->src_addr_hi = 0;
	dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, nig_stats));
	dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, nig_stats));
	dmae->len = (sizeof(struct nig_stats) - 4*sizeof(u32)) >> 2;

	dmae->comp_addr_lo = U64_LO(bnx2x_sp_mapping(bp, stats_comp));
	dmae->comp_addr_hi = U64_HI(bnx2x_sp_mapping(bp, stats_comp));
	dmae->comp_val = DMAE_COMP_VAL;

	*stats_comp = 0;
}

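/* Single DMAE command: write the host function statistics back to the
 * function stats mailbox in the MCP scratchpad.
 */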
static void bnx2x_func_stats_init(struct bnx2x *bp)
{
	struct dmae_command *dmae = &bp->stats_dmae;
	u32 *stats_comp = bnx2x_sp(bp, stats_comp);

	/* sanity */
	if (!bp->func_stx) {
		BNX2X_ERR("BUG!\n");
		return;
	}

	bp->executer_idx = 0;
	memset(dmae, 0, sizeof(struct dmae_command));

	dmae->opcode = bnx2x_dmae_opcode(bp, DMAE_SRC_PCI, DMAE_DST_GRC,
					 true, DMAE_COMP_PCI);
	dmae->src_addr_lo = U64_LO(bnx2x_sp_mapping(bp, func_stats));
	dmae->src_addr_hi = U64_HI(bnx2x_sp_mapping(bp, func_stats));
	dmae->dst_addr_lo = bp->func_stx >> 2;
	dmae->dst_addr_hi = 0;
	dmae->len = sizeof(struct host_func_stats) >> 2;
	dmae->comp_addr_lo = U64_LO(bnx2x_sp_mapping(bp, stats_comp));
	dmae->comp_addr_hi = U64_HI(bnx2x_sp_mapping(bp, stats_comp));
	dmae->comp_val = DMAE_COMP_VAL;

	*stats_comp = 0;
}

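/* Rebuild the DMAE command chain appropriate to this function's role
 * (PMF or not) and kick off both hardware and storm statistics.
 */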
static void bnx2x_stats_start(struct bnx2x *bp)
{
	if (bp->port.pmf)
		bnx2x_port_stats_init(bp);

	else if (bp->func_stx)
		bnx2x_func_stats_init(bp);

	bnx2x_hw_stats_post(bp);
	bnx2x_storm_stats_post(bp);
}

static void bnx2x_stats_pmf_start(struct bnx2x *bp)
{
	bnx2x_stats_comp(bp);
	bnx2x_stats_pmf_update(bp);
	bnx2x_stats_start(bp);
}

static void bnx2x_stats_restart(struct bnx2x *bp)
{
	bnx2x_stats_comp(bp);
	bnx2x_stats_start(bp);
}

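/* Fold the BMAC1 (E1x) or BMAC2 counters DMAE'd into mac_stats into the
 * port stats block; BMAC2 additionally supplies PFC frame counters.
 */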
static void bnx2x_bmac_stats_update(struct bnx2x *bp)
{
	struct host_port_stats *pstats = bnx2x_sp(bp, port_stats);
	struct bnx2x_eth_stats *estats = &bp->eth_stats;
	struct {
		u32 lo;
		u32 hi;
	} diff;

	if (CHIP_IS_E1x(bp)) {
		struct bmac1_stats *new = bnx2x_sp(bp, mac_stats.bmac1_stats);

		/* the macros below will use "bmac1_stats" type */
		UPDATE_STAT64(rx_stat_grerb, rx_stat_ifhcinbadoctets);
		UPDATE_STAT64(rx_stat_grfcs, rx_stat_dot3statsfcserrors);
		UPDATE_STAT64(rx_stat_grund, rx_stat_etherstatsundersizepkts);
		UPDATE_STAT64(rx_stat_grovr, rx_stat_dot3statsframestoolong);
		UPDATE_STAT64(rx_stat_grfrg, rx_stat_etherstatsfragments);
		UPDATE_STAT64(rx_stat_grjbr, rx_stat_etherstatsjabbers);
		UPDATE_STAT64(rx_stat_grxcf, rx_stat_maccontrolframesreceived);
		UPDATE_STAT64(rx_stat_grxpf, rx_stat_xoffstateentered);
		UPDATE_STAT64(rx_stat_grxpf, rx_stat_mac_xpf);

		UPDATE_STAT64(tx_stat_gtxpf, tx_stat_outxoffsent);
		UPDATE_STAT64(tx_stat_gtxpf, tx_stat_flowcontroldone);
		UPDATE_STAT64(tx_stat_gt64, tx_stat_etherstatspkts64octets);
		UPDATE_STAT64(tx_stat_gt127,
				tx_stat_etherstatspkts65octetsto127octets);
		UPDATE_STAT64(tx_stat_gt255,
				tx_stat_etherstatspkts128octetsto255octets);
		UPDATE_STAT64(tx_stat_gt511,
				tx_stat_etherstatspkts256octetsto511octets);
		UPDATE_STAT64(tx_stat_gt1023,
				tx_stat_etherstatspkts512octetsto1023octets);
		UPDATE_STAT64(tx_stat_gt1518,
				tx_stat_etherstatspkts1024octetsto1522octets);
		UPDATE_STAT64(tx_stat_gt2047, tx_stat_mac_2047);
		UPDATE_STAT64(tx_stat_gt4095, tx_stat_mac_4095);
		UPDATE_STAT64(tx_stat_gt9216, tx_stat_mac_9216);
		UPDATE_STAT64(tx_stat_gt16383, tx_stat_mac_16383);
		UPDATE_STAT64(tx_stat_gterr,
				tx_stat_dot3statsinternalmactransmiterrors);
		UPDATE_STAT64(tx_stat_gtufl, tx_stat_mac_ufl);

	} else {
		struct bmac2_stats *new = bnx2x_sp(bp, mac_stats.bmac2_stats);

		/* the macros below will use "bmac2_stats" type */
		UPDATE_STAT64(rx_stat_grerb, rx_stat_ifhcinbadoctets);
		UPDATE_STAT64(rx_stat_grfcs, rx_stat_dot3statsfcserrors);
		UPDATE_STAT64(rx_stat_grund, rx_stat_etherstatsundersizepkts);
		UPDATE_STAT64(rx_stat_grovr, rx_stat_dot3statsframestoolong);
		UPDATE_STAT64(rx_stat_grfrg, rx_stat_etherstatsfragments);
		UPDATE_STAT64(rx_stat_grjbr, rx_stat_etherstatsjabbers);
		UPDATE_STAT64(rx_stat_grxcf, rx_stat_maccontrolframesreceived);
		UPDATE_STAT64(rx_stat_grxpf, rx_stat_xoffstateentered);
		UPDATE_STAT64(rx_stat_grxpf, rx_stat_mac_xpf);
		UPDATE_STAT64(tx_stat_gtxpf, tx_stat_outxoffsent);
		UPDATE_STAT64(tx_stat_gtxpf, tx_stat_flowcontroldone);
		UPDATE_STAT64(tx_stat_gt64, tx_stat_etherstatspkts64octets);
		UPDATE_STAT64(tx_stat_gt127,
				tx_stat_etherstatspkts65octetsto127octets);
		UPDATE_STAT64(tx_stat_gt255,
				tx_stat_etherstatspkts128octetsto255octets);
		UPDATE_STAT64(tx_stat_gt511,
				tx_stat_etherstatspkts256octetsto511octets);
		UPDATE_STAT64(tx_stat_gt1023,
				tx_stat_etherstatspkts512octetsto1023octets);
		UPDATE_STAT64(tx_stat_gt1518,
				tx_stat_etherstatspkts1024octetsto1522octets);
		UPDATE_STAT64(tx_stat_gt2047, tx_stat_mac_2047);
		UPDATE_STAT64(tx_stat_gt4095, tx_stat_mac_4095);
		UPDATE_STAT64(tx_stat_gt9216, tx_stat_mac_9216);
		UPDATE_STAT64(tx_stat_gt16383, tx_stat_mac_16383);
		UPDATE_STAT64(tx_stat_gterr,
				tx_stat_dot3statsinternalmactransmiterrors);
		UPDATE_STAT64(tx_stat_gtufl, tx_stat_mac_ufl);

		/* collect PFC stats */
		pstats->pfc_frames_tx_hi = new->tx_stat_gtpp_hi;
		pstats->pfc_frames_tx_lo = new->tx_stat_gtpp_lo;

		pstats->pfc_frames_rx_hi = new->rx_stat_grpp_hi;
		pstats->pfc_frames_rx_lo = new->rx_stat_grpp_lo;
	}

	estats->pause_frames_received_hi =
				pstats->mac_stx[1].rx_stat_mac_xpf_hi;
	estats->pause_frames_received_lo =
				pstats->mac_stx[1].rx_stat_mac_xpf_lo;

	estats->pause_frames_sent_hi =
				pstats->mac_stx[1].tx_stat_outxoffsent_hi;
	estats->pause_frames_sent_lo =
				pstats->mac_stx[1].tx_stat_outxoffsent_lo;

	estats->pfc_frames_received_hi =
				pstats->pfc_frames_rx_hi;
	estats->pfc_frames_received_lo =
				pstats->pfc_frames_rx_lo;
	estats->pfc_frames_sent_hi =
				pstats->pfc_frames_tx_hi;
	estats->pfc_frames_sent_lo =
				pstats->pfc_frames_tx_lo;
}

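/* Fold the E3 MSTAT block into the port stats. The ADD_* macros
 * accumulate on top of the previous values rather than overwriting,
 * unlike the BMAC UPDATE_STAT64 path.
 */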
static void bnx2x_mstat_stats_update(struct bnx2x *bp)
{
	struct host_port_stats *pstats = bnx2x_sp(bp, port_stats);
	struct bnx2x_eth_stats *estats = &bp->eth_stats;

	struct mstat_stats *new = bnx2x_sp(bp, mac_stats.mstat_stats);

	ADD_STAT64(stats_rx.rx_grerb, rx_stat_ifhcinbadoctets);
	ADD_STAT64(stats_rx.rx_grfcs, rx_stat_dot3statsfcserrors);
	ADD_STAT64(stats_rx.rx_grund, rx_stat_etherstatsundersizepkts);
	ADD_STAT64(stats_rx.rx_grovr, rx_stat_dot3statsframestoolong);
	ADD_STAT64(stats_rx.rx_grfrg, rx_stat_etherstatsfragments);
	ADD_STAT64(stats_rx.rx_grxcf, rx_stat_maccontrolframesreceived);
	ADD_STAT64(stats_rx.rx_grxpf, rx_stat_xoffstateentered);
	ADD_STAT64(stats_rx.rx_grxpf, rx_stat_mac_xpf);
	ADD_STAT64(stats_tx.tx_gtxpf, tx_stat_outxoffsent);
	ADD_STAT64(stats_tx.tx_gtxpf, tx_stat_flowcontroldone);

	/* collect pfc stats */
	ADD_64(pstats->pfc_frames_tx_hi, new->stats_tx.tx_gtxpp_hi,
	       pstats->pfc_frames_tx_lo, new->stats_tx.tx_gtxpp_lo);
	ADD_64(pstats->pfc_frames_rx_hi, new->stats_rx.rx_grxpp_hi,
	       pstats->pfc_frames_rx_lo, new->stats_rx.rx_grxpp_lo);

	ADD_STAT64(stats_tx.tx_gt64, tx_stat_etherstatspkts64octets);
	ADD_STAT64(stats_tx.tx_gt127,
			tx_stat_etherstatspkts65octetsto127octets);
	ADD_STAT64(stats_tx.tx_gt255,
			tx_stat_etherstatspkts128octetsto255octets);
	ADD_STAT64(stats_tx.tx_gt511,
			tx_stat_etherstatspkts256octetsto511octets);
	ADD_STAT64(stats_tx.tx_gt1023,
			tx_stat_etherstatspkts512octetsto1023octets);
	ADD_STAT64(stats_tx.tx_gt1518,
			tx_stat_etherstatspkts1024octetsto1522octets);
	ADD_STAT64(stats_tx.tx_gt2047, tx_stat_mac_2047);

	ADD_STAT64(stats_tx.tx_gt4095, tx_stat_mac_4095);
	ADD_STAT64(stats_tx.tx_gt9216, tx_stat_mac_9216);
	ADD_STAT64(stats_tx.tx_gt16383, tx_stat_mac_16383);

	ADD_STAT64(stats_tx.tx_gterr,
			tx_stat_dot3statsinternalmactransmiterrors);
	ADD_STAT64(stats_tx.tx_gtufl, tx_stat_mac_ufl);

	estats->etherstatspkts1024octetsto1522octets_hi =
	    pstats->mac_stx[1].tx_stat_etherstatspkts1024octetsto1522octets_hi;
	estats->etherstatspkts1024octetsto1522octets_lo =
	    pstats->mac_stx[1].tx_stat_etherstatspkts1024octetsto1522octets_lo;

	estats->etherstatspktsover1522octets_hi =
	    pstats->mac_stx[1].tx_stat_mac_2047_hi;
	estats->etherstatspktsover1522octets_lo =
	    pstats->mac_stx[1].tx_stat_mac_2047_lo;

	ADD_64(estats->etherstatspktsover1522octets_hi,
	       pstats->mac_stx[1].tx_stat_mac_4095_hi,
	       estats->etherstatspktsover1522octets_lo,
	       pstats->mac_stx[1].tx_stat_mac_4095_lo);

	ADD_64(estats->etherstatspktsover1522octets_hi,
	       pstats->mac_stx[1].tx_stat_mac_9216_hi,
	       estats->etherstatspktsover1522octets_lo,
	       pstats->mac_stx[1].tx_stat_mac_9216_lo);

	ADD_64(estats->etherstatspktsover1522octets_hi,
	       pstats->mac_stx[1].tx_stat_mac_16383_hi,
	       estats->etherstatspktsover1522octets_lo,
	       pstats->mac_stx[1].tx_stat_mac_16383_lo);

	estats->pause_frames_received_hi =
				pstats->mac_stx[1].rx_stat_mac_xpf_hi;
	estats->pause_frames_received_lo =
				pstats->mac_stx[1].rx_stat_mac_xpf_lo;

	estats->pause_frames_sent_hi =
				pstats->mac_stx[1].tx_stat_outxoffsent_hi;
	estats->pause_frames_sent_lo =
				pstats->mac_stx[1].tx_stat_outxoffsent_lo;

	estats->pfc_frames_received_hi =
				pstats->pfc_frames_rx_hi;
	estats->pfc_frames_received_lo =
				pstats->pfc_frames_rx_lo;
	estats->pfc_frames_sent_hi =
				pstats->pfc_frames_tx_hi;
	estats->pfc_frames_sent_lo =
				pstats->pfc_frames_tx_lo;
}

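/* Fold the EMAC (1G MAC) counters into the port stats block; pause
 * statistics are kept as separate XON/XOFF counters on this MAC.
 */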
static void bnx2x_emac_stats_update(struct bnx2x *bp)
{
	struct emac_stats *new = bnx2x_sp(bp, mac_stats.emac_stats);
	struct host_port_stats *pstats = bnx2x_sp(bp, port_stats);
	struct bnx2x_eth_stats *estats = &bp->eth_stats;

	UPDATE_EXTEND_STAT(rx_stat_ifhcinbadoctets);
	UPDATE_EXTEND_STAT(tx_stat_ifhcoutbadoctets);
	UPDATE_EXTEND_STAT(rx_stat_dot3statsfcserrors);
	UPDATE_EXTEND_STAT(rx_stat_dot3statsalignmenterrors);
	UPDATE_EXTEND_STAT(rx_stat_dot3statscarriersenseerrors);
	UPDATE_EXTEND_STAT(rx_stat_falsecarriererrors);
	UPDATE_EXTEND_STAT(rx_stat_etherstatsundersizepkts);
	UPDATE_EXTEND_STAT(rx_stat_dot3statsframestoolong);
	UPDATE_EXTEND_STAT(rx_stat_etherstatsfragments);
	UPDATE_EXTEND_STAT(rx_stat_etherstatsjabbers);
	UPDATE_EXTEND_STAT(rx_stat_maccontrolframesreceived);
	UPDATE_EXTEND_STAT(rx_stat_xoffstateentered);
	UPDATE_EXTEND_STAT(rx_stat_xonpauseframesreceived);
	UPDATE_EXTEND_STAT(rx_stat_xoffpauseframesreceived);
	UPDATE_EXTEND_STAT(tx_stat_outxonsent);
	UPDATE_EXTEND_STAT(tx_stat_outxoffsent);
	UPDATE_EXTEND_STAT(tx_stat_flowcontroldone);
	UPDATE_EXTEND_STAT(tx_stat_etherstatscollisions);
	UPDATE_EXTEND_STAT(tx_stat_dot3statssinglecollisionframes);
	UPDATE_EXTEND_STAT(tx_stat_dot3statsmultiplecollisionframes);
	UPDATE_EXTEND_STAT(tx_stat_dot3statsdeferredtransmissions);
	UPDATE_EXTEND_STAT(tx_stat_dot3statsexcessivecollisions);
	UPDATE_EXTEND_STAT(tx_stat_dot3statslatecollisions);
	UPDATE_EXTEND_STAT(tx_stat_etherstatspkts64octets);
	UPDATE_EXTEND_STAT(tx_stat_etherstatspkts65octetsto127octets);
	UPDATE_EXTEND_STAT(tx_stat_etherstatspkts128octetsto255octets);
	UPDATE_EXTEND_STAT(tx_stat_etherstatspkts256octetsto511octets);
	UPDATE_EXTEND_STAT(tx_stat_etherstatspkts512octetsto1023octets);
	UPDATE_EXTEND_STAT(tx_stat_etherstatspkts1024octetsto1522octets);
	UPDATE_EXTEND_STAT(tx_stat_etherstatspktsover1522octets);
	UPDATE_EXTEND_STAT(tx_stat_dot3statsinternalmactransmiterrors);

	estats->pause_frames_received_hi =
			pstats->mac_stx[1].rx_stat_xonpauseframesreceived_hi;
	estats->pause_frames_received_lo =
			pstats->mac_stx[1].rx_stat_xonpauseframesreceived_lo;
	ADD_64(estats->pause_frames_received_hi,
	       pstats->mac_stx[1].rx_stat_xoffpauseframesreceived_hi,
	       estats->pause_frames_received_lo,
	       pstats->mac_stx[1].rx_stat_xoffpauseframesreceived_lo);

	estats->pause_frames_sent_hi =
			pstats->mac_stx[1].tx_stat_outxonsent_hi;
	estats->pause_frames_sent_lo =
			pstats->mac_stx[1].tx_stat_outxonsent_lo;
	ADD_64(estats->pause_frames_sent_hi,
	       pstats->mac_stx[1].tx_stat_outxoffsent_hi,
	       estats->pause_frames_sent_lo,
	       pstats->mac_stx[1].tx_stat_outxoffsent_lo);
}

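/* Update the host copies of the MAC and NIG counters after a hardware
 * stats DMAE cycle. Returns 0 on success, -1 if no MAC is active.
 */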
static int bnx2x_hw_stats_update(struct bnx2x *bp)
{
	struct nig_stats *new = bnx2x_sp(bp, nig_stats);
	struct nig_stats *old = &(bp->port.old_nig_stats);
	struct host_port_stats *pstats = bnx2x_sp(bp, port_stats);
	struct bnx2x_eth_stats *estats = &bp->eth_stats;
	struct {
		u32 lo;
		u32 hi;
	} diff;

	switch (bp->link_vars.mac_type) {
	case MAC_TYPE_BMAC:
		bnx2x_bmac_stats_update(bp);
		break;

	case MAC_TYPE_EMAC:
		bnx2x_emac_stats_update(bp);
		break;

	case MAC_TYPE_UMAC:
	case MAC_TYPE_XMAC:
		bnx2x_mstat_stats_update(bp);
		break;

	case MAC_TYPE_NONE: /* unreached */
		DP(BNX2X_MSG_STATS,
		   "stats updated by DMAE but no MAC active\n");
		return -1;

	default: /* unreached */
		BNX2X_ERR("Unknown MAC type\n");
	}

	ADD_EXTEND_64(pstats->brb_drop_hi, pstats->brb_drop_lo,
		      new->brb_discard - old->brb_discard);
	ADD_EXTEND_64(estats->brb_truncate_hi, estats->brb_truncate_lo,
		      new->brb_truncate - old->brb_truncate);

	if (!CHIP_IS_E3(bp)) {
		UPDATE_STAT64_NIG(egress_mac_pkt0,
					etherstatspkts1024octetsto1522octets);
		UPDATE_STAT64_NIG(egress_mac_pkt1,
					etherstatspktsover1522octets);
	}

	memcpy(old, new, sizeof(struct nig_stats));

	memcpy(&(estats->rx_stat_ifhcinbadoctets_hi), &(pstats->mac_stx[1]),
	       sizeof(struct mac_stx));
	estats->brb_drop_hi = pstats->brb_drop_hi;
	estats->brb_drop_lo = pstats->brb_drop_lo;

	pstats->host_port_stats_counter++;

	if (CHIP_IS_E3(bp)) {
		u32 lpi_reg = BP_PORT(bp) ? MISC_REG_CPMU_LP_SM_ENT_CNT_P1
					  : MISC_REG_CPMU_LP_SM_ENT_CNT_P0;
		estats->eee_tx_lpi += REG_RD(bp, lpi_reg);
	}

	if (!BP_NOMCP(bp)) {
		u32 nig_timer_max =
			SHMEM_RD(bp, port_mb[BP_PORT(bp)].stat_nig_timer);
		if (nig_timer_max != estats->nig_timer_max) {
			estats->nig_timer_max = nig_timer_max;
			BNX2X_ERR("NIG timer max (%u)\n",
				  estats->nig_timer_max);
		}
	}

	return 0;
}

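/* Consume the per-queue statistics delivered by the FW storms. Returns
 * -EAGAIN (leaving the results untouched) unless all four storms have
 * completed the ramrod identified by the last drv_stats_counter.
 */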
static int bnx2x_storm_stats_update(struct bnx2x *bp)
{
	struct tstorm_per_port_stats *tport =
		&bp->fw_stats_data->port.tstorm_port_statistics;
	struct tstorm_per_pf_stats *tfunc =
		&bp->fw_stats_data->pf.tstorm_pf_statistics;
	struct host_func_stats *fstats = &bp->func_stats;
	struct bnx2x_eth_stats *estats = &bp->eth_stats;
	struct bnx2x_eth_stats_old *estats_old = &bp->eth_stats_old;
	struct stats_counter *counters = &bp->fw_stats_data->storm_counters;
	int i;
	u16 cur_stats_counter;

	/* Make sure we use the value of the counter
	 * used for sending the last stats ramrod.
	 */
	spin_lock_bh(&bp->stats_lock);
	cur_stats_counter = bp->stats_counter - 1;
	spin_unlock_bh(&bp->stats_lock);

	/* are storm stats valid? */
	if (le16_to_cpu(counters->xstats_counter) != cur_stats_counter) {
		DP(BNX2X_MSG_STATS,
		   "stats not updated by xstorm  xstorm counter (0x%x) != stats_counter (0x%x)\n",
		   le16_to_cpu(counters->xstats_counter), bp->stats_counter);
		return -EAGAIN;
	}

	if (le16_to_cpu(counters->ustats_counter) != cur_stats_counter) {
		DP(BNX2X_MSG_STATS,
		   "stats not updated by ustorm  ustorm counter (0x%x) != stats_counter (0x%x)\n",
		   le16_to_cpu(counters->ustats_counter), bp->stats_counter);
		return -EAGAIN;
	}

	if (le16_to_cpu(counters->cstats_counter) != cur_stats_counter) {
		DP(BNX2X_MSG_STATS,
		   "stats not updated by cstorm  cstorm counter (0x%x) != stats_counter (0x%x)\n",
		   le16_to_cpu(counters->cstats_counter), bp->stats_counter);
		return -EAGAIN;
	}

	if (le16_to_cpu(counters->tstats_counter) != cur_stats_counter) {
		DP(BNX2X_MSG_STATS,
		   "stats not updated by tstorm  tstorm counter (0x%x) != stats_counter (0x%x)\n",
		   le16_to_cpu(counters->tstats_counter), bp->stats_counter);
		return -EAGAIN;
	}

	estats->error_bytes_received_hi = 0;
	estats->error_bytes_received_lo = 0;

	for_each_eth_queue(bp, i) {
		struct bnx2x_fastpath *fp = &bp->fp[i];
		struct tstorm_per_queue_stats *tclient =
			&bp->fw_stats_data->queue_stats[i].
			tstorm_queue_statistics;
		struct tstorm_per_queue_stats *old_tclient =
			&bnx2x_fp_stats(bp, fp)->old_tclient;
		struct ustorm_per_queue_stats *uclient =
			&bp->fw_stats_data->queue_stats[i].
			ustorm_queue_statistics;
		struct ustorm_per_queue_stats *old_uclient =
			&bnx2x_fp_stats(bp, fp)->old_uclient;
		struct xstorm_per_queue_stats *xclient =
			&bp->fw_stats_data->queue_stats[i].
			xstorm_queue_statistics;
		struct xstorm_per_queue_stats *old_xclient =
			&bnx2x_fp_stats(bp, fp)->old_xclient;
		struct bnx2x_eth_q_stats *qstats =
			&bnx2x_fp_stats(bp, fp)->eth_q_stats;
		struct bnx2x_eth_q_stats_old *qstats_old =
			&bnx2x_fp_stats(bp, fp)->eth_q_stats_old;

		u32 diff;

		DP(BNX2X_MSG_STATS, "queue[%d]: ucast_sent 0x%x, bcast_sent 0x%x mcast_sent 0x%x\n",
		   i, xclient->ucast_pkts_sent,
		   xclient->bcast_pkts_sent, xclient->mcast_pkts_sent);

		DP(BNX2X_MSG_STATS, "---------------\n");

		UPDATE_QSTAT(tclient->rcv_bcast_bytes,
			     total_broadcast_bytes_received);
		UPDATE_QSTAT(tclient->rcv_mcast_bytes,
			     total_multicast_bytes_received);
		UPDATE_QSTAT(tclient->rcv_ucast_bytes,
			     total_unicast_bytes_received);

		/*
		 * sum to total_bytes_received all
		 * unicast/multicast/broadcast
		 */
		qstats->total_bytes_received_hi =
			qstats->total_broadcast_bytes_received_hi;
		qstats->total_bytes_received_lo =
			qstats->total_broadcast_bytes_received_lo;

		ADD_64(qstats->total_bytes_received_hi,
		       qstats->total_multicast_bytes_received_hi,
		       qstats->total_bytes_received_lo,
		       qstats->total_multicast_bytes_received_lo);

		ADD_64(qstats->total_bytes_received_hi,
		       qstats->total_unicast_bytes_received_hi,
		       qstats->total_bytes_received_lo,
		       qstats->total_unicast_bytes_received_lo);

		qstats->valid_bytes_received_hi =
					qstats->total_bytes_received_hi;
		qstats->valid_bytes_received_lo =
					qstats->total_bytes_received_lo;

		UPDATE_EXTEND_TSTAT(rcv_ucast_pkts,
					total_unicast_packets_received);
		UPDATE_EXTEND_TSTAT(rcv_mcast_pkts,
					total_multicast_packets_received);
		UPDATE_EXTEND_TSTAT(rcv_bcast_pkts,
					total_broadcast_packets_received);
		UPDATE_EXTEND_E_TSTAT(pkts_too_big_discard,
				      etherstatsoverrsizepkts);
		UPDATE_EXTEND_E_TSTAT(no_buff_discard, no_buff_discard);

		SUB_EXTEND_USTAT(ucast_no_buff_pkts,
					total_unicast_packets_received);
		SUB_EXTEND_USTAT(mcast_no_buff_pkts,
					total_multicast_packets_received);
		SUB_EXTEND_USTAT(bcast_no_buff_pkts,
					total_broadcast_packets_received);
		UPDATE_EXTEND_E_USTAT(ucast_no_buff_pkts, no_buff_discard);
		UPDATE_EXTEND_E_USTAT(mcast_no_buff_pkts, no_buff_discard);
		UPDATE_EXTEND_E_USTAT(bcast_no_buff_pkts, no_buff_discard);

		UPDATE_QSTAT(xclient->bcast_bytes_sent,
			     total_broadcast_bytes_transmitted);
		UPDATE_QSTAT(xclient->mcast_bytes_sent,
			     total_multicast_bytes_transmitted);
		UPDATE_QSTAT(xclient->ucast_bytes_sent,
			     total_unicast_bytes_transmitted);

		/*
		 * sum to total_bytes_transmitted all
		 * unicast/multicast/broadcast
		 */
		qstats->total_bytes_transmitted_hi =
				qstats->total_unicast_bytes_transmitted_hi;
		qstats->total_bytes_transmitted_lo =
				qstats->total_unicast_bytes_transmitted_lo;

		ADD_64(qstats->total_bytes_transmitted_hi,
		       qstats->total_broadcast_bytes_transmitted_hi,
		       qstats->total_bytes_transmitted_lo,
		       qstats->total_broadcast_bytes_transmitted_lo);

		ADD_64(qstats->total_bytes_transmitted_hi,
		       qstats->total_multicast_bytes_transmitted_hi,
		       qstats->total_bytes_transmitted_lo,
		       qstats->total_multicast_bytes_transmitted_lo);

		UPDATE_EXTEND_XSTAT(ucast_pkts_sent,
					total_unicast_packets_transmitted);
		UPDATE_EXTEND_XSTAT(mcast_pkts_sent,
					total_multicast_packets_transmitted);
		UPDATE_EXTEND_XSTAT(bcast_pkts_sent,
					total_broadcast_packets_transmitted);

		UPDATE_EXTEND_TSTAT(checksum_discard,
				total_packets_received_checksum_discarded);
		UPDATE_EXTEND_TSTAT(ttl0_discard,
				total_packets_received_ttl0_discarded);

		UPDATE_EXTEND_XSTAT(error_drop_pkts,
				total_transmitted_dropped_packets_error);

		/* TPA aggregations completed */
		UPDATE_EXTEND_E_USTAT(coalesced_events, total_tpa_aggregations);
		/* Number of network frames aggregated by TPA */
		UPDATE_EXTEND_E_USTAT(coalesced_pkts,
				      total_tpa_aggregated_frames);
		/* Total number of bytes in completed TPA aggregations */
		UPDATE_QSTAT(uclient->coalesced_bytes, total_tpa_bytes);

		UPDATE_ESTAT_QSTAT_64(total_tpa_bytes);

		UPDATE_FSTAT_QSTAT(total_bytes_received);
		UPDATE_FSTAT_QSTAT(total_bytes_transmitted);
		UPDATE_FSTAT_QSTAT(total_unicast_packets_received);
		UPDATE_FSTAT_QSTAT(total_multicast_packets_received);
		UPDATE_FSTAT_QSTAT(total_broadcast_packets_received);
		UPDATE_FSTAT_QSTAT(total_unicast_packets_transmitted);
		UPDATE_FSTAT_QSTAT(total_multicast_packets_transmitted);
		UPDATE_FSTAT_QSTAT(total_broadcast_packets_transmitted);
		UPDATE_FSTAT_QSTAT(valid_bytes_received);
	}

	ADD_64(estats->total_bytes_received_hi,
	       estats->rx_stat_ifhcinbadoctets_hi,
	       estats->total_bytes_received_lo,
	       estats->rx_stat_ifhcinbadoctets_lo);

	ADD_64(estats->total_bytes_received_hi,
	       le32_to_cpu(tfunc->rcv_error_bytes.hi),
	       estats->total_bytes_received_lo,
	       le32_to_cpu(tfunc->rcv_error_bytes.lo));

	ADD_64(estats->error_bytes_received_hi,
	       le32_to_cpu(tfunc->rcv_error_bytes.hi),
	       estats->error_bytes_received_lo,
	       le32_to_cpu(tfunc->rcv_error_bytes.lo));

	UPDATE_ESTAT(etherstatsoverrsizepkts, rx_stat_dot3statsframestoolong);

	ADD_64(estats->error_bytes_received_hi,
	       estats->rx_stat_ifhcinbadoctets_hi,
	       estats->error_bytes_received_lo,
	       estats->rx_stat_ifhcinbadoctets_lo);

	if (bp->port.pmf) {
		struct bnx2x_fw_port_stats_old *fwstats = &bp->fw_stats_old;
		UPDATE_FW_STAT(mac_filter_discard);
		UPDATE_FW_STAT(mf_tag_discard);
		UPDATE_FW_STAT(brb_truncate_discard);
		UPDATE_FW_STAT(mac_discard);
	}

	fstats->host_func_stats_start = ++fstats->host_func_stats_end;

	bp->stats_pending = 0;

	return 0;
}

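/* Translate the accumulated driver statistics into the
 * struct net_device_stats fields reported to the stack.
 */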
static void bnx2x_net_stats_update(struct bnx2x *bp)
{
	struct bnx2x_eth_stats *estats = &bp->eth_stats;
	struct net_device_stats *nstats = &bp->dev->stats;
	unsigned long tmp;
	int i;

	nstats->rx_packets =
		bnx2x_hilo(&estats->total_unicast_packets_received_hi) +
		bnx2x_hilo(&estats->total_multicast_packets_received_hi) +
		bnx2x_hilo(&estats->total_broadcast_packets_received_hi);

	nstats->tx_packets =
		bnx2x_hilo(&estats->total_unicast_packets_transmitted_hi) +
		bnx2x_hilo(&estats->total_multicast_packets_transmitted_hi) +
		bnx2x_hilo(&estats->total_broadcast_packets_transmitted_hi);

	nstats->rx_bytes = bnx2x_hilo(&estats->total_bytes_received_hi);

	nstats->tx_bytes = bnx2x_hilo(&estats->total_bytes_transmitted_hi);

	tmp = estats->mac_discard;
	for_each_rx_queue(bp, i) {
		struct tstorm_per_queue_stats *old_tclient =
			&bp->fp_stats[i].old_tclient;
		tmp += le32_to_cpu(old_tclient->checksum_discard);
	}
	nstats->rx_dropped = tmp + bp->net_stats_old.rx_dropped;

	nstats->tx_dropped = 0;

	nstats->multicast =
		bnx2x_hilo(&estats->total_multicast_packets_received_hi);

	nstats->collisions =
		bnx2x_hilo(&estats->tx_stat_etherstatscollisions_hi);

	nstats->rx_length_errors =
		bnx2x_hilo(&estats->rx_stat_etherstatsundersizepkts_hi) +
		bnx2x_hilo(&estats->etherstatsoverrsizepkts_hi);
	nstats->rx_over_errors = bnx2x_hilo(&estats->brb_drop_hi) +
				 bnx2x_hilo(&estats->brb_truncate_hi);
	nstats->rx_crc_errors =
		bnx2x_hilo(&estats->rx_stat_dot3statsfcserrors_hi);
	nstats->rx_frame_errors =
		bnx2x_hilo(&estats->rx_stat_dot3statsalignmenterrors_hi);
	nstats->rx_fifo_errors = bnx2x_hilo(&estats->no_buff_discard_hi);
	nstats->rx_missed_errors = 0;

	nstats->rx_errors = nstats->rx_length_errors +
			    nstats->rx_over_errors +
			    nstats->rx_crc_errors +
			    nstats->rx_frame_errors +
			    nstats->rx_fifo_errors +
			    nstats->rx_missed_errors;

	nstats->tx_aborted_errors =
		bnx2x_hilo(&estats->tx_stat_dot3statslatecollisions_hi) +
		bnx2x_hilo(&estats->tx_stat_dot3statsexcessivecollisions_hi);
	nstats->tx_carrier_errors =
		bnx2x_hilo(&estats->rx_stat_dot3statscarriersenseerrors_hi);
	nstats->tx_fifo_errors = 0;
	nstats->tx_heartbeat_errors = 0;
	nstats->tx_window_errors = 0;

	nstats->tx_errors = nstats->tx_aborted_errors +
			    nstats->tx_carrier_errors +
	    bnx2x_hilo(&estats->tx_stat_dot3statsinternalmactransmiterrors_hi);
}

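/* Roll the per-queue driver-maintained counters (xoff, discards, alloc
 * failures, checksum errors) up into the global ethtool statistics.
 */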
static void bnx2x_drv_stats_update(struct bnx2x *bp)
{
	struct bnx2x_eth_stats *estats = &bp->eth_stats;
	int i;

	for_each_queue(bp, i) {
		struct bnx2x_eth_q_stats *qstats = &bp->fp_stats[i].eth_q_stats;
		struct bnx2x_eth_q_stats_old *qstats_old =
			&bp->fp_stats[i].eth_q_stats_old;

		UPDATE_ESTAT_QSTAT(driver_xoff);
		UPDATE_ESTAT_QSTAT(rx_err_discard_pkt);
		UPDATE_ESTAT_QSTAT(rx_skb_alloc_failed);
		UPDATE_ESTAT_QSTAT(hw_csum_err);
	}
}

static bool bnx2x_edebug_stats_stopped(struct bnx2x *bp)
{
	u32 val;

	if (SHMEM2_HAS(bp, edebug_driver_if[1])) {
		val = SHMEM2_RD(bp, edebug_driver_if[1]);

		if (val == EDEBUG_DRIVER_IF_OP_CODE_DISABLE_STAT)
			return true;
	}

	return false;
}

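/* UPDATE event handler: harvest completed HW and storm statistics and
 * post the next round; calls bnx2x_panic() if the storms miss three
 * consecutive updates.
 */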
static void bnx2x_stats_update(struct bnx2x *bp)
{
	u32 *stats_comp = bnx2x_sp(bp, stats_comp);

	if (bnx2x_edebug_stats_stopped(bp))
		return;

	if (*stats_comp != DMAE_COMP_VAL)
		return;

	if (bp->port.pmf)
		bnx2x_hw_stats_update(bp);

	if (bnx2x_storm_stats_update(bp)) {
		if (bp->stats_pending++ == 3) {
			BNX2X_ERR("storm stats were not updated for 3 times\n");
			bnx2x_panic();
		}
		return;
	}

	bnx2x_net_stats_update(bp);
	bnx2x_drv_stats_update(bp);

	if (netif_msg_timer(bp)) {
		struct bnx2x_eth_stats *estats = &bp->eth_stats;

		netdev_dbg(bp->dev, "brb drops %u  brb truncate %u\n",
			   estats->brb_drop_lo, estats->brb_truncate_lo);
	}

	bnx2x_hw_stats_post(bp);
	bnx2x_storm_stats_post(bp);
}

static void bnx2x_port_stats_stop(struct bnx2x *bp)
{
	struct dmae_command *dmae;
	u32 opcode;
	int loader_idx = PMF_DMAE_C(bp);
	u32 *stats_comp = bnx2x_sp(bp, stats_comp);

	bp->executer_idx = 0;

	opcode = bnx2x_dmae_opcode(bp, DMAE_SRC_PCI, DMAE_DST_GRC, false, 0);

	if (bp->port.port_stx) {

		dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
		if (bp->func_stx)
			dmae->opcode = bnx2x_dmae_opcode_add_comp(
						opcode, DMAE_COMP_GRC);
		else
			dmae->opcode = bnx2x_dmae_opcode_add_comp(
						opcode, DMAE_COMP_PCI);

		dmae->src_addr_lo = U64_LO(bnx2x_sp_mapping(bp, port_stats));
		dmae->src_addr_hi = U64_HI(bnx2x_sp_mapping(bp, port_stats));
		dmae->dst_addr_lo = bp->port.port_stx >> 2;
		dmae->dst_addr_hi = 0;
		dmae->len = bnx2x_get_port_stats_dma_len(bp);
		if (bp->func_stx) {
			dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
			dmae->comp_addr_hi = 0;
			dmae->comp_val = 1;
		} else {
			dmae->comp_addr_lo =
				U64_LO(bnx2x_sp_mapping(bp, stats_comp));
			dmae->comp_addr_hi =
				U64_HI(bnx2x_sp_mapping(bp, stats_comp));
			dmae->comp_val = DMAE_COMP_VAL;

			*stats_comp = 0;
		}
	}

	if (bp->func_stx) {

		dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
		dmae->opcode =
			bnx2x_dmae_opcode_add_comp(opcode, DMAE_COMP_PCI);
		dmae->src_addr_lo = U64_LO(bnx2x_sp_mapping(bp, func_stats));
		dmae->src_addr_hi = U64_HI(bnx2x_sp_mapping(bp, func_stats));
		dmae->dst_addr_lo = bp->func_stx >> 2;
		dmae->dst_addr_hi = 0;
		dmae->len = sizeof(struct host_func_stats) >> 2;
		dmae->comp_addr_lo = U64_LO(bnx2x_sp_mapping(bp, stats_comp));
		dmae->comp_addr_hi = U64_HI(bnx2x_sp_mapping(bp, stats_comp));
		dmae->comp_val = DMAE_COMP_VAL;

		*stats_comp = 0;
	}
}

static void bnx2x_stats_stop(struct bnx2x *bp)
{
	int update = 0;

	bnx2x_stats_comp(bp);

	if (bp->port.pmf)
		update = (bnx2x_hw_stats_update(bp) == 0);

	update |= (bnx2x_storm_stats_update(bp) == 0);

	if (update) {
		bnx2x_net_stats_update(bp);

		if (bp->port.pmf)
			bnx2x_port_stats_stop(bp);

		bnx2x_hw_stats_post(bp);
		bnx2x_stats_comp(bp);
	}
}

static void bnx2x_stats_do_nothing(struct bnx2x *bp)
{
}

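/* The statistics state machine: for every (state, event) pair, the
 * action to execute and the next state to enter.
 */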
static const struct {
	void (*action)(struct bnx2x *bp);
	enum bnx2x_stats_state next_state;
} bnx2x_stats_stm[STATS_STATE_MAX][STATS_EVENT_MAX] = {
/* state	event	*/
{
/* DISABLED	PMF	*/ {bnx2x_stats_pmf_update, STATS_STATE_DISABLED},
/*		LINK_UP	*/ {bnx2x_stats_start,      STATS_STATE_ENABLED},
/*		UPDATE	*/ {bnx2x_stats_do_nothing, STATS_STATE_DISABLED},
/*		STOP	*/ {bnx2x_stats_do_nothing, STATS_STATE_DISABLED}
},
{
/* ENABLED	PMF	*/ {bnx2x_stats_pmf_start,   STATS_STATE_ENABLED},
/*		LINK_UP	*/ {bnx2x_stats_restart,     STATS_STATE_ENABLED},
/*		UPDATE	*/ {bnx2x_stats_update,      STATS_STATE_ENABLED},
/*		STOP	*/ {bnx2x_stats_stop,        STATS_STATE_DISABLED}
}
};

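/* Entry point for statistics events: atomically pick up the current
 * state and advance it, then run the action for the old state.
 */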
1280void bnx2x_stats_handle(struct bnx2x *bp, enum bnx2x_stats_event event)
1281{
David S. Millerbb7e95c2010-07-27 21:01:35 -07001282 enum bnx2x_stats_state state;
Dmitry Kravkov6c719d02010-07-27 12:36:15 +00001283 if (unlikely(bp->panic))
1284 return;
Dmitry Kravkov4a025f42011-11-13 04:34:30 +00001285
David S. Millerbb7e95c2010-07-27 21:01:35 -07001286 spin_lock_bh(&bp->stats_lock);
1287 state = bp->stats_state;
Dmitry Kravkov6c719d02010-07-27 12:36:15 +00001288 bp->stats_state = bnx2x_stats_stm[state][event].next_state;
David S. Millerbb7e95c2010-07-27 21:01:35 -07001289 spin_unlock_bh(&bp->stats_lock);
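
	/* The state transition is made under stats_lock, but the action
	 * is invoked outside of it, presumably so that ramrod posting
	 * does not run inside the BH-disabled critical section.
	 */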
	bnx2x_stats_stm[state][event].action(bp);

	if ((event != STATS_EVENT_UPDATE) || netif_msg_timer(bp))
		DP(BNX2X_MSG_STATS, "state %d -> event %d -> state %d\n",
		   state, event, bp->stats_state);
}

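/* Take a base snapshot of the port statistics: DMA the current host
 * port_stats block into the shmem port_stx area and wait synchronously
 * for the DMAE completion.
 */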
static void bnx2x_port_stats_base_init(struct bnx2x *bp)
{
	struct dmae_command *dmae;
	u32 *stats_comp = bnx2x_sp(bp, stats_comp);

	/* sanity */
	if (!bp->port.pmf || !bp->port.port_stx) {
		BNX2X_ERR("BUG!\n");
		return;
	}

	bp->executer_idx = 0;

	dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
	dmae->opcode = bnx2x_dmae_opcode(bp, DMAE_SRC_PCI, DMAE_DST_GRC,
					 true, DMAE_COMP_PCI);
	dmae->src_addr_lo = U64_LO(bnx2x_sp_mapping(bp, port_stats));
	dmae->src_addr_hi = U64_HI(bnx2x_sp_mapping(bp, port_stats));
	dmae->dst_addr_lo = bp->port.port_stx >> 2;
	dmae->dst_addr_hi = 0;
	dmae->len = bnx2x_get_port_stats_dma_len(bp);
	dmae->comp_addr_lo = U64_LO(bnx2x_sp_mapping(bp, stats_comp));
	dmae->comp_addr_hi = U64_HI(bnx2x_sp_mapping(bp, stats_comp));
	dmae->comp_val = DMAE_COMP_VAL;

	*stats_comp = 0;
	bnx2x_hw_stats_post(bp);
	bnx2x_stats_comp(bp);
}

/* Prepare the statistics ramrod data once, so that later we only need
 * to increment the statistics counter and send the ramrod each time
 * statistics are requested.
 */
static void bnx2x_prep_fw_stats_req(struct bnx2x *bp)
{
	int i;
	int first_queue_query_index;
	struct stats_query_header *stats_hdr = &bp->fw_stats_req->hdr;

	dma_addr_t cur_data_offset;
	struct stats_query_entry *cur_query_entry;

	stats_hdr->cmd_num = bp->fw_stats_num;
	stats_hdr->drv_stats_counter = 0;
	/* The storm_counters struct contains the counters of completed
	 * statistics requests per storm, which the FW increments each
	 * time it finishes handling a statistics ramrod. We will check
	 * these counters in the timer handler and discard any stale
	 * (statistics) ramrod completion.
	 */
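	/* A sketch of that check (illustrative only, not the exact
	 * driver code): the update path compares the per-storm counter
	 * with the driver's sequence number and discards on mismatch:
	 *
	 *	if (le16_to_cpu(counters->xstats_counter) !=
	 *	    bp->stats_counter)
	 *		return -EAGAIN;
	 */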
	cur_data_offset = bp->fw_stats_data_mapping +
		offsetof(struct bnx2x_fw_stats_data, storm_counters);

	stats_hdr->stats_counters_addrs.hi =
		cpu_to_le32(U64_HI(cur_data_offset));
	stats_hdr->stats_counters_addrs.lo =
		cpu_to_le32(U64_LO(cur_data_offset));
	/* prepare for the first stats ramrod (which will be completed
	 * with counters equal to zero) - init the counters to something
	 * different.
	 */
	memset(&bp->fw_stats_data->storm_counters, 0xff,
	       sizeof(struct stats_counter));

	/**** Port FW statistics data ****/
	cur_data_offset = bp->fw_stats_data_mapping +
		offsetof(struct bnx2x_fw_stats_data, port);

	cur_query_entry = &bp->fw_stats_req->query[BNX2X_PORT_QUERY_IDX];

	cur_query_entry->kind = STATS_TYPE_PORT;
	/* For port query index is a DONT CARE */
	cur_query_entry->index = BP_PORT(bp);
	/* For port query funcID is a DONT CARE */
	cur_query_entry->funcID = cpu_to_le16(BP_FUNC(bp));
	cur_query_entry->address.hi = cpu_to_le32(U64_HI(cur_data_offset));
	cur_query_entry->address.lo = cpu_to_le32(U64_LO(cur_data_offset));

	/**** PF FW statistics data ****/
	cur_data_offset = bp->fw_stats_data_mapping +
		offsetof(struct bnx2x_fw_stats_data, pf);

	cur_query_entry = &bp->fw_stats_req->query[BNX2X_PF_QUERY_IDX];

	cur_query_entry->kind = STATS_TYPE_PF;
	/* For PF query index is a DONT CARE */
	cur_query_entry->index = BP_PORT(bp);
	cur_query_entry->funcID = cpu_to_le16(BP_FUNC(bp));
	cur_query_entry->address.hi = cpu_to_le32(U64_HI(cur_data_offset));
	cur_query_entry->address.lo = cpu_to_le32(U64_LO(cur_data_offset));

	/**** FCoE FW statistics data ****/
	if (!NO_FCOE(bp)) {
		cur_data_offset = bp->fw_stats_data_mapping +
			offsetof(struct bnx2x_fw_stats_data, fcoe);

		cur_query_entry =
			&bp->fw_stats_req->query[BNX2X_FCOE_QUERY_IDX];

		cur_query_entry->kind = STATS_TYPE_FCOE;
		/* For FCoE query index is a DONT CARE */
		cur_query_entry->index = BP_PORT(bp);
		cur_query_entry->funcID = cpu_to_le16(BP_FUNC(bp));
		cur_query_entry->address.hi =
			cpu_to_le32(U64_HI(cur_data_offset));
		cur_query_entry->address.lo =
			cpu_to_le32(U64_LO(cur_data_offset));
	}

	/**** Clients' queries ****/
	cur_data_offset = bp->fw_stats_data_mapping +
		offsetof(struct bnx2x_fw_stats_data, queue_stats);

	/* the first queue query index depends on whether an FCoE
	 * offloaded request will be included in the ramrod
	 */
	if (!NO_FCOE(bp))
		first_queue_query_index = BNX2X_FIRST_QUEUE_QUERY_IDX;
	else
		first_queue_query_index = BNX2X_FIRST_QUEUE_QUERY_IDX - 1;

	for_each_eth_queue(bp, i) {
		cur_query_entry =
			&bp->fw_stats_req->
					query[first_queue_query_index + i];

		cur_query_entry->kind = STATS_TYPE_QUEUE;
		cur_query_entry->index = bnx2x_stats_id(&bp->fp[i]);
		cur_query_entry->funcID = cpu_to_le16(BP_FUNC(bp));
		cur_query_entry->address.hi =
			cpu_to_le32(U64_HI(cur_data_offset));
		cur_query_entry->address.lo =
			cpu_to_le32(U64_LO(cur_data_offset));

		cur_data_offset += sizeof(struct per_queue_stats);
	}

	/* add FCoE queue query if needed */
	if (!NO_FCOE(bp)) {
		cur_query_entry =
			&bp->fw_stats_req->
					query[first_queue_query_index + i];

		cur_query_entry->kind = STATS_TYPE_QUEUE;
		cur_query_entry->index = bnx2x_stats_id(&bp->fp[FCOE_IDX(bp)]);
		cur_query_entry->funcID = cpu_to_le16(BP_FUNC(bp));
		cur_query_entry->address.hi =
			cpu_to_le32(U64_HI(cur_data_offset));
		cur_query_entry->address.lo =
			cpu_to_le32(U64_LO(cur_data_offset));
	}
}
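
/* With FCoE present, the resulting query array is laid out as follows
 * (index macros per the driver headers):
 *
 *	query[BNX2X_PORT_QUERY_IDX]          - port statistics
 *	query[BNX2X_PF_QUERY_IDX]            - PF statistics
 *	query[BNX2X_FCOE_QUERY_IDX]          - FCoE function statistics
 *	query[BNX2X_FIRST_QUEUE_QUERY_IDX..] - one entry per ETH queue,
 *	                                       followed by the FCoE queue
 */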

void bnx2x_stats_init(struct bnx2x *bp)
{
	int /*abs*/port = BP_PORT(bp);
	int mb_idx = BP_FW_MB_IDX(bp);
	int i;

	bp->stats_pending = 0;
	bp->executer_idx = 0;
	bp->stats_counter = 0;

	/* port and func stats for management */
	if (!BP_NOMCP(bp)) {
		bp->port.port_stx = SHMEM_RD(bp, port_mb[port].port_stx);
		bp->func_stx = SHMEM_RD(bp, func_mb[mb_idx].fw_mb_param);

	} else {
		bp->port.port_stx = 0;
		bp->func_stx = 0;
	}
	DP(BNX2X_MSG_STATS, "port_stx 0x%x func_stx 0x%x\n",
	   bp->port.port_stx, bp->func_stx);

	/* on a non-initial invocation, the PMF should retrieve port
	 * statistics from the SP (slowpath memory) area
	 */
	if (!bp->stats_init && bp->port.pmf && bp->port.port_stx)
		bnx2x_stats_handle(bp, STATS_EVENT_PMF);

	port = BP_PORT(bp);
	/* port stats */
	memset(&(bp->port.old_nig_stats), 0, sizeof(struct nig_stats));
	bp->port.old_nig_stats.brb_discard =
			REG_RD(bp, NIG_REG_STAT0_BRB_DISCARD + port*0x38);
	bp->port.old_nig_stats.brb_truncate =
			REG_RD(bp, NIG_REG_STAT0_BRB_TRUNCATE + port*0x38);
	if (!CHIP_IS_E3(bp)) {
		REG_RD_DMAE(bp, NIG_REG_STAT0_EGRESS_MAC_PKT0 + port*0x50,
			    &(bp->port.old_nig_stats.egress_mac_pkt0_lo), 2);
		REG_RD_DMAE(bp, NIG_REG_STAT0_EGRESS_MAC_PKT1 + port*0x50,
			    &(bp->port.old_nig_stats.egress_mac_pkt1_lo), 2);
	}

	/* function stats */
	for_each_queue(bp, i) {
		struct bnx2x_fp_stats *fp_stats = &bp->fp_stats[i];

		memset(&fp_stats->old_tclient, 0,
		       sizeof(fp_stats->old_tclient));
		memset(&fp_stats->old_uclient, 0,
		       sizeof(fp_stats->old_uclient));
		memset(&fp_stats->old_xclient, 0,
		       sizeof(fp_stats->old_xclient));
		if (bp->stats_init) {
			memset(&fp_stats->eth_q_stats, 0,
			       sizeof(fp_stats->eth_q_stats));
			memset(&fp_stats->eth_q_stats_old, 0,
			       sizeof(fp_stats->eth_q_stats_old));
		}
	}

	/* Prepare statistics ramrod data */
	bnx2x_prep_fw_stats_req(bp);

	memset(&bp->dev->stats, 0, sizeof(bp->dev->stats));
	if (bp->stats_init) {
		memset(&bp->net_stats_old, 0, sizeof(bp->net_stats_old));
		memset(&bp->fw_stats_old, 0, sizeof(bp->fw_stats_old));
		memset(&bp->eth_stats_old, 0, sizeof(bp->eth_stats_old));
		memset(&bp->eth_stats, 0, sizeof(bp->eth_stats));
		memset(&bp->func_stats, 0, sizeof(bp->func_stats));

		/* Clean SP from previous statistics */
		if (bp->func_stx) {
			memset(bnx2x_sp(bp, func_stats), 0,
			       sizeof(struct host_func_stats));
			bnx2x_func_stats_init(bp);
			bnx2x_hw_stats_post(bp);
			bnx2x_stats_comp(bp);
		}
	}

	bp->stats_state = STATS_STATE_DISABLED;

	if (bp->port.pmf && bp->port.port_stx)
		bnx2x_port_stats_base_init(bp);

	/* mark the end of statistics initialization */
	bp->stats_init = false;
}

void bnx2x_save_statistics(struct bnx2x *bp)
{
	int i;
	struct net_device_stats *nstats = &bp->dev->stats;

	/* save queue statistics */
	for_each_eth_queue(bp, i) {
		struct bnx2x_fastpath *fp = &bp->fp[i];
		struct bnx2x_eth_q_stats *qstats =
			&bnx2x_fp_stats(bp, fp)->eth_q_stats;
		struct bnx2x_eth_q_stats_old *qstats_old =
			&bnx2x_fp_stats(bp, fp)->eth_q_stats_old;

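		/* UPDATE_QSTAT_OLD(f) (bnx2x_stats.h) latches the current
		 * counter half into the _old copy, roughly
		 * qstats_old->f = qstats->f, so the hi/lo pairs below
		 * survive an unload/load cycle.
		 */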
		UPDATE_QSTAT_OLD(total_unicast_bytes_received_hi);
		UPDATE_QSTAT_OLD(total_unicast_bytes_received_lo);
		UPDATE_QSTAT_OLD(total_broadcast_bytes_received_hi);
		UPDATE_QSTAT_OLD(total_broadcast_bytes_received_lo);
		UPDATE_QSTAT_OLD(total_multicast_bytes_received_hi);
		UPDATE_QSTAT_OLD(total_multicast_bytes_received_lo);
		UPDATE_QSTAT_OLD(total_unicast_bytes_transmitted_hi);
		UPDATE_QSTAT_OLD(total_unicast_bytes_transmitted_lo);
		UPDATE_QSTAT_OLD(total_broadcast_bytes_transmitted_hi);
		UPDATE_QSTAT_OLD(total_broadcast_bytes_transmitted_lo);
		UPDATE_QSTAT_OLD(total_multicast_bytes_transmitted_hi);
		UPDATE_QSTAT_OLD(total_multicast_bytes_transmitted_lo);
		UPDATE_QSTAT_OLD(total_tpa_bytes_hi);
		UPDATE_QSTAT_OLD(total_tpa_bytes_lo);
	}

	/* save net_device_stats statistics */
	bp->net_stats_old.rx_dropped = nstats->rx_dropped;

	/* store port firmware statistics */
	if (bp->port.pmf && IS_MF(bp)) {
		struct bnx2x_eth_stats *estats = &bp->eth_stats;
		struct bnx2x_fw_port_stats_old *fwstats = &bp->fw_stats_old;
		UPDATE_FW_STAT_OLD(mac_filter_discard);
		UPDATE_FW_STAT_OLD(mf_tag_discard);
		UPDATE_FW_STAT_OLD(brb_truncate_discard);
		UPDATE_FW_STAT_OLD(mac_discard);
	}
}

void bnx2x_afex_collect_stats(struct bnx2x *bp, void *void_afex_stats,
			      u32 stats_type)
{
	int i;
	struct afex_stats *afex_stats = (struct afex_stats *)void_afex_stats;
	struct bnx2x_eth_stats *estats = &bp->eth_stats;
	struct per_queue_stats *fcoe_q_stats =
		&bp->fw_stats_data->queue_stats[FCOE_IDX(bp)];

	struct tstorm_per_queue_stats *fcoe_q_tstorm_stats =
		&fcoe_q_stats->tstorm_queue_statistics;

	struct ustorm_per_queue_stats *fcoe_q_ustorm_stats =
		&fcoe_q_stats->ustorm_queue_statistics;

	struct xstorm_per_queue_stats *fcoe_q_xstorm_stats =
		&fcoe_q_stats->xstorm_queue_statistics;

	struct fcoe_statistics_params *fw_fcoe_stat =
		&bp->fw_stats_data->fcoe;

	memset(afex_stats, 0, sizeof(struct afex_stats));

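	/* ADD_64() (bnx2x_stats.h) accumulates a 64-bit value held as
	 * two 32-bit halves, propagating the carry; schematically:
	 *
	 *	s_lo += a_lo;
	 *	s_hi += a_hi + ((s_lo < a_lo) ? 1 : 0);
	 */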
	for_each_eth_queue(bp, i) {
		struct bnx2x_eth_q_stats *qstats = &bp->fp_stats[i].eth_q_stats;

		ADD_64(afex_stats->rx_unicast_bytes_hi,
		       qstats->total_unicast_bytes_received_hi,
		       afex_stats->rx_unicast_bytes_lo,
		       qstats->total_unicast_bytes_received_lo);

		ADD_64(afex_stats->rx_broadcast_bytes_hi,
		       qstats->total_broadcast_bytes_received_hi,
		       afex_stats->rx_broadcast_bytes_lo,
		       qstats->total_broadcast_bytes_received_lo);

		ADD_64(afex_stats->rx_multicast_bytes_hi,
		       qstats->total_multicast_bytes_received_hi,
		       afex_stats->rx_multicast_bytes_lo,
		       qstats->total_multicast_bytes_received_lo);

		ADD_64(afex_stats->rx_unicast_frames_hi,
		       qstats->total_unicast_packets_received_hi,
		       afex_stats->rx_unicast_frames_lo,
		       qstats->total_unicast_packets_received_lo);

		ADD_64(afex_stats->rx_broadcast_frames_hi,
		       qstats->total_broadcast_packets_received_hi,
		       afex_stats->rx_broadcast_frames_lo,
		       qstats->total_broadcast_packets_received_lo);

		ADD_64(afex_stats->rx_multicast_frames_hi,
		       qstats->total_multicast_packets_received_hi,
		       afex_stats->rx_multicast_frames_lo,
		       qstats->total_multicast_packets_received_lo);

		/* sum into rx_frames_discarded all packets discarded
		 * due to size, ttl0 and checksum
		 */
		ADD_64(afex_stats->rx_frames_discarded_hi,
		       qstats->total_packets_received_checksum_discarded_hi,
		       afex_stats->rx_frames_discarded_lo,
		       qstats->total_packets_received_checksum_discarded_lo);

		ADD_64(afex_stats->rx_frames_discarded_hi,
		       qstats->total_packets_received_ttl0_discarded_hi,
		       afex_stats->rx_frames_discarded_lo,
		       qstats->total_packets_received_ttl0_discarded_lo);

		ADD_64(afex_stats->rx_frames_discarded_hi,
		       qstats->etherstatsoverrsizepkts_hi,
		       afex_stats->rx_frames_discarded_lo,
		       qstats->etherstatsoverrsizepkts_lo);

		ADD_64(afex_stats->rx_frames_dropped_hi,
		       qstats->no_buff_discard_hi,
		       afex_stats->rx_frames_dropped_lo,
		       qstats->no_buff_discard_lo);

		ADD_64(afex_stats->tx_unicast_bytes_hi,
		       qstats->total_unicast_bytes_transmitted_hi,
		       afex_stats->tx_unicast_bytes_lo,
		       qstats->total_unicast_bytes_transmitted_lo);

		ADD_64(afex_stats->tx_broadcast_bytes_hi,
		       qstats->total_broadcast_bytes_transmitted_hi,
		       afex_stats->tx_broadcast_bytes_lo,
		       qstats->total_broadcast_bytes_transmitted_lo);

		ADD_64(afex_stats->tx_multicast_bytes_hi,
		       qstats->total_multicast_bytes_transmitted_hi,
		       afex_stats->tx_multicast_bytes_lo,
		       qstats->total_multicast_bytes_transmitted_lo);

		ADD_64(afex_stats->tx_unicast_frames_hi,
		       qstats->total_unicast_packets_transmitted_hi,
		       afex_stats->tx_unicast_frames_lo,
		       qstats->total_unicast_packets_transmitted_lo);

		ADD_64(afex_stats->tx_broadcast_frames_hi,
		       qstats->total_broadcast_packets_transmitted_hi,
		       afex_stats->tx_broadcast_frames_lo,
		       qstats->total_broadcast_packets_transmitted_lo);

		ADD_64(afex_stats->tx_multicast_frames_hi,
		       qstats->total_multicast_packets_transmitted_hi,
		       afex_stats->tx_multicast_frames_lo,
		       qstats->total_multicast_packets_transmitted_lo);

		ADD_64(afex_stats->tx_frames_dropped_hi,
		       qstats->total_transmitted_dropped_packets_error_hi,
		       afex_stats->tx_frames_dropped_lo,
		       qstats->total_transmitted_dropped_packets_error_lo);
	}

	/* now add FCoE statistics, which are collected separately
	 * (both offloaded and non-offloaded)
	 */
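	/* ADD_64_LE()/ADD_64_LE16() are the little-endian variants: they
	 * convert the firmware's __le32/__le16 fields to CPU byte order
	 * before the ADD_64 accumulation, with LE32_0/LE16_0 serving as a
	 * zero hi-part when the firmware counter is only 32/16 bits wide.
	 */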
	if (!NO_FCOE(bp)) {
		ADD_64_LE(afex_stats->rx_unicast_bytes_hi,
			  LE32_0,
			  afex_stats->rx_unicast_bytes_lo,
			  fw_fcoe_stat->rx_stat0.fcoe_rx_byte_cnt);

		ADD_64_LE(afex_stats->rx_unicast_bytes_hi,
			  fcoe_q_tstorm_stats->rcv_ucast_bytes.hi,
			  afex_stats->rx_unicast_bytes_lo,
			  fcoe_q_tstorm_stats->rcv_ucast_bytes.lo);

		ADD_64_LE(afex_stats->rx_broadcast_bytes_hi,
			  fcoe_q_tstorm_stats->rcv_bcast_bytes.hi,
			  afex_stats->rx_broadcast_bytes_lo,
			  fcoe_q_tstorm_stats->rcv_bcast_bytes.lo);

		ADD_64_LE(afex_stats->rx_multicast_bytes_hi,
			  fcoe_q_tstorm_stats->rcv_mcast_bytes.hi,
			  afex_stats->rx_multicast_bytes_lo,
			  fcoe_q_tstorm_stats->rcv_mcast_bytes.lo);

		ADD_64_LE(afex_stats->rx_unicast_frames_hi,
			  LE32_0,
			  afex_stats->rx_unicast_frames_lo,
			  fw_fcoe_stat->rx_stat0.fcoe_rx_pkt_cnt);

		ADD_64_LE(afex_stats->rx_unicast_frames_hi,
			  LE32_0,
			  afex_stats->rx_unicast_frames_lo,
			  fcoe_q_tstorm_stats->rcv_ucast_pkts);

		ADD_64_LE(afex_stats->rx_broadcast_frames_hi,
			  LE32_0,
			  afex_stats->rx_broadcast_frames_lo,
			  fcoe_q_tstorm_stats->rcv_bcast_pkts);

		ADD_64_LE(afex_stats->rx_multicast_frames_hi,
			  LE32_0,
			  afex_stats->rx_multicast_frames_lo,
			  fcoe_q_tstorm_stats->rcv_mcast_pkts);

		ADD_64_LE(afex_stats->rx_frames_discarded_hi,
			  LE32_0,
			  afex_stats->rx_frames_discarded_lo,
			  fcoe_q_tstorm_stats->checksum_discard);

		ADD_64_LE(afex_stats->rx_frames_discarded_hi,
			  LE32_0,
			  afex_stats->rx_frames_discarded_lo,
			  fcoe_q_tstorm_stats->pkts_too_big_discard);

		ADD_64_LE(afex_stats->rx_frames_discarded_hi,
			  LE32_0,
			  afex_stats->rx_frames_discarded_lo,
			  fcoe_q_tstorm_stats->ttl0_discard);

		ADD_64_LE16(afex_stats->rx_frames_dropped_hi,
			    LE16_0,
			    afex_stats->rx_frames_dropped_lo,
			    fcoe_q_tstorm_stats->no_buff_discard);

		ADD_64_LE(afex_stats->rx_frames_dropped_hi,
			  LE32_0,
			  afex_stats->rx_frames_dropped_lo,
			  fcoe_q_ustorm_stats->ucast_no_buff_pkts);

		ADD_64_LE(afex_stats->rx_frames_dropped_hi,
			  LE32_0,
			  afex_stats->rx_frames_dropped_lo,
			  fcoe_q_ustorm_stats->mcast_no_buff_pkts);

		ADD_64_LE(afex_stats->rx_frames_dropped_hi,
			  LE32_0,
			  afex_stats->rx_frames_dropped_lo,
			  fcoe_q_ustorm_stats->bcast_no_buff_pkts);

		ADD_64_LE(afex_stats->rx_frames_dropped_hi,
			  LE32_0,
			  afex_stats->rx_frames_dropped_lo,
			  fw_fcoe_stat->rx_stat1.fcoe_rx_drop_pkt_cnt);

		ADD_64_LE(afex_stats->rx_frames_dropped_hi,
			  LE32_0,
			  afex_stats->rx_frames_dropped_lo,
			  fw_fcoe_stat->rx_stat2.fcoe_rx_drop_pkt_cnt);

		ADD_64_LE(afex_stats->tx_unicast_bytes_hi,
			  LE32_0,
			  afex_stats->tx_unicast_bytes_lo,
			  fw_fcoe_stat->tx_stat.fcoe_tx_byte_cnt);

		ADD_64_LE(afex_stats->tx_unicast_bytes_hi,
			  fcoe_q_xstorm_stats->ucast_bytes_sent.hi,
			  afex_stats->tx_unicast_bytes_lo,
			  fcoe_q_xstorm_stats->ucast_bytes_sent.lo);

		ADD_64_LE(afex_stats->tx_broadcast_bytes_hi,
			  fcoe_q_xstorm_stats->bcast_bytes_sent.hi,
			  afex_stats->tx_broadcast_bytes_lo,
			  fcoe_q_xstorm_stats->bcast_bytes_sent.lo);

		ADD_64_LE(afex_stats->tx_multicast_bytes_hi,
			  fcoe_q_xstorm_stats->mcast_bytes_sent.hi,
			  afex_stats->tx_multicast_bytes_lo,
			  fcoe_q_xstorm_stats->mcast_bytes_sent.lo);

		ADD_64_LE(afex_stats->tx_unicast_frames_hi,
			  LE32_0,
			  afex_stats->tx_unicast_frames_lo,
			  fw_fcoe_stat->tx_stat.fcoe_tx_pkt_cnt);

		ADD_64_LE(afex_stats->tx_unicast_frames_hi,
			  LE32_0,
			  afex_stats->tx_unicast_frames_lo,
			  fcoe_q_xstorm_stats->ucast_pkts_sent);

		ADD_64_LE(afex_stats->tx_broadcast_frames_hi,
			  LE32_0,
			  afex_stats->tx_broadcast_frames_lo,
			  fcoe_q_xstorm_stats->bcast_pkts_sent);

		ADD_64_LE(afex_stats->tx_multicast_frames_hi,
			  LE32_0,
			  afex_stats->tx_multicast_frames_lo,
			  fcoe_q_xstorm_stats->mcast_pkts_sent);

		ADD_64_LE(afex_stats->tx_frames_dropped_hi,
			  LE32_0,
			  afex_stats->tx_frames_dropped_lo,
			  fcoe_q_xstorm_stats->error_drop_pkts);
	}

	/* if port stats are requested, add them to the PMF stats, since
	 * the MCP will accumulate them anyway before sending them to the
	 * switch
	 */
	if ((bp->port.pmf) && (stats_type == VICSTATST_UIF_INDEX)) {
		ADD_64(afex_stats->rx_frames_dropped_hi,
		       0,
		       afex_stats->rx_frames_dropped_lo,
		       estats->mac_filter_discard);
		ADD_64(afex_stats->rx_frames_dropped_hi,
		       0,
		       afex_stats->rx_frames_dropped_lo,
		       estats->brb_truncate_discard);
		ADD_64(afex_stats->rx_frames_discarded_hi,
		       0,
		       afex_stats->rx_frames_discarded_lo,
		       estats->mac_discard);
	}
}