/* bnx2x_stats.c: Broadcom Everest network driver.
 *
 * Copyright (c) 2007-2011 Broadcom Corporation
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation.
 *
 * Maintained by: Eilon Greenstein <eilong@broadcom.com>
 * Written by: Eliezer Tamir
 * Based on code from Michael Chan's bnx2 driver
 * UDP CSUM errata workaround by Arik Gendelman
 * Slowpath and fastpath rework by Vladislav Zolotarov
 * Statistics and Link management by Yitchak Gertner
 *
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include "bnx2x_stats.h"
#include "bnx2x_cmn.h"

/* Statistics */

/*
 * General service functions
 */

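/* Fold a {hi, lo} pair of 32-bit counters (hi word stored first) into
 * a single long; on 32-bit hosts only the low 32 bits are returned.
 */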
static inline long bnx2x_hilo(u32 *hiref)
{
	u32 lo = *(hiref + 1);
#if (BITS_PER_LONG == 64)
	u32 hi = *hiref;

	return HILO_U64(hi, lo);
#else
	return lo;
#endif
}

/*
 * Init service functions
 */

/* Post the next statistics ramrod. Protect it with the spin lock in
 * order to ensure the strict ordering between statistics ramrods
 * (each ramrod carries a sequence number in
 * bp->fw_stats_req->hdr.drv_stats_counter, and ramrods must be
 * sent in order).
 */
static void bnx2x_storm_stats_post(struct bnx2x *bp)
{
	if (!bp->stats_pending) {
		int rc;

		spin_lock_bh(&bp->stats_lock);

		if (bp->stats_pending) {
			spin_unlock_bh(&bp->stats_lock);
			return;
		}

		bp->fw_stats_req->hdr.drv_stats_counter =
			cpu_to_le16(bp->stats_counter++);

		DP(NETIF_MSG_TIMER, "Sending statistics ramrod %d\n",
			bp->fw_stats_req->hdr.drv_stats_counter);

		/* send FW stats ramrod */
		rc = bnx2x_sp_post(bp, RAMROD_CMD_ID_COMMON_STAT_QUERY, 0,
				   U64_HI(bp->fw_stats_req_mapping),
				   U64_LO(bp->fw_stats_req_mapping),
				   NONE_CONNECTION_TYPE);
		if (rc == 0)
			bp->stats_pending = 1;

		spin_unlock_bh(&bp->stats_lock);
	}
}

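/* Kick off the DMAE transfers set up for HW/MAC statistics. If
 * commands were queued in the slowpath DMAE area, post a "loader"
 * command that chains them; otherwise post the single pre-built
 * function-stats command directly.
 */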
static void bnx2x_hw_stats_post(struct bnx2x *bp)
{
	struct dmae_command *dmae = &bp->stats_dmae;
	u32 *stats_comp = bnx2x_sp(bp, stats_comp);

	*stats_comp = DMAE_COMP_VAL;
	if (CHIP_REV_IS_SLOW(bp))
		return;

	/* loader */
	if (bp->executer_idx) {
		int loader_idx = PMF_DMAE_C(bp);
		u32 opcode = bnx2x_dmae_opcode(bp, DMAE_SRC_PCI, DMAE_DST_GRC,
					       true, DMAE_COMP_GRC);
		opcode = bnx2x_dmae_opcode_clr_src_reset(opcode);

		memset(dmae, 0, sizeof(struct dmae_command));
		dmae->opcode = opcode;
		dmae->src_addr_lo = U64_LO(bnx2x_sp_mapping(bp, dmae[0]));
		dmae->src_addr_hi = U64_HI(bnx2x_sp_mapping(bp, dmae[0]));
		dmae->dst_addr_lo = (DMAE_REG_CMD_MEM +
				     sizeof(struct dmae_command) *
				     (loader_idx + 1)) >> 2;
		dmae->dst_addr_hi = 0;
		dmae->len = sizeof(struct dmae_command) >> 2;
		if (CHIP_IS_E1(bp))
			dmae->len--;
		dmae->comp_addr_lo = dmae_reg_go_c[loader_idx + 1] >> 2;
		dmae->comp_addr_hi = 0;
		dmae->comp_val = 1;

		*stats_comp = 0;
		bnx2x_post_dmae(bp, dmae, loader_idx);

	} else if (bp->func_stx) {
		*stats_comp = 0;
		bnx2x_post_dmae(bp, dmae, INIT_DMAE_C(bp));
	}
}

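/* Poll (for up to ~10ms) until the DMAE completion value shows up in
 * the slowpath completion word; log an error on timeout.
 */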
static int bnx2x_stats_comp(struct bnx2x *bp)
{
	u32 *stats_comp = bnx2x_sp(bp, stats_comp);
	int cnt = 10;

	might_sleep();
	while (*stats_comp != DMAE_COMP_VAL) {
		if (!cnt) {
			BNX2X_ERR("timeout waiting for stats finished\n");
			break;
		}
		cnt--;
		usleep_range(1000, 1000);
	}
	return 1;
}

/*
 * Statistics service functions
 */

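/* On becoming the port management function (PMF), read back the port
 * statistics accumulated so far (from bp->port.port_stx) via two
 * GRC->PCI DMAE transfers, so the counters continue instead of
 * restarting from zero.
 */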
static void bnx2x_stats_pmf_update(struct bnx2x *bp)
{
	struct dmae_command *dmae;
	u32 opcode;
	int loader_idx = PMF_DMAE_C(bp);
	u32 *stats_comp = bnx2x_sp(bp, stats_comp);

	/* sanity */
	if (!IS_MF(bp) || !bp->port.pmf || !bp->port.port_stx) {
		BNX2X_ERR("BUG!\n");
		return;
	}

	bp->executer_idx = 0;

	opcode = bnx2x_dmae_opcode(bp, DMAE_SRC_GRC, DMAE_DST_PCI, false, 0);

	dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
	dmae->opcode = bnx2x_dmae_opcode_add_comp(opcode, DMAE_COMP_GRC);
	dmae->src_addr_lo = bp->port.port_stx >> 2;
	dmae->src_addr_hi = 0;
	dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, port_stats));
	dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, port_stats));
	dmae->len = DMAE_LEN32_RD_MAX;
	dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
	dmae->comp_addr_hi = 0;
	dmae->comp_val = 1;

	dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
	dmae->opcode = bnx2x_dmae_opcode_add_comp(opcode, DMAE_COMP_PCI);
	dmae->src_addr_lo = (bp->port.port_stx >> 2) + DMAE_LEN32_RD_MAX;
	dmae->src_addr_hi = 0;
	dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, port_stats) +
				   DMAE_LEN32_RD_MAX * 4);
	dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, port_stats) +
				   DMAE_LEN32_RD_MAX * 4);
	dmae->len = (sizeof(struct host_port_stats) >> 2) - DMAE_LEN32_RD_MAX;
	dmae->comp_addr_lo = U64_LO(bnx2x_sp_mapping(bp, stats_comp));
	dmae->comp_addr_hi = U64_HI(bnx2x_sp_mapping(bp, stats_comp));
	dmae->comp_val = DMAE_COMP_VAL;

	*stats_comp = 0;
	bnx2x_hw_stats_post(bp);
	bnx2x_stats_comp(bp);
}

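/* Build the per-port DMAE command chain: push the host port/function
 * stats out to the MCP, then pull the MAC (EMAC, BMAC or MSTAT,
 * depending on bp->link_vars.mac_type) and NIG hardware counters into
 * the slowpath buffers.
 */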
static void bnx2x_port_stats_init(struct bnx2x *bp)
{
	struct dmae_command *dmae;
	int port = BP_PORT(bp);
	u32 opcode;
	int loader_idx = PMF_DMAE_C(bp);
	u32 mac_addr;
	u32 *stats_comp = bnx2x_sp(bp, stats_comp);

	/* sanity */
	if (!bp->link_vars.link_up || !bp->port.pmf) {
		BNX2X_ERR("BUG!\n");
		return;
	}

	bp->executer_idx = 0;

	/* MCP */
	opcode = bnx2x_dmae_opcode(bp, DMAE_SRC_PCI, DMAE_DST_GRC,
				   true, DMAE_COMP_GRC);

	if (bp->port.port_stx) {

		dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
		dmae->opcode = opcode;
		dmae->src_addr_lo = U64_LO(bnx2x_sp_mapping(bp, port_stats));
		dmae->src_addr_hi = U64_HI(bnx2x_sp_mapping(bp, port_stats));
		dmae->dst_addr_lo = bp->port.port_stx >> 2;
		dmae->dst_addr_hi = 0;
		dmae->len = sizeof(struct host_port_stats) >> 2;
		dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
		dmae->comp_addr_hi = 0;
		dmae->comp_val = 1;
	}

	if (bp->func_stx) {

		dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
		dmae->opcode = opcode;
		dmae->src_addr_lo = U64_LO(bnx2x_sp_mapping(bp, func_stats));
		dmae->src_addr_hi = U64_HI(bnx2x_sp_mapping(bp, func_stats));
		dmae->dst_addr_lo = bp->func_stx >> 2;
		dmae->dst_addr_hi = 0;
		dmae->len = sizeof(struct host_func_stats) >> 2;
		dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
		dmae->comp_addr_hi = 0;
		dmae->comp_val = 1;
	}

	/* MAC */
	opcode = bnx2x_dmae_opcode(bp, DMAE_SRC_GRC, DMAE_DST_PCI,
				   true, DMAE_COMP_GRC);

	/* EMAC is special */
	if (bp->link_vars.mac_type == MAC_TYPE_EMAC) {
		mac_addr = (port ? GRCBASE_EMAC1 : GRCBASE_EMAC0);

		/* EMAC_REG_EMAC_RX_STAT_AC (EMAC_REG_EMAC_RX_STAT_AC_COUNT)*/
		dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
		dmae->opcode = opcode;
		dmae->src_addr_lo = (mac_addr +
				     EMAC_REG_EMAC_RX_STAT_AC) >> 2;
		dmae->src_addr_hi = 0;
		dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, mac_stats));
		dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, mac_stats));
		dmae->len = EMAC_REG_EMAC_RX_STAT_AC_COUNT;
		dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
		dmae->comp_addr_hi = 0;
		dmae->comp_val = 1;

		/* EMAC_REG_EMAC_RX_STAT_AC_28 */
		dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
		dmae->opcode = opcode;
		dmae->src_addr_lo = (mac_addr +
				     EMAC_REG_EMAC_RX_STAT_AC_28) >> 2;
		dmae->src_addr_hi = 0;
		dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, mac_stats) +
		     offsetof(struct emac_stats, rx_stat_falsecarriererrors));
		dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, mac_stats) +
		     offsetof(struct emac_stats, rx_stat_falsecarriererrors));
		dmae->len = 1;
		dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
		dmae->comp_addr_hi = 0;
		dmae->comp_val = 1;

		/* EMAC_REG_EMAC_TX_STAT_AC (EMAC_REG_EMAC_TX_STAT_AC_COUNT)*/
		dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
		dmae->opcode = opcode;
		dmae->src_addr_lo = (mac_addr +
				     EMAC_REG_EMAC_TX_STAT_AC) >> 2;
		dmae->src_addr_hi = 0;
		dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, mac_stats) +
			offsetof(struct emac_stats, tx_stat_ifhcoutoctets));
		dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, mac_stats) +
			offsetof(struct emac_stats, tx_stat_ifhcoutoctets));
		dmae->len = EMAC_REG_EMAC_TX_STAT_AC_COUNT;
		dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
		dmae->comp_addr_hi = 0;
		dmae->comp_val = 1;
	} else {
		u32 tx_src_addr_lo, rx_src_addr_lo;
		u16 rx_len, tx_len;

		/* configure the params according to MAC type */
		switch (bp->link_vars.mac_type) {
		case MAC_TYPE_BMAC:
			mac_addr = (port ? NIG_REG_INGRESS_BMAC1_MEM :
					   NIG_REG_INGRESS_BMAC0_MEM);

			/* BIGMAC_REGISTER_TX_STAT_GTPKT ..
			   BIGMAC_REGISTER_TX_STAT_GTBYT */
			if (CHIP_IS_E1x(bp)) {
				tx_src_addr_lo = (mac_addr +
					BIGMAC_REGISTER_TX_STAT_GTPKT) >> 2;
				tx_len = (8 + BIGMAC_REGISTER_TX_STAT_GTBYT -
					  BIGMAC_REGISTER_TX_STAT_GTPKT) >> 2;
				rx_src_addr_lo = (mac_addr +
					BIGMAC_REGISTER_RX_STAT_GR64) >> 2;
				rx_len = (8 + BIGMAC_REGISTER_RX_STAT_GRIPJ -
					  BIGMAC_REGISTER_RX_STAT_GR64) >> 2;
			} else {
				tx_src_addr_lo = (mac_addr +
					BIGMAC2_REGISTER_TX_STAT_GTPOK) >> 2;
				tx_len = (8 + BIGMAC2_REGISTER_TX_STAT_GTBYT -
					  BIGMAC2_REGISTER_TX_STAT_GTPOK) >> 2;
				rx_src_addr_lo = (mac_addr +
					BIGMAC2_REGISTER_RX_STAT_GR64) >> 2;
				rx_len = (8 + BIGMAC2_REGISTER_RX_STAT_GRIPJ -
					  BIGMAC2_REGISTER_RX_STAT_GR64) >> 2;
			}
			break;

		case MAC_TYPE_UMAC: /* handled by MSTAT */
		case MAC_TYPE_XMAC: /* handled by MSTAT */
		default:
			mac_addr = port ? GRCBASE_MSTAT1 : GRCBASE_MSTAT0;
			tx_src_addr_lo = (mac_addr +
					  MSTAT_REG_TX_STAT_GTXPOK_LO) >> 2;
			rx_src_addr_lo = (mac_addr +
					  MSTAT_REG_RX_STAT_GR64_LO) >> 2;
			tx_len = sizeof(bp->slowpath->
					mac_stats.mstat_stats.stats_tx) >> 2;
			rx_len = sizeof(bp->slowpath->
					mac_stats.mstat_stats.stats_rx) >> 2;
			break;
		}

		/* TX stats */
		dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
		dmae->opcode = opcode;
		dmae->src_addr_lo = tx_src_addr_lo;
		dmae->src_addr_hi = 0;
		dmae->len = tx_len;
		dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, mac_stats));
		dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, mac_stats));
		dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
		dmae->comp_addr_hi = 0;
		dmae->comp_val = 1;

		/* RX stats */
		dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
		dmae->opcode = opcode;
		dmae->src_addr_hi = 0;
		dmae->src_addr_lo = rx_src_addr_lo;
		dmae->dst_addr_lo =
			U64_LO(bnx2x_sp_mapping(bp, mac_stats) + (tx_len << 2));
		dmae->dst_addr_hi =
			U64_HI(bnx2x_sp_mapping(bp, mac_stats) + (tx_len << 2));
		dmae->len = rx_len;
		dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
		dmae->comp_addr_hi = 0;
		dmae->comp_val = 1;
	}

	/* NIG */
	if (!CHIP_IS_E3(bp)) {
		dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
		dmae->opcode = opcode;
		dmae->src_addr_lo = (port ? NIG_REG_STAT1_EGRESS_MAC_PKT0 :
					    NIG_REG_STAT0_EGRESS_MAC_PKT0) >> 2;
		dmae->src_addr_hi = 0;
		dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, nig_stats) +
			offsetof(struct nig_stats, egress_mac_pkt0_lo));
		dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, nig_stats) +
			offsetof(struct nig_stats, egress_mac_pkt0_lo));
		dmae->len = (2*sizeof(u32)) >> 2;
		dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
		dmae->comp_addr_hi = 0;
		dmae->comp_val = 1;

		dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
		dmae->opcode = opcode;
		dmae->src_addr_lo = (port ? NIG_REG_STAT1_EGRESS_MAC_PKT1 :
					    NIG_REG_STAT0_EGRESS_MAC_PKT1) >> 2;
		dmae->src_addr_hi = 0;
		dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, nig_stats) +
			offsetof(struct nig_stats, egress_mac_pkt1_lo));
		dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, nig_stats) +
			offsetof(struct nig_stats, egress_mac_pkt1_lo));
		dmae->len = (2*sizeof(u32)) >> 2;
		dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
		dmae->comp_addr_hi = 0;
		dmae->comp_val = 1;
	}

	dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
	dmae->opcode = bnx2x_dmae_opcode(bp, DMAE_SRC_GRC, DMAE_DST_PCI,
					 true, DMAE_COMP_PCI);
	dmae->src_addr_lo = (port ? NIG_REG_STAT1_BRB_DISCARD :
				    NIG_REG_STAT0_BRB_DISCARD) >> 2;
	dmae->src_addr_hi = 0;
	dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, nig_stats));
	dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, nig_stats));
	dmae->len = (sizeof(struct nig_stats) - 4*sizeof(u32)) >> 2;

	dmae->comp_addr_lo = U64_LO(bnx2x_sp_mapping(bp, stats_comp));
	dmae->comp_addr_hi = U64_HI(bnx2x_sp_mapping(bp, stats_comp));
	dmae->comp_val = DMAE_COMP_VAL;

	*stats_comp = 0;
}

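/* Prepare the single DMAE command that publishes the host function
 * statistics to bp->func_stx; used when this function is not the PMF.
 */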
static void bnx2x_func_stats_init(struct bnx2x *bp)
{
	struct dmae_command *dmae = &bp->stats_dmae;
	u32 *stats_comp = bnx2x_sp(bp, stats_comp);

	/* sanity */
	if (!bp->func_stx) {
		BNX2X_ERR("BUG!\n");
		return;
	}

	bp->executer_idx = 0;
	memset(dmae, 0, sizeof(struct dmae_command));

	dmae->opcode = bnx2x_dmae_opcode(bp, DMAE_SRC_PCI, DMAE_DST_GRC,
					 true, DMAE_COMP_PCI);
	dmae->src_addr_lo = U64_LO(bnx2x_sp_mapping(bp, func_stats));
	dmae->src_addr_hi = U64_HI(bnx2x_sp_mapping(bp, func_stats));
	dmae->dst_addr_lo = bp->func_stx >> 2;
	dmae->dst_addr_hi = 0;
	dmae->len = sizeof(struct host_func_stats) >> 2;
	dmae->comp_addr_lo = U64_LO(bnx2x_sp_mapping(bp, stats_comp));
	dmae->comp_addr_hi = U64_HI(bnx2x_sp_mapping(bp, stats_comp));
	dmae->comp_val = DMAE_COMP_VAL;

	*stats_comp = 0;
}

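/* (Re)arm statistics collection: program the DMAE chain (port-wide
 * for the PMF, function-only otherwise), then post both the HW DMAE
 * transfers and the FW statistics ramrod.
 */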
static void bnx2x_stats_start(struct bnx2x *bp)
{
	if (bp->port.pmf)
		bnx2x_port_stats_init(bp);

	else if (bp->func_stx)
		bnx2x_func_stats_init(bp);

	bnx2x_hw_stats_post(bp);
	bnx2x_storm_stats_post(bp);
}

static void bnx2x_stats_pmf_start(struct bnx2x *bp)
{
	bnx2x_stats_comp(bp);
	bnx2x_stats_pmf_update(bp);
	bnx2x_stats_start(bp);
}

static void bnx2x_stats_restart(struct bnx2x *bp)
{
	bnx2x_stats_comp(bp);
	bnx2x_stats_start(bp);
}

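/* Fold the freshly DMAE'd BMAC1/BMAC2 counters into the host port
 * stats using the 64-bit UPDATE_STAT64 accumulation macros; BMAC2
 * additionally provides per-priority pause (PFC) frame counters.
 */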
466static void bnx2x_bmac_stats_update(struct bnx2x *bp)
467{
Dmitry Kravkov6c719d02010-07-27 12:36:15 +0000468 struct host_port_stats *pstats = bnx2x_sp(bp, port_stats);
469 struct bnx2x_eth_stats *estats = &bp->eth_stats;
470 struct {
471 u32 lo;
472 u32 hi;
473 } diff;
474
Dmitry Kravkovf2e08992010-10-06 03:28:26 +0000475 if (CHIP_IS_E1x(bp)) {
476 struct bmac1_stats *new = bnx2x_sp(bp, mac_stats.bmac1_stats);
477
478 /* the macros below will use "bmac1_stats" type */
479 UPDATE_STAT64(rx_stat_grerb, rx_stat_ifhcinbadoctets);
480 UPDATE_STAT64(rx_stat_grfcs, rx_stat_dot3statsfcserrors);
481 UPDATE_STAT64(rx_stat_grund, rx_stat_etherstatsundersizepkts);
482 UPDATE_STAT64(rx_stat_grovr, rx_stat_dot3statsframestoolong);
483 UPDATE_STAT64(rx_stat_grfrg, rx_stat_etherstatsfragments);
484 UPDATE_STAT64(rx_stat_grjbr, rx_stat_etherstatsjabbers);
485 UPDATE_STAT64(rx_stat_grxcf, rx_stat_maccontrolframesreceived);
486 UPDATE_STAT64(rx_stat_grxpf, rx_stat_xoffstateentered);
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +0300487 UPDATE_STAT64(rx_stat_grxpf, rx_stat_mac_xpf);
488
Dmitry Kravkovf2e08992010-10-06 03:28:26 +0000489 UPDATE_STAT64(tx_stat_gtxpf, tx_stat_outxoffsent);
490 UPDATE_STAT64(tx_stat_gtxpf, tx_stat_flowcontroldone);
491 UPDATE_STAT64(tx_stat_gt64, tx_stat_etherstatspkts64octets);
492 UPDATE_STAT64(tx_stat_gt127,
Dmitry Kravkov6c719d02010-07-27 12:36:15 +0000493 tx_stat_etherstatspkts65octetsto127octets);
Dmitry Kravkovf2e08992010-10-06 03:28:26 +0000494 UPDATE_STAT64(tx_stat_gt255,
Dmitry Kravkov6c719d02010-07-27 12:36:15 +0000495 tx_stat_etherstatspkts128octetsto255octets);
Dmitry Kravkovf2e08992010-10-06 03:28:26 +0000496 UPDATE_STAT64(tx_stat_gt511,
Dmitry Kravkov6c719d02010-07-27 12:36:15 +0000497 tx_stat_etherstatspkts256octetsto511octets);
Dmitry Kravkovf2e08992010-10-06 03:28:26 +0000498 UPDATE_STAT64(tx_stat_gt1023,
Dmitry Kravkov6c719d02010-07-27 12:36:15 +0000499 tx_stat_etherstatspkts512octetsto1023octets);
Dmitry Kravkovf2e08992010-10-06 03:28:26 +0000500 UPDATE_STAT64(tx_stat_gt1518,
Dmitry Kravkov6c719d02010-07-27 12:36:15 +0000501 tx_stat_etherstatspkts1024octetsto1522octets);
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +0300502 UPDATE_STAT64(tx_stat_gt2047, tx_stat_mac_2047);
503 UPDATE_STAT64(tx_stat_gt4095, tx_stat_mac_4095);
504 UPDATE_STAT64(tx_stat_gt9216, tx_stat_mac_9216);
505 UPDATE_STAT64(tx_stat_gt16383, tx_stat_mac_16383);
Dmitry Kravkovf2e08992010-10-06 03:28:26 +0000506 UPDATE_STAT64(tx_stat_gterr,
Dmitry Kravkov6c719d02010-07-27 12:36:15 +0000507 tx_stat_dot3statsinternalmactransmiterrors);
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +0300508 UPDATE_STAT64(tx_stat_gtufl, tx_stat_mac_ufl);
Dmitry Kravkovf2e08992010-10-06 03:28:26 +0000509
510 } else {
511 struct bmac2_stats *new = bnx2x_sp(bp, mac_stats.bmac2_stats);
512
513 /* the macros below will use "bmac2_stats" type */
514 UPDATE_STAT64(rx_stat_grerb, rx_stat_ifhcinbadoctets);
515 UPDATE_STAT64(rx_stat_grfcs, rx_stat_dot3statsfcserrors);
516 UPDATE_STAT64(rx_stat_grund, rx_stat_etherstatsundersizepkts);
517 UPDATE_STAT64(rx_stat_grovr, rx_stat_dot3statsframestoolong);
518 UPDATE_STAT64(rx_stat_grfrg, rx_stat_etherstatsfragments);
519 UPDATE_STAT64(rx_stat_grjbr, rx_stat_etherstatsjabbers);
520 UPDATE_STAT64(rx_stat_grxcf, rx_stat_maccontrolframesreceived);
521 UPDATE_STAT64(rx_stat_grxpf, rx_stat_xoffstateentered);
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +0300522 UPDATE_STAT64(rx_stat_grxpf, rx_stat_mac_xpf);
Dmitry Kravkovf2e08992010-10-06 03:28:26 +0000523 UPDATE_STAT64(tx_stat_gtxpf, tx_stat_outxoffsent);
524 UPDATE_STAT64(tx_stat_gtxpf, tx_stat_flowcontroldone);
525 UPDATE_STAT64(tx_stat_gt64, tx_stat_etherstatspkts64octets);
526 UPDATE_STAT64(tx_stat_gt127,
527 tx_stat_etherstatspkts65octetsto127octets);
528 UPDATE_STAT64(tx_stat_gt255,
529 tx_stat_etherstatspkts128octetsto255octets);
530 UPDATE_STAT64(tx_stat_gt511,
531 tx_stat_etherstatspkts256octetsto511octets);
532 UPDATE_STAT64(tx_stat_gt1023,
533 tx_stat_etherstatspkts512octetsto1023octets);
534 UPDATE_STAT64(tx_stat_gt1518,
535 tx_stat_etherstatspkts1024octetsto1522octets);
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +0300536 UPDATE_STAT64(tx_stat_gt2047, tx_stat_mac_2047);
537 UPDATE_STAT64(tx_stat_gt4095, tx_stat_mac_4095);
538 UPDATE_STAT64(tx_stat_gt9216, tx_stat_mac_9216);
539 UPDATE_STAT64(tx_stat_gt16383, tx_stat_mac_16383);
Dmitry Kravkovf2e08992010-10-06 03:28:26 +0000540 UPDATE_STAT64(tx_stat_gterr,
541 tx_stat_dot3statsinternalmactransmiterrors);
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +0300542 UPDATE_STAT64(tx_stat_gtufl, tx_stat_mac_ufl);
Barak Witkowski0e898dd2011-12-05 21:52:22 +0000543
544 /* collect PFC stats */
545 DIFF_64(diff.hi, new->tx_stat_gtpp_hi,
546 pstats->pfc_frames_tx_hi,
547 diff.lo, new->tx_stat_gtpp_lo,
548 pstats->pfc_frames_tx_lo);
549 pstats->pfc_frames_tx_hi = new->tx_stat_gtpp_hi;
550 pstats->pfc_frames_tx_lo = new->tx_stat_gtpp_lo;
551 ADD_64(pstats->pfc_frames_tx_hi, diff.hi,
552 pstats->pfc_frames_tx_lo, diff.lo);
553
554 DIFF_64(diff.hi, new->rx_stat_grpp_hi,
555 pstats->pfc_frames_rx_hi,
556 diff.lo, new->rx_stat_grpp_lo,
557 pstats->pfc_frames_rx_lo);
558 pstats->pfc_frames_rx_hi = new->rx_stat_grpp_hi;
559 pstats->pfc_frames_rx_lo = new->rx_stat_grpp_lo;
560 ADD_64(pstats->pfc_frames_rx_hi, diff.hi,
561 pstats->pfc_frames_rx_lo, diff.lo);
Dmitry Kravkovf2e08992010-10-06 03:28:26 +0000562 }
Dmitry Kravkov6c719d02010-07-27 12:36:15 +0000563
564 estats->pause_frames_received_hi =
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +0300565 pstats->mac_stx[1].rx_stat_mac_xpf_hi;
Dmitry Kravkov6c719d02010-07-27 12:36:15 +0000566 estats->pause_frames_received_lo =
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +0300567 pstats->mac_stx[1].rx_stat_mac_xpf_lo;
568
569 estats->pause_frames_sent_hi =
570 pstats->mac_stx[1].tx_stat_outxoffsent_hi;
571 estats->pause_frames_sent_lo =
572 pstats->mac_stx[1].tx_stat_outxoffsent_lo;
Barak Witkowski0e898dd2011-12-05 21:52:22 +0000573
574 estats->pfc_frames_received_hi =
575 pstats->pfc_frames_rx_hi;
576 estats->pfc_frames_received_lo =
577 pstats->pfc_frames_rx_lo;
578 estats->pfc_frames_sent_hi =
579 pstats->pfc_frames_tx_hi;
580 estats->pfc_frames_sent_lo =
581 pstats->pfc_frames_tx_lo;
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +0300582}
583
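/* The equivalent update for E3 chips, where the UMAC/XMAC counters
 * are exposed through the shared MSTAT block; these counters are
 * added straight into the accumulators (ADD_STAT64) rather than
 * differenced against a previous snapshot.
 */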
static void bnx2x_mstat_stats_update(struct bnx2x *bp)
{
	struct host_port_stats *pstats = bnx2x_sp(bp, port_stats);
	struct bnx2x_eth_stats *estats = &bp->eth_stats;

	struct mstat_stats *new = bnx2x_sp(bp, mac_stats.mstat_stats);

	ADD_STAT64(stats_rx.rx_grerb, rx_stat_ifhcinbadoctets);
	ADD_STAT64(stats_rx.rx_grfcs, rx_stat_dot3statsfcserrors);
	ADD_STAT64(stats_rx.rx_grund, rx_stat_etherstatsundersizepkts);
	ADD_STAT64(stats_rx.rx_grovr, rx_stat_dot3statsframestoolong);
	ADD_STAT64(stats_rx.rx_grfrg, rx_stat_etherstatsfragments);
	ADD_STAT64(stats_rx.rx_grxcf, rx_stat_maccontrolframesreceived);
	ADD_STAT64(stats_rx.rx_grxpf, rx_stat_xoffstateentered);
	ADD_STAT64(stats_rx.rx_grxpf, rx_stat_mac_xpf);
	ADD_STAT64(stats_tx.tx_gtxpf, tx_stat_outxoffsent);
	ADD_STAT64(stats_tx.tx_gtxpf, tx_stat_flowcontroldone);

	/* collect pfc stats */
	ADD_64(pstats->pfc_frames_tx_hi, new->stats_tx.tx_gtxpp_hi,
	       pstats->pfc_frames_tx_lo, new->stats_tx.tx_gtxpp_lo);
	ADD_64(pstats->pfc_frames_rx_hi, new->stats_rx.rx_grxpp_hi,
	       pstats->pfc_frames_rx_lo, new->stats_rx.rx_grxpp_lo);

	ADD_STAT64(stats_tx.tx_gt64, tx_stat_etherstatspkts64octets);
	ADD_STAT64(stats_tx.tx_gt127,
			tx_stat_etherstatspkts65octetsto127octets);
	ADD_STAT64(stats_tx.tx_gt255,
			tx_stat_etherstatspkts128octetsto255octets);
	ADD_STAT64(stats_tx.tx_gt511,
			tx_stat_etherstatspkts256octetsto511octets);
	ADD_STAT64(stats_tx.tx_gt1023,
			tx_stat_etherstatspkts512octetsto1023octets);
	ADD_STAT64(stats_tx.tx_gt1518,
			tx_stat_etherstatspkts1024octetsto1522octets);
	ADD_STAT64(stats_tx.tx_gt2047, tx_stat_mac_2047);

	ADD_STAT64(stats_tx.tx_gt4095, tx_stat_mac_4095);
	ADD_STAT64(stats_tx.tx_gt9216, tx_stat_mac_9216);
	ADD_STAT64(stats_tx.tx_gt16383, tx_stat_mac_16383);

	ADD_STAT64(stats_tx.tx_gterr,
			tx_stat_dot3statsinternalmactransmiterrors);
	ADD_STAT64(stats_tx.tx_gtufl, tx_stat_mac_ufl);

	ADD_64(estats->etherstatspkts1024octetsto1522octets_hi,
	       new->stats_tx.tx_gt1518_hi,
	       estats->etherstatspkts1024octetsto1522octets_lo,
	       new->stats_tx.tx_gt1518_lo);

	ADD_64(estats->etherstatspktsover1522octets_hi,
	       new->stats_tx.tx_gt2047_hi,
	       estats->etherstatspktsover1522octets_lo,
	       new->stats_tx.tx_gt2047_lo);

	ADD_64(estats->etherstatspktsover1522octets_hi,
	       new->stats_tx.tx_gt4095_hi,
	       estats->etherstatspktsover1522octets_lo,
	       new->stats_tx.tx_gt4095_lo);

	ADD_64(estats->etherstatspktsover1522octets_hi,
	       new->stats_tx.tx_gt9216_hi,
	       estats->etherstatspktsover1522octets_lo,
	       new->stats_tx.tx_gt9216_lo);

	ADD_64(estats->etherstatspktsover1522octets_hi,
	       new->stats_tx.tx_gt16383_hi,
	       estats->etherstatspktsover1522octets_lo,
	       new->stats_tx.tx_gt16383_lo);

	estats->pause_frames_received_hi =
				pstats->mac_stx[1].rx_stat_mac_xpf_hi;
	estats->pause_frames_received_lo =
				pstats->mac_stx[1].rx_stat_mac_xpf_lo;

	estats->pause_frames_sent_hi =
				pstats->mac_stx[1].tx_stat_outxoffsent_hi;
	estats->pause_frames_sent_lo =
				pstats->mac_stx[1].tx_stat_outxoffsent_lo;

	estats->pfc_frames_received_hi =
				pstats->pfc_frames_rx_hi;
	estats->pfc_frames_received_lo =
				pstats->pfc_frames_rx_lo;
	estats->pfc_frames_sent_hi =
				pstats->pfc_frames_tx_hi;
	estats->pfc_frames_sent_lo =
				pstats->pfc_frames_tx_lo;
}

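/* Fold the EMAC (1G MAC) counters into the port stats; EMAC counters
 * are 32 bits wide, so UPDATE_EXTEND_STAT widens them into the 64-bit
 * accumulators with carry handling.
 */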
static void bnx2x_emac_stats_update(struct bnx2x *bp)
{
	struct emac_stats *new = bnx2x_sp(bp, mac_stats.emac_stats);
	struct host_port_stats *pstats = bnx2x_sp(bp, port_stats);
	struct bnx2x_eth_stats *estats = &bp->eth_stats;

	UPDATE_EXTEND_STAT(rx_stat_ifhcinbadoctets);
	UPDATE_EXTEND_STAT(tx_stat_ifhcoutbadoctets);
	UPDATE_EXTEND_STAT(rx_stat_dot3statsfcserrors);
	UPDATE_EXTEND_STAT(rx_stat_dot3statsalignmenterrors);
	UPDATE_EXTEND_STAT(rx_stat_dot3statscarriersenseerrors);
	UPDATE_EXTEND_STAT(rx_stat_falsecarriererrors);
	UPDATE_EXTEND_STAT(rx_stat_etherstatsundersizepkts);
	UPDATE_EXTEND_STAT(rx_stat_dot3statsframestoolong);
	UPDATE_EXTEND_STAT(rx_stat_etherstatsfragments);
	UPDATE_EXTEND_STAT(rx_stat_etherstatsjabbers);
	UPDATE_EXTEND_STAT(rx_stat_maccontrolframesreceived);
	UPDATE_EXTEND_STAT(rx_stat_xoffstateentered);
	UPDATE_EXTEND_STAT(rx_stat_xonpauseframesreceived);
	UPDATE_EXTEND_STAT(rx_stat_xoffpauseframesreceived);
	UPDATE_EXTEND_STAT(tx_stat_outxonsent);
	UPDATE_EXTEND_STAT(tx_stat_outxoffsent);
	UPDATE_EXTEND_STAT(tx_stat_flowcontroldone);
	UPDATE_EXTEND_STAT(tx_stat_etherstatscollisions);
	UPDATE_EXTEND_STAT(tx_stat_dot3statssinglecollisionframes);
	UPDATE_EXTEND_STAT(tx_stat_dot3statsmultiplecollisionframes);
	UPDATE_EXTEND_STAT(tx_stat_dot3statsdeferredtransmissions);
	UPDATE_EXTEND_STAT(tx_stat_dot3statsexcessivecollisions);
	UPDATE_EXTEND_STAT(tx_stat_dot3statslatecollisions);
	UPDATE_EXTEND_STAT(tx_stat_etherstatspkts64octets);
	UPDATE_EXTEND_STAT(tx_stat_etherstatspkts65octetsto127octets);
	UPDATE_EXTEND_STAT(tx_stat_etherstatspkts128octetsto255octets);
	UPDATE_EXTEND_STAT(tx_stat_etherstatspkts256octetsto511octets);
	UPDATE_EXTEND_STAT(tx_stat_etherstatspkts512octetsto1023octets);
	UPDATE_EXTEND_STAT(tx_stat_etherstatspkts1024octetsto1522octets);
	UPDATE_EXTEND_STAT(tx_stat_etherstatspktsover1522octets);
	UPDATE_EXTEND_STAT(tx_stat_dot3statsinternalmactransmiterrors);

	estats->pause_frames_received_hi =
			pstats->mac_stx[1].rx_stat_xonpauseframesreceived_hi;
	estats->pause_frames_received_lo =
			pstats->mac_stx[1].rx_stat_xonpauseframesreceived_lo;
	ADD_64(estats->pause_frames_received_hi,
	       pstats->mac_stx[1].rx_stat_xoffpauseframesreceived_hi,
	       estats->pause_frames_received_lo,
	       pstats->mac_stx[1].rx_stat_xoffpauseframesreceived_lo);

	estats->pause_frames_sent_hi =
			pstats->mac_stx[1].tx_stat_outxonsent_hi;
	estats->pause_frames_sent_lo =
			pstats->mac_stx[1].tx_stat_outxonsent_lo;
	ADD_64(estats->pause_frames_sent_hi,
	       pstats->mac_stx[1].tx_stat_outxoffsent_hi,
	       estats->pause_frames_sent_lo,
	       pstats->mac_stx[1].tx_stat_outxoffsent_lo);
}

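/* Dispatch to the MAC-specific update routine, then fold in the NIG
 * counters (BRB discards/truncates and, on non-E3 chips, the egress
 * packet counters) and snapshot them for the next delta.
 */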
static int bnx2x_hw_stats_update(struct bnx2x *bp)
{
	struct nig_stats *new = bnx2x_sp(bp, nig_stats);
	struct nig_stats *old = &(bp->port.old_nig_stats);
	struct host_port_stats *pstats = bnx2x_sp(bp, port_stats);
	struct bnx2x_eth_stats *estats = &bp->eth_stats;
	struct {
		u32 lo;
		u32 hi;
	} diff;

	switch (bp->link_vars.mac_type) {
	case MAC_TYPE_BMAC:
		bnx2x_bmac_stats_update(bp);
		break;

	case MAC_TYPE_EMAC:
		bnx2x_emac_stats_update(bp);
		break;

	case MAC_TYPE_UMAC:
	case MAC_TYPE_XMAC:
		bnx2x_mstat_stats_update(bp);
		break;

	case MAC_TYPE_NONE: /* unreached */
		DP(BNX2X_MSG_STATS,
		   "stats updated by DMAE but no MAC active\n");
		return -1;

	default: /* unreached */
		BNX2X_ERR("Unknown MAC type\n");
	}

	ADD_EXTEND_64(pstats->brb_drop_hi, pstats->brb_drop_lo,
		      new->brb_discard - old->brb_discard);
	ADD_EXTEND_64(estats->brb_truncate_hi, estats->brb_truncate_lo,
		      new->brb_truncate - old->brb_truncate);

	if (!CHIP_IS_E3(bp)) {
		UPDATE_STAT64_NIG(egress_mac_pkt0,
					etherstatspkts1024octetsto1522octets);
		UPDATE_STAT64_NIG(egress_mac_pkt1,
					etherstatspktsover1522octets);
	}

	memcpy(old, new, sizeof(struct nig_stats));

	memcpy(&(estats->rx_stat_ifhcinbadoctets_hi), &(pstats->mac_stx[1]),
	       sizeof(struct mac_stx));
	estats->brb_drop_hi = pstats->brb_drop_hi;
	estats->brb_drop_lo = pstats->brb_drop_lo;

	pstats->host_port_stats_counter++;

	if (!BP_NOMCP(bp)) {
		u32 nig_timer_max =
			SHMEM_RD(bp, port_mb[BP_PORT(bp)].stat_nig_timer);
		if (nig_timer_max != estats->nig_timer_max) {
			estats->nig_timer_max = nig_timer_max;
			BNX2X_ERR("NIG timer max (%u)\n",
				  estats->nig_timer_max);
		}
	}

	return 0;
}

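/* Consume the per-queue statistics the FW storms DMA'd to the host:
 * first verify that all four storm counters match the sequence number
 * of the last ramrod (otherwise return -EAGAIN), then aggregate the
 * queue stats into the function- and port-level structures.
 */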
static int bnx2x_storm_stats_update(struct bnx2x *bp)
{
	struct tstorm_per_port_stats *tport =
		&bp->fw_stats_data->port.tstorm_port_statistics;
	struct tstorm_per_pf_stats *tfunc =
		&bp->fw_stats_data->pf.tstorm_pf_statistics;
	struct host_func_stats *fstats = bnx2x_sp(bp, func_stats);
	struct bnx2x_eth_stats *estats = &bp->eth_stats;
	struct stats_counter *counters = &bp->fw_stats_data->storm_counters;
	int i;
	u16 cur_stats_counter;

	/* Make sure we use the value of the counter
	 * used for sending the last stats ramrod.
	 */
	spin_lock_bh(&bp->stats_lock);
	cur_stats_counter = bp->stats_counter - 1;
	spin_unlock_bh(&bp->stats_lock);

	/* are storm stats valid? */
	if (le16_to_cpu(counters->xstats_counter) != cur_stats_counter) {
		DP(BNX2X_MSG_STATS, "stats not updated by xstorm"
		   " xstorm counter (0x%x) != stats_counter (0x%x)\n",
		   le16_to_cpu(counters->xstats_counter), bp->stats_counter);
		return -EAGAIN;
	}

	if (le16_to_cpu(counters->ustats_counter) != cur_stats_counter) {
		DP(BNX2X_MSG_STATS, "stats not updated by ustorm"
		   " ustorm counter (0x%x) != stats_counter (0x%x)\n",
		   le16_to_cpu(counters->ustats_counter), bp->stats_counter);
		return -EAGAIN;
	}

	if (le16_to_cpu(counters->cstats_counter) != cur_stats_counter) {
		DP(BNX2X_MSG_STATS, "stats not updated by cstorm"
		   " cstorm counter (0x%x) != stats_counter (0x%x)\n",
		   le16_to_cpu(counters->cstats_counter), bp->stats_counter);
		return -EAGAIN;
	}

	if (le16_to_cpu(counters->tstats_counter) != cur_stats_counter) {
		DP(BNX2X_MSG_STATS, "stats not updated by tstorm"
		   " tstorm counter (0x%x) != stats_counter (0x%x)\n",
		   le16_to_cpu(counters->tstats_counter), bp->stats_counter);
		return -EAGAIN;
	}

	memcpy(&(fstats->total_bytes_received_hi),
	       &(bnx2x_sp(bp, func_stats_base)->total_bytes_received_hi),
	       sizeof(struct host_func_stats) - 2*sizeof(u32));
	estats->error_bytes_received_hi = 0;
	estats->error_bytes_received_lo = 0;
	estats->etherstatsoverrsizepkts_hi = 0;
	estats->etherstatsoverrsizepkts_lo = 0;
	estats->no_buff_discard_hi = 0;
	estats->no_buff_discard_lo = 0;
	estats->total_tpa_aggregations_hi = 0;
	estats->total_tpa_aggregations_lo = 0;
	estats->total_tpa_aggregated_frames_hi = 0;
	estats->total_tpa_aggregated_frames_lo = 0;
	estats->total_tpa_bytes_hi = 0;
	estats->total_tpa_bytes_lo = 0;

	for_each_eth_queue(bp, i) {
		struct bnx2x_fastpath *fp = &bp->fp[i];
		struct tstorm_per_queue_stats *tclient =
			&bp->fw_stats_data->queue_stats[i].
			tstorm_queue_statistics;
		struct tstorm_per_queue_stats *old_tclient = &fp->old_tclient;
		struct ustorm_per_queue_stats *uclient =
			&bp->fw_stats_data->queue_stats[i].
			ustorm_queue_statistics;
		struct ustorm_per_queue_stats *old_uclient = &fp->old_uclient;
		struct xstorm_per_queue_stats *xclient =
			&bp->fw_stats_data->queue_stats[i].
			xstorm_queue_statistics;
		struct xstorm_per_queue_stats *old_xclient = &fp->old_xclient;
		struct bnx2x_eth_q_stats *qstats = &fp->eth_q_stats;
		u32 diff;

		DP(BNX2X_MSG_STATS, "queue[%d]: ucast_sent 0x%x, "
				    "bcast_sent 0x%x mcast_sent 0x%x\n",
		   i, xclient->ucast_pkts_sent,
		   xclient->bcast_pkts_sent, xclient->mcast_pkts_sent);

		DP(BNX2X_MSG_STATS, "---------------\n");

		qstats->total_broadcast_bytes_received_hi =
			le32_to_cpu(tclient->rcv_bcast_bytes.hi);
		qstats->total_broadcast_bytes_received_lo =
			le32_to_cpu(tclient->rcv_bcast_bytes.lo);

		qstats->total_multicast_bytes_received_hi =
			le32_to_cpu(tclient->rcv_mcast_bytes.hi);
		qstats->total_multicast_bytes_received_lo =
			le32_to_cpu(tclient->rcv_mcast_bytes.lo);

		qstats->total_unicast_bytes_received_hi =
			le32_to_cpu(tclient->rcv_ucast_bytes.hi);
		qstats->total_unicast_bytes_received_lo =
			le32_to_cpu(tclient->rcv_ucast_bytes.lo);

		/*
		 * sum to total_bytes_received all
		 * unicast/multicast/broadcast
		 */
		qstats->total_bytes_received_hi =
			qstats->total_broadcast_bytes_received_hi;
		qstats->total_bytes_received_lo =
			qstats->total_broadcast_bytes_received_lo;

		ADD_64(qstats->total_bytes_received_hi,
		       qstats->total_multicast_bytes_received_hi,
		       qstats->total_bytes_received_lo,
		       qstats->total_multicast_bytes_received_lo);

		ADD_64(qstats->total_bytes_received_hi,
		       qstats->total_unicast_bytes_received_hi,
		       qstats->total_bytes_received_lo,
		       qstats->total_unicast_bytes_received_lo);

		qstats->valid_bytes_received_hi =
					qstats->total_bytes_received_hi;
		qstats->valid_bytes_received_lo =
					qstats->total_bytes_received_lo;

		UPDATE_EXTEND_TSTAT(rcv_ucast_pkts,
					total_unicast_packets_received);
		UPDATE_EXTEND_TSTAT(rcv_mcast_pkts,
					total_multicast_packets_received);
		UPDATE_EXTEND_TSTAT(rcv_bcast_pkts,
					total_broadcast_packets_received);
		UPDATE_EXTEND_TSTAT(pkts_too_big_discard,
					etherstatsoverrsizepkts);
		UPDATE_EXTEND_TSTAT(no_buff_discard, no_buff_discard);

		SUB_EXTEND_USTAT(ucast_no_buff_pkts,
					total_unicast_packets_received);
		SUB_EXTEND_USTAT(mcast_no_buff_pkts,
					total_multicast_packets_received);
		SUB_EXTEND_USTAT(bcast_no_buff_pkts,
					total_broadcast_packets_received);
		UPDATE_EXTEND_USTAT(ucast_no_buff_pkts, no_buff_discard);
		UPDATE_EXTEND_USTAT(mcast_no_buff_pkts, no_buff_discard);
		UPDATE_EXTEND_USTAT(bcast_no_buff_pkts, no_buff_discard);

		qstats->total_broadcast_bytes_transmitted_hi =
			le32_to_cpu(xclient->bcast_bytes_sent.hi);
		qstats->total_broadcast_bytes_transmitted_lo =
			le32_to_cpu(xclient->bcast_bytes_sent.lo);

		qstats->total_multicast_bytes_transmitted_hi =
			le32_to_cpu(xclient->mcast_bytes_sent.hi);
		qstats->total_multicast_bytes_transmitted_lo =
			le32_to_cpu(xclient->mcast_bytes_sent.lo);

		qstats->total_unicast_bytes_transmitted_hi =
			le32_to_cpu(xclient->ucast_bytes_sent.hi);
		qstats->total_unicast_bytes_transmitted_lo =
			le32_to_cpu(xclient->ucast_bytes_sent.lo);
		/*
		 * sum to total_bytes_transmitted all
		 * unicast/multicast/broadcast
		 */
		qstats->total_bytes_transmitted_hi =
				qstats->total_unicast_bytes_transmitted_hi;
		qstats->total_bytes_transmitted_lo =
				qstats->total_unicast_bytes_transmitted_lo;

		ADD_64(qstats->total_bytes_transmitted_hi,
		       qstats->total_broadcast_bytes_transmitted_hi,
		       qstats->total_bytes_transmitted_lo,
		       qstats->total_broadcast_bytes_transmitted_lo);

		ADD_64(qstats->total_bytes_transmitted_hi,
		       qstats->total_multicast_bytes_transmitted_hi,
		       qstats->total_bytes_transmitted_lo,
		       qstats->total_multicast_bytes_transmitted_lo);

		UPDATE_EXTEND_XSTAT(ucast_pkts_sent,
					total_unicast_packets_transmitted);
		UPDATE_EXTEND_XSTAT(mcast_pkts_sent,
					total_multicast_packets_transmitted);
		UPDATE_EXTEND_XSTAT(bcast_pkts_sent,
					total_broadcast_packets_transmitted);

		UPDATE_EXTEND_TSTAT(checksum_discard,
				total_packets_received_checksum_discarded);
		UPDATE_EXTEND_TSTAT(ttl0_discard,
				total_packets_received_ttl0_discarded);

		UPDATE_EXTEND_XSTAT(error_drop_pkts,
				total_transmitted_dropped_packets_error);

		/* TPA aggregations completed */
		UPDATE_EXTEND_USTAT(coalesced_events, total_tpa_aggregations);
		/* Number of network frames aggregated by TPA */
		UPDATE_EXTEND_USTAT(coalesced_pkts,
				    total_tpa_aggregated_frames);
		/* Total number of bytes in completed TPA aggregations */
		qstats->total_tpa_bytes_lo =
			le32_to_cpu(uclient->coalesced_bytes.lo);
		qstats->total_tpa_bytes_hi =
			le32_to_cpu(uclient->coalesced_bytes.hi);

		/* TPA stats per-function */
		ADD_64(estats->total_tpa_aggregations_hi,
		       qstats->total_tpa_aggregations_hi,
		       estats->total_tpa_aggregations_lo,
		       qstats->total_tpa_aggregations_lo);
		ADD_64(estats->total_tpa_aggregated_frames_hi,
		       qstats->total_tpa_aggregated_frames_hi,
		       estats->total_tpa_aggregated_frames_lo,
		       qstats->total_tpa_aggregated_frames_lo);
		ADD_64(estats->total_tpa_bytes_hi,
		       qstats->total_tpa_bytes_hi,
		       estats->total_tpa_bytes_lo,
		       qstats->total_tpa_bytes_lo);

		ADD_64(fstats->total_bytes_received_hi,
		       qstats->total_bytes_received_hi,
		       fstats->total_bytes_received_lo,
		       qstats->total_bytes_received_lo);
		ADD_64(fstats->total_bytes_transmitted_hi,
		       qstats->total_bytes_transmitted_hi,
		       fstats->total_bytes_transmitted_lo,
		       qstats->total_bytes_transmitted_lo);
		ADD_64(fstats->total_unicast_packets_received_hi,
		       qstats->total_unicast_packets_received_hi,
		       fstats->total_unicast_packets_received_lo,
		       qstats->total_unicast_packets_received_lo);
		ADD_64(fstats->total_multicast_packets_received_hi,
		       qstats->total_multicast_packets_received_hi,
		       fstats->total_multicast_packets_received_lo,
		       qstats->total_multicast_packets_received_lo);
		ADD_64(fstats->total_broadcast_packets_received_hi,
		       qstats->total_broadcast_packets_received_hi,
		       fstats->total_broadcast_packets_received_lo,
		       qstats->total_broadcast_packets_received_lo);
		ADD_64(fstats->total_unicast_packets_transmitted_hi,
		       qstats->total_unicast_packets_transmitted_hi,
		       fstats->total_unicast_packets_transmitted_lo,
		       qstats->total_unicast_packets_transmitted_lo);
		ADD_64(fstats->total_multicast_packets_transmitted_hi,
		       qstats->total_multicast_packets_transmitted_hi,
		       fstats->total_multicast_packets_transmitted_lo,
		       qstats->total_multicast_packets_transmitted_lo);
		ADD_64(fstats->total_broadcast_packets_transmitted_hi,
		       qstats->total_broadcast_packets_transmitted_hi,
		       fstats->total_broadcast_packets_transmitted_lo,
		       qstats->total_broadcast_packets_transmitted_lo);
		ADD_64(fstats->valid_bytes_received_hi,
		       qstats->valid_bytes_received_hi,
		       fstats->valid_bytes_received_lo,
		       qstats->valid_bytes_received_lo);

		ADD_64(estats->etherstatsoverrsizepkts_hi,
		       qstats->etherstatsoverrsizepkts_hi,
		       estats->etherstatsoverrsizepkts_lo,
		       qstats->etherstatsoverrsizepkts_lo);
		ADD_64(estats->no_buff_discard_hi, qstats->no_buff_discard_hi,
		       estats->no_buff_discard_lo, qstats->no_buff_discard_lo);
	}

	ADD_64(fstats->total_bytes_received_hi,
	       estats->rx_stat_ifhcinbadoctets_hi,
	       fstats->total_bytes_received_lo,
	       estats->rx_stat_ifhcinbadoctets_lo);

	ADD_64(fstats->total_bytes_received_hi,
	       tfunc->rcv_error_bytes.hi,
	       fstats->total_bytes_received_lo,
	       tfunc->rcv_error_bytes.lo);

	memcpy(estats, &(fstats->total_bytes_received_hi),
	       sizeof(struct host_func_stats) - 2*sizeof(u32));

	ADD_64(estats->error_bytes_received_hi,
	       tfunc->rcv_error_bytes.hi,
	       estats->error_bytes_received_lo,
	       tfunc->rcv_error_bytes.lo);

	ADD_64(estats->etherstatsoverrsizepkts_hi,
	       estats->rx_stat_dot3statsframestoolong_hi,
	       estats->etherstatsoverrsizepkts_lo,
	       estats->rx_stat_dot3statsframestoolong_lo);
	ADD_64(estats->error_bytes_received_hi,
	       estats->rx_stat_ifhcinbadoctets_hi,
	       estats->error_bytes_received_lo,
	       estats->rx_stat_ifhcinbadoctets_lo);

	if (bp->port.pmf) {
		estats->mac_filter_discard =
				le32_to_cpu(tport->mac_filter_discard);
		estats->mf_tag_discard =
				le32_to_cpu(tport->mf_tag_discard);
		estats->brb_truncate_discard =
				le32_to_cpu(tport->brb_truncate_discard);
		estats->mac_discard = le32_to_cpu(tport->mac_discard);
	}

	fstats->host_func_stats_start = ++fstats->host_func_stats_end;

	bp->stats_pending = 0;

	return 0;
}

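/* Translate the driver's accumulated 64-bit counters into the generic
 * struct net_device_stats exposed to the network core.
 */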
static void bnx2x_net_stats_update(struct bnx2x *bp)
{
	struct bnx2x_eth_stats *estats = &bp->eth_stats;
	struct net_device_stats *nstats = &bp->dev->stats;
	unsigned long tmp;
	int i;

	nstats->rx_packets =
		bnx2x_hilo(&estats->total_unicast_packets_received_hi) +
		bnx2x_hilo(&estats->total_multicast_packets_received_hi) +
		bnx2x_hilo(&estats->total_broadcast_packets_received_hi);

	nstats->tx_packets =
		bnx2x_hilo(&estats->total_unicast_packets_transmitted_hi) +
		bnx2x_hilo(&estats->total_multicast_packets_transmitted_hi) +
		bnx2x_hilo(&estats->total_broadcast_packets_transmitted_hi);

	nstats->rx_bytes = bnx2x_hilo(&estats->total_bytes_received_hi);

	nstats->tx_bytes = bnx2x_hilo(&estats->total_bytes_transmitted_hi);

	tmp = estats->mac_discard;
	for_each_rx_queue(bp, i)
		tmp += le32_to_cpu(bp->fp[i].old_tclient.checksum_discard);
	nstats->rx_dropped = tmp;

	nstats->tx_dropped = 0;

	nstats->multicast =
		bnx2x_hilo(&estats->total_multicast_packets_received_hi);

	nstats->collisions =
		bnx2x_hilo(&estats->tx_stat_etherstatscollisions_hi);

	nstats->rx_length_errors =
		bnx2x_hilo(&estats->rx_stat_etherstatsundersizepkts_hi) +
		bnx2x_hilo(&estats->etherstatsoverrsizepkts_hi);
	nstats->rx_over_errors = bnx2x_hilo(&estats->brb_drop_hi) +
				 bnx2x_hilo(&estats->brb_truncate_hi);
	nstats->rx_crc_errors =
		bnx2x_hilo(&estats->rx_stat_dot3statsfcserrors_hi);
	nstats->rx_frame_errors =
		bnx2x_hilo(&estats->rx_stat_dot3statsalignmenterrors_hi);
	nstats->rx_fifo_errors = bnx2x_hilo(&estats->no_buff_discard_hi);
	nstats->rx_missed_errors = 0;

	nstats->rx_errors = nstats->rx_length_errors +
			    nstats->rx_over_errors +
			    nstats->rx_crc_errors +
			    nstats->rx_frame_errors +
			    nstats->rx_fifo_errors +
			    nstats->rx_missed_errors;

	nstats->tx_aborted_errors =
		bnx2x_hilo(&estats->tx_stat_dot3statslatecollisions_hi) +
		bnx2x_hilo(&estats->tx_stat_dot3statsexcessivecollisions_hi);
	nstats->tx_carrier_errors =
		bnx2x_hilo(&estats->rx_stat_dot3statscarriersenseerrors_hi);
	nstats->tx_fifo_errors = 0;
	nstats->tx_heartbeat_errors = 0;
	nstats->tx_window_errors = 0;

	nstats->tx_errors = nstats->tx_aborted_errors +
			    nstats->tx_carrier_errors +
	    bnx2x_hilo(&estats->tx_stat_dot3statsinternalmactransmiterrors_hi);
}

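/* Re-derive the driver-internal counters (xoff events, discards,
 * alloc failures, csum errors) by summing the per-queue values.
 */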
static void bnx2x_drv_stats_update(struct bnx2x *bp)
{
	struct bnx2x_eth_stats *estats = &bp->eth_stats;
	int i;

	estats->driver_xoff = 0;
	estats->rx_err_discard_pkt = 0;
	estats->rx_skb_alloc_failed = 0;
	estats->hw_csum_err = 0;
	for_each_queue(bp, i) {
		struct bnx2x_eth_q_stats *qstats = &bp->fp[i].eth_q_stats;

		estats->driver_xoff += qstats->driver_xoff;
		estats->rx_err_discard_pkt += qstats->rx_err_discard_pkt;
		estats->rx_skb_alloc_failed += qstats->rx_skb_alloc_failed;
		estats->hw_csum_err += qstats->hw_csum_err;
	}
}

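/* Check whether the management FW (via the shmem2 edebug interface)
 * has asked the driver to stop updating statistics.
 */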
static bool bnx2x_edebug_stats_stopped(struct bnx2x *bp)
{
	u32 val;

	if (SHMEM2_HAS(bp, edebug_driver_if[1])) {
		val = SHMEM2_RD(bp, edebug_driver_if[1]);

		if (val == EDEBUG_DRIVER_IF_OP_CODE_DISABLE_STAT)
			return true;
	}

	return false;
}

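/* Periodic statistics update: runs only once the previous DMAE pass
 * has completed, panics if the FW storms fail to update the stats for
 * three consecutive rounds, and finally re-posts both the HW DMAE
 * transfers and the FW statistics ramrod.
 */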
static void bnx2x_stats_update(struct bnx2x *bp)
{
	u32 *stats_comp = bnx2x_sp(bp, stats_comp);

	if (bnx2x_edebug_stats_stopped(bp))
		return;

	if (*stats_comp != DMAE_COMP_VAL)
		return;

	if (bp->port.pmf)
		bnx2x_hw_stats_update(bp);

	if (bnx2x_storm_stats_update(bp) && (bp->stats_pending++ == 3)) {
		BNX2X_ERR("storm stats were not updated for 3 times\n");
		bnx2x_panic();
		return;
	}

	bnx2x_net_stats_update(bp);
	bnx2x_drv_stats_update(bp);

	if (netif_msg_timer(bp)) {
		struct bnx2x_eth_stats *estats = &bp->eth_stats;
		int i, cos;

		netdev_dbg(bp->dev, "brb drops %u brb truncate %u\n",
			   estats->brb_drop_lo, estats->brb_truncate_lo);

		for_each_eth_queue(bp, i) {
			struct bnx2x_fastpath *fp = &bp->fp[i];
			struct bnx2x_eth_q_stats *qstats = &fp->eth_q_stats;

			pr_debug("%s: rx usage(%4u) *rx_cons_sb(%u) rx pkt(%lu) rx calls(%lu %lu)\n",
				 fp->name, (le16_to_cpu(*fp->rx_cons_sb) -
					    fp->rx_comp_cons),
				 le16_to_cpu(*fp->rx_cons_sb),
				 bnx2x_hilo(&qstats->
					    total_unicast_packets_received_hi),
				 fp->rx_calls, fp->rx_pkt);
		}

		for_each_eth_queue(bp, i) {
			struct bnx2x_fastpath *fp = &bp->fp[i];
			struct bnx2x_fp_txdata *txdata;
			struct bnx2x_eth_q_stats *qstats = &fp->eth_q_stats;
			struct netdev_queue *txq;

			pr_debug("%s: tx pkt(%lu) (Xoff events %u)",
				 fp->name,
				 bnx2x_hilo(
				     &qstats->total_unicast_packets_transmitted_hi),
				 qstats->driver_xoff);

			for_each_cos_in_tx_queue(fp, cos) {
				txdata = &fp->txdata[cos];
				txq = netdev_get_tx_queue(bp->dev,
						FP_COS_TO_TXQ(fp, cos));

				pr_debug("%d: tx avail(%4u) *tx_cons_sb(%u) tx calls (%lu) %s\n",
					 cos,
					 bnx2x_tx_avail(bp, txdata),
					 le16_to_cpu(*txdata->tx_cons_sb),
					 txdata->tx_pkt,
					 (netif_tx_queue_stopped(txq) ?
					  "Xoff" : "Xon")
					 );
			}
		}
	}

	bnx2x_hw_stats_post(bp);
	bnx2x_storm_stats_post(bp);
}

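/* Queue the final DMAE writes that flush the port and function stats
 * out to the MCP before statistics collection is disabled.
 */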
static void bnx2x_port_stats_stop(struct bnx2x *bp)
{
	struct dmae_command *dmae;
	u32 opcode;
	int loader_idx = PMF_DMAE_C(bp);
	u32 *stats_comp = bnx2x_sp(bp, stats_comp);

	bp->executer_idx = 0;

	opcode = bnx2x_dmae_opcode(bp, DMAE_SRC_PCI, DMAE_DST_GRC, false, 0);

	if (bp->port.port_stx) {

		dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
		if (bp->func_stx)
			dmae->opcode = bnx2x_dmae_opcode_add_comp(
						opcode, DMAE_COMP_GRC);
		else
			dmae->opcode = bnx2x_dmae_opcode_add_comp(
						opcode, DMAE_COMP_PCI);

		dmae->src_addr_lo = U64_LO(bnx2x_sp_mapping(bp, port_stats));
		dmae->src_addr_hi = U64_HI(bnx2x_sp_mapping(bp, port_stats));
		dmae->dst_addr_lo = bp->port.port_stx >> 2;
		dmae->dst_addr_hi = 0;
		dmae->len = sizeof(struct host_port_stats) >> 2;
		if (bp->func_stx) {
			dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
			dmae->comp_addr_hi = 0;
			dmae->comp_val = 1;
		} else {
			dmae->comp_addr_lo =
				U64_LO(bnx2x_sp_mapping(bp, stats_comp));
			dmae->comp_addr_hi =
				U64_HI(bnx2x_sp_mapping(bp, stats_comp));
			dmae->comp_val = DMAE_COMP_VAL;

			*stats_comp = 0;
		}
	}

	if (bp->func_stx) {

		dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
		dmae->opcode =
			bnx2x_dmae_opcode_add_comp(opcode, DMAE_COMP_PCI);
		dmae->src_addr_lo = U64_LO(bnx2x_sp_mapping(bp, func_stats));
		dmae->src_addr_hi = U64_HI(bnx2x_sp_mapping(bp, func_stats));
		dmae->dst_addr_lo = bp->func_stx >> 2;
		dmae->dst_addr_hi = 0;
		dmae->len = sizeof(struct host_func_stats) >> 2;
		dmae->comp_addr_lo = U64_LO(bnx2x_sp_mapping(bp, stats_comp));
		dmae->comp_addr_hi = U64_HI(bnx2x_sp_mapping(bp, stats_comp));
		dmae->comp_val = DMAE_COMP_VAL;

		*stats_comp = 0;
	}
}

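/* Stop statistics collection: harvest whatever has completed, publish
 * a last snapshot to the host structures and, if this function is the
 * PMF, flush the port stats to the MCP.
 */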
static void bnx2x_stats_stop(struct bnx2x *bp)
{
	int update = 0;

	bnx2x_stats_comp(bp);

	if (bp->port.pmf)
		update = (bnx2x_hw_stats_update(bp) == 0);

	update |= (bnx2x_storm_stats_update(bp) == 0);

	if (update) {
		bnx2x_net_stats_update(bp);

		if (bp->port.pmf)
			bnx2x_port_stats_stop(bp);

		bnx2x_hw_stats_post(bp);
		bnx2x_stats_comp(bp);
	}
}

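/* no-op action for state/event pairs that require no work */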
static void bnx2x_stats_do_nothing(struct bnx2x *bp)
{
}

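/* The statistics state machine: the first index is the current state
 * (DISABLED/ENABLED), the second is the triggering event (PMF change,
 * link up, periodic update, stop). Each entry names the action to run
 * and the next state to move to; bnx2x_stats_handle() drives it.
 */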
static const struct {
	void (*action)(struct bnx2x *bp);
	enum bnx2x_stats_state next_state;
} bnx2x_stats_stm[STATS_STATE_MAX][STATS_EVENT_MAX] = {
/* state	event	*/
{
/* DISABLED	PMF	*/ {bnx2x_stats_pmf_update, STATS_STATE_DISABLED},
/*		LINK_UP	*/ {bnx2x_stats_start,      STATS_STATE_ENABLED},
/*		UPDATE	*/ {bnx2x_stats_do_nothing, STATS_STATE_DISABLED},
/*		STOP	*/ {bnx2x_stats_do_nothing, STATS_STATE_DISABLED}
},
{
/* ENABLED	PMF	*/ {bnx2x_stats_pmf_start,  STATS_STATE_ENABLED},
/*		LINK_UP	*/ {bnx2x_stats_restart,    STATS_STATE_ENABLED},
/*		UPDATE	*/ {bnx2x_stats_update,     STATS_STATE_ENABLED},
/*		STOP	*/ {bnx2x_stats_stop,       STATS_STATE_DISABLED}
}
};

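/* Single entry point for statistics events. The state transition is
 * performed atomically under stats_lock, while the action itself runs
 * outside the lock since it may issue DMAE transfers and busy-wait on
 * their completion.
 */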
void bnx2x_stats_handle(struct bnx2x *bp, enum bnx2x_stats_event event)
{
	enum bnx2x_stats_state state;

	if (unlikely(bp->panic))
		return;

	spin_lock_bh(&bp->stats_lock);
	state = bp->stats_state;
	bp->stats_state = bnx2x_stats_stm[state][event].next_state;
	spin_unlock_bh(&bp->stats_lock);

	bnx2x_stats_stm[state][event].action(bp);

	if ((event != STATS_EVENT_UPDATE) || netif_msg_timer(bp))
		DP(BNX2X_MSG_STATS, "state %d -> event %d -> state %d\n",
		   state, event, bp->stats_state);
}

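/* DMAE the driver's host_port_stats block out to the port_stx area in
 * shared memory once, establishing a baseline for the management FW.
 */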
static void bnx2x_port_stats_base_init(struct bnx2x *bp)
{
	struct dmae_command *dmae;
	u32 *stats_comp = bnx2x_sp(bp, stats_comp);

	/* sanity */
	if (!bp->port.pmf || !bp->port.port_stx) {
		BNX2X_ERR("BUG!\n");
		return;
	}

	bp->executer_idx = 0;

	dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
	dmae->opcode = bnx2x_dmae_opcode(bp, DMAE_SRC_PCI, DMAE_DST_GRC,
					 true, DMAE_COMP_PCI);
	dmae->src_addr_lo = U64_LO(bnx2x_sp_mapping(bp, port_stats));
	dmae->src_addr_hi = U64_HI(bnx2x_sp_mapping(bp, port_stats));
	dmae->dst_addr_lo = bp->port.port_stx >> 2;
	dmae->dst_addr_hi = 0;
	dmae->len = sizeof(struct host_port_stats) >> 2;
	dmae->comp_addr_lo = U64_LO(bnx2x_sp_mapping(bp, stats_comp));
	dmae->comp_addr_hi = U64_HI(bnx2x_sp_mapping(bp, stats_comp));
	dmae->comp_val = DMAE_COMP_VAL;

	*stats_comp = 0;
	bnx2x_hw_stats_post(bp);
	bnx2x_stats_comp(bp);
}

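/* As the PMF, initialize the function statistics area of every VN on
 * this device: temporarily point bp->func_stx at each VN's shared
 * memory area in turn, run the usual init/post/complete sequence, and
 * finally restore our own func_stx.
 */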
static void bnx2x_func_stats_base_init(struct bnx2x *bp)
{
	int vn, vn_max = IS_MF(bp) ? BP_MAX_VN_NUM(bp) : E1VN_MAX;
	u32 func_stx;

	/* sanity */
	if (!bp->port.pmf || !bp->func_stx) {
		BNX2X_ERR("BUG!\n");
		return;
	}

	/* save our func_stx */
	func_stx = bp->func_stx;

	for (vn = VN_0; vn < vn_max; vn++) {
		int mb_idx = BP_FW_MB_IDX_VN(bp, vn);

		bp->func_stx = SHMEM_RD(bp, func_mb[mb_idx].fw_mb_param);
		bnx2x_func_stats_init(bp);
		bnx2x_hw_stats_post(bp);
		bnx2x_stats_comp(bp);
	}

	/* restore our func_stx */
	bp->func_stx = func_stx;
}

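/* Read the current function statistics block back from shared memory
 * into the local func_stats_base buffer with a single GRC -> PCI DMAE
 * and wait for its completion.
 */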
static void bnx2x_func_stats_base_update(struct bnx2x *bp)
{
	struct dmae_command *dmae = &bp->stats_dmae;
	u32 *stats_comp = bnx2x_sp(bp, stats_comp);

	/* sanity */
	if (!bp->func_stx) {
		BNX2X_ERR("BUG!\n");
		return;
	}

	bp->executer_idx = 0;
	memset(dmae, 0, sizeof(struct dmae_command));

	dmae->opcode = bnx2x_dmae_opcode(bp, DMAE_SRC_GRC, DMAE_DST_PCI,
					 true, DMAE_COMP_PCI);
	dmae->src_addr_lo = bp->func_stx >> 2;
	dmae->src_addr_hi = 0;
	dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, func_stats_base));
	dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, func_stats_base));
	dmae->len = sizeof(struct host_func_stats) >> 2;
	dmae->comp_addr_lo = U64_LO(bnx2x_sp_mapping(bp, stats_comp));
	dmae->comp_addr_hi = U64_HI(bnx2x_sp_mapping(bp, stats_comp));
	dmae->comp_val = DMAE_COMP_VAL;

	*stats_comp = 0;
	bnx2x_hw_stats_post(bp);
	bnx2x_stats_comp(bp);
}

/**
 * bnx2x_prep_fw_stats_req - prepare the statistics ramrod data.
 *
 * @bp:		driver handle
 *
 * Builds the FW statistics request once, so that afterwards we only
 * have to increment the statistics counter and send the ramrod.
 */
static inline void bnx2x_prep_fw_stats_req(struct bnx2x *bp)
{
	int i;
	struct stats_query_header *stats_hdr = &bp->fw_stats_req->hdr;

	dma_addr_t cur_data_offset;
	struct stats_query_entry *cur_query_entry;

	stats_hdr->cmd_num = bp->fw_stats_num;
	stats_hdr->drv_stats_counter = 0;

	/* The storm_counters struct contains the counters of completed
	 * statistics requests per storm, which the FW increments each
	 * time it completes handling a statistics ramrod. We check these
	 * counters in the timer handler so that a stale (statistics)
	 * ramrod completion can be discarded.
	 */
	cur_data_offset = bp->fw_stats_data_mapping +
		offsetof(struct bnx2x_fw_stats_data, storm_counters);

	stats_hdr->stats_counters_addrs.hi =
		cpu_to_le32(U64_HI(cur_data_offset));
	stats_hdr->stats_counters_addrs.lo =
		cpu_to_le32(U64_LO(cur_data_offset));

	/* prepare for the first stats ramrod (which will be completed
	 * with the counters equal to zero) - init the counters to
	 * something different.
	 */
	memset(&bp->fw_stats_data->storm_counters, 0xff,
	       sizeof(struct stats_counter));

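	/* Each statistics "client" gets its own query entry in the
	 * request - one for the port, one for the PF and one per eth
	 * queue - each pointing at its own slice of the reply buffer.
	 */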
	/**** Port FW statistics data ****/
	cur_data_offset = bp->fw_stats_data_mapping +
		offsetof(struct bnx2x_fw_stats_data, port);

	cur_query_entry = &bp->fw_stats_req->query[BNX2X_PORT_QUERY_IDX];

	cur_query_entry->kind = STATS_TYPE_PORT;
	/* For a port query, index is a DONT CARE */
	cur_query_entry->index = BP_PORT(bp);
	/* For a port query, funcID is a DONT CARE */
	cur_query_entry->funcID = cpu_to_le16(BP_FUNC(bp));
	cur_query_entry->address.hi = cpu_to_le32(U64_HI(cur_data_offset));
	cur_query_entry->address.lo = cpu_to_le32(U64_LO(cur_data_offset));

	/**** PF FW statistics data ****/
	cur_data_offset = bp->fw_stats_data_mapping +
		offsetof(struct bnx2x_fw_stats_data, pf);

	cur_query_entry = &bp->fw_stats_req->query[BNX2X_PF_QUERY_IDX];

	cur_query_entry->kind = STATS_TYPE_PF;
	/* For a PF query, index is a DONT CARE */
	cur_query_entry->index = BP_PORT(bp);
	cur_query_entry->funcID = cpu_to_le16(BP_FUNC(bp));
	cur_query_entry->address.hi = cpu_to_le32(U64_HI(cur_data_offset));
	cur_query_entry->address.lo = cpu_to_le32(U64_LO(cur_data_offset));

	/**** Clients' queries ****/
	cur_data_offset = bp->fw_stats_data_mapping +
		offsetof(struct bnx2x_fw_stats_data, queue_stats);

	for_each_eth_queue(bp, i) {
		cur_query_entry =
			&bp->fw_stats_req->
					query[BNX2X_FIRST_QUEUE_QUERY_IDX + i];

		cur_query_entry->kind = STATS_TYPE_QUEUE;
		cur_query_entry->index = bnx2x_stats_id(&bp->fp[i]);
		cur_query_entry->funcID = cpu_to_le16(BP_FUNC(bp));
		cur_query_entry->address.hi =
			cpu_to_le32(U64_HI(cur_data_offset));
		cur_query_entry->address.lo =
			cpu_to_le32(U64_LO(cur_data_offset));

		cur_data_offset += sizeof(struct per_queue_stats);
	}
}

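/* Top-level statistics init: resolve the port/function stats addresses
 * in shared memory, snapshot the NIG baseline counters, clear all
 * driver-side statistics, build the FW statistics request, and seed
 * (PMF) or re-read (non-PMF) the shared memory stats areas.
 */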
void bnx2x_stats_init(struct bnx2x *bp)
{
	int port = BP_PORT(bp);
	int mb_idx = BP_FW_MB_IDX(bp);
	int i;

	bp->stats_pending = 0;
	bp->executer_idx = 0;
	bp->stats_counter = 0;

	/* port and func stats for management */
	if (!BP_NOMCP(bp)) {
		bp->port.port_stx = SHMEM_RD(bp, port_mb[port].port_stx);
		bp->func_stx = SHMEM_RD(bp, func_mb[mb_idx].fw_mb_param);
	} else {
		bp->port.port_stx = 0;
		bp->func_stx = 0;
	}
	DP(BNX2X_MSG_STATS, "port_stx 0x%x func_stx 0x%x\n",
	   bp->port.port_stx, bp->func_stx);

	/* port stats */
	memset(&(bp->port.old_nig_stats), 0, sizeof(struct nig_stats));
	bp->port.old_nig_stats.brb_discard =
			REG_RD(bp, NIG_REG_STAT0_BRB_DISCARD + port*0x38);
	bp->port.old_nig_stats.brb_truncate =
			REG_RD(bp, NIG_REG_STAT0_BRB_TRUNCATE + port*0x38);
	if (!CHIP_IS_E3(bp)) {
		REG_RD_DMAE(bp, NIG_REG_STAT0_EGRESS_MAC_PKT0 + port*0x50,
			    &(bp->port.old_nig_stats.egress_mac_pkt0_lo), 2);
		REG_RD_DMAE(bp, NIG_REG_STAT0_EGRESS_MAC_PKT1 + port*0x50,
			    &(bp->port.old_nig_stats.egress_mac_pkt1_lo), 2);
	}

	/* function stats */
	for_each_queue(bp, i) {
		struct bnx2x_fastpath *fp = &bp->fp[i];

		memset(&fp->old_tclient, 0, sizeof(fp->old_tclient));
		memset(&fp->old_uclient, 0, sizeof(fp->old_uclient));
		memset(&fp->old_xclient, 0, sizeof(fp->old_xclient));
		memset(&fp->eth_q_stats, 0, sizeof(fp->eth_q_stats));
	}

	/* Prepare statistics ramrod data */
	bnx2x_prep_fw_stats_req(bp);

	memset(&bp->dev->stats, 0, sizeof(bp->dev->stats));
	memset(&bp->eth_stats, 0, sizeof(bp->eth_stats));

	bp->stats_state = STATS_STATE_DISABLED;

	if (bp->port.pmf) {
		if (bp->port.port_stx)
			bnx2x_port_stats_base_init(bp);

		if (bp->func_stx)
			bnx2x_func_stats_base_init(bp);

	} else if (bp->func_stx)
		bnx2x_func_stats_base_update(bp);
}