huangdaode | 511e6bc | 2015-09-17 14:51:49 +0800 | [diff] [blame] | 1 | /* |
| 2 | * Copyright (c) 2014-2015 Hisilicon Limited. |
| 3 | * |
| 4 | * This program is free software; you can redistribute it and/or modify |
| 5 | * it under the terms of the GNU General Public License as published by |
| 6 | * the Free Software Foundation; either version 2 of the License, or |
| 7 | * (at your option) any later version. |
| 8 | */ |
| 9 | |
| 10 | #include <linux/cdev.h> |
| 11 | #include <linux/module.h> |
| 12 | #include <linux/kernel.h> |
| 13 | #include <linux/init.h> |
| 14 | #include <linux/netdevice.h> |
| 15 | #include <linux/etherdevice.h> |
| 16 | #include <asm/cacheflush.h> |
| 17 | #include <linux/platform_device.h> |
| 18 | #include <linux/of.h> |
| 19 | #include <linux/of_address.h> |
| 20 | #include <linux/of_platform.h> |
| 21 | #include <linux/of_irq.h> |
| 22 | #include <linux/spinlock.h> |
| 23 | |
| 24 | #include "hns_dsaf_main.h" |
| 25 | #include "hns_dsaf_ppe.h" |
| 26 | #include "hns_dsaf_rcb.h" |
| 27 | |
| 28 | #define RCB_COMMON_REG_OFFSET 0x80000 |
| 29 | #define TX_RING 0 |
| 30 | #define RX_RING 1 |
| 31 | |
| 32 | #define RCB_RESET_WAIT_TIMES 30 |
| 33 | #define RCB_RESET_TRY_TIMES 10 |
| 34 | |
| 35 | /** |
| 36 | *hns_rcb_wait_fbd_clean - clean fbd |
| 37 | *@qs: ring struct pointer array |
| 38 | *@qnum: num of array |
| 39 | *@flag: tx or rx flag |
| 40 | */ |
| 41 | void hns_rcb_wait_fbd_clean(struct hnae_queue **qs, int q_num, u32 flag) |
| 42 | { |
| 43 | int i, wait_cnt; |
| 44 | u32 fbd_num; |
| 45 | |
| 46 | for (wait_cnt = i = 0; i < q_num; wait_cnt++) { |
| 47 | usleep_range(200, 300); |
| 48 | fbd_num = 0; |
| 49 | if (flag & RCB_INT_FLAG_TX) |
| 50 | fbd_num += dsaf_read_dev(qs[i], |
| 51 | RCB_RING_TX_RING_FBDNUM_REG); |
| 52 | if (flag & RCB_INT_FLAG_RX) |
| 53 | fbd_num += dsaf_read_dev(qs[i], |
| 54 | RCB_RING_RX_RING_FBDNUM_REG); |
| 55 | if (!fbd_num) |
| 56 | i++; |
| 57 | if (wait_cnt >= 10000) |
| 58 | break; |
| 59 | } |
| 60 | |
| 61 | if (i < q_num) |
| 62 | dev_err(qs[i]->handle->owner_dev, |
| 63 | "queue(%d) wait fbd(%d) clean fail!!\n", i, fbd_num); |
| 64 | } |
| 65 | |
/**
 *hns_rcb_reset_ring_hw - ring reset
 *@q: ring struct pointer
 *
 * Retries the reset sequence up to RCB_RESET_TRY_TIMES: wait for the TX
 * ring to drain, disable prefetch, assert the reset request, then poll
 * RCB_RING_COULD_BE_RST (re-pulsing the request between polls) up to
 * RCB_RESET_WAIT_TIMES before deasserting it.
 */
void hns_rcb_reset_ring_hw(struct hnae_queue *q)
{
	u32 wait_cnt;
	u32 try_cnt = 0;
	u32 could_ret;

	u32 tx_fbd_num;

	while (try_cnt++ < RCB_RESET_TRY_TIMES) {
		usleep_range(100, 200);
		/* the ring must be idle (no pending TX frame descriptors)
		 * before it may be reset; otherwise retry from the top
		 */
		tx_fbd_num = dsaf_read_dev(q, RCB_RING_TX_RING_FBDNUM_REG);
		if (tx_fbd_num)
			continue;

		dsaf_write_dev(q, RCB_RING_PREFETCH_EN_REG, 0);

		/* assert the reset request */
		dsaf_write_dev(q, RCB_RING_T0_BE_RST, 1);

		msleep(20);
		could_ret = dsaf_read_dev(q, RCB_RING_COULD_BE_RST);

		/* hardware not ready yet: pulse the request and poll again */
		wait_cnt = 0;
		while (!could_ret && (wait_cnt < RCB_RESET_WAIT_TIMES)) {
			dsaf_write_dev(q, RCB_RING_T0_BE_RST, 0);

			dsaf_write_dev(q, RCB_RING_T0_BE_RST, 1);

			msleep(20);
			could_ret = dsaf_read_dev(q, RCB_RING_COULD_BE_RST);

			wait_cnt++;
		}

		/* deassert the reset request */
		dsaf_write_dev(q, RCB_RING_T0_BE_RST, 0);

		if (could_ret)
			break;
	}

	if (try_cnt >= RCB_RESET_TRY_TIMES)
		dev_err(q->dev->dev, "port%d reset ring fail\n",
			hns_ae_get_vf_cb(q->handle)->port_index);
}
| 113 | |
| 114 | /** |
| 115 | *hns_rcb_int_ctrl_hw - rcb irq enable control |
| 116 | *@q: hnae queue struct pointer |
| 117 | *@flag:ring flag tx or rx |
| 118 | *@mask:mask |
| 119 | */ |
| 120 | void hns_rcb_int_ctrl_hw(struct hnae_queue *q, u32 flag, u32 mask) |
| 121 | { |
| 122 | u32 int_mask_en = !!mask; |
| 123 | |
| 124 | if (flag & RCB_INT_FLAG_TX) { |
| 125 | dsaf_write_dev(q, RCB_RING_INTMSK_TXWL_REG, int_mask_en); |
| 126 | dsaf_write_dev(q, RCB_RING_INTMSK_TX_OVERTIME_REG, |
| 127 | int_mask_en); |
| 128 | } |
| 129 | |
| 130 | if (flag & RCB_INT_FLAG_RX) { |
| 131 | dsaf_write_dev(q, RCB_RING_INTMSK_RXWL_REG, int_mask_en); |
| 132 | dsaf_write_dev(q, RCB_RING_INTMSK_RX_OVERTIME_REG, |
| 133 | int_mask_en); |
| 134 | } |
| 135 | } |
| 136 | |
| 137 | void hns_rcb_int_clr_hw(struct hnae_queue *q, u32 flag) |
| 138 | { |
| 139 | u32 clr = 1; |
| 140 | |
| 141 | if (flag & RCB_INT_FLAG_TX) { |
| 142 | dsaf_write_dev(q, RCB_RING_INTSTS_TX_RING_REG, clr); |
| 143 | dsaf_write_dev(q, RCB_RING_INTSTS_TX_OVERTIME_REG, clr); |
| 144 | } |
| 145 | |
| 146 | if (flag & RCB_INT_FLAG_RX) { |
| 147 | dsaf_write_dev(q, RCB_RING_INTSTS_RX_RING_REG, clr); |
| 148 | dsaf_write_dev(q, RCB_RING_INTSTS_RX_OVERTIME_REG, clr); |
| 149 | } |
| 150 | } |
| 151 | |
/**
 *hns_rcb_ring_enable_hw - enable ring
 *@q: hnae queue struct pointer
 *@val: non-zero enables descriptor prefetch, zero disables it
 */
void hns_rcb_ring_enable_hw(struct hnae_queue *q, u32 val)
{
	dsaf_write_dev(q, RCB_RING_PREFETCH_EN_REG, !!val);
}
| 160 | |
/**
 *hns_rcb_start - start a queue by enabling its ring in hardware
 *@q: hnae queue struct pointer
 *@val: non-zero to enable, zero to disable
 */
void hns_rcb_start(struct hnae_queue *q, u32 val)
{
	hns_rcb_ring_enable_hw(q, val);
}
| 165 | |
/**
 *hns_rcb_common_init_commit_hw - make rcb common init completed
 *@rcb_common: rcb common device
 */
void hns_rcb_common_init_commit_hw(struct rcb_common_cb *rcb_common)
{
	wmb(); /* ensure all prior config writes reach hw before the commit */
	dsaf_write_dev(rcb_common, RCB_COM_CFG_SYS_FSH_REG, 1);
	wmb(); /* ensure the commit write is issued before continuing */
}
| 176 | |
/**
 *hns_rcb_ring_init - init rcb ring
 *@ring_pair: ring pair control block
 *@ring_type: ring type, RX_RING or TX_RING
 *
 * Programs one ring of the pair: descriptor base address (low/high
 * halves), buffer-size type, BD num and packet-line registers.
 */
static void hns_rcb_ring_init(struct ring_pair_cb *ring_pair, int ring_type)
{
	struct hnae_queue *q = &ring_pair->q;
	struct rcb_common_cb *rcb_common = ring_pair->rcb_common;
	u32 bd_size_type = rcb_common->dsaf_dev->buf_size_type;
	struct hnae_ring *ring =
		(ring_type == RX_RING) ? &q->rx_ring : &q->tx_ring;
	dma_addr_t dma = ring->desc_dma_addr;

	if (ring_type == RX_RING) {
		dsaf_write_dev(q, RCB_RING_RX_RING_BASEADDR_L_REG,
			       (u32)dma);
		/* (dma >> 31) >> 1 extracts the high bits without a 32-bit
		 * shift, which would be undefined when dma_addr_t is 32 bits
		 */
		dsaf_write_dev(q, RCB_RING_RX_RING_BASEADDR_H_REG,
			       (u32)((dma >> 31) >> 1));
		dsaf_write_dev(q, RCB_RING_RX_RING_BD_LEN_REG,
			       bd_size_type);
		/* NOTE(review): BD_NUM_REG is written with the port id, not
		 * a descriptor count - confirm against the RCB register spec
		 */
		dsaf_write_dev(q, RCB_RING_RX_RING_BD_NUM_REG,
			       ring_pair->port_id_in_dsa);
		dsaf_write_dev(q, RCB_RING_RX_RING_PKTLINE_REG,
			       ring_pair->port_id_in_dsa);
	} else {
		dsaf_write_dev(q, RCB_RING_TX_RING_BASEADDR_L_REG,
			       (u32)dma);
		dsaf_write_dev(q, RCB_RING_TX_RING_BASEADDR_H_REG,
			       (u32)((dma >> 31) >> 1));
		dsaf_write_dev(q, RCB_RING_TX_RING_BD_LEN_REG,
			       bd_size_type);
		dsaf_write_dev(q, RCB_RING_TX_RING_BD_NUM_REG,
			       ring_pair->port_id_in_dsa);
		dsaf_write_dev(q, RCB_RING_TX_RING_PKTLINE_REG,
			       ring_pair->port_id_in_dsa);
	}
}
| 215 | |
/**
 *hns_rcb_init_hw - init rcb hardware
 *@ring: rcb ring pair control block
 *
 * Programs both rings of the pair into hardware, RX first then TX.
 */
void hns_rcb_init_hw(struct ring_pair_cb *ring)
{
	hns_rcb_ring_init(ring, RX_RING);
	hns_rcb_ring_init(ring, TX_RING);
}
| 225 | |
| 226 | /** |
| 227 | *hns_rcb_set_port_desc_cnt - set rcb port description num |
| 228 | *@rcb_common: rcb_common device |
| 229 | *@port_idx:port index |
| 230 | *@desc_cnt:BD num |
| 231 | */ |
| 232 | static void hns_rcb_set_port_desc_cnt(struct rcb_common_cb *rcb_common, |
| 233 | u32 port_idx, u32 desc_cnt) |
| 234 | { |
| 235 | if (port_idx >= HNS_RCB_SERVICE_NW_ENGINE_NUM) |
| 236 | port_idx = 0; |
| 237 | |
| 238 | dsaf_write_dev(rcb_common, RCB_CFG_BD_NUM_REG + port_idx * 4, |
| 239 | desc_cnt); |
| 240 | } |
| 241 | |
| 242 | /** |
| 243 | *hns_rcb_set_port_coalesced_frames - set rcb port coalesced frames |
| 244 | *@rcb_common: rcb_common device |
| 245 | *@port_idx:port index |
| 246 | *@coalesced_frames:BD num for coalesced frames |
| 247 | */ |
| 248 | static int hns_rcb_set_port_coalesced_frames(struct rcb_common_cb *rcb_common, |
| 249 | u32 port_idx, |
| 250 | u32 coalesced_frames) |
| 251 | { |
| 252 | if (port_idx >= HNS_RCB_SERVICE_NW_ENGINE_NUM) |
| 253 | port_idx = 0; |
| 254 | if (coalesced_frames >= rcb_common->desc_num || |
| 255 | coalesced_frames > HNS_RCB_MAX_COALESCED_FRAMES) |
| 256 | return -EINVAL; |
| 257 | |
| 258 | dsaf_write_dev(rcb_common, RCB_CFG_PKTLINE_REG + port_idx * 4, |
| 259 | coalesced_frames); |
| 260 | return 0; |
| 261 | } |
| 262 | |
| 263 | /** |
| 264 | *hns_rcb_get_port_coalesced_frames - set rcb port coalesced frames |
| 265 | *@rcb_common: rcb_common device |
| 266 | *@port_idx:port index |
| 267 | * return coaleseced frames value |
| 268 | */ |
| 269 | static u32 hns_rcb_get_port_coalesced_frames(struct rcb_common_cb *rcb_common, |
| 270 | u32 port_idx) |
| 271 | { |
| 272 | if (port_idx >= HNS_RCB_SERVICE_NW_ENGINE_NUM) |
| 273 | port_idx = 0; |
| 274 | |
| 275 | return dsaf_read_dev(rcb_common, |
| 276 | RCB_CFG_PKTLINE_REG + port_idx * 4); |
| 277 | } |
| 278 | |
/**
 *hns_rcb_set_timeout - set rcb port coalesced time_out
 *@rcb_common: rcb_common device
 *@timeout: time for coalesced time_out
 *
 * A single register serves the whole common block, so the value applies
 * to every port behind it.
 */
static void hns_rcb_set_timeout(struct rcb_common_cb *rcb_common,
				u32 timeout)
{
	dsaf_write_dev(rcb_common, RCB_CFG_OVERTIME_REG, timeout);
}
| 289 | |
| 290 | static int hns_rcb_common_get_port_num(struct rcb_common_cb *rcb_common) |
| 291 | { |
| 292 | if (rcb_common->comm_index == HNS_DSAF_COMM_SERVICE_NW_IDX) |
| 293 | return HNS_RCB_SERVICE_NW_ENGINE_NUM; |
| 294 | else |
| 295 | return HNS_RCB_DEBUG_NW_ENGINE_NUM; |
| 296 | } |
| 297 | |
| 298 | /*clr rcb comm exception irq**/ |
| 299 | static void hns_rcb_comm_exc_irq_en( |
| 300 | struct rcb_common_cb *rcb_common, int en) |
| 301 | { |
| 302 | u32 clr_vlue = 0xfffffffful; |
| 303 | u32 msk_vlue = en ? 0 : 0xfffffffful; |
| 304 | |
| 305 | /* clr int*/ |
| 306 | dsaf_write_dev(rcb_common, RCB_COM_INTSTS_ECC_ERR_REG, clr_vlue); |
| 307 | |
| 308 | dsaf_write_dev(rcb_common, RCB_COM_SF_CFG_RING_STS, clr_vlue); |
| 309 | |
| 310 | dsaf_write_dev(rcb_common, RCB_COM_SF_CFG_BD_RINT_STS, clr_vlue); |
| 311 | |
| 312 | dsaf_write_dev(rcb_common, RCB_COM_RINT_TX_PKT_REG, clr_vlue); |
| 313 | dsaf_write_dev(rcb_common, RCB_COM_AXI_ERR_STS, clr_vlue); |
| 314 | |
| 315 | /*en msk*/ |
| 316 | dsaf_write_dev(rcb_common, RCB_COM_INTMASK_ECC_ERR_REG, msk_vlue); |
| 317 | |
| 318 | dsaf_write_dev(rcb_common, RCB_COM_SF_CFG_INTMASK_RING, msk_vlue); |
| 319 | |
| 320 | /*for tx bd neednot cacheline, so msk sf_txring_fbd_intmask (bit 1)**/ |
| 321 | dsaf_write_dev(rcb_common, RCB_COM_SF_CFG_INTMASK_BD, msk_vlue | 2); |
| 322 | |
| 323 | dsaf_write_dev(rcb_common, RCB_COM_INTMSK_TX_PKT_REG, msk_vlue); |
| 324 | dsaf_write_dev(rcb_common, RCB_COM_AXI_WR_ERR_INTMASK, msk_vlue); |
| 325 | } |
| 326 | |
| 327 | /** |
| 328 | *hns_rcb_common_init_hw - init rcb common hardware |
| 329 | *@rcb_common: rcb_common device |
| 330 | *retuen 0 - success , negative --fail |
| 331 | */ |
| 332 | int hns_rcb_common_init_hw(struct rcb_common_cb *rcb_common) |
| 333 | { |
| 334 | u32 reg_val; |
| 335 | int i; |
| 336 | int port_num = hns_rcb_common_get_port_num(rcb_common); |
| 337 | |
| 338 | hns_rcb_comm_exc_irq_en(rcb_common, 0); |
| 339 | |
| 340 | reg_val = dsaf_read_dev(rcb_common, RCB_COM_CFG_INIT_FLAG_REG); |
| 341 | if (0x1 != (reg_val & 0x1)) { |
| 342 | dev_err(rcb_common->dsaf_dev->dev, |
| 343 | "RCB_COM_CFG_INIT_FLAG_REG reg = 0x%x\n", reg_val); |
| 344 | return -EBUSY; |
| 345 | } |
| 346 | |
| 347 | for (i = 0; i < port_num; i++) { |
| 348 | hns_rcb_set_port_desc_cnt(rcb_common, i, rcb_common->desc_num); |
| 349 | (void)hns_rcb_set_port_coalesced_frames( |
| 350 | rcb_common, i, rcb_common->coalesced_frames); |
| 351 | } |
| 352 | hns_rcb_set_timeout(rcb_common, rcb_common->timeout); |
| 353 | |
| 354 | dsaf_write_dev(rcb_common, RCB_COM_CFG_ENDIAN_REG, |
| 355 | HNS_RCB_COMMON_ENDIAN); |
| 356 | |
| 357 | return 0; |
| 358 | } |
| 359 | |
| 360 | int hns_rcb_buf_size2type(u32 buf_size) |
| 361 | { |
| 362 | int bd_size_type; |
| 363 | |
| 364 | switch (buf_size) { |
| 365 | case 512: |
| 366 | bd_size_type = HNS_BD_SIZE_512_TYPE; |
| 367 | break; |
| 368 | case 1024: |
| 369 | bd_size_type = HNS_BD_SIZE_1024_TYPE; |
| 370 | break; |
| 371 | case 2048: |
| 372 | bd_size_type = HNS_BD_SIZE_2048_TYPE; |
| 373 | break; |
| 374 | case 4096: |
| 375 | bd_size_type = HNS_BD_SIZE_4096_TYPE; |
| 376 | break; |
| 377 | default: |
| 378 | bd_size_type = -EINVAL; |
| 379 | } |
| 380 | |
| 381 | return bd_size_type; |
| 382 | } |
| 383 | |
/* fill one hnae_ring (rx or tx) of the queue with its static config */
static void hns_rcb_ring_get_cfg(struct hnae_queue *q, int ring_type)
{
	struct hnae_ring *ring;
	struct rcb_common_cb *rcb_common;
	struct ring_pair_cb *ring_pair_cb;
	u32 buf_size;
	u16 desc_num;
	int irq_idx;

	ring_pair_cb = container_of(q, struct ring_pair_cb, q);
	if (ring_type == RX_RING) {
		ring = &q->rx_ring;
		ring->io_base = ring_pair_cb->q.io_base;
		irq_idx = HNS_RCB_IRQ_IDX_RX;
	} else {
		ring = &q->tx_ring;
		/* tx registers sit at a fixed offset from the rx base */
		ring->io_base = (u8 __iomem *)ring_pair_cb->q.io_base +
			HNS_RCB_TX_REG_OFFSET;
		irq_idx = HNS_RCB_IRQ_IDX_TX;
	}

	rcb_common = ring_pair_cb->rcb_common;
	buf_size = rcb_common->dsaf_dev->buf_size;
	desc_num = rcb_common->dsaf_dev->desc_num;

	/* descriptor memory is attached later; start empty */
	ring->desc = NULL;
	ring->desc_cb = NULL;

	ring->irq = ring_pair_cb->virq[irq_idx];
	ring->desc_dma_addr = 0;

	ring->buf_size = buf_size;
	ring->desc_num = desc_num;
	ring->max_desc_num_per_pkt = HNS_RCB_RING_MAX_BD_PER_PKT;
	ring->max_raw_data_sz_per_desc = HNS_RCB_MAX_PKT_SIZE;
	ring->max_pkt_size = HNS_RCB_MAX_PKT_SIZE;
	ring->next_to_use = 0;
	ring->next_to_clean = 0;
}
| 423 | |
/* fill both rings of a ring pair; the queue starts with no ae handle */
static void hns_rcb_ring_pair_get_cfg(struct ring_pair_cb *ring_pair_cb)
{
	ring_pair_cb->q.handle = NULL;

	hns_rcb_ring_get_cfg(&ring_pair_cb->q, RX_RING);
	hns_rcb_ring_get_cfg(&ring_pair_cb->q, TX_RING);
}
| 431 | |
| 432 | static int hns_rcb_get_port(struct rcb_common_cb *rcb_common, int ring_idx) |
| 433 | { |
| 434 | int comm_index = rcb_common->comm_index; |
| 435 | int port; |
| 436 | int q_num; |
| 437 | |
| 438 | if (comm_index == HNS_DSAF_COMM_SERVICE_NW_IDX) { |
| 439 | q_num = (int)rcb_common->max_q_per_vf * rcb_common->max_vfn; |
| 440 | port = ring_idx / q_num; |
| 441 | } else { |
| 442 | port = HNS_RCB_SERVICE_NW_ENGINE_NUM + comm_index - 1; |
| 443 | } |
| 444 | |
| 445 | return port; |
| 446 | } |
| 447 | |
| 448 | static int hns_rcb_get_base_irq_idx(struct rcb_common_cb *rcb_common) |
| 449 | { |
| 450 | int comm_index = rcb_common->comm_index; |
| 451 | |
| 452 | if (comm_index == HNS_DSAF_COMM_SERVICE_NW_IDX) |
| 453 | return HNS_SERVICE_RING_IRQ_IDX; |
| 454 | else |
| 455 | return HNS_DEBUG_RING_IRQ_IDX + (comm_index - 1) * 2; |
| 456 | } |
| 457 | |
/* per-ring register window: 0x10000 past the common base, one
 * HNS_RCB_REG_OFFSET stride per ring
 */
#define RCB_COMM_BASE_TO_RING_BASE(base, ringid)\
	((base) + 0x10000 + HNS_RCB_REG_OFFSET * (ringid))
/**
 *hns_rcb_get_cfg - get rcb config
 *@rcb_common: rcb common device
 *
 * Fills every ring_pair_cb of the block: io/phy base, owning port, and
 * the tx/rx interrupt lines mapped from the device tree (two
 * consecutive irqs per ring, tx first).
 */
void hns_rcb_get_cfg(struct rcb_common_cb *rcb_common)
{
	struct ring_pair_cb *ring_pair_cb;
	u32 i;
	u32 ring_num = rcb_common->ring_num;
	int base_irq_idx = hns_rcb_get_base_irq_idx(rcb_common);
	struct device_node *np = rcb_common->dsaf_dev->dev->of_node;

	for (i = 0; i < ring_num; i++) {
		ring_pair_cb = &rcb_common->ring_pair_cb[i];
		ring_pair_cb->rcb_common = rcb_common;
		ring_pair_cb->dev = rcb_common->dsaf_dev->dev;
		ring_pair_cb->index = i;
		ring_pair_cb->q.io_base =
			RCB_COMM_BASE_TO_RING_BASE(rcb_common->io_base, i);
		ring_pair_cb->port_id_in_dsa = hns_rcb_get_port(rcb_common, i);
		ring_pair_cb->virq[HNS_RCB_IRQ_IDX_TX]
			= irq_of_parse_and_map(np, base_irq_idx + i * 2);
		ring_pair_cb->virq[HNS_RCB_IRQ_IDX_RX]
			= irq_of_parse_and_map(np, base_irq_idx + i * 2 + 1);
		ring_pair_cb->q.phy_base =
			RCB_COMM_BASE_TO_RING_BASE(rcb_common->phy_base, i);
		hns_rcb_ring_pair_get_cfg(ring_pair_cb);
	}
}
| 489 | |
/**
 *hns_rcb_get_coalesced_frames - get rcb port coalesced frames
 *@dsaf_dev: dsa fabric device struct pointer
 *@port: port index
 *return coalesced_frames currently programmed for the port
 */
u32 hns_rcb_get_coalesced_frames(struct dsaf_device *dsaf_dev, int port)
{
	int comm_index = hns_dsaf_get_comm_idx_by_port(port);
	struct rcb_common_cb *rcb_comm = dsaf_dev->rcb_common[comm_index];

	return hns_rcb_get_port_coalesced_frames(rcb_comm, port);
}
| 503 | |
/**
 *hns_rcb_get_coalesce_usecs - get rcb port coalesced time_out
 *@dsaf_dev: dsa fabric device struct pointer
 *@comm_index: common block index
 *return the cached time_out (not read back from hardware)
 */
u32 hns_rcb_get_coalesce_usecs(struct dsaf_device *dsaf_dev, int comm_index)
{
	struct rcb_common_cb *rcb_comm = dsaf_dev->rcb_common[comm_index];

	return rcb_comm->timeout;
}
| 516 | |
/**
 *hns_rcb_set_coalesce_usecs - set rcb port coalesced time_out
 *@dsaf_dev: dsa fabric device struct pointer
 *@port: port index
 *@timeout: time for coalesced time_out
 *
 * Rejected (with an error log) for ports behind the service common
 * block.  Note the unchanged-value check runs first, so setting the
 * current value on a service port silently "succeeds".
 */
void hns_rcb_set_coalesce_usecs(struct dsaf_device *dsaf_dev,
				int port, u32 timeout)
{
	int comm_index = hns_dsaf_get_comm_idx_by_port(port);
	struct rcb_common_cb *rcb_comm = dsaf_dev->rcb_common[comm_index];

	/* nothing to do when the value is already in effect */
	if (rcb_comm->timeout == timeout)
		return;

	if (comm_index == HNS_DSAF_COMM_SERVICE_NW_IDX) {
		dev_err(dsaf_dev->dev,
			"error: not support coalesce_usecs setting!\n");
		return;
	}
	rcb_comm->timeout = timeout;
	hns_rcb_set_timeout(rcb_comm, rcb_comm->timeout);
}
| 541 | |
| 542 | /** |
| 543 | *hns_rcb_set_coalesced_frames - set rcb coalesced frames |
| 544 | *@rcb_common: rcb_common device |
| 545 | *@tx_frames:tx BD num for coalesced frames |
| 546 | *@rx_frames:rx BD num for coalesced frames |
| 547 | *Return 0 on success, negative on failure |
| 548 | */ |
| 549 | int hns_rcb_set_coalesced_frames(struct dsaf_device *dsaf_dev, |
| 550 | int port, u32 coalesced_frames) |
| 551 | { |
| 552 | int comm_index = hns_dsaf_get_comm_idx_by_port(port); |
| 553 | struct rcb_common_cb *rcb_comm = dsaf_dev->rcb_common[comm_index]; |
| 554 | u32 coalesced_reg_val; |
| 555 | int ret; |
| 556 | |
| 557 | coalesced_reg_val = hns_rcb_get_port_coalesced_frames(rcb_comm, port); |
| 558 | |
| 559 | if (coalesced_reg_val == coalesced_frames) |
| 560 | return 0; |
| 561 | |
| 562 | if (coalesced_frames >= HNS_RCB_MIN_COALESCED_FRAMES) { |
| 563 | ret = hns_rcb_set_port_coalesced_frames(rcb_comm, port, |
| 564 | coalesced_frames); |
| 565 | return ret; |
| 566 | } else { |
| 567 | return -EINVAL; |
| 568 | } |
| 569 | } |
| 570 | |
| 571 | /** |
| 572 | *hns_rcb_get_queue_mode - get max VM number and max ring number per VM |
| 573 | * accordding to dsaf mode |
| 574 | *@dsaf_mode: dsaf mode |
| 575 | *@max_vfn : max vfn number |
| 576 | *@max_q_per_vf:max ring number per vm |
| 577 | */ |
| 578 | static void hns_rcb_get_queue_mode(enum dsaf_mode dsaf_mode, int comm_index, |
| 579 | u16 *max_vfn, u16 *max_q_per_vf) |
| 580 | { |
| 581 | if (comm_index == HNS_DSAF_COMM_SERVICE_NW_IDX) { |
| 582 | switch (dsaf_mode) { |
| 583 | case DSAF_MODE_DISABLE_6PORT_0VM: |
| 584 | *max_vfn = 1; |
| 585 | *max_q_per_vf = 16; |
| 586 | break; |
| 587 | case DSAF_MODE_DISABLE_FIX: |
| 588 | *max_vfn = 1; |
| 589 | *max_q_per_vf = 1; |
| 590 | break; |
| 591 | case DSAF_MODE_DISABLE_2PORT_64VM: |
| 592 | *max_vfn = 64; |
| 593 | *max_q_per_vf = 1; |
| 594 | break; |
| 595 | case DSAF_MODE_DISABLE_6PORT_16VM: |
| 596 | *max_vfn = 16; |
| 597 | *max_q_per_vf = 1; |
| 598 | break; |
| 599 | default: |
| 600 | *max_vfn = 1; |
| 601 | *max_q_per_vf = 16; |
| 602 | break; |
| 603 | } |
| 604 | } else { |
| 605 | *max_vfn = 1; |
| 606 | *max_q_per_vf = 1; |
| 607 | } |
| 608 | } |
| 609 | |
| 610 | int hns_rcb_get_ring_num(struct dsaf_device *dsaf_dev, int comm_index) |
| 611 | { |
| 612 | if (comm_index == HNS_DSAF_COMM_SERVICE_NW_IDX) { |
| 613 | switch (dsaf_dev->dsaf_mode) { |
| 614 | case DSAF_MODE_ENABLE_FIX: |
| 615 | return 1; |
| 616 | |
| 617 | case DSAF_MODE_DISABLE_FIX: |
| 618 | return 6; |
| 619 | |
| 620 | case DSAF_MODE_ENABLE_0VM: |
| 621 | return 32; |
| 622 | |
| 623 | case DSAF_MODE_DISABLE_6PORT_0VM: |
| 624 | case DSAF_MODE_ENABLE_16VM: |
| 625 | case DSAF_MODE_DISABLE_6PORT_2VM: |
| 626 | case DSAF_MODE_DISABLE_6PORT_16VM: |
| 627 | case DSAF_MODE_DISABLE_6PORT_4VM: |
| 628 | case DSAF_MODE_ENABLE_8VM: |
| 629 | return 96; |
| 630 | |
| 631 | case DSAF_MODE_DISABLE_2PORT_16VM: |
| 632 | case DSAF_MODE_DISABLE_2PORT_8VM: |
| 633 | case DSAF_MODE_ENABLE_32VM: |
| 634 | case DSAF_MODE_DISABLE_2PORT_64VM: |
| 635 | case DSAF_MODE_ENABLE_128VM: |
| 636 | return 128; |
| 637 | |
| 638 | default: |
| 639 | dev_warn(dsaf_dev->dev, |
| 640 | "get ring num fail,use default!dsaf_mode=%d\n", |
| 641 | dsaf_dev->dsaf_mode); |
| 642 | return 128; |
| 643 | } |
| 644 | } else { |
| 645 | return 1; |
| 646 | } |
| 647 | } |
| 648 | |
| 649 | void __iomem *hns_rcb_common_get_vaddr(struct dsaf_device *dsaf_dev, |
| 650 | int comm_index) |
| 651 | { |
| 652 | void __iomem *base_addr; |
| 653 | |
| 654 | if (comm_index == HNS_DSAF_COMM_SERVICE_NW_IDX) |
| 655 | base_addr = dsaf_dev->ppe_base + RCB_COMMON_REG_OFFSET; |
| 656 | else |
| 657 | base_addr = dsaf_dev->sds_base |
| 658 | + (comm_index - 1) * HNS_DSAF_DEBUG_NW_REG_OFFSET |
| 659 | + RCB_COMMON_REG_OFFSET; |
| 660 | |
| 661 | return base_addr; |
| 662 | } |
| 663 | |
| 664 | static phys_addr_t hns_rcb_common_get_paddr(struct dsaf_device *dsaf_dev, |
| 665 | int comm_index) |
| 666 | { |
| 667 | struct device_node *np = dsaf_dev->dev->of_node; |
| 668 | phys_addr_t phy_addr; |
| 669 | const __be32 *tmp_addr; |
| 670 | u64 addr_offset = 0; |
| 671 | u64 size = 0; |
| 672 | int index = 0; |
| 673 | |
| 674 | if (comm_index == HNS_DSAF_COMM_SERVICE_NW_IDX) { |
| 675 | index = 2; |
| 676 | addr_offset = RCB_COMMON_REG_OFFSET; |
| 677 | } else { |
| 678 | index = 1; |
| 679 | addr_offset = (comm_index - 1) * HNS_DSAF_DEBUG_NW_REG_OFFSET + |
| 680 | RCB_COMMON_REG_OFFSET; |
| 681 | } |
| 682 | tmp_addr = of_get_address(np, index, &size, NULL); |
| 683 | phy_addr = of_translate_address(np, tmp_addr); |
| 684 | return phy_addr + addr_offset; |
| 685 | } |
| 686 | |
| 687 | int hns_rcb_common_get_cfg(struct dsaf_device *dsaf_dev, |
| 688 | int comm_index) |
| 689 | { |
| 690 | struct rcb_common_cb *rcb_common; |
| 691 | enum dsaf_mode dsaf_mode = dsaf_dev->dsaf_mode; |
| 692 | u16 max_vfn; |
| 693 | u16 max_q_per_vf; |
| 694 | int ring_num = hns_rcb_get_ring_num(dsaf_dev, comm_index); |
| 695 | |
| 696 | rcb_common = |
| 697 | devm_kzalloc(dsaf_dev->dev, sizeof(*rcb_common) + |
| 698 | ring_num * sizeof(struct ring_pair_cb), GFP_KERNEL); |
| 699 | if (!rcb_common) { |
| 700 | dev_err(dsaf_dev->dev, "rcb common devm_kzalloc fail!\n"); |
| 701 | return -ENOMEM; |
| 702 | } |
| 703 | rcb_common->comm_index = comm_index; |
| 704 | rcb_common->ring_num = ring_num; |
| 705 | rcb_common->dsaf_dev = dsaf_dev; |
| 706 | |
| 707 | rcb_common->desc_num = dsaf_dev->desc_num; |
| 708 | rcb_common->coalesced_frames = HNS_RCB_DEF_COALESCED_FRAMES; |
| 709 | rcb_common->timeout = HNS_RCB_MAX_TIME_OUT; |
| 710 | |
| 711 | hns_rcb_get_queue_mode(dsaf_mode, comm_index, &max_vfn, &max_q_per_vf); |
| 712 | rcb_common->max_vfn = max_vfn; |
| 713 | rcb_common->max_q_per_vf = max_q_per_vf; |
| 714 | |
| 715 | rcb_common->io_base = hns_rcb_common_get_vaddr(dsaf_dev, comm_index); |
| 716 | rcb_common->phy_base = hns_rcb_common_get_paddr(dsaf_dev, comm_index); |
| 717 | |
| 718 | dsaf_dev->rcb_common[comm_index] = rcb_common; |
| 719 | return 0; |
| 720 | } |
| 721 | |
/**
 *hns_rcb_common_free_cfg - drop the reference to an rcb common block
 *@dsaf_dev: dsa fabric device struct pointer
 *@comm_index: common block index
 *
 * The block is devm-allocated, so only the pointer is cleared here.
 */
void hns_rcb_common_free_cfg(struct dsaf_device *dsaf_dev,
			     u32 comm_index)
{
	dsaf_dev->rcb_common[comm_index] = NULL;
}
| 727 | |
/* accumulate the queue's hardware packet counters into hw_stats */
void hns_rcb_update_stats(struct hnae_queue *queue)
{
	struct ring_pair_cb *ring =
		container_of(queue, struct ring_pair_cb, q);
	struct dsaf_device *dsaf_dev = ring->rcb_common->dsaf_dev;
	struct ppe_common_cb *ppe_common
		= dsaf_dev->ppe_common[ring->rcb_common->comm_index];
	struct hns_ring_hw_stats *hw_stats = &ring->hw_stats;

	hw_stats->rx_pkts += dsaf_read_dev(queue,
			 RCB_RING_RX_RING_PKTNUM_RECORD_REG);
	/* write 0x1 after reading - presumably resets the hardware
	 * counter so the next read is a delta; confirm against the spec
	 */
	dsaf_write_dev(queue, RCB_RING_RX_RING_PKTNUM_RECORD_REG, 0x1);

	/* ppe keeps per-queue ok/drop counters, 4 bytes per queue */
	hw_stats->ppe_rx_ok_pkts += dsaf_read_dev(ppe_common,
			 PPE_COM_HIS_RX_PKT_QID_OK_CNT_REG + 4 * ring->index);
	hw_stats->ppe_rx_drop_pkts += dsaf_read_dev(ppe_common,
			 PPE_COM_HIS_RX_PKT_QID_DROP_CNT_REG + 4 * ring->index);

	hw_stats->tx_pkts += dsaf_read_dev(queue,
			 RCB_RING_TX_RING_PKTNUM_RECORD_REG);
	dsaf_write_dev(queue, RCB_RING_TX_RING_PKTNUM_RECORD_REG, 0x1);

	hw_stats->ppe_tx_ok_pkts += dsaf_read_dev(ppe_common,
			 PPE_COM_HIS_TX_PKT_QID_OK_CNT_REG + 4 * ring->index);
	hw_stats->ppe_tx_drop_pkts += dsaf_read_dev(ppe_common,
			 PPE_COM_HIS_TX_PKT_QID_ERR_CNT_REG + 4 * ring->index);
}
| 755 | |
| 756 | /** |
| 757 | *hns_rcb_get_stats - get rcb statistic |
| 758 | *@ring: rcb ring |
| 759 | *@data:statistic value |
| 760 | */ |
| 761 | void hns_rcb_get_stats(struct hnae_queue *queue, u64 *data) |
| 762 | { |
| 763 | u64 *regs_buff = data; |
| 764 | struct ring_pair_cb *ring = |
| 765 | container_of(queue, struct ring_pair_cb, q); |
| 766 | struct hns_ring_hw_stats *hw_stats = &ring->hw_stats; |
| 767 | |
| 768 | regs_buff[0] = hw_stats->tx_pkts; |
| 769 | regs_buff[1] = hw_stats->ppe_tx_ok_pkts; |
| 770 | regs_buff[2] = hw_stats->ppe_tx_drop_pkts; |
| 771 | regs_buff[3] = |
| 772 | dsaf_read_dev(queue, RCB_RING_TX_RING_FBDNUM_REG); |
| 773 | |
| 774 | regs_buff[4] = queue->tx_ring.stats.tx_pkts; |
| 775 | regs_buff[5] = queue->tx_ring.stats.tx_bytes; |
| 776 | regs_buff[6] = queue->tx_ring.stats.tx_err_cnt; |
| 777 | regs_buff[7] = queue->tx_ring.stats.io_err_cnt; |
| 778 | regs_buff[8] = queue->tx_ring.stats.sw_err_cnt; |
| 779 | regs_buff[9] = queue->tx_ring.stats.seg_pkt_cnt; |
| 780 | regs_buff[10] = queue->tx_ring.stats.restart_queue; |
| 781 | regs_buff[11] = queue->tx_ring.stats.tx_busy; |
| 782 | |
| 783 | regs_buff[12] = hw_stats->rx_pkts; |
| 784 | regs_buff[13] = hw_stats->ppe_rx_ok_pkts; |
| 785 | regs_buff[14] = hw_stats->ppe_rx_drop_pkts; |
| 786 | regs_buff[15] = |
| 787 | dsaf_read_dev(queue, RCB_RING_RX_RING_FBDNUM_REG); |
| 788 | |
| 789 | regs_buff[16] = queue->rx_ring.stats.rx_pkts; |
| 790 | regs_buff[17] = queue->rx_ring.stats.rx_bytes; |
| 791 | regs_buff[18] = queue->rx_ring.stats.rx_err_cnt; |
| 792 | regs_buff[19] = queue->rx_ring.stats.io_err_cnt; |
| 793 | regs_buff[20] = queue->rx_ring.stats.sw_err_cnt; |
| 794 | regs_buff[21] = queue->rx_ring.stats.seg_pkt_cnt; |
| 795 | regs_buff[22] = queue->rx_ring.stats.reuse_pg_cnt; |
| 796 | regs_buff[23] = queue->rx_ring.stats.err_pkt_len; |
| 797 | regs_buff[24] = queue->rx_ring.stats.non_vld_descs; |
| 798 | regs_buff[25] = queue->rx_ring.stats.err_bd_num; |
| 799 | regs_buff[26] = queue->rx_ring.stats.l2_err; |
| 800 | regs_buff[27] = queue->rx_ring.stats.l3l4_csum_err; |
| 801 | } |
| 802 | |
| 803 | /** |
| 804 | *hns_rcb_get_ring_sset_count - rcb string set count |
| 805 | *@stringset:ethtool cmd |
| 806 | *return rcb ring string set count |
| 807 | */ |
| 808 | int hns_rcb_get_ring_sset_count(int stringset) |
| 809 | { |
| 810 | if (stringset == ETH_SS_STATS) |
| 811 | return HNS_RING_STATIC_REG_NUM; |
| 812 | |
| 813 | return 0; |
| 814 | } |
| 815 | |
/**
 *hns_rcb_get_common_regs_count - rcb common regs count
 *return number of u32 registers dumped by hns_rcb_get_common_regs
 */
int hns_rcb_get_common_regs_count(void)
{
	return HNS_RCB_COMMON_DUMP_REG_NUM;
}
| 824 | |
/**
 *hns_rcb_get_ring_regs_count - rcb ring regs count
 *return number of u32 registers in a per-ring register dump
 */
int hns_rcb_get_ring_regs_count(void)
{
	return HNS_RCB_RING_DUMP_REG_NUM;
}
| 833 | |
| 834 | /** |
| 835 | *hns_rcb_get_strings - get rcb string set |
| 836 | *@stringset:string set index |
| 837 | *@data:strings name value |
| 838 | *@index:queue index |
| 839 | */ |
| 840 | void hns_rcb_get_strings(int stringset, u8 *data, int index) |
| 841 | { |
| 842 | char *buff = (char *)data; |
| 843 | |
| 844 | if (stringset != ETH_SS_STATS) |
| 845 | return; |
| 846 | |
| 847 | snprintf(buff, ETH_GSTRING_LEN, "tx_ring%d_rcb_pkt_num", index); |
| 848 | buff = buff + ETH_GSTRING_LEN; |
| 849 | snprintf(buff, ETH_GSTRING_LEN, "tx_ring%d_ppe_tx_pkt_num", index); |
| 850 | buff = buff + ETH_GSTRING_LEN; |
| 851 | snprintf(buff, ETH_GSTRING_LEN, "tx_ring%d_ppe_drop_pkt_num", index); |
| 852 | buff = buff + ETH_GSTRING_LEN; |
| 853 | snprintf(buff, ETH_GSTRING_LEN, "tx_ring%d_fbd_num", index); |
| 854 | buff = buff + ETH_GSTRING_LEN; |
| 855 | |
| 856 | snprintf(buff, ETH_GSTRING_LEN, "tx_ring%d_pkt_num", index); |
| 857 | buff = buff + ETH_GSTRING_LEN; |
| 858 | snprintf(buff, ETH_GSTRING_LEN, "tx_ring%d_bytes", index); |
| 859 | buff = buff + ETH_GSTRING_LEN; |
| 860 | snprintf(buff, ETH_GSTRING_LEN, "tx_ring%d_err_cnt", index); |
| 861 | buff = buff + ETH_GSTRING_LEN; |
| 862 | snprintf(buff, ETH_GSTRING_LEN, "tx_ring%d_io_err", index); |
| 863 | buff = buff + ETH_GSTRING_LEN; |
| 864 | snprintf(buff, ETH_GSTRING_LEN, "tx_ring%d_sw_err", index); |
| 865 | buff = buff + ETH_GSTRING_LEN; |
| 866 | snprintf(buff, ETH_GSTRING_LEN, "tx_ring%d_seg_pkt", index); |
| 867 | buff = buff + ETH_GSTRING_LEN; |
| 868 | snprintf(buff, ETH_GSTRING_LEN, "tx_ring%d_restart_queue", index); |
| 869 | buff = buff + ETH_GSTRING_LEN; |
| 870 | snprintf(buff, ETH_GSTRING_LEN, "tx_ring%d_tx_busy", index); |
| 871 | buff = buff + ETH_GSTRING_LEN; |
| 872 | |
| 873 | snprintf(buff, ETH_GSTRING_LEN, "rx_ring%d_rcb_pkt_num", index); |
| 874 | buff = buff + ETH_GSTRING_LEN; |
| 875 | snprintf(buff, ETH_GSTRING_LEN, "rx_ring%d_ppe_pkt_num", index); |
| 876 | buff = buff + ETH_GSTRING_LEN; |
| 877 | snprintf(buff, ETH_GSTRING_LEN, "rx_ring%d_ppe_drop_pkt_num", index); |
| 878 | buff = buff + ETH_GSTRING_LEN; |
| 879 | snprintf(buff, ETH_GSTRING_LEN, "rx_ring%d_fbd_num", index); |
| 880 | buff = buff + ETH_GSTRING_LEN; |
| 881 | |
| 882 | snprintf(buff, ETH_GSTRING_LEN, "rx_ring%d_pkt_num", index); |
| 883 | buff = buff + ETH_GSTRING_LEN; |
| 884 | snprintf(buff, ETH_GSTRING_LEN, "rx_ring%d_bytes", index); |
| 885 | buff = buff + ETH_GSTRING_LEN; |
| 886 | snprintf(buff, ETH_GSTRING_LEN, "rx_ring%d_err_cnt", index); |
| 887 | buff = buff + ETH_GSTRING_LEN; |
| 888 | snprintf(buff, ETH_GSTRING_LEN, "rx_ring%d_io_err", index); |
| 889 | buff = buff + ETH_GSTRING_LEN; |
| 890 | snprintf(buff, ETH_GSTRING_LEN, "rx_ring%d_sw_err", index); |
| 891 | buff = buff + ETH_GSTRING_LEN; |
| 892 | snprintf(buff, ETH_GSTRING_LEN, "rx_ring%d_seg_pkt", index); |
| 893 | buff = buff + ETH_GSTRING_LEN; |
| 894 | snprintf(buff, ETH_GSTRING_LEN, "rx_ring%d_reuse_pg", index); |
| 895 | buff = buff + ETH_GSTRING_LEN; |
| 896 | snprintf(buff, ETH_GSTRING_LEN, "rx_ring%d_len_err", index); |
| 897 | buff = buff + ETH_GSTRING_LEN; |
| 898 | snprintf(buff, ETH_GSTRING_LEN, "rx_ring%d_non_vld_desc_err", index); |
| 899 | buff = buff + ETH_GSTRING_LEN; |
| 900 | snprintf(buff, ETH_GSTRING_LEN, "rx_ring%d_bd_num_err", index); |
| 901 | buff = buff + ETH_GSTRING_LEN; |
| 902 | snprintf(buff, ETH_GSTRING_LEN, "rx_ring%d_l2_err", index); |
| 903 | buff = buff + ETH_GSTRING_LEN; |
| 904 | snprintf(buff, ETH_GSTRING_LEN, "rx_ring%d_l3l4csum_err", index); |
| 905 | } |
| 906 | |
| 907 | void hns_rcb_get_common_regs(struct rcb_common_cb *rcb_com, void *data) |
| 908 | { |
| 909 | u32 *regs = data; |
| 910 | u32 i = 0; |
| 911 | |
| 912 | /*rcb common registers */ |
| 913 | regs[0] = dsaf_read_dev(rcb_com, RCB_COM_CFG_ENDIAN_REG); |
| 914 | regs[1] = dsaf_read_dev(rcb_com, RCB_COM_CFG_SYS_FSH_REG); |
| 915 | regs[2] = dsaf_read_dev(rcb_com, RCB_COM_CFG_INIT_FLAG_REG); |
| 916 | |
| 917 | regs[3] = dsaf_read_dev(rcb_com, RCB_COM_CFG_PKT_REG); |
| 918 | regs[4] = dsaf_read_dev(rcb_com, RCB_COM_CFG_RINVLD_REG); |
| 919 | regs[5] = dsaf_read_dev(rcb_com, RCB_COM_CFG_FNA_REG); |
| 920 | regs[6] = dsaf_read_dev(rcb_com, RCB_COM_CFG_FA_REG); |
| 921 | regs[7] = dsaf_read_dev(rcb_com, RCB_COM_CFG_PKT_TC_BP_REG); |
| 922 | regs[8] = dsaf_read_dev(rcb_com, RCB_COM_CFG_PPE_TNL_CLKEN_REG); |
| 923 | |
| 924 | regs[9] = dsaf_read_dev(rcb_com, RCB_COM_INTMSK_TX_PKT_REG); |
| 925 | regs[10] = dsaf_read_dev(rcb_com, RCB_COM_RINT_TX_PKT_REG); |
| 926 | regs[11] = dsaf_read_dev(rcb_com, RCB_COM_INTMASK_ECC_ERR_REG); |
| 927 | regs[12] = dsaf_read_dev(rcb_com, RCB_COM_INTSTS_ECC_ERR_REG); |
| 928 | regs[13] = dsaf_read_dev(rcb_com, RCB_COM_EBD_SRAM_ERR_REG); |
| 929 | regs[14] = dsaf_read_dev(rcb_com, RCB_COM_RXRING_ERR_REG); |
| 930 | regs[15] = dsaf_read_dev(rcb_com, RCB_COM_TXRING_ERR_REG); |
| 931 | regs[16] = dsaf_read_dev(rcb_com, RCB_COM_TX_FBD_ERR_REG); |
| 932 | regs[17] = dsaf_read_dev(rcb_com, RCB_SRAM_ECC_CHK_EN_REG); |
| 933 | regs[18] = dsaf_read_dev(rcb_com, RCB_SRAM_ECC_CHK0_REG); |
| 934 | regs[19] = dsaf_read_dev(rcb_com, RCB_SRAM_ECC_CHK1_REG); |
| 935 | regs[20] = dsaf_read_dev(rcb_com, RCB_SRAM_ECC_CHK2_REG); |
| 936 | regs[21] = dsaf_read_dev(rcb_com, RCB_SRAM_ECC_CHK3_REG); |
| 937 | regs[22] = dsaf_read_dev(rcb_com, RCB_SRAM_ECC_CHK4_REG); |
| 938 | regs[23] = dsaf_read_dev(rcb_com, RCB_SRAM_ECC_CHK5_REG); |
| 939 | regs[24] = dsaf_read_dev(rcb_com, RCB_ECC_ERR_ADDR0_REG); |
| 940 | regs[25] = dsaf_read_dev(rcb_com, RCB_ECC_ERR_ADDR3_REG); |
| 941 | regs[26] = dsaf_read_dev(rcb_com, RCB_ECC_ERR_ADDR4_REG); |
| 942 | regs[27] = dsaf_read_dev(rcb_com, RCB_ECC_ERR_ADDR5_REG); |
| 943 | |
| 944 | regs[28] = dsaf_read_dev(rcb_com, RCB_COM_SF_CFG_INTMASK_RING); |
| 945 | regs[29] = dsaf_read_dev(rcb_com, RCB_COM_SF_CFG_RING_STS); |
| 946 | regs[30] = dsaf_read_dev(rcb_com, RCB_COM_SF_CFG_RING); |
| 947 | regs[31] = dsaf_read_dev(rcb_com, RCB_COM_SF_CFG_INTMASK_BD); |
| 948 | regs[32] = dsaf_read_dev(rcb_com, RCB_COM_SF_CFG_BD_RINT_STS); |
| 949 | regs[33] = dsaf_read_dev(rcb_com, RCB_COM_RCB_RD_BD_BUSY); |
| 950 | regs[34] = dsaf_read_dev(rcb_com, RCB_COM_RCB_FBD_CRT_EN); |
| 951 | regs[35] = dsaf_read_dev(rcb_com, RCB_COM_AXI_WR_ERR_INTMASK); |
| 952 | regs[36] = dsaf_read_dev(rcb_com, RCB_COM_AXI_ERR_STS); |
| 953 | regs[37] = dsaf_read_dev(rcb_com, RCB_COM_CHK_TX_FBD_NUM_REG); |
| 954 | |
| 955 | /* rcb common entry registers */ |
| 956 | for (i = 0; i < 16; i++) { /* total 16 model registers */ |
| 957 | regs[38 + i] |
| 958 | = dsaf_read_dev(rcb_com, RCB_CFG_BD_NUM_REG + 4 * i); |
| 959 | regs[54 + i] |
| 960 | = dsaf_read_dev(rcb_com, RCB_CFG_PKTLINE_REG + 4 * i); |
| 961 | } |
| 962 | |
| 963 | regs[70] = dsaf_read_dev(rcb_com, RCB_CFG_OVERTIME_REG); |
| 964 | regs[71] = dsaf_read_dev(rcb_com, RCB_CFG_PKTLINE_INT_NUM_REG); |
| 965 | regs[72] = dsaf_read_dev(rcb_com, RCB_CFG_OVERTIME_INT_NUM_REG); |
| 966 | |
| 967 | /* mark end of rcb common regs */ |
| 968 | for (i = 73; i < 80; i++) |
| 969 | regs[i] = 0xcccccccc; |
| 970 | } |
| 971 | |
| 972 | void hns_rcb_get_ring_regs(struct hnae_queue *queue, void *data) |
| 973 | { |
| 974 | u32 *regs = data; |
| 975 | struct ring_pair_cb *ring_pair |
| 976 | = container_of(queue, struct ring_pair_cb, q); |
| 977 | u32 i = 0; |
| 978 | |
| 979 | /*rcb ring registers */ |
| 980 | regs[0] = dsaf_read_dev(queue, RCB_RING_RX_RING_BASEADDR_L_REG); |
| 981 | regs[1] = dsaf_read_dev(queue, RCB_RING_RX_RING_BASEADDR_H_REG); |
| 982 | regs[2] = dsaf_read_dev(queue, RCB_RING_RX_RING_BD_NUM_REG); |
| 983 | regs[3] = dsaf_read_dev(queue, RCB_RING_RX_RING_BD_LEN_REG); |
| 984 | regs[4] = dsaf_read_dev(queue, RCB_RING_RX_RING_PKTLINE_REG); |
| 985 | regs[5] = dsaf_read_dev(queue, RCB_RING_RX_RING_TAIL_REG); |
| 986 | regs[6] = dsaf_read_dev(queue, RCB_RING_RX_RING_HEAD_REG); |
| 987 | regs[7] = dsaf_read_dev(queue, RCB_RING_RX_RING_FBDNUM_REG); |
| 988 | regs[8] = dsaf_read_dev(queue, RCB_RING_RX_RING_PKTNUM_RECORD_REG); |
| 989 | |
| 990 | regs[9] = dsaf_read_dev(queue, RCB_RING_TX_RING_BASEADDR_L_REG); |
| 991 | regs[10] = dsaf_read_dev(queue, RCB_RING_TX_RING_BASEADDR_H_REG); |
| 992 | regs[11] = dsaf_read_dev(queue, RCB_RING_TX_RING_BD_NUM_REG); |
| 993 | regs[12] = dsaf_read_dev(queue, RCB_RING_TX_RING_BD_LEN_REG); |
| 994 | regs[13] = dsaf_read_dev(queue, RCB_RING_TX_RING_PKTLINE_REG); |
| 995 | regs[15] = dsaf_read_dev(queue, RCB_RING_TX_RING_TAIL_REG); |
| 996 | regs[16] = dsaf_read_dev(queue, RCB_RING_TX_RING_HEAD_REG); |
| 997 | regs[17] = dsaf_read_dev(queue, RCB_RING_TX_RING_FBDNUM_REG); |
| 998 | regs[18] = dsaf_read_dev(queue, RCB_RING_TX_RING_OFFSET_REG); |
| 999 | regs[19] = dsaf_read_dev(queue, RCB_RING_TX_RING_PKTNUM_RECORD_REG); |
| 1000 | |
| 1001 | regs[20] = dsaf_read_dev(queue, RCB_RING_PREFETCH_EN_REG); |
| 1002 | regs[21] = dsaf_read_dev(queue, RCB_RING_CFG_VF_NUM_REG); |
| 1003 | regs[22] = dsaf_read_dev(queue, RCB_RING_ASID_REG); |
| 1004 | regs[23] = dsaf_read_dev(queue, RCB_RING_RX_VM_REG); |
| 1005 | regs[24] = dsaf_read_dev(queue, RCB_RING_T0_BE_RST); |
| 1006 | regs[25] = dsaf_read_dev(queue, RCB_RING_COULD_BE_RST); |
| 1007 | regs[26] = dsaf_read_dev(queue, RCB_RING_WRR_WEIGHT_REG); |
| 1008 | |
| 1009 | regs[27] = dsaf_read_dev(queue, RCB_RING_INTMSK_RXWL_REG); |
| 1010 | regs[28] = dsaf_read_dev(queue, RCB_RING_INTSTS_RX_RING_REG); |
| 1011 | regs[29] = dsaf_read_dev(queue, RCB_RING_INTMSK_TXWL_REG); |
| 1012 | regs[30] = dsaf_read_dev(queue, RCB_RING_INTSTS_TX_RING_REG); |
| 1013 | regs[31] = dsaf_read_dev(queue, RCB_RING_INTMSK_RX_OVERTIME_REG); |
| 1014 | regs[32] = dsaf_read_dev(queue, RCB_RING_INTSTS_RX_OVERTIME_REG); |
| 1015 | regs[33] = dsaf_read_dev(queue, RCB_RING_INTMSK_TX_OVERTIME_REG); |
| 1016 | regs[34] = dsaf_read_dev(queue, RCB_RING_INTSTS_TX_OVERTIME_REG); |
| 1017 | |
| 1018 | /* mark end of ring regs */ |
| 1019 | for (i = 35; i < 40; i++) |
| 1020 | regs[i] = 0xcccccc00 + ring_pair->index; |
| 1021 | } |