/*
 * Copyright (C) 2015 Cavium, Inc.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of version 2 of the GNU General Public License
 * as published by the Free Software Foundation.
 */

#include <linux/pci.h>
#include <linux/netdevice.h>
#include <linux/ip.h>
#include <linux/etherdevice.h>
#include <net/ip.h>
#include <net/tso.h>

#include "nic_reg.h"
#include "nic.h"
#include "q_struct.h"
#include "nicvf_queues.h"

/* Poll a register for a specific value */
static int nicvf_poll_reg(struct nicvf *nic, int qidx,
			  u64 reg, int bit_pos, int bits, int val)
{
	u64 bit_mask;
	u64 reg_val;
	int timeout = 10;

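	/* Build a mask of 'bits' ones at 'bit_pos': e.g. bits = 2,
	 * bit_pos = 62 gives 0xC000000000000000.
	 */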
	bit_mask = (1ULL << bits) - 1;
	bit_mask = (bit_mask << bit_pos);

	while (timeout) {
		reg_val = nicvf_queue_reg_read(nic, reg, qidx);
		if (((reg_val & bit_mask) >> bit_pos) == val)
			return 0;
		usleep_range(1000, 2000);
		timeout--;
	}
	netdev_err(nic->netdev, "Poll on reg 0x%llx failed\n", reg);
	return 1;
}

/* Allocate memory for a queue's descriptors */
static int nicvf_alloc_q_desc_mem(struct nicvf *nic, struct q_desc_mem *dmem,
				  int q_len, int desc_size, int align_bytes)
{
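	/* Over-allocate by 'align_bytes' so the base address can be
	 * rounded up to the requested alignment without overrunning
	 * the allocation.
	 */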
	dmem->q_len = q_len;
	dmem->size = (desc_size * q_len) + align_bytes;
	/* Save address, need it while freeing */
	dmem->unalign_base = dma_zalloc_coherent(&nic->pdev->dev, dmem->size,
						 &dmem->dma, GFP_KERNEL);
	if (!dmem->unalign_base)
		return -ENOMEM;

	/* Align memory address for 'align_bytes' */
	dmem->phys_base = NICVF_ALIGNED_ADDR((u64)dmem->dma, align_bytes);
	dmem->base = dmem->unalign_base + (dmem->phys_base - dmem->dma);
	return 0;
}

/* Free queue's descriptor memory */
static void nicvf_free_q_desc_mem(struct nicvf *nic, struct q_desc_mem *dmem)
{
	if (!dmem)
		return;

	dma_free_coherent(&nic->pdev->dev, dmem->size,
			  dmem->unalign_base, dmem->dma);
	dmem->unalign_base = NULL;
	dmem->base = NULL;
}

/* Allocate a buffer for packet reception.
 * HW returns the memory address where the packet is DMA'ed, not a pointer
 * into the RBDR ring, so save the buffer address at the start of the
 * fragment and align the start address to a cache-aligned address.
 */
static inline int nicvf_alloc_rcv_buffer(struct nicvf *nic, gfp_t gfp,
					 u32 buf_len, u64 **rbuf)
{
	int order = (PAGE_SIZE <= 4096) ? PAGE_ALLOC_COSTLY_ORDER : 0;

	/* Check if request can be accommodated in previously allocated page */
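	/* Note: the size check below requires room for two buffers from
	 * the current offset, since rb_page_offset is advanced first and
	 * the buffer handed out starts at the new offset.
	 */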
	if (nic->rb_page) {
		if ((nic->rb_page_offset + buf_len + buf_len) >
		    (PAGE_SIZE << order)) {
			nic->rb_page = NULL;
		} else {
			nic->rb_page_offset += buf_len;
			get_page(nic->rb_page);
		}
	}

	/* Allocate a new page */
	if (!nic->rb_page) {
		nic->rb_page = alloc_pages(gfp | __GFP_COMP | __GFP_NOWARN,
					   order);
		if (!nic->rb_page) {
			nic->drv_stats.rcv_buffer_alloc_failures++;
			return -ENOMEM;
		}
		nic->rb_page_offset = 0;
	}

	*rbuf = (u64 *)((u64)page_address(nic->rb_page) + nic->rb_page_offset);

	return 0;
}

/* Build skb around receive buffer */
static struct sk_buff *nicvf_rb_ptr_to_skb(struct nicvf *nic,
					   u64 rb_ptr, int len)
{
	void *data;
	struct sk_buff *skb;

	data = phys_to_virt(rb_ptr);

	/* Now build an skb to give to stack */
	skb = build_skb(data, RCV_FRAG_LEN);
	if (!skb) {
		put_page(virt_to_page(data));
		return NULL;
	}

	prefetch(skb->data);
	return skb;
}

/* Allocate RBDR ring and populate receive buffers */
static int nicvf_init_rbdr(struct nicvf *nic, struct rbdr *rbdr,
			   int ring_len, int buf_size)
{
	int idx;
	u64 *rbuf;
	struct rbdr_entry_t *desc;
	int err;

	err = nicvf_alloc_q_desc_mem(nic, &rbdr->dmem, ring_len,
				     sizeof(struct rbdr_entry_t),
				     NICVF_RCV_BUF_ALIGN_BYTES);
	if (err)
		return err;

	rbdr->desc = rbdr->dmem.base;
	/* Buffer size has to be in multiples of 128 bytes */
	rbdr->dma_size = buf_size;
	rbdr->enable = true;
	rbdr->thresh = RBDR_THRESH;

	nic->rb_page = NULL;
	for (idx = 0; idx < ring_len; idx++) {
		err = nicvf_alloc_rcv_buffer(nic, GFP_KERNEL, RCV_FRAG_LEN,
					     &rbuf);
		if (err)
			return err;

		desc = GET_RBDR_DESC(rbdr, idx);
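		/* HW reconstructs the full address by shifting left by
		 * NICVF_RCV_BUF_ALIGN, so buffer start addresses are
		 * expected to be aligned to that many bits.
		 */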
		desc->buf_addr = virt_to_phys(rbuf) >> NICVF_RCV_BUF_ALIGN;
	}
	return 0;
}

/* Free RBDR ring and its receive buffers */
static void nicvf_free_rbdr(struct nicvf *nic, struct rbdr *rbdr)
{
	int head, tail;
	u64 buf_addr;
	struct rbdr_entry_t *desc;

	if (!rbdr)
		return;

	rbdr->enable = false;
	if (!rbdr->dmem.base)
		return;

	head = rbdr->head;
	tail = rbdr->tail;

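	/* Ring indices below wrap by masking with 'q_len - 1'; queue
	 * lengths are powers of two.
	 */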
	/* Free buffer pages */
	while (head != tail) {
		desc = GET_RBDR_DESC(rbdr, head);
		buf_addr = desc->buf_addr << NICVF_RCV_BUF_ALIGN;
		put_page(virt_to_page(phys_to_virt(buf_addr)));
		head++;
		head &= (rbdr->dmem.q_len - 1);
	}
	/* Free buffer page of the tail desc */
	desc = GET_RBDR_DESC(rbdr, tail);
	buf_addr = desc->buf_addr << NICVF_RCV_BUF_ALIGN;
	put_page(virt_to_page(phys_to_virt(buf_addr)));

	/* Free RBDR ring */
	nicvf_free_q_desc_mem(nic, &rbdr->dmem);
}

/* Refill receive buffer descriptors with new buffers */
static void nicvf_refill_rbdr(struct nicvf *nic, gfp_t gfp)
{
	struct queue_set *qs = nic->qs;
	int rbdr_idx = qs->rbdr_cnt;
	int tail, qcount;
	int refill_rb_cnt;
	struct rbdr *rbdr;
	struct rbdr_entry_t *desc;
	u64 *rbuf;
	int new_rb = 0;

refill:
	if (!rbdr_idx)
		return;
	rbdr_idx--;
	rbdr = &qs->rbdr[rbdr_idx];
	/* Check if it's enabled */
	if (!rbdr->enable)
		goto next_rbdr;

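	/* The low 19 bits of RBDR STATUS0 hold the count of descriptors
	 * currently with the HW, hence the 0x7FFFF mask below.
	 */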
	/* Get the number of descriptors to be refilled */
	qcount = nicvf_queue_reg_read(nic, NIC_QSET_RBDR_0_1_STATUS0, rbdr_idx);
	qcount &= 0x7FFFF;
	/* Doorbell can be rung with at most 'ring size minus 1' descriptors */
	if (qcount >= (qs->rbdr_len - 1))
		goto next_rbdr;
	else
		refill_rb_cnt = qs->rbdr_len - qcount - 1;

	/* Start filling descs from tail */
	tail = nicvf_queue_reg_read(nic, NIC_QSET_RBDR_0_1_TAIL, rbdr_idx) >> 3;
	while (refill_rb_cnt) {
		tail++;
		tail &= (rbdr->dmem.q_len - 1);

		if (nicvf_alloc_rcv_buffer(nic, gfp, RCV_FRAG_LEN, &rbuf))
			break;

		desc = GET_RBDR_DESC(rbdr, tail);
		desc->buf_addr = virt_to_phys(rbuf) >> NICVF_RCV_BUF_ALIGN;
		refill_rb_cnt--;
		new_rb++;
	}

	/* make sure all memory stores are done before ringing doorbell */
	smp_wmb();

	/* Check if buffer allocation failed */
	if (refill_rb_cnt)
		nic->rb_alloc_fail = true;
	else
		nic->rb_alloc_fail = false;

	/* Notify HW */
	nicvf_queue_reg_write(nic, NIC_QSET_RBDR_0_1_DOOR,
			      rbdr_idx, new_rb);
next_rbdr:
	/* Re-enable RBDR interrupts only if buffer allocation succeeded */
	if (!nic->rb_alloc_fail && rbdr->enable)
		nicvf_enable_intr(nic, NICVF_INTR_RBDR, rbdr_idx);

	if (rbdr_idx)
		goto refill;
}

/* Allocate receive buffers in non-atomic context, where allocation is
 * more likely to succeed.
 */
void nicvf_rbdr_work(struct work_struct *work)
{
	struct nicvf *nic = container_of(work, struct nicvf, rbdr_work.work);

	nicvf_refill_rbdr(nic, GFP_KERNEL);
	if (nic->rb_alloc_fail)
		schedule_delayed_work(&nic->rbdr_work, msecs_to_jiffies(10));
	else
		nic->rb_work_scheduled = false;
}

/* In softirq context, allocate receive buffers in atomic mode */
void nicvf_rbdr_task(unsigned long data)
{
	struct nicvf *nic = (struct nicvf *)data;

	nicvf_refill_rbdr(nic, GFP_ATOMIC);
	if (nic->rb_alloc_fail) {
		nic->rb_work_scheduled = true;
		schedule_delayed_work(&nic->rbdr_work, msecs_to_jiffies(10));
	}
}

/* Initialize completion queue */
static int nicvf_init_cmp_queue(struct nicvf *nic,
				struct cmp_queue *cq, int q_len)
{
	int err;

	err = nicvf_alloc_q_desc_mem(nic, &cq->dmem, q_len, CMP_QUEUE_DESC_SIZE,
				     NICVF_CQ_BASE_ALIGN_BYTES);
	if (err)
		return err;

	cq->desc = cq->dmem.base;
	cq->thresh = pass1_silicon(nic->pdev) ? 0 : CMP_QUEUE_CQE_THRESH;
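	/* Derive the coalescing time in microseconds; this assumes one
	 * timer-threshold tick is 0.05 us (50 ns).
	 */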
	nic->cq_coalesce_usecs = (CMP_QUEUE_TIMER_THRESH * 0.05) - 1;

	return 0;
}

static void nicvf_free_cmp_queue(struct nicvf *nic, struct cmp_queue *cq)
{
	if (!cq)
		return;
	if (!cq->dmem.base)
		return;

	nicvf_free_q_desc_mem(nic, &cq->dmem);
}

/* Initialize transmit queue */
static int nicvf_init_snd_queue(struct nicvf *nic,
				struct snd_queue *sq, int q_len)
{
	int err;

	err = nicvf_alloc_q_desc_mem(nic, &sq->dmem, q_len, SND_QUEUE_DESC_SIZE,
				     NICVF_SQ_BASE_ALIGN_BYTES);
	if (err)
		return err;

	sq->desc = sq->dmem.base;
	sq->skbuff = kcalloc(q_len, sizeof(u64), GFP_KERNEL);
	if (!sq->skbuff)
		return -ENOMEM;
	sq->head = 0;
	sq->tail = 0;
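	/* One descriptor is kept unused, presumably so a completely full
	 * ring can be distinguished from an empty one.
	 */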
	atomic_set(&sq->free_cnt, q_len - 1);
	sq->thresh = SND_QUEUE_THRESH;

	/* Preallocate memory for TSO segment's header */
	sq->tso_hdrs = dma_alloc_coherent(&nic->pdev->dev,
					  q_len * TSO_HEADER_SIZE,
					  &sq->tso_hdrs_phys, GFP_KERNEL);
	if (!sq->tso_hdrs)
		return -ENOMEM;

	return 0;
}

static void nicvf_free_snd_queue(struct nicvf *nic, struct snd_queue *sq)
{
	if (!sq)
		return;
	if (!sq->dmem.base)
		return;

	if (sq->tso_hdrs)
		dma_free_coherent(&nic->pdev->dev,
				  sq->dmem.q_len * TSO_HEADER_SIZE,
				  sq->tso_hdrs, sq->tso_hdrs_phys);

	kfree(sq->skbuff);
	nicvf_free_q_desc_mem(nic, &sq->dmem);
}

static void nicvf_reclaim_snd_queue(struct nicvf *nic,
				    struct queue_set *qs, int qidx)
{
	/* Disable send queue */
	nicvf_queue_reg_write(nic, NIC_QSET_SQ_0_7_CFG, qidx, 0);
	/* Check if SQ is stopped */
	if (nicvf_poll_reg(nic, qidx, NIC_QSET_SQ_0_7_STATUS, 21, 1, 0x01))
		return;
	/* Reset send queue */
	nicvf_queue_reg_write(nic, NIC_QSET_SQ_0_7_CFG, qidx, NICVF_SQ_RESET);
}

static void nicvf_reclaim_rcv_queue(struct nicvf *nic,
				    struct queue_set *qs, int qidx)
{
	union nic_mbx mbx = {};

	/* Make sure all packets in the pipeline are written back into mem */
	mbx.msg.msg = NIC_MBOX_MSG_RQ_SW_SYNC;
	nicvf_send_msg_to_pf(nic, &mbx);
}

static void nicvf_reclaim_cmp_queue(struct nicvf *nic,
				    struct queue_set *qs, int qidx)
{
	/* Disable timer threshold (doesn't get reset upon CQ reset) */
	nicvf_queue_reg_write(nic, NIC_QSET_CQ_0_7_CFG2, qidx, 0);
	/* Disable completion queue */
	nicvf_queue_reg_write(nic, NIC_QSET_CQ_0_7_CFG, qidx, 0);
	/* Reset completion queue */
	nicvf_queue_reg_write(nic, NIC_QSET_CQ_0_7_CFG, qidx, NICVF_CQ_RESET);
}

static void nicvf_reclaim_rbdr(struct nicvf *nic,
			       struct rbdr *rbdr, int qidx)
{
	u64 tmp, fifo_state;
	int timeout = 10;

	/* Save head and tail pointers for freeing up buffers */
	rbdr->head = nicvf_queue_reg_read(nic,
					  NIC_QSET_RBDR_0_1_HEAD,
					  qidx) >> 3;
	rbdr->tail = nicvf_queue_reg_read(nic,
					  NIC_QSET_RBDR_0_1_TAIL,
					  qidx) >> 3;

	/* If RBDR FIFO is in 'FAIL' state then do a reset first
	 * before reclaiming.
	 */
	fifo_state = nicvf_queue_reg_read(nic, NIC_QSET_RBDR_0_1_STATUS0, qidx);
	if (((fifo_state >> 62) & 0x03) == 0x3)
		nicvf_queue_reg_write(nic, NIC_QSET_RBDR_0_1_CFG,
				      qidx, NICVF_RBDR_RESET);

	/* Disable RBDR */
	nicvf_queue_reg_write(nic, NIC_QSET_RBDR_0_1_CFG, qidx, 0);
	if (nicvf_poll_reg(nic, qidx, NIC_QSET_RBDR_0_1_STATUS0, 62, 2, 0x00))
		return;
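
	/* Poll until both 32-bit halves of PREFETCH_STATUS match, which
	 * is taken to mean the descriptor prefetcher has drained.
	 */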
	while (1) {
		tmp = nicvf_queue_reg_read(nic,
					   NIC_QSET_RBDR_0_1_PREFETCH_STATUS,
					   qidx);
		if ((tmp & 0xFFFFFFFF) == ((tmp >> 32) & 0xFFFFFFFF))
			break;
		usleep_range(1000, 2000);
		timeout--;
		if (!timeout) {
			netdev_err(nic->netdev,
				   "Failed polling on prefetch status\n");
			return;
		}
	}
	nicvf_queue_reg_write(nic, NIC_QSET_RBDR_0_1_CFG,
			      qidx, NICVF_RBDR_RESET);

	if (nicvf_poll_reg(nic, qidx, NIC_QSET_RBDR_0_1_STATUS0, 62, 2, 0x02))
		return;
	nicvf_queue_reg_write(nic, NIC_QSET_RBDR_0_1_CFG, qidx, 0x00);
	if (nicvf_poll_reg(nic, qidx, NIC_QSET_RBDR_0_1_STATUS0, 62, 2, 0x00))
		return;
}

void nicvf_config_vlan_stripping(struct nicvf *nic, netdev_features_t features)
{
	u64 rq_cfg;
	int sqs;

	rq_cfg = nicvf_queue_reg_read(nic, NIC_QSET_RQ_GEN_CFG, 0);

	/* Enable stripping of the first VLAN tag */
	if (features & NETIF_F_HW_VLAN_CTAG_RX)
		rq_cfg |= (1ULL << 25);
	else
		rq_cfg &= ~(1ULL << 25);
	nicvf_queue_reg_write(nic, NIC_QSET_RQ_GEN_CFG, 0, rq_cfg);

	/* Configure Secondary Qsets, if any */
	for (sqs = 0; sqs < nic->sqs_count; sqs++)
		if (nic->snicvf[sqs])
			nicvf_queue_reg_write(nic->snicvf[sqs],
					      NIC_QSET_RQ_GEN_CFG, 0, rq_cfg);
}

/* Configures receive queue */
static void nicvf_rcv_queue_config(struct nicvf *nic, struct queue_set *qs,
				   int qidx, bool enable)
{
	union nic_mbx mbx = {};
	struct rcv_queue *rq;
	struct rq_cfg rq_cfg;

	rq = &qs->rq[qidx];
	rq->enable = enable;

	/* Disable receive queue */
	nicvf_queue_reg_write(nic, NIC_QSET_RQ_0_7_CFG, qidx, 0);

	if (!rq->enable) {
		nicvf_reclaim_rcv_queue(nic, qs, qidx);
		return;
	}

	rq->cq_qs = qs->vnic_id;
	rq->cq_idx = qidx;
	rq->start_rbdr_qs = qs->vnic_id;
	rq->start_qs_rbdr_idx = qs->rbdr_cnt - 1;
	rq->cont_rbdr_qs = qs->vnic_id;
	rq->cont_qs_rbdr_idx = qs->rbdr_cnt - 1;
	/* All writes of RBDR data are loaded into the L2 cache as well */
	rq->caching = 1;

	/* Send a mailbox msg to PF to config RQ */
	mbx.rq.msg = NIC_MBOX_MSG_RQ_CFG;
	mbx.rq.qs_num = qs->vnic_id;
	mbx.rq.rq_num = qidx;
	mbx.rq.cfg = (rq->caching << 26) | (rq->cq_qs << 19) |
		     (rq->cq_idx << 16) | (rq->cont_rbdr_qs << 9) |
		     (rq->cont_qs_rbdr_idx << 8) |
		     (rq->start_rbdr_qs << 1) | (rq->start_qs_rbdr_idx);
	nicvf_send_msg_to_pf(nic, &mbx);

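	/* Bits 63 and 62 appear to enable backpressure from the RBDR and
	 * CQ respectively; the low bits carry the Qset number.
	 */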
	mbx.rq.msg = NIC_MBOX_MSG_RQ_BP_CFG;
	mbx.rq.cfg = (1ULL << 63) | (1ULL << 62) | (qs->vnic_id << 0);
	nicvf_send_msg_to_pf(nic, &mbx);

	/* RQ drop config
	 * Enable CQ drop to reserve sufficient CQEs for all tx packets
	 */
	mbx.rq.msg = NIC_MBOX_MSG_RQ_DROP_CFG;
	mbx.rq.cfg = (1ULL << 62) | (RQ_CQ_DROP << 8);
	nicvf_send_msg_to_pf(nic, &mbx);

	nicvf_queue_reg_write(nic, NIC_QSET_RQ_GEN_CFG, 0, 0x00);
	if (!nic->sqs_mode)
		nicvf_config_vlan_stripping(nic, nic->netdev->features);

	/* Enable Receive queue */
	rq_cfg.ena = 1;
	rq_cfg.tcp_ena = 0;
	nicvf_queue_reg_write(nic, NIC_QSET_RQ_0_7_CFG, qidx, *(u64 *)&rq_cfg);
}

/* Configures completion queue */
void nicvf_cmp_queue_config(struct nicvf *nic, struct queue_set *qs,
			    int qidx, bool enable)
{
	struct cmp_queue *cq;
	struct cq_cfg cq_cfg;

	cq = &qs->cq[qidx];
	cq->enable = enable;

	if (!cq->enable) {
		nicvf_reclaim_cmp_queue(nic, qs, qidx);
		return;
	}

	/* Reset completion queue */
	nicvf_queue_reg_write(nic, NIC_QSET_CQ_0_7_CFG, qidx, NICVF_CQ_RESET);

	if (!cq->enable)
		return;

	spin_lock_init(&cq->lock);
	/* Set completion queue base address */
	nicvf_queue_reg_write(nic, NIC_QSET_CQ_0_7_BASE,
			      qidx, (u64)(cq->dmem.phys_base));

	/* Enable Completion queue */
	cq_cfg.ena = 1;
	cq_cfg.reset = 0;
	cq_cfg.caching = 0;
	cq_cfg.qsize = CMP_QSIZE;
	cq_cfg.avg_con = 0;
	nicvf_queue_reg_write(nic, NIC_QSET_CQ_0_7_CFG, qidx, *(u64 *)&cq_cfg);

	/* Set threshold value for interrupt generation */
	nicvf_queue_reg_write(nic, NIC_QSET_CQ_0_7_THRESH, qidx, cq->thresh);
	nicvf_queue_reg_write(nic, NIC_QSET_CQ_0_7_CFG2,
			      qidx, CMP_QUEUE_TIMER_THRESH);
}

/* Configures transmit queue */
static void nicvf_snd_queue_config(struct nicvf *nic, struct queue_set *qs,
				   int qidx, bool enable)
{
	union nic_mbx mbx = {};
	struct snd_queue *sq;
	struct sq_cfg sq_cfg;

	sq = &qs->sq[qidx];
	sq->enable = enable;

	if (!sq->enable) {
		nicvf_reclaim_snd_queue(nic, qs, qidx);
		return;
	}

	/* Reset send queue */
	nicvf_queue_reg_write(nic, NIC_QSET_SQ_0_7_CFG, qidx, NICVF_SQ_RESET);

	sq->cq_qs = qs->vnic_id;
	sq->cq_idx = qidx;

	/* Send a mailbox msg to PF to config SQ */
	mbx.sq.msg = NIC_MBOX_MSG_SQ_CFG;
	mbx.sq.qs_num = qs->vnic_id;
	mbx.sq.sq_num = qidx;
	mbx.sq.sqs_mode = nic->sqs_mode;
	mbx.sq.cfg = (sq->cq_qs << 3) | sq->cq_idx;
	nicvf_send_msg_to_pf(nic, &mbx);

	/* Set queue base address */
	nicvf_queue_reg_write(nic, NIC_QSET_SQ_0_7_BASE,
			      qidx, (u64)(sq->dmem.phys_base));

	/* Enable send queue & set queue size */
	sq_cfg.ena = 1;
	sq_cfg.reset = 0;
	sq_cfg.ldwb = 0;
	sq_cfg.qsize = SND_QSIZE;
	sq_cfg.tstmp_bgx_intf = 0;
	nicvf_queue_reg_write(nic, NIC_QSET_SQ_0_7_CFG, qidx, *(u64 *)&sq_cfg);

	/* Set threshold value for interrupt generation */
	nicvf_queue_reg_write(nic, NIC_QSET_SQ_0_7_THRESH, qidx, sq->thresh);

	/* Set queue:cpu affinity for better load distribution */
	if (cpu_online(qidx)) {
		cpumask_set_cpu(qidx, &sq->affinity_mask);
		netif_set_xps_queue(nic->netdev,
				    &sq->affinity_mask, qidx);
	}
}

/* Configures receive buffer descriptor ring */
static void nicvf_rbdr_config(struct nicvf *nic, struct queue_set *qs,
			      int qidx, bool enable)
{
	struct rbdr *rbdr;
	struct rbdr_cfg rbdr_cfg;

	rbdr = &qs->rbdr[qidx];
	nicvf_reclaim_rbdr(nic, rbdr, qidx);
	if (!enable)
		return;

	/* Set descriptor base address */
	nicvf_queue_reg_write(nic, NIC_QSET_RBDR_0_1_BASE,
			      qidx, (u64)(rbdr->dmem.phys_base));

	/* Enable RBDR & set queue size */
	/* Buffer size should be in multiples of 128 bytes */
	rbdr_cfg.ena = 1;
	rbdr_cfg.reset = 0;
	rbdr_cfg.ldwb = 0;
	rbdr_cfg.qsize = RBDR_SIZE;
	rbdr_cfg.avg_con = 0;
	rbdr_cfg.lines = rbdr->dma_size / 128;
	nicvf_queue_reg_write(nic, NIC_QSET_RBDR_0_1_CFG,
			      qidx, *(u64 *)&rbdr_cfg);

	/* Notify HW */
	nicvf_queue_reg_write(nic, NIC_QSET_RBDR_0_1_DOOR,
			      qidx, qs->rbdr_len - 1);

	/* Set threshold value for interrupt generation */
	nicvf_queue_reg_write(nic, NIC_QSET_RBDR_0_1_THRESH,
			      qidx, rbdr->thresh - 1);
}

/* Requests PF to assign and enable Qset */
void nicvf_qset_config(struct nicvf *nic, bool enable)
{
	union nic_mbx mbx = {};
	struct queue_set *qs = nic->qs;
	struct qs_cfg *qs_cfg;

	if (!qs) {
		netdev_warn(nic->netdev,
			    "Qset is still not allocated, don't init queues\n");
		return;
	}

	qs->enable = enable;
	qs->vnic_id = nic->vf_id;

	/* Send a mailbox msg to PF to config Qset */
	mbx.qs.msg = NIC_MBOX_MSG_QS_CFG;
	mbx.qs.num = qs->vnic_id;
	mbx.qs.sqs_count = nic->sqs_count;

	mbx.qs.cfg = 0;
	qs_cfg = (struct qs_cfg *)&mbx.qs.cfg;
	if (qs->enable) {
		qs_cfg->ena = 1;
#ifdef __BIG_ENDIAN
		qs_cfg->be = 1;
#endif
		qs_cfg->vnic = qs->vnic_id;
	}
	nicvf_send_msg_to_pf(nic, &mbx);
}

static void nicvf_free_resources(struct nicvf *nic)
{
	int qidx;
	struct queue_set *qs = nic->qs;

	/* Free receive buffer descriptor ring */
	for (qidx = 0; qidx < qs->rbdr_cnt; qidx++)
		nicvf_free_rbdr(nic, &qs->rbdr[qidx]);

	/* Free completion queue */
	for (qidx = 0; qidx < qs->cq_cnt; qidx++)
		nicvf_free_cmp_queue(nic, &qs->cq[qidx]);

	/* Free send queue */
	for (qidx = 0; qidx < qs->sq_cnt; qidx++)
		nicvf_free_snd_queue(nic, &qs->sq[qidx]);
}

static int nicvf_alloc_resources(struct nicvf *nic)
{
	int qidx;
	struct queue_set *qs = nic->qs;

	/* Alloc receive buffer descriptor ring */
	for (qidx = 0; qidx < qs->rbdr_cnt; qidx++) {
		if (nicvf_init_rbdr(nic, &qs->rbdr[qidx], qs->rbdr_len,
				    DMA_BUFFER_LEN))
			goto alloc_fail;
	}

	/* Alloc send queue */
	for (qidx = 0; qidx < qs->sq_cnt; qidx++) {
		if (nicvf_init_snd_queue(nic, &qs->sq[qidx], qs->sq_len))
			goto alloc_fail;
	}

	/* Alloc completion queue */
	for (qidx = 0; qidx < qs->cq_cnt; qidx++) {
		if (nicvf_init_cmp_queue(nic, &qs->cq[qidx], qs->cq_len))
			goto alloc_fail;
	}

	return 0;
alloc_fail:
	nicvf_free_resources(nic);
	return -ENOMEM;
}

int nicvf_set_qset_resources(struct nicvf *nic)
{
	struct queue_set *qs;

	qs = devm_kzalloc(&nic->pdev->dev, sizeof(*qs), GFP_KERNEL);
	if (!qs)
		return -ENOMEM;
	nic->qs = qs;

	/* Set count of each queue */
	qs->rbdr_cnt = RBDR_CNT;
	qs->rq_cnt = RCV_QUEUE_CNT;
	qs->sq_cnt = SND_QUEUE_CNT;
	qs->cq_cnt = CMP_QUEUE_CNT;

	/* Set queue lengths */
	qs->rbdr_len = RCV_BUF_COUNT;
	qs->sq_len = SND_QUEUE_LEN;
	qs->cq_len = CMP_QUEUE_LEN;

	nic->rx_queues = qs->rq_cnt;
	nic->tx_queues = qs->sq_cnt;

	return 0;
}

int nicvf_config_data_transfer(struct nicvf *nic, bool enable)
{
	bool disable = false;
	struct queue_set *qs = nic->qs;
	int qidx;

	if (!qs)
		return 0;

	if (enable) {
		if (nicvf_alloc_resources(nic))
			return -ENOMEM;

		for (qidx = 0; qidx < qs->sq_cnt; qidx++)
			nicvf_snd_queue_config(nic, qs, qidx, enable);
		for (qidx = 0; qidx < qs->cq_cnt; qidx++)
			nicvf_cmp_queue_config(nic, qs, qidx, enable);
		for (qidx = 0; qidx < qs->rbdr_cnt; qidx++)
			nicvf_rbdr_config(nic, qs, qidx, enable);
		for (qidx = 0; qidx < qs->rq_cnt; qidx++)
			nicvf_rcv_queue_config(nic, qs, qidx, enable);
	} else {
		for (qidx = 0; qidx < qs->rq_cnt; qidx++)
			nicvf_rcv_queue_config(nic, qs, qidx, disable);
		for (qidx = 0; qidx < qs->rbdr_cnt; qidx++)
			nicvf_rbdr_config(nic, qs, qidx, disable);
		for (qidx = 0; qidx < qs->sq_cnt; qidx++)
			nicvf_snd_queue_config(nic, qs, qidx, disable);
		for (qidx = 0; qidx < qs->cq_cnt; qidx++)
			nicvf_cmp_queue_config(nic, qs, qidx, disable);

		nicvf_free_resources(nic);
	}

	return 0;
}

/* Get a free descriptor from SQ
 * returns the descriptor number
 */
static inline int nicvf_get_sq_desc(struct snd_queue *sq, int desc_cnt)
{
	int qentry;

	qentry = sq->tail;
	atomic_sub(desc_cnt, &sq->free_cnt);
	sq->tail += desc_cnt;
	sq->tail &= (sq->dmem.q_len - 1);

	return qentry;
}

/* Free descriptor back to SQ for future use */
void nicvf_put_sq_desc(struct snd_queue *sq, int desc_cnt)
{
	atomic_add(desc_cnt, &sq->free_cnt);
	sq->head += desc_cnt;
	sq->head &= (sq->dmem.q_len - 1);
}

static inline int nicvf_get_nxt_sqentry(struct snd_queue *sq, int qentry)
{
	qentry++;
	qentry &= (sq->dmem.q_len - 1);
	return qentry;
}

void nicvf_sq_enable(struct nicvf *nic, struct snd_queue *sq, int qidx)
{
	u64 sq_cfg;

	sq_cfg = nicvf_queue_reg_read(nic, NIC_QSET_SQ_0_7_CFG, qidx);
	sq_cfg |= NICVF_SQ_EN;
	nicvf_queue_reg_write(nic, NIC_QSET_SQ_0_7_CFG, qidx, sq_cfg);
	/* Ring doorbell so that H/W restarts processing SQEs */
	nicvf_queue_reg_write(nic, NIC_QSET_SQ_0_7_DOOR, qidx, 0);
}

void nicvf_sq_disable(struct nicvf *nic, int qidx)
{
	u64 sq_cfg;

	sq_cfg = nicvf_queue_reg_read(nic, NIC_QSET_SQ_0_7_CFG, qidx);
	sq_cfg &= ~NICVF_SQ_EN;
	nicvf_queue_reg_write(nic, NIC_QSET_SQ_0_7_CFG, qidx, sq_cfg);
}

void nicvf_sq_free_used_descs(struct net_device *netdev, struct snd_queue *sq,
			      int qidx)
{
	u64 head, tail;
	struct sk_buff *skb;
	struct nicvf *nic = netdev_priv(netdev);
	struct sq_hdr_subdesc *hdr;

	head = nicvf_queue_reg_read(nic, NIC_QSET_SQ_0_7_HEAD, qidx) >> 4;
	tail = nicvf_queue_reg_read(nic, NIC_QSET_SQ_0_7_TAIL, qidx) >> 4;
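	/* Walk from the SW head to the HW head; every HEADER subdescriptor
	 * marks one transmitted packet, any other subdescriptor type is
	 * simply returned to the free pool.
	 */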
	while (sq->head != head) {
		hdr = (struct sq_hdr_subdesc *)GET_SQ_DESC(sq, sq->head);
		if (hdr->subdesc_type != SQ_DESC_TYPE_HEADER) {
			nicvf_put_sq_desc(sq, 1);
			continue;
		}
		skb = (struct sk_buff *)sq->skbuff[sq->head];
		if (skb)
			dev_kfree_skb_any(skb);
		atomic64_add(1, (atomic64_t *)&netdev->stats.tx_packets);
		atomic64_add(hdr->tot_len,
			     (atomic64_t *)&netdev->stats.tx_bytes);
		nicvf_put_sq_desc(sq, hdr->subdesc_cnt + 1);
	}
}

/* Calculate the number of SQ subdescriptors needed to transmit all
 * segments of this TSO packet.
 * Taken from 'Tilera network driver' with a minor modification.
 */
static int nicvf_tso_count_subdescs(struct sk_buff *skb)
{
	struct skb_shared_info *sh = skb_shinfo(skb);
	unsigned int sh_len = skb_transport_offset(skb) + tcp_hdrlen(skb);
	unsigned int data_len = skb->len - sh_len;
	unsigned int p_len = sh->gso_size;
	long f_id = -1;	/* id of the current fragment */
	long f_size = skb_headlen(skb) - sh_len;  /* current fragment size */
	long f_used = 0;  /* bytes used from the current fragment */
	long n;  /* size of the current piece of payload */
	int num_edescs = 0;
	int segment;

	for (segment = 0; segment < sh->gso_segs; segment++) {
		unsigned int p_used = 0;

		/* One edesc for header and for each piece of the payload. */
		for (num_edescs++; p_used < p_len; num_edescs++) {
			/* Advance as needed. */
			while (f_used >= f_size) {
				f_id++;
				f_size = skb_frag_size(&sh->frags[f_id]);
				f_used = 0;
			}

			/* Use bytes from the current fragment. */
			n = p_len - p_used;
			if (n > f_size - f_used)
				n = f_size - f_used;
			f_used += n;
			p_used += n;
		}

		/* The last segment may be less than gso_size. */
		data_len -= p_len;
		if (data_len < p_len)
			p_len = data_len;
	}

	/* '+ gso_segs' for the SQ_HDR_SUBDESC of each segment */
	return num_edescs + sh->gso_segs;
}

/* Get the number of SQ descriptors needed to xmit this skb */
static int nicvf_sq_subdesc_required(struct nicvf *nic, struct sk_buff *skb)
{
	int subdesc_cnt = MIN_SQ_DESC_PER_PKT_XMIT;

	if (skb_shinfo(skb)->gso_size && !nic->hw_tso) {
		subdesc_cnt = nicvf_tso_count_subdescs(skb);
		return subdesc_cnt;
	}

	if (skb_shinfo(skb)->nr_frags)
		subdesc_cnt += skb_shinfo(skb)->nr_frags;

	return subdesc_cnt;
}

/* Add SQ HEADER subdescriptor.
 * First subdescriptor for every send descriptor.
 */
static inline void
nicvf_sq_add_hdr_subdesc(struct nicvf *nic, struct snd_queue *sq, int qentry,
			 int subdesc_cnt, struct sk_buff *skb, int len)
{
	int proto;
	struct sq_hdr_subdesc *hdr;

	hdr = (struct sq_hdr_subdesc *)GET_SQ_DESC(sq, qentry);
	sq->skbuff[qentry] = (u64)skb;

	memset(hdr, 0, SND_QUEUE_DESC_SIZE);
	hdr->subdesc_type = SQ_DESC_TYPE_HEADER;
	/* Enable notification via CQE after processing SQE */
	hdr->post_cqe = 1;
	/* Number of subdescriptors following this one */
	hdr->subdesc_cnt = subdesc_cnt;
	hdr->tot_len = len;

	/* Offload checksum calculation to HW */
	if (skb->ip_summed == CHECKSUM_PARTIAL) {
		hdr->csum_l3 = 1; /* Enable IP csum calculation */
		hdr->l3_offset = skb_network_offset(skb);
		hdr->l4_offset = skb_transport_offset(skb);

		proto = ip_hdr(skb)->protocol;
		switch (proto) {
		case IPPROTO_TCP:
			hdr->csum_l4 = SEND_L4_CSUM_TCP;
			break;
		case IPPROTO_UDP:
			hdr->csum_l4 = SEND_L4_CSUM_UDP;
			break;
		case IPPROTO_SCTP:
			hdr->csum_l4 = SEND_L4_CSUM_SCTP;
			break;
		}
	}

	if (nic->hw_tso && skb_shinfo(skb)->gso_size) {
		hdr->tso = 1;
		hdr->tso_start = skb_transport_offset(skb) + tcp_hdrlen(skb);
		hdr->tso_max_paysize = skb_shinfo(skb)->gso_size;
		/* For non-tunneled pkts, point this to L2 ethertype */
		hdr->inner_l3_offset = skb_network_offset(skb) - 2;
		nic->drv_stats.tx_tso++;
	}
}

/* SQ GATHER subdescriptor
 * Must follow HDR descriptor
 */
static inline void nicvf_sq_add_gather_subdesc(struct snd_queue *sq, int qentry,
					       int size, u64 data)
{
	struct sq_gather_subdesc *gather;

	qentry &= (sq->dmem.q_len - 1);
	gather = (struct sq_gather_subdesc *)GET_SQ_DESC(sq, qentry);

	memset(gather, 0, SND_QUEUE_DESC_SIZE);
	gather->subdesc_type = SQ_DESC_TYPE_GATHER;
	gather->ld_type = NIC_SEND_LD_TYPE_E_LDD;
	gather->size = size;
	gather->addr = data;
}

/* Segment a TSO packet into 'gso_size' segments and append
 * them to SQ for transfer
 */
static int nicvf_sq_append_tso(struct nicvf *nic, struct snd_queue *sq,
			       int sq_num, int qentry, struct sk_buff *skb)
{
	struct tso_t tso;
	int seg_subdescs = 0, desc_cnt = 0;
	int seg_len, total_len, data_left;
	int hdr_qentry = qentry;
	int hdr_len = skb_transport_offset(skb) + tcp_hdrlen(skb);
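
	/* Software TSO: each segment gets one HDR subdescriptor, one GATHER
	 * for a header rebuilt in the preallocated tso_hdrs area, and one
	 * GATHER per piece of payload.
	 */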
	tso_start(skb, &tso);
	total_len = skb->len - hdr_len;
	while (total_len > 0) {
		char *hdr;

		/* Save Qentry for adding HDR_SUBDESC at the end */
		hdr_qentry = qentry;

		data_left = min_t(int, skb_shinfo(skb)->gso_size, total_len);
		total_len -= data_left;

		/* Add segment's header */
		qentry = nicvf_get_nxt_sqentry(sq, qentry);
		hdr = sq->tso_hdrs + qentry * TSO_HEADER_SIZE;
		tso_build_hdr(skb, hdr, &tso, data_left, total_len == 0);
		nicvf_sq_add_gather_subdesc(sq, qentry, hdr_len,
					    sq->tso_hdrs_phys +
					    qentry * TSO_HEADER_SIZE);
		/* HDR_SUBDESC + GATHER */
		seg_subdescs = 2;
		seg_len = hdr_len;

		/* Add segment's payload fragments */
		while (data_left > 0) {
			int size;

			size = min_t(int, tso.size, data_left);

			qentry = nicvf_get_nxt_sqentry(sq, qentry);
			nicvf_sq_add_gather_subdesc(sq, qentry, size,
						    virt_to_phys(tso.data));
			seg_subdescs++;
			seg_len += size;

			data_left -= size;
			tso_build_data(skb, &tso, size);
		}
		nicvf_sq_add_hdr_subdesc(nic, sq, hdr_qentry,
					 seg_subdescs - 1, skb, seg_len);
		sq->skbuff[hdr_qentry] = (u64)NULL;
		qentry = nicvf_get_nxt_sqentry(sq, qentry);

		desc_cnt += seg_subdescs;
	}
	/* Save SKB in the last segment for freeing */
	sq->skbuff[hdr_qentry] = (u64)skb;

	/* make sure all memory stores are done before ringing doorbell */
	smp_wmb();

	/* Inform HW to xmit all TSO segments */
	nicvf_queue_reg_write(nic, NIC_QSET_SQ_0_7_DOOR,
			      sq_num, desc_cnt);
	nic->drv_stats.tx_tso++;
	return 1;
}

/* Append an skb to a SQ for packet transfer. */
int nicvf_sq_append_skb(struct nicvf *nic, struct sk_buff *skb)
{
	int i, size;
	int subdesc_cnt;
	int sq_num, qentry;
	struct queue_set *qs;
	struct snd_queue *sq;

	sq_num = skb_get_queue_mapping(skb);
	if (sq_num >= MAX_SND_QUEUES_PER_QS) {
		/* Get secondary Qset's SQ structure */
		i = sq_num / MAX_SND_QUEUES_PER_QS;
		if (!nic->snicvf[i - 1]) {
			netdev_warn(nic->netdev,
				    "Secondary Qset#%d's ptr not initialized\n",
				    i - 1);
			return 1;
		}
		nic = (struct nicvf *)nic->snicvf[i - 1];
		sq_num = sq_num % MAX_SND_QUEUES_PER_QS;
	}

	qs = nic->qs;
	sq = &qs->sq[sq_num];

	subdesc_cnt = nicvf_sq_subdesc_required(nic, skb);
	if (subdesc_cnt > atomic_read(&sq->free_cnt))
		goto append_fail;

	qentry = nicvf_get_sq_desc(sq, subdesc_cnt);

	/* Check if it's a TSO packet */
	if (skb_shinfo(skb)->gso_size && !nic->hw_tso)
		return nicvf_sq_append_tso(nic, sq, sq_num, qentry, skb);

	/* Add SQ header subdesc */
	nicvf_sq_add_hdr_subdesc(nic, sq, qentry, subdesc_cnt - 1,
				 skb, skb->len);

	/* Add SQ gather subdescs */
	qentry = nicvf_get_nxt_sqentry(sq, qentry);
	size = skb_is_nonlinear(skb) ? skb_headlen(skb) : skb->len;
	nicvf_sq_add_gather_subdesc(sq, qentry, size, virt_to_phys(skb->data));

	/* Check for scattered buffer */
	if (!skb_is_nonlinear(skb))
		goto doorbell;

	for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
		const struct skb_frag_struct *frag;

		frag = &skb_shinfo(skb)->frags[i];

		qentry = nicvf_get_nxt_sqentry(sq, qentry);
		size = skb_frag_size(frag);
		nicvf_sq_add_gather_subdesc(sq, qentry, size,
					    virt_to_phys(
					    skb_frag_address(frag)));
	}

doorbell:
	/* make sure all memory stores are done before ringing doorbell */
	smp_wmb();

	/* Inform HW to xmit new packet */
	nicvf_queue_reg_write(nic, NIC_QSET_SQ_0_7_DOOR,
			      sq_num, subdesc_cnt);
	return 1;

append_fail:
	/* Use original PCI dev for debug log */
	nic = nic->pnicvf;
	netdev_dbg(nic->netdev, "Not enough SQ descriptors to xmit pkt\n");
	return 0;
}

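/* rb_lens in the CQE are 16-bit fields packed four to a 64-bit word;
 * on big-endian hosts the four entries of each word are seen in
 * reverse order, so remap the index within each group of four.
 */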
static inline unsigned frag_num(unsigned i)
{
#ifdef __BIG_ENDIAN
	return (i & ~3) + 3 - (i & 3);
#else
	return i;
#endif
}

/* Returns SKB for a received packet */
struct sk_buff *nicvf_get_rcv_skb(struct nicvf *nic, struct cqe_rx_t *cqe_rx)
{
	int frag;
	int payload_len = 0;
	struct sk_buff *skb = NULL;
	struct sk_buff *skb_frag = NULL;
	struct sk_buff *prev_frag = NULL;
	u16 *rb_lens = NULL;
	u64 *rb_ptrs = NULL;

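	/* Per the CQE_RX layout used here, the 16-bit buffer lengths start
	 * at the fourth 64-bit word and the buffer pointers at the seventh.
	 */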
	rb_lens = (void *)cqe_rx + (3 * sizeof(u64));
	rb_ptrs = (void *)cqe_rx + (6 * sizeof(u64));

	netdev_dbg(nic->netdev, "%s rb_cnt %d rb0_ptr %llx rb0_sz %d\n",
		   __func__, cqe_rx->rb_cnt, cqe_rx->rb0_ptr, cqe_rx->rb0_sz);

	for (frag = 0; frag < cqe_rx->rb_cnt; frag++) {
		payload_len = rb_lens[frag_num(frag)];
		if (!frag) {
			/* First fragment */
			skb = nicvf_rb_ptr_to_skb(nic,
						  *rb_ptrs - cqe_rx->align_pad,
						  payload_len);
			if (!skb)
				return NULL;
			skb_reserve(skb, cqe_rx->align_pad);
			skb_put(skb, payload_len);
		} else {
			/* Add fragments */
			skb_frag = nicvf_rb_ptr_to_skb(nic, *rb_ptrs,
						       payload_len);
			if (!skb_frag) {
				dev_kfree_skb(skb);
				return NULL;
			}

			if (!skb_shinfo(skb)->frag_list)
				skb_shinfo(skb)->frag_list = skb_frag;
			else
				prev_frag->next = skb_frag;

			prev_frag = skb_frag;
			skb->len += payload_len;
			skb->data_len += payload_len;
			skb_frag->len = payload_len;
		}
		/* Next buffer pointer */
		rb_ptrs++;
	}
	return skb;
}

static u64 nicvf_int_type_to_mask(int int_type, int q_idx)
{
	u64 reg_val;

	switch (int_type) {
	case NICVF_INTR_CQ:
		reg_val = ((1ULL << q_idx) << NICVF_INTR_CQ_SHIFT);
		break;
	case NICVF_INTR_SQ:
		reg_val = ((1ULL << q_idx) << NICVF_INTR_SQ_SHIFT);
		break;
	case NICVF_INTR_RBDR:
		reg_val = ((1ULL << q_idx) << NICVF_INTR_RBDR_SHIFT);
		break;
	case NICVF_INTR_PKT_DROP:
		reg_val = (1ULL << NICVF_INTR_PKT_DROP_SHIFT);
		break;
	case NICVF_INTR_TCP_TIMER:
		reg_val = (1ULL << NICVF_INTR_TCP_TIMER_SHIFT);
		break;
	case NICVF_INTR_MBOX:
		reg_val = (1ULL << NICVF_INTR_MBOX_SHIFT);
		break;
	case NICVF_INTR_QS_ERR:
		reg_val = (1ULL << NICVF_INTR_QS_ERR_SHIFT);
		break;
	default:
		reg_val = 0;
	}

	return reg_val;
}

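/* NIC_VF_ENA_W1S / NIC_VF_ENA_W1C follow the usual write-1-to-set /
 * write-1-to-clear convention for the interrupt enable bits.
 */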
/* Enable interrupt */
void nicvf_enable_intr(struct nicvf *nic, int int_type, int q_idx)
{
	u64 mask = nicvf_int_type_to_mask(int_type, q_idx);

	if (!mask) {
		netdev_dbg(nic->netdev,
			   "Failed to enable interrupt: unknown type\n");
		return;
	}
	nicvf_reg_write(nic, NIC_VF_ENA_W1S,
			nicvf_reg_read(nic, NIC_VF_ENA_W1S) | mask);
}

/* Disable interrupt */
void nicvf_disable_intr(struct nicvf *nic, int int_type, int q_idx)
{
	u64 mask = nicvf_int_type_to_mask(int_type, q_idx);

	if (!mask) {
		netdev_dbg(nic->netdev,
			   "Failed to disable interrupt: unknown type\n");
		return;
	}

	nicvf_reg_write(nic, NIC_VF_ENA_W1C, mask);
}

/* Clear interrupt */
void nicvf_clear_intr(struct nicvf *nic, int int_type, int q_idx)
{
	u64 mask = nicvf_int_type_to_mask(int_type, q_idx);

	if (!mask) {
		netdev_dbg(nic->netdev,
			   "Failed to clear interrupt: unknown type\n");
		return;
	}

	nicvf_reg_write(nic, NIC_VF_INT, mask);
}

/* Check if interrupt is enabled */
int nicvf_is_intr_enabled(struct nicvf *nic, int int_type, int q_idx)
{
	u64 mask = nicvf_int_type_to_mask(int_type, q_idx);
	/* If the interrupt type is unknown, treat it as disabled. */
	if (!mask) {
		netdev_dbg(nic->netdev,
			   "Failed to check interrupt enable: unknown type\n");
		return 0;
	}

	return mask & nicvf_reg_read(nic, NIC_VF_ENA_W1S);
}

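/* Per-queue statistics registers are addressed by ORing the queue index
 * and the statistic number into the base offset, as the GET_*_STATS
 * macros below do.
 */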
void nicvf_update_rq_stats(struct nicvf *nic, int rq_idx)
{
	struct rcv_queue *rq;

#define GET_RQ_STATS(reg) \
	nicvf_reg_read(nic, NIC_QSET_RQ_0_7_STAT_0_1 |\
			    (rq_idx << NIC_Q_NUM_SHIFT) | (reg << 3))

	rq = &nic->qs->rq[rq_idx];
	rq->stats.bytes = GET_RQ_STATS(RQ_SQ_STATS_OCTS);
	rq->stats.pkts = GET_RQ_STATS(RQ_SQ_STATS_PKTS);
}

void nicvf_update_sq_stats(struct nicvf *nic, int sq_idx)
{
	struct snd_queue *sq;

#define GET_SQ_STATS(reg) \
	nicvf_reg_read(nic, NIC_QSET_SQ_0_7_STAT_0_1 |\
			    (sq_idx << NIC_Q_NUM_SHIFT) | (reg << 3))

	sq = &nic->qs->sq[sq_idx];
	sq->stats.bytes = GET_SQ_STATS(RQ_SQ_STATS_OCTS);
	sq->stats.pkts = GET_SQ_STATS(RQ_SQ_STATS_PKTS);
}

/* Check for errors in the receive completion queue entry */
int nicvf_check_cqe_rx_errs(struct nicvf *nic,
			    struct cmp_queue *cq, struct cqe_rx_t *cqe_rx)
{
	struct nicvf_hw_stats *stats = &nic->hw_stats;
	struct nicvf_drv_stats *drv_stats = &nic->drv_stats;

	if (!cqe_rx->err_level && !cqe_rx->err_opcode) {
		drv_stats->rx_frames_ok++;
		return 0;
	}

	if (netif_msg_rx_err(nic))
		netdev_err(nic->netdev,
			   "%s: RX error CQE err_level 0x%x err_opcode 0x%x\n",
			   nic->netdev->name,
			   cqe_rx->err_level, cqe_rx->err_opcode);

	switch (cqe_rx->err_opcode) {
	case CQ_RX_ERROP_RE_PARTIAL:
		stats->rx_bgx_truncated_pkts++;
		break;
	case CQ_RX_ERROP_RE_JABBER:
		stats->rx_jabber_errs++;
		break;
	case CQ_RX_ERROP_RE_FCS:
		stats->rx_fcs_errs++;
		break;
	case CQ_RX_ERROP_RE_RX_CTL:
		stats->rx_bgx_errs++;
		break;
	case CQ_RX_ERROP_PREL2_ERR:
		stats->rx_prel2_errs++;
		break;
	case CQ_RX_ERROP_L2_MAL:
		stats->rx_l2_hdr_malformed++;
		break;
	case CQ_RX_ERROP_L2_OVERSIZE:
		stats->rx_oversize++;
		break;
	case CQ_RX_ERROP_L2_UNDERSIZE:
		stats->rx_undersize++;
		break;
	case CQ_RX_ERROP_L2_LENMISM:
		stats->rx_l2_len_mismatch++;
		break;
	case CQ_RX_ERROP_L2_PCLP:
		stats->rx_l2_pclp++;
		break;
	case CQ_RX_ERROP_IP_NOT:
		stats->rx_ip_ver_errs++;
		break;
	case CQ_RX_ERROP_IP_CSUM_ERR:
		stats->rx_ip_csum_errs++;
		break;
	case CQ_RX_ERROP_IP_MAL:
		stats->rx_ip_hdr_malformed++;
		break;
	case CQ_RX_ERROP_IP_MALD:
		stats->rx_ip_payload_malformed++;
		break;
	case CQ_RX_ERROP_IP_HOP:
		stats->rx_ip_ttl_errs++;
		break;
	case CQ_RX_ERROP_L3_PCLP:
		stats->rx_l3_pclp++;
		break;
	case CQ_RX_ERROP_L4_MAL:
		stats->rx_l4_malformed++;
		break;
	case CQ_RX_ERROP_L4_CHK:
		stats->rx_l4_csum_errs++;
		break;
	case CQ_RX_ERROP_UDP_LEN:
		stats->rx_udp_len_errs++;
		break;
	case CQ_RX_ERROP_L4_PORT:
		stats->rx_l4_port_errs++;
		break;
	case CQ_RX_ERROP_TCP_FLAG:
		stats->rx_tcp_flag_errs++;
		break;
	case CQ_RX_ERROP_TCP_OFFSET:
		stats->rx_tcp_offset_errs++;
		break;
	case CQ_RX_ERROP_L4_PCLP:
		stats->rx_l4_pclp++;
		break;
	case CQ_RX_ERROP_RBDR_TRUNC:
		stats->rx_truncated_pkts++;
		break;
	}

	return 1;
}

/* Check for errors in the send completion queue entry */
int nicvf_check_cqe_tx_errs(struct nicvf *nic,
			    struct cmp_queue *cq, struct cqe_send_t *cqe_tx)
{
	struct cmp_queue_stats *stats = &cq->stats;

	switch (cqe_tx->send_status) {
	case CQ_TX_ERROP_GOOD:
		stats->tx.good++;
		return 0;
	case CQ_TX_ERROP_DESC_FAULT:
		stats->tx.desc_fault++;
		break;
	case CQ_TX_ERROP_HDR_CONS_ERR:
		stats->tx.hdr_cons_err++;
		break;
	case CQ_TX_ERROP_SUBDC_ERR:
		stats->tx.subdesc_err++;
		break;
	case CQ_TX_ERROP_IMM_SIZE_OFLOW:
		stats->tx.imm_size_oflow++;
		break;
	case CQ_TX_ERROP_DATA_SEQUENCE_ERR:
		stats->tx.data_seq_err++;
		break;
	case CQ_TX_ERROP_MEM_SEQUENCE_ERR:
		stats->tx.mem_seq_err++;
		break;
	case CQ_TX_ERROP_LOCK_VIOL:
		stats->tx.lock_viol++;
		break;
	case CQ_TX_ERROP_DATA_FAULT:
		stats->tx.data_fault++;
		break;
	case CQ_TX_ERROP_TSTMP_CONFLICT:
		stats->tx.tstmp_conflict++;
		break;
	case CQ_TX_ERROP_TSTMP_TIMEOUT:
		stats->tx.tstmp_timeout++;
		break;
	case CQ_TX_ERROP_MEM_FAULT:
		stats->tx.mem_fault++;
		break;
	case CQ_TX_ERROP_CK_OVERLAP:
		stats->tx.csum_overlap++;
		break;
	case CQ_TX_ERROP_CK_OFLOW:
		stats->tx.csum_overflow++;
		break;
	}

	return 1;
}