/*
 * Copyright (C) 2015 Cavium, Inc.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of version 2 of the GNU General Public License
 * as published by the Free Software Foundation.
 */

#include <linux/pci.h>
#include <linux/netdevice.h>
#include <linux/ip.h>
#include <linux/etherdevice.h>
#include <linux/iommu.h>
#include <net/ip.h>
#include <net/tso.h>

#include "nic_reg.h"
#include "nic.h"
#include "q_struct.h"
#include "nicvf_queues.h"

#define NICVF_PAGE_ORDER ((PAGE_SIZE <= 4096) ? PAGE_ALLOC_COSTLY_ORDER : 0)

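/* Worked example for NICVF_PAGE_ORDER (illustrative; assumes the mainline
 * default of PAGE_ALLOC_COSTLY_ORDER = 3): with 4 KB pages each receive
 * allocation below is an order-3 (32 KB) compound page, from which several
 * RCV_FRAG_LEN sized buffers are carved via rb_page_offset. With 64 KB
 * pages an order-0 page is already large enough, so the order is 0.
 */
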
static inline u64 nicvf_iova_to_phys(struct nicvf *nic, dma_addr_t dma_addr)
{
	/* Translation is installed only when IOMMU is present */
	if (nic->iommu_domain)
		return iommu_iova_to_phys(nic->iommu_domain, dma_addr);
	return dma_addr;
}

static void nicvf_get_page(struct nicvf *nic)
{
	if (!nic->rb_pageref || !nic->rb_page)
		return;

	page_ref_add(nic->rb_page, nic->rb_pageref);
	nic->rb_pageref = 0;
}

/* Poll a register for a specific value */
static int nicvf_poll_reg(struct nicvf *nic, int qidx,
			  u64 reg, int bit_pos, int bits, int val)
{
	u64 bit_mask;
	u64 reg_val;
	int timeout = 10;

	bit_mask = (1ULL << bits) - 1;
	bit_mask = (bit_mask << bit_pos);

	while (timeout) {
		reg_val = nicvf_queue_reg_read(nic, reg, qidx);
		if (((reg_val & bit_mask) >> bit_pos) == val)
			return 0;
		usleep_range(1000, 2000);
		timeout--;
	}
	netdev_err(nic->netdev, "Poll on reg 0x%llx failed\n", reg);
	return 1;
}
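
/* Example of the mask arithmetic above: the RBDR reclaim path polls
 * NIC_QSET_RBDR_0_1_STATUS0 with bit_pos = 62 and bits = 2, giving
 * bit_mask = ((1ULL << 2) - 1) << 62 = 0xC000000000000000, i.e. the
 * two FIFO-state bits at the top of the register.
 */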

/* Allocate memory for a queue's descriptors */
static int nicvf_alloc_q_desc_mem(struct nicvf *nic, struct q_desc_mem *dmem,
				  int q_len, int desc_size, int align_bytes)
{
	dmem->q_len = q_len;
	dmem->size = (desc_size * q_len) + align_bytes;
	/* Save address, need it while freeing */
	dmem->unalign_base = dma_zalloc_coherent(&nic->pdev->dev, dmem->size,
						 &dmem->dma, GFP_KERNEL);
	if (!dmem->unalign_base)
		return -ENOMEM;

	/* Align memory address for 'align_bytes' */
	dmem->phys_base = NICVF_ALIGNED_ADDR((u64)dmem->dma, align_bytes);
	dmem->base = dmem->unalign_base + (dmem->phys_base - dmem->dma);
	return 0;
}
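
/* Illustration of the alignment above (hypothetical addresses): if the
 * coherent allocation returns dma = 0x...1100 and align_bytes = 512,
 * NICVF_ALIGNED_ADDR() rounds the DMA address up to phys_base = 0x...1200.
 * The CPU pointer is then advanced by the same delta (0x100) so that
 * 'base' and 'phys_base' always refer to the same descriptor memory; the
 * extra align_bytes added to dmem->size guarantees room for this shift.
 */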

/* Free queue's descriptor memory */
static void nicvf_free_q_desc_mem(struct nicvf *nic, struct q_desc_mem *dmem)
{
	if (!dmem)
		return;

	dma_free_coherent(&nic->pdev->dev, dmem->size,
			  dmem->unalign_base, dmem->dma);
	dmem->unalign_base = NULL;
	dmem->base = NULL;
}

/* Allocate buffer for packet reception
 * HW returns memory address where packet is DMA'ed but not a pointer
 * into RBDR ring, so save buffer address at the start of fragment and
 * align the start address to a cache aligned address
 */
static inline int nicvf_alloc_rcv_buffer(struct nicvf *nic, gfp_t gfp,
					 u32 buf_len, u64 **rbuf)
{
	int order = NICVF_PAGE_ORDER;

	/* Check if request can be accommodated in previously allocated page */
	if (nic->rb_page &&
	    ((nic->rb_page_offset + buf_len) < (PAGE_SIZE << order))) {
		nic->rb_pageref++;
		goto ret;
	}

	nicvf_get_page(nic);

	/* Allocate a new page */
	nic->rb_page = alloc_pages(gfp | __GFP_COMP | __GFP_NOWARN,
				   order);
	if (!nic->rb_page) {
		this_cpu_inc(nic->pnicvf->drv_stats->rcv_buffer_alloc_failures);
		return -ENOMEM;
	}
	nic->rb_page_offset = 0;
ret:
	/* HW will ensure data coherency, CPU sync not required */
	*rbuf = (u64 *)((u64)dma_map_page_attrs(&nic->pdev->dev, nic->rb_page,
						nic->rb_page_offset, buf_len,
						DMA_FROM_DEVICE,
						DMA_ATTR_SKIP_CPU_SYNC));
	if (dma_mapping_error(&nic->pdev->dev, (dma_addr_t)*rbuf)) {
		if (!nic->rb_page_offset)
			__free_pages(nic->rb_page, order);
		nic->rb_page = NULL;
		return -ENOMEM;
	}
	nic->rb_page_offset += buf_len;

	return 0;
}
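
/* Buffer carving / refcount scheme (as implemented above): several buf_len
 * sized buffers are carved out of one compound page, and rb_pageref counts
 * how many were handed out since the last nicvf_get_page() call. That call
 * then adds all of those references to the page in a single page_ref_add(),
 * which is cheaper than taking one page reference per carved buffer.
 */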

/* Build skb around receive buffer */
static struct sk_buff *nicvf_rb_ptr_to_skb(struct nicvf *nic,
					   u64 rb_ptr, int len)
{
	void *data;
	struct sk_buff *skb;

	data = phys_to_virt(rb_ptr);

	/* Now build an skb to give to stack */
	skb = build_skb(data, RCV_FRAG_LEN);
	if (!skb) {
		put_page(virt_to_page(data));
		return NULL;
	}

	prefetch(skb->data);
	return skb;
}

/* Allocate RBDR ring and populate receive buffers */
static int nicvf_init_rbdr(struct nicvf *nic, struct rbdr *rbdr,
			   int ring_len, int buf_size)
{
	int idx;
	u64 *rbuf;
	struct rbdr_entry_t *desc;
	int err;

	err = nicvf_alloc_q_desc_mem(nic, &rbdr->dmem, ring_len,
				     sizeof(struct rbdr_entry_t),
				     NICVF_RCV_BUF_ALIGN_BYTES);
	if (err)
		return err;

	rbdr->desc = rbdr->dmem.base;
	/* Buffer size has to be in multiples of 128 bytes */
	rbdr->dma_size = buf_size;
	rbdr->enable = true;
	rbdr->thresh = RBDR_THRESH;
	rbdr->head = 0;
	rbdr->tail = 0;

	nic->rb_page = NULL;
	for (idx = 0; idx < ring_len; idx++) {
		err = nicvf_alloc_rcv_buffer(nic, GFP_KERNEL, RCV_FRAG_LEN,
					     &rbuf);
		if (err) {
			/* To free already allocated and mapped ones */
			rbdr->tail = idx - 1;
			return err;
		}

		desc = GET_RBDR_DESC(rbdr, idx);
		desc->buf_addr = (u64)rbuf >> NICVF_RCV_BUF_ALIGN;
	}

	nicvf_get_page(nic);

	return 0;
}

/* Free RBDR ring and its receive buffers */
static void nicvf_free_rbdr(struct nicvf *nic, struct rbdr *rbdr)
{
	int head, tail;
	u64 buf_addr, phys_addr;
	struct rbdr_entry_t *desc;

	if (!rbdr)
		return;

	rbdr->enable = false;
	if (!rbdr->dmem.base)
		return;

	head = rbdr->head;
	tail = rbdr->tail;

	/* Release page references */
	while (head != tail) {
		desc = GET_RBDR_DESC(rbdr, head);
		buf_addr = ((u64)desc->buf_addr) << NICVF_RCV_BUF_ALIGN;
		phys_addr = nicvf_iova_to_phys(nic, buf_addr);
		dma_unmap_page_attrs(&nic->pdev->dev, buf_addr, RCV_FRAG_LEN,
				     DMA_FROM_DEVICE, DMA_ATTR_SKIP_CPU_SYNC);
		if (phys_addr)
			put_page(virt_to_page(phys_to_virt(phys_addr)));
		head++;
		head &= (rbdr->dmem.q_len - 1);
	}
	/* Release buffer of tail desc */
	desc = GET_RBDR_DESC(rbdr, tail);
	buf_addr = ((u64)desc->buf_addr) << NICVF_RCV_BUF_ALIGN;
	phys_addr = nicvf_iova_to_phys(nic, buf_addr);
	dma_unmap_page_attrs(&nic->pdev->dev, buf_addr, RCV_FRAG_LEN,
			     DMA_FROM_DEVICE, DMA_ATTR_SKIP_CPU_SYNC);
	if (phys_addr)
		put_page(virt_to_page(phys_to_virt(phys_addr)));

	/* Free RBDR ring */
	nicvf_free_q_desc_mem(nic, &rbdr->dmem);
}

/* Refill receive buffer descriptors with new buffers */
static void nicvf_refill_rbdr(struct nicvf *nic, gfp_t gfp)
{
	struct queue_set *qs = nic->qs;
	int rbdr_idx = qs->rbdr_cnt;
	int tail, qcount;
	int refill_rb_cnt;
	struct rbdr *rbdr;
	struct rbdr_entry_t *desc;
	u64 *rbuf;
	int new_rb = 0;

refill:
	if (!rbdr_idx)
		return;
	rbdr_idx--;
	rbdr = &qs->rbdr[rbdr_idx];
	/* Check if it's enabled */
	if (!rbdr->enable)
		goto next_rbdr;

	/* Get the number of descriptors to be refilled */
	qcount = nicvf_queue_reg_read(nic, NIC_QSET_RBDR_0_1_STATUS0, rbdr_idx);
	qcount &= 0x7FFFF;
	/* Doorbell can be rung with a max of ring size minus 1 */
	if (qcount >= (qs->rbdr_len - 1))
		goto next_rbdr;
	else
		refill_rb_cnt = qs->rbdr_len - qcount - 1;

	/* Start filling descs from tail */
	tail = nicvf_queue_reg_read(nic, NIC_QSET_RBDR_0_1_TAIL, rbdr_idx) >> 3;
	while (refill_rb_cnt) {
		tail++;
		tail &= (rbdr->dmem.q_len - 1);

		if (nicvf_alloc_rcv_buffer(nic, gfp, RCV_FRAG_LEN, &rbuf))
			break;

		desc = GET_RBDR_DESC(rbdr, tail);
		desc->buf_addr = (u64)rbuf >> NICVF_RCV_BUF_ALIGN;
		refill_rb_cnt--;
		new_rb++;
	}

	nicvf_get_page(nic);

	/* make sure all memory stores are done before ringing doorbell */
	smp_wmb();

	/* Check if buffer allocation failed */
	if (refill_rb_cnt)
		nic->rb_alloc_fail = true;
	else
		nic->rb_alloc_fail = false;

	/* Notify HW */
	nicvf_queue_reg_write(nic, NIC_QSET_RBDR_0_1_DOOR,
			      rbdr_idx, new_rb);
next_rbdr:
	/* Re-enable RBDR interrupts only if buffer allocation succeeded */
	if (!nic->rb_alloc_fail && rbdr->enable &&
	    netif_running(nic->pnicvf->netdev))
		nicvf_enable_intr(nic, NICVF_INTR_RBDR, rbdr_idx);

	if (rbdr_idx)
		goto refill;
}
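
/* Refill arithmetic, worked through (illustrative numbers): STATUS0 bits
 * 0..18 report how many descriptors the HW still owns. With a ring of
 * qs->rbdr_len = 8192 and qcount = 8000, the driver may post at most
 * 8192 - 8000 - 1 = 191 new buffers, since the doorbell may never be
 * rung with more than ring size minus 1 entries.
 */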

/* Allocate rcv buffers in non-atomic mode for a better chance of success */
void nicvf_rbdr_work(struct work_struct *work)
{
	struct nicvf *nic = container_of(work, struct nicvf, rbdr_work.work);

	nicvf_refill_rbdr(nic, GFP_KERNEL);
	if (nic->rb_alloc_fail)
		schedule_delayed_work(&nic->rbdr_work, msecs_to_jiffies(10));
	else
		nic->rb_work_scheduled = false;
}

/* In softirq context, allocate rcv buffers in atomic mode */
void nicvf_rbdr_task(unsigned long data)
{
	struct nicvf *nic = (struct nicvf *)data;

	nicvf_refill_rbdr(nic, GFP_ATOMIC);
	if (nic->rb_alloc_fail) {
		nic->rb_work_scheduled = true;
		schedule_delayed_work(&nic->rbdr_work, msecs_to_jiffies(10));
	}
}

/* Initialize completion queue */
static int nicvf_init_cmp_queue(struct nicvf *nic,
				struct cmp_queue *cq, int q_len)
{
	int err;

	err = nicvf_alloc_q_desc_mem(nic, &cq->dmem, q_len, CMP_QUEUE_DESC_SIZE,
				     NICVF_CQ_BASE_ALIGN_BYTES);
	if (err)
		return err;

	cq->desc = cq->dmem.base;
	cq->thresh = pass1_silicon(nic->pdev) ? 0 : CMP_QUEUE_CQE_THRESH;
	nic->cq_coalesce_usecs = (CMP_QUEUE_TIMER_THRESH * 0.05) - 1;

	return 0;
}
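
/* The coalescing conversion above appears to treat one CQ timer tick as
 * 0.05 us (50 ns): usecs = ticks * 0.05 - 1. E.g. a hypothetical
 * CMP_QUEUE_TIMER_THRESH of 80 ticks would report 80 * 0.05 - 1 = 3 us
 * (illustrative value, inferred from the formula).
 */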

static void nicvf_free_cmp_queue(struct nicvf *nic, struct cmp_queue *cq)
{
	if (!cq)
		return;
	if (!cq->dmem.base)
		return;

	nicvf_free_q_desc_mem(nic, &cq->dmem);
}

/* Initialize transmit queue */
static int nicvf_init_snd_queue(struct nicvf *nic,
				struct snd_queue *sq, int q_len)
{
	int err;

	err = nicvf_alloc_q_desc_mem(nic, &sq->dmem, q_len, SND_QUEUE_DESC_SIZE,
				     NICVF_SQ_BASE_ALIGN_BYTES);
	if (err)
		return err;

	sq->desc = sq->dmem.base;
	sq->skbuff = kcalloc(q_len, sizeof(u64), GFP_KERNEL);
	if (!sq->skbuff)
		return -ENOMEM;
	sq->head = 0;
	sq->tail = 0;
	atomic_set(&sq->free_cnt, q_len - 1);
	sq->thresh = SND_QUEUE_THRESH;

	/* Preallocate memory for TSO segment's header */
	sq->tso_hdrs = dma_alloc_coherent(&nic->pdev->dev,
					  q_len * TSO_HEADER_SIZE,
					  &sq->tso_hdrs_phys, GFP_KERNEL);
	if (!sq->tso_hdrs)
		return -ENOMEM;

	return 0;
}

void nicvf_unmap_sndq_buffers(struct nicvf *nic, struct snd_queue *sq,
			      int hdr_sqe, u8 subdesc_cnt)
{
	u8 idx;
	struct sq_gather_subdesc *gather;

	/* Unmap DMA mapped skb data buffers */
	for (idx = 0; idx < subdesc_cnt; idx++) {
		hdr_sqe++;
		hdr_sqe &= (sq->dmem.q_len - 1);
		gather = (struct sq_gather_subdesc *)GET_SQ_DESC(sq, hdr_sqe);
		/* HW will ensure data coherency, CPU sync not required */
		dma_unmap_page_attrs(&nic->pdev->dev, gather->addr,
				     gather->size, DMA_TO_DEVICE,
				     DMA_ATTR_SKIP_CPU_SYNC);
	}
}
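
/* Note on the walk above: hdr_sqe is the index of the packet's HEADER
 * subdescriptor, and the pre-increment means the loop visits the
 * subdesc_cnt GATHER subdescriptors that follow it in the ring (with
 * wrap-around), unmapping one DMA buffer per gather entry.
 */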

static void nicvf_free_snd_queue(struct nicvf *nic, struct snd_queue *sq)
{
	struct sk_buff *skb;
	struct sq_hdr_subdesc *hdr;
	struct sq_hdr_subdesc *tso_sqe;

	if (!sq)
		return;
	if (!sq->dmem.base)
		return;

	if (sq->tso_hdrs)
		dma_free_coherent(&nic->pdev->dev,
				  sq->dmem.q_len * TSO_HEADER_SIZE,
				  sq->tso_hdrs, sq->tso_hdrs_phys);

	/* Free pending skbs in the queue */
	smp_rmb();
	while (sq->head != sq->tail) {
		skb = (struct sk_buff *)sq->skbuff[sq->head];
		if (!skb)
			goto next;
		hdr = (struct sq_hdr_subdesc *)GET_SQ_DESC(sq, sq->head);
		/* Check for dummy descriptor used for HW TSO offload on 88xx */
		if (hdr->dont_send) {
			/* Get actual TSO descriptors and unmap them */
			tso_sqe =
			 (struct sq_hdr_subdesc *)GET_SQ_DESC(sq, hdr->rsvd2);
			nicvf_unmap_sndq_buffers(nic, sq, hdr->rsvd2,
						 tso_sqe->subdesc_cnt);
		} else {
			nicvf_unmap_sndq_buffers(nic, sq, sq->head,
						 hdr->subdesc_cnt);
		}
		dev_kfree_skb_any(skb);
next:
		sq->head++;
		sq->head &= (sq->dmem.q_len - 1);
	}
	kfree(sq->skbuff);
	nicvf_free_q_desc_mem(nic, &sq->dmem);
}

static void nicvf_reclaim_snd_queue(struct nicvf *nic,
				    struct queue_set *qs, int qidx)
{
	/* Disable send queue */
	nicvf_queue_reg_write(nic, NIC_QSET_SQ_0_7_CFG, qidx, 0);
	/* Check if SQ is stopped */
	if (nicvf_poll_reg(nic, qidx, NIC_QSET_SQ_0_7_STATUS, 21, 1, 0x01))
		return;
	/* Reset send queue */
	nicvf_queue_reg_write(nic, NIC_QSET_SQ_0_7_CFG, qidx, NICVF_SQ_RESET);
}

static void nicvf_reclaim_rcv_queue(struct nicvf *nic,
				    struct queue_set *qs, int qidx)
{
	union nic_mbx mbx = {};

	/* Make sure all packets in the pipeline are written back into mem */
	mbx.msg.msg = NIC_MBOX_MSG_RQ_SW_SYNC;
	nicvf_send_msg_to_pf(nic, &mbx);
}

static void nicvf_reclaim_cmp_queue(struct nicvf *nic,
				    struct queue_set *qs, int qidx)
{
	/* Disable timer threshold (doesn't get reset upon CQ reset) */
	nicvf_queue_reg_write(nic, NIC_QSET_CQ_0_7_CFG2, qidx, 0);
	/* Disable completion queue */
	nicvf_queue_reg_write(nic, NIC_QSET_CQ_0_7_CFG, qidx, 0);
	/* Reset completion queue */
	nicvf_queue_reg_write(nic, NIC_QSET_CQ_0_7_CFG, qidx, NICVF_CQ_RESET);
}

static void nicvf_reclaim_rbdr(struct nicvf *nic,
			       struct rbdr *rbdr, int qidx)
{
	u64 tmp, fifo_state;
	int timeout = 10;

	/* Save head and tail pointers for freeing up buffers */
	rbdr->head = nicvf_queue_reg_read(nic,
					  NIC_QSET_RBDR_0_1_HEAD,
					  qidx) >> 3;
	rbdr->tail = nicvf_queue_reg_read(nic,
					  NIC_QSET_RBDR_0_1_TAIL,
					  qidx) >> 3;

	/* If RBDR FIFO is in 'FAIL' state then do a reset first
	 * before reclaiming.
	 */
	fifo_state = nicvf_queue_reg_read(nic, NIC_QSET_RBDR_0_1_STATUS0, qidx);
	if (((fifo_state >> 62) & 0x03) == 0x3)
		nicvf_queue_reg_write(nic, NIC_QSET_RBDR_0_1_CFG,
				      qidx, NICVF_RBDR_RESET);

	/* Disable RBDR */
	nicvf_queue_reg_write(nic, NIC_QSET_RBDR_0_1_CFG, qidx, 0);
	if (nicvf_poll_reg(nic, qidx, NIC_QSET_RBDR_0_1_STATUS0, 62, 2, 0x00))
		return;
	while (1) {
		tmp = nicvf_queue_reg_read(nic,
					   NIC_QSET_RBDR_0_1_PREFETCH_STATUS,
					   qidx);
		if ((tmp & 0xFFFFFFFF) == ((tmp >> 32) & 0xFFFFFFFF))
			break;
		usleep_range(1000, 2000);
		timeout--;
		if (!timeout) {
			netdev_err(nic->netdev,
				   "Failed polling on prefetch status\n");
			return;
		}
	}
	nicvf_queue_reg_write(nic, NIC_QSET_RBDR_0_1_CFG,
			      qidx, NICVF_RBDR_RESET);

	if (nicvf_poll_reg(nic, qidx, NIC_QSET_RBDR_0_1_STATUS0, 62, 2, 0x02))
		return;
	nicvf_queue_reg_write(nic, NIC_QSET_RBDR_0_1_CFG, qidx, 0x00);
	if (nicvf_poll_reg(nic, qidx, NIC_QSET_RBDR_0_1_STATUS0, 62, 2, 0x00))
		return;
}
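
/* The prefetch-status poll above compares the low and high 32-bit halves
 * of NIC_QSET_RBDR_0_1_PREFETCH_STATUS; when the two counts match, the
 * RBDR prefetch FIFO appears to have drained and it is safe to reset the
 * ring (register layout inferred from this check).
 */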

void nicvf_config_vlan_stripping(struct nicvf *nic, netdev_features_t features)
{
	u64 rq_cfg;
	int sqs;

	rq_cfg = nicvf_queue_reg_read(nic, NIC_QSET_RQ_GEN_CFG, 0);

	/* Enable first VLAN stripping */
	if (features & NETIF_F_HW_VLAN_CTAG_RX)
		rq_cfg |= (1ULL << 25);
	else
		rq_cfg &= ~(1ULL << 25);
	nicvf_queue_reg_write(nic, NIC_QSET_RQ_GEN_CFG, 0, rq_cfg);

	/* Configure Secondary Qsets, if any */
	for (sqs = 0; sqs < nic->sqs_count; sqs++)
		if (nic->snicvf[sqs])
			nicvf_queue_reg_write(nic->snicvf[sqs],
					      NIC_QSET_RQ_GEN_CFG, 0, rq_cfg);
}

static void nicvf_reset_rcv_queue_stats(struct nicvf *nic)
{
	union nic_mbx mbx = {};

	/* Reset all RQ/SQ and VF stats */
	mbx.reset_stat.msg = NIC_MBOX_MSG_RESET_STAT_COUNTER;
	mbx.reset_stat.rx_stat_mask = 0x3FFF;
	mbx.reset_stat.tx_stat_mask = 0x1F;
	mbx.reset_stat.rq_stat_mask = 0xFFFF;
	mbx.reset_stat.sq_stat_mask = 0xFFFF;
	nicvf_send_msg_to_pf(nic, &mbx);
}

/* Configures receive queue */
static void nicvf_rcv_queue_config(struct nicvf *nic, struct queue_set *qs,
				   int qidx, bool enable)
{
	union nic_mbx mbx = {};
	struct rcv_queue *rq;
	struct rq_cfg rq_cfg;

	rq = &qs->rq[qidx];
	rq->enable = enable;

	/* Disable receive queue */
	nicvf_queue_reg_write(nic, NIC_QSET_RQ_0_7_CFG, qidx, 0);

	if (!rq->enable) {
		nicvf_reclaim_rcv_queue(nic, qs, qidx);
		return;
	}

	rq->cq_qs = qs->vnic_id;
	rq->cq_idx = qidx;
	rq->start_rbdr_qs = qs->vnic_id;
	rq->start_qs_rbdr_idx = qs->rbdr_cnt - 1;
	rq->cont_rbdr_qs = qs->vnic_id;
	rq->cont_qs_rbdr_idx = qs->rbdr_cnt - 1;
	/* All writes of RBDR data to be loaded into L2 cache as well */
	rq->caching = 1;

	/* Send a mailbox msg to PF to config RQ */
	mbx.rq.msg = NIC_MBOX_MSG_RQ_CFG;
	mbx.rq.qs_num = qs->vnic_id;
	mbx.rq.rq_num = qidx;
	mbx.rq.cfg = (rq->caching << 26) | (rq->cq_qs << 19) |
		     (rq->cq_idx << 16) | (rq->cont_rbdr_qs << 9) |
		     (rq->cont_qs_rbdr_idx << 8) |
		     (rq->start_rbdr_qs << 1) | (rq->start_qs_rbdr_idx);
	nicvf_send_msg_to_pf(nic, &mbx);

	mbx.rq.msg = NIC_MBOX_MSG_RQ_BP_CFG;
	mbx.rq.cfg = BIT_ULL(63) | BIT_ULL(62) |
		     (RQ_PASS_RBDR_LVL << 16) | (RQ_PASS_CQ_LVL << 8) |
		     (qs->vnic_id << 0);
	nicvf_send_msg_to_pf(nic, &mbx);

	/* RQ drop config
	 * Enable CQ drop to reserve sufficient CQEs for all tx packets
	 */
	mbx.rq.msg = NIC_MBOX_MSG_RQ_DROP_CFG;
	mbx.rq.cfg = BIT_ULL(63) | BIT_ULL(62) |
		     (RQ_PASS_RBDR_LVL << 40) | (RQ_DROP_RBDR_LVL << 32) |
		     (RQ_PASS_CQ_LVL << 16) | (RQ_DROP_CQ_LVL << 8);
	nicvf_send_msg_to_pf(nic, &mbx);

	if (!nic->sqs_mode && (qidx == 0)) {
		/* Enable checking L3/L4 length and TCP/UDP checksums */
		nicvf_queue_reg_write(nic, NIC_QSET_RQ_GEN_CFG, 0,
				      (BIT(24) | BIT(23) | BIT(21)));
		nicvf_config_vlan_stripping(nic, nic->netdev->features);
	}

	/* Enable Receive queue */
	memset(&rq_cfg, 0, sizeof(struct rq_cfg));
	rq_cfg.ena = 1;
	rq_cfg.tcp_ena = 0;
	nicvf_queue_reg_write(nic, NIC_QSET_RQ_0_7_CFG, qidx, *(u64 *)&rq_cfg);
}

/* Configures completion queue */
void nicvf_cmp_queue_config(struct nicvf *nic, struct queue_set *qs,
			    int qidx, bool enable)
{
	struct cmp_queue *cq;
	struct cq_cfg cq_cfg;

	cq = &qs->cq[qidx];
	cq->enable = enable;

	if (!cq->enable) {
		nicvf_reclaim_cmp_queue(nic, qs, qidx);
		return;
	}

	/* Reset completion queue */
	nicvf_queue_reg_write(nic, NIC_QSET_CQ_0_7_CFG, qidx, NICVF_CQ_RESET);

	if (!cq->enable)
		return;

	spin_lock_init(&cq->lock);
	/* Set completion queue base address */
	nicvf_queue_reg_write(nic, NIC_QSET_CQ_0_7_BASE,
			      qidx, (u64)(cq->dmem.phys_base));

	/* Enable Completion queue */
	memset(&cq_cfg, 0, sizeof(struct cq_cfg));
	cq_cfg.ena = 1;
	cq_cfg.reset = 0;
	cq_cfg.caching = 0;
	cq_cfg.qsize = ilog2(qs->cq_len >> 10);
	cq_cfg.avg_con = 0;
	nicvf_queue_reg_write(nic, NIC_QSET_CQ_0_7_CFG, qidx, *(u64 *)&cq_cfg);

	/* Set threshold value for interrupt generation */
	nicvf_queue_reg_write(nic, NIC_QSET_CQ_0_7_THRESH, qidx, cq->thresh);
	nicvf_queue_reg_write(nic, NIC_QSET_CQ_0_7_CFG2,
			      qidx, CMP_QUEUE_TIMER_THRESH);
}
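
/* qsize encoding, worked through: the HW takes log2 of the queue length in
 * units of 1K entries. E.g. for qs->cq_len = 4096 (illustrative length),
 * qsize = ilog2(4096 >> 10) = ilog2(4) = 2. The SQ below uses the same
 * encoding for sq_cfg.qsize.
 */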

/* Configures transmit queue */
static void nicvf_snd_queue_config(struct nicvf *nic, struct queue_set *qs,
				   int qidx, bool enable)
{
	union nic_mbx mbx = {};
	struct snd_queue *sq;
	struct sq_cfg sq_cfg;

	sq = &qs->sq[qidx];
	sq->enable = enable;

	if (!sq->enable) {
		nicvf_reclaim_snd_queue(nic, qs, qidx);
		return;
	}

	/* Reset send queue */
	nicvf_queue_reg_write(nic, NIC_QSET_SQ_0_7_CFG, qidx, NICVF_SQ_RESET);

	sq->cq_qs = qs->vnic_id;
	sq->cq_idx = qidx;

	/* Send a mailbox msg to PF to config SQ */
	mbx.sq.msg = NIC_MBOX_MSG_SQ_CFG;
	mbx.sq.qs_num = qs->vnic_id;
	mbx.sq.sq_num = qidx;
	mbx.sq.sqs_mode = nic->sqs_mode;
	mbx.sq.cfg = (sq->cq_qs << 3) | sq->cq_idx;
	nicvf_send_msg_to_pf(nic, &mbx);

	/* Set queue base address */
	nicvf_queue_reg_write(nic, NIC_QSET_SQ_0_7_BASE,
			      qidx, (u64)(sq->dmem.phys_base));

	/* Enable send queue & set queue size */
	memset(&sq_cfg, 0, sizeof(struct sq_cfg));
	sq_cfg.ena = 1;
	sq_cfg.reset = 0;
	sq_cfg.ldwb = 0;
	sq_cfg.qsize = ilog2(qs->sq_len >> 10);
	sq_cfg.tstmp_bgx_intf = 0;
	/* CQ's level at which HW will stop processing SQEs to avoid
	 * transmitting a pkt with no space in CQ to post CQE_TX.
	 */
	sq_cfg.cq_limit = (CMP_QUEUE_PIPELINE_RSVD * 256) / qs->cq_len;
	nicvf_queue_reg_write(nic, NIC_QSET_SQ_0_7_CFG, qidx, *(u64 *)&sq_cfg);

	/* Set threshold value for interrupt generation */
	nicvf_queue_reg_write(nic, NIC_QSET_SQ_0_7_THRESH, qidx, sq->thresh);

	/* Set queue:cpu affinity for better load distribution */
	if (cpu_online(qidx)) {
		cpumask_set_cpu(qidx, &sq->affinity_mask);
		netif_set_xps_queue(nic->netdev,
				    &sq->affinity_mask, qidx);
	}
}
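
/* cq_limit arithmetic: the field is expressed in 256ths of the CQ size,
 * so cq_limit = CMP_QUEUE_PIPELINE_RSVD * 256 / cq_len reserves roughly
 * CMP_QUEUE_PIPELINE_RSVD CQE slots for in-flight packets:
 * level = cq_limit / 256 * cq_len ~= CMP_QUEUE_PIPELINE_RSVD
 * (interpretation inferred from the formula above).
 */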

/* Configures receive buffer descriptor ring */
static void nicvf_rbdr_config(struct nicvf *nic, struct queue_set *qs,
			      int qidx, bool enable)
{
	struct rbdr *rbdr;
	struct rbdr_cfg rbdr_cfg;

	rbdr = &qs->rbdr[qidx];
	nicvf_reclaim_rbdr(nic, rbdr, qidx);
	if (!enable)
		return;

	/* Set descriptor base address */
	nicvf_queue_reg_write(nic, NIC_QSET_RBDR_0_1_BASE,
			      qidx, (u64)(rbdr->dmem.phys_base));

	/* Enable RBDR & set queue size */
	/* Buffer size should be in multiples of 128 bytes */
	memset(&rbdr_cfg, 0, sizeof(struct rbdr_cfg));
	rbdr_cfg.ena = 1;
	rbdr_cfg.reset = 0;
	rbdr_cfg.ldwb = 0;
	rbdr_cfg.qsize = RBDR_SIZE;
	rbdr_cfg.avg_con = 0;
	rbdr_cfg.lines = rbdr->dma_size / 128;
	nicvf_queue_reg_write(nic, NIC_QSET_RBDR_0_1_CFG,
			      qidx, *(u64 *)&rbdr_cfg);

	/* Notify HW */
	nicvf_queue_reg_write(nic, NIC_QSET_RBDR_0_1_DOOR,
			      qidx, qs->rbdr_len - 1);

	/* Set threshold value for interrupt generation */
	nicvf_queue_reg_write(nic, NIC_QSET_RBDR_0_1_THRESH,
			      qidx, rbdr->thresh - 1);
}

/* Requests PF to assign and enable Qset */
void nicvf_qset_config(struct nicvf *nic, bool enable)
{
	union nic_mbx mbx = {};
	struct queue_set *qs = nic->qs;
	struct qs_cfg *qs_cfg;

	if (!qs) {
		netdev_warn(nic->netdev,
			    "Qset is still not allocated, don't init queues\n");
		return;
	}

	qs->enable = enable;
	qs->vnic_id = nic->vf_id;

	/* Send a mailbox msg to PF to config Qset */
	mbx.qs.msg = NIC_MBOX_MSG_QS_CFG;
	mbx.qs.num = qs->vnic_id;
	mbx.qs.sqs_count = nic->sqs_count;

	mbx.qs.cfg = 0;
	qs_cfg = (struct qs_cfg *)&mbx.qs.cfg;
	if (qs->enable) {
		qs_cfg->ena = 1;
#ifdef __BIG_ENDIAN
		qs_cfg->be = 1;
#endif
		qs_cfg->vnic = qs->vnic_id;
	}
	nicvf_send_msg_to_pf(nic, &mbx);
}

static void nicvf_free_resources(struct nicvf *nic)
{
	int qidx;
	struct queue_set *qs = nic->qs;

	/* Free receive buffer descriptor ring */
	for (qidx = 0; qidx < qs->rbdr_cnt; qidx++)
		nicvf_free_rbdr(nic, &qs->rbdr[qidx]);

	/* Free completion queue */
	for (qidx = 0; qidx < qs->cq_cnt; qidx++)
		nicvf_free_cmp_queue(nic, &qs->cq[qidx]);

	/* Free send queue */
	for (qidx = 0; qidx < qs->sq_cnt; qidx++)
		nicvf_free_snd_queue(nic, &qs->sq[qidx]);
}

static int nicvf_alloc_resources(struct nicvf *nic)
{
	int qidx;
	struct queue_set *qs = nic->qs;

	/* Alloc receive buffer descriptor ring */
	for (qidx = 0; qidx < qs->rbdr_cnt; qidx++) {
		if (nicvf_init_rbdr(nic, &qs->rbdr[qidx], qs->rbdr_len,
				    DMA_BUFFER_LEN))
			goto alloc_fail;
	}

	/* Alloc send queue */
	for (qidx = 0; qidx < qs->sq_cnt; qidx++) {
		if (nicvf_init_snd_queue(nic, &qs->sq[qidx], qs->sq_len))
			goto alloc_fail;
	}

	/* Alloc completion queue */
	for (qidx = 0; qidx < qs->cq_cnt; qidx++) {
		if (nicvf_init_cmp_queue(nic, &qs->cq[qidx], qs->cq_len))
			goto alloc_fail;
	}

	return 0;
alloc_fail:
	nicvf_free_resources(nic);
	return -ENOMEM;
}

int nicvf_set_qset_resources(struct nicvf *nic)
{
	struct queue_set *qs;

	qs = devm_kzalloc(&nic->pdev->dev, sizeof(*qs), GFP_KERNEL);
	if (!qs)
		return -ENOMEM;
	nic->qs = qs;

	/* Set count of each queue */
	qs->rbdr_cnt = DEFAULT_RBDR_CNT;
	qs->rq_cnt = min_t(u8, MAX_RCV_QUEUES_PER_QS, num_online_cpus());
	qs->sq_cnt = min_t(u8, MAX_SND_QUEUES_PER_QS, num_online_cpus());
	qs->cq_cnt = max_t(u8, qs->rq_cnt, qs->sq_cnt);

	/* Set queue lengths */
	qs->rbdr_len = RCV_BUF_COUNT;
	qs->sq_len = SND_QUEUE_LEN;
	qs->cq_len = CMP_QUEUE_LEN;

	nic->rx_queues = qs->rq_cnt;
	nic->tx_queues = qs->sq_cnt;

	return 0;
}

int nicvf_config_data_transfer(struct nicvf *nic, bool enable)
{
	bool disable = false;
	struct queue_set *qs = nic->qs;
	struct queue_set *pqs = nic->pnicvf->qs;
	int qidx;

	if (!qs)
		return 0;

	/* Take primary VF's queue lengths.
	 * This is needed to take queue lengths set from ethtool
	 * into consideration.
	 */
	if (nic->sqs_mode && pqs) {
		qs->cq_len = pqs->cq_len;
		qs->sq_len = pqs->sq_len;
	}

	if (enable) {
		if (nicvf_alloc_resources(nic))
			return -ENOMEM;

		for (qidx = 0; qidx < qs->sq_cnt; qidx++)
			nicvf_snd_queue_config(nic, qs, qidx, enable);
		for (qidx = 0; qidx < qs->cq_cnt; qidx++)
			nicvf_cmp_queue_config(nic, qs, qidx, enable);
		for (qidx = 0; qidx < qs->rbdr_cnt; qidx++)
			nicvf_rbdr_config(nic, qs, qidx, enable);
		for (qidx = 0; qidx < qs->rq_cnt; qidx++)
			nicvf_rcv_queue_config(nic, qs, qidx, enable);
	} else {
		for (qidx = 0; qidx < qs->rq_cnt; qidx++)
			nicvf_rcv_queue_config(nic, qs, qidx, disable);
		for (qidx = 0; qidx < qs->rbdr_cnt; qidx++)
			nicvf_rbdr_config(nic, qs, qidx, disable);
		for (qidx = 0; qidx < qs->sq_cnt; qidx++)
			nicvf_snd_queue_config(nic, qs, qidx, disable);
		for (qidx = 0; qidx < qs->cq_cnt; qidx++)
			nicvf_cmp_queue_config(nic, qs, qidx, disable);

		nicvf_free_resources(nic);
	}

	/* Reset RXQ's stats.
	 * SQ's stats will get reset automatically once SQ is reset.
	 */
	nicvf_reset_rcv_queue_stats(nic);

	return 0;
}

/* Get a free descriptor from SQ
 * returns descriptor number
 */
static inline int nicvf_get_sq_desc(struct snd_queue *sq, int desc_cnt)
{
	int qentry;

	qentry = sq->tail;
	atomic_sub(desc_cnt, &sq->free_cnt);
	sq->tail += desc_cnt;
	sq->tail &= (sq->dmem.q_len - 1);

	return qentry;
}

/* Rollback to previous tail pointer when descriptors not used */
static inline void nicvf_rollback_sq_desc(struct snd_queue *sq,
					  int qentry, int desc_cnt)
{
	sq->tail = qentry;
	atomic_add(desc_cnt, &sq->free_cnt);
}

/* Free descriptor back to SQ for future use */
void nicvf_put_sq_desc(struct snd_queue *sq, int desc_cnt)
{
	atomic_add(desc_cnt, &sq->free_cnt);
	sq->head += desc_cnt;
	sq->head &= (sq->dmem.q_len - 1);
}

static inline int nicvf_get_nxt_sqentry(struct snd_queue *sq, int qentry)
{
	qentry++;
	qentry &= (sq->dmem.q_len - 1);
	return qentry;
}

void nicvf_sq_enable(struct nicvf *nic, struct snd_queue *sq, int qidx)
{
	u64 sq_cfg;

	sq_cfg = nicvf_queue_reg_read(nic, NIC_QSET_SQ_0_7_CFG, qidx);
	sq_cfg |= NICVF_SQ_EN;
	nicvf_queue_reg_write(nic, NIC_QSET_SQ_0_7_CFG, qidx, sq_cfg);
	/* Ring doorbell so that H/W restarts processing SQEs */
	nicvf_queue_reg_write(nic, NIC_QSET_SQ_0_7_DOOR, qidx, 0);
}

void nicvf_sq_disable(struct nicvf *nic, int qidx)
{
	u64 sq_cfg;

	sq_cfg = nicvf_queue_reg_read(nic, NIC_QSET_SQ_0_7_CFG, qidx);
	sq_cfg &= ~NICVF_SQ_EN;
	nicvf_queue_reg_write(nic, NIC_QSET_SQ_0_7_CFG, qidx, sq_cfg);
}

void nicvf_sq_free_used_descs(struct net_device *netdev, struct snd_queue *sq,
			      int qidx)
{
	u64 head, tail;
	struct sk_buff *skb;
	struct nicvf *nic = netdev_priv(netdev);
	struct sq_hdr_subdesc *hdr;

	head = nicvf_queue_reg_read(nic, NIC_QSET_SQ_0_7_HEAD, qidx) >> 4;
	tail = nicvf_queue_reg_read(nic, NIC_QSET_SQ_0_7_TAIL, qidx) >> 4;
	while (sq->head != head) {
		hdr = (struct sq_hdr_subdesc *)GET_SQ_DESC(sq, sq->head);
		if (hdr->subdesc_type != SQ_DESC_TYPE_HEADER) {
			nicvf_put_sq_desc(sq, 1);
			continue;
		}
		skb = (struct sk_buff *)sq->skbuff[sq->head];
		if (skb)
			dev_kfree_skb_any(skb);
		atomic64_add(1, (atomic64_t *)&netdev->stats.tx_packets);
		atomic64_add(hdr->tot_len,
			     (atomic64_t *)&netdev->stats.tx_bytes);
		nicvf_put_sq_desc(sq, hdr->subdesc_cnt + 1);
	}
}

/* Calculate number of SQ subdescriptors needed to transmit all
 * segments of this TSO packet.
 * Taken from 'Tilera network driver' with a minor modification.
 */
static int nicvf_tso_count_subdescs(struct sk_buff *skb)
{
	struct skb_shared_info *sh = skb_shinfo(skb);
	unsigned int sh_len = skb_transport_offset(skb) + tcp_hdrlen(skb);
	unsigned int data_len = skb->len - sh_len;
	unsigned int p_len = sh->gso_size;
	long f_id = -1;	/* id of the current fragment */
	long f_size = skb_headlen(skb) - sh_len;  /* current fragment size */
	long f_used = 0;  /* bytes used from the current fragment */
	long n;  /* size of the current piece of payload */
	int num_edescs = 0;
	int segment;

	for (segment = 0; segment < sh->gso_segs; segment++) {
		unsigned int p_used = 0;

		/* One edesc for header and for each piece of the payload. */
		for (num_edescs++; p_used < p_len; num_edescs++) {
			/* Advance as needed. */
			while (f_used >= f_size) {
				f_id++;
				f_size = skb_frag_size(&sh->frags[f_id]);
				f_used = 0;
			}

			/* Use bytes from the current fragment. */
			n = p_len - p_used;
			if (n > f_size - f_used)
				n = f_size - f_used;
			f_used += n;
			p_used += n;
		}

		/* The last segment may be less than gso_size. */
		data_len -= p_len;
		if (data_len < p_len)
			p_len = data_len;
	}

	/* '+ gso_segs' for SQ_HDR_SUBDESCs for each segment */
	return num_edescs + sh->gso_segs;
}
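
/* Worked example (illustrative): for a TSO skb with gso_segs = 4 where each
 * segment's payload comes from a single fragment piece, the inner loop
 * counts 2 edescs per segment (one for the header, one for the payload
 * piece), so num_edescs = 8 and the function returns 8 + 4 = 12
 * subdescriptors in total.
 */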

#define POST_CQE_DESC_COUNT 2

/* Get the number of SQ descriptors needed to xmit this skb */
static int nicvf_sq_subdesc_required(struct nicvf *nic, struct sk_buff *skb)
{
	int subdesc_cnt = MIN_SQ_DESC_PER_PKT_XMIT;

	if (skb_shinfo(skb)->gso_size && !nic->hw_tso) {
		subdesc_cnt = nicvf_tso_count_subdescs(skb);
		return subdesc_cnt;
	}

	/* Dummy descriptors to get TSO pkt completion notification */
	if (nic->t88 && nic->hw_tso && skb_shinfo(skb)->gso_size)
		subdesc_cnt += POST_CQE_DESC_COUNT;

	if (skb_shinfo(skb)->nr_frags)
		subdesc_cnt += skb_shinfo(skb)->nr_frags;

	return subdesc_cnt;
}
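
/* Example (assuming MIN_SQ_DESC_PER_PKT_XMIT covers one HEADER plus one
 * GATHER subdescriptor): a non-TSO skb with 3 page fragments needs
 * MIN + 3 subdescriptors; an HW-TSO packet on 88xx additionally reserves
 * POST_CQE_DESC_COUNT (2) entries for the dummy HDR + IMMEDIATE pair
 * added by nicvf_sq_add_cqe_subdesc() further below.
 */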

/* Add SQ HEADER subdescriptor.
 * First subdescriptor for every send descriptor.
 */
static inline void
nicvf_sq_add_hdr_subdesc(struct nicvf *nic, struct snd_queue *sq, int qentry,
			 int subdesc_cnt, struct sk_buff *skb, int len)
{
	int proto;
	struct sq_hdr_subdesc *hdr;

	hdr = (struct sq_hdr_subdesc *)GET_SQ_DESC(sq, qentry);
	memset(hdr, 0, SND_QUEUE_DESC_SIZE);
	hdr->subdesc_type = SQ_DESC_TYPE_HEADER;

	if (nic->t88 && nic->hw_tso && skb_shinfo(skb)->gso_size) {
		/* post_cqe = 0, to avoid HW posting a CQE for every TSO
		 * segment transmitted on 88xx.
		 */
		hdr->subdesc_cnt = subdesc_cnt - POST_CQE_DESC_COUNT;
	} else {
		sq->skbuff[qentry] = (u64)skb;
		/* Enable notification via CQE after processing SQE */
		hdr->post_cqe = 1;
		/* No of subdescriptors following this */
		hdr->subdesc_cnt = subdesc_cnt;
	}
	hdr->tot_len = len;

	/* Offload checksum calculation to HW */
	if (skb->ip_summed == CHECKSUM_PARTIAL) {
		hdr->csum_l3 = 1; /* Enable IP csum calculation */
		hdr->l3_offset = skb_network_offset(skb);
		hdr->l4_offset = skb_transport_offset(skb);

		proto = ip_hdr(skb)->protocol;
		switch (proto) {
		case IPPROTO_TCP:
			hdr->csum_l4 = SEND_L4_CSUM_TCP;
			break;
		case IPPROTO_UDP:
			hdr->csum_l4 = SEND_L4_CSUM_UDP;
			break;
		case IPPROTO_SCTP:
			hdr->csum_l4 = SEND_L4_CSUM_SCTP;
			break;
		}
	}

	if (nic->hw_tso && skb_shinfo(skb)->gso_size) {
		hdr->tso = 1;
		hdr->tso_start = skb_transport_offset(skb) + tcp_hdrlen(skb);
		hdr->tso_max_paysize = skb_shinfo(skb)->gso_size;
		/* For non-tunneled pkts, point this to L2 ethertype */
		hdr->inner_l3_offset = skb_network_offset(skb) - 2;
		this_cpu_inc(nic->pnicvf->drv_stats->tx_tso);
	}
}

/* SQ GATHER subdescriptor
 * Must follow HDR descriptor
 */
static inline void nicvf_sq_add_gather_subdesc(struct snd_queue *sq, int qentry,
					       int size, u64 data)
{
	struct sq_gather_subdesc *gather;

	qentry &= (sq->dmem.q_len - 1);
	gather = (struct sq_gather_subdesc *)GET_SQ_DESC(sq, qentry);

	memset(gather, 0, SND_QUEUE_DESC_SIZE);
	gather->subdesc_type = SQ_DESC_TYPE_GATHER;
	gather->ld_type = NIC_SEND_LD_TYPE_E_LDD;
	gather->size = size;
	gather->addr = data;
}

/* Add HDR + IMMEDIATE subdescriptors right after descriptors of a TSO
 * packet so that a CQE is posted as a notification for transmission of
 * the TSO packet.
 */
static inline void nicvf_sq_add_cqe_subdesc(struct snd_queue *sq, int qentry,
					    int tso_sqe, struct sk_buff *skb)
{
	struct sq_imm_subdesc *imm;
	struct sq_hdr_subdesc *hdr;

	sq->skbuff[qentry] = (u64)skb;

	hdr = (struct sq_hdr_subdesc *)GET_SQ_DESC(sq, qentry);
	memset(hdr, 0, SND_QUEUE_DESC_SIZE);
	hdr->subdesc_type = SQ_DESC_TYPE_HEADER;
	/* Enable notification via CQE after processing SQE */
	hdr->post_cqe = 1;
	/* There is no packet to transmit here */
	hdr->dont_send = 1;
	hdr->subdesc_cnt = POST_CQE_DESC_COUNT - 1;
	hdr->tot_len = 1;
	/* Actual TSO header SQE index, needed for cleanup */
	hdr->rsvd2 = tso_sqe;

	qentry = nicvf_get_nxt_sqentry(sq, qentry);
	imm = (struct sq_imm_subdesc *)GET_SQ_DESC(sq, qentry);
	memset(imm, 0, SND_QUEUE_DESC_SIZE);
	imm->subdesc_type = SQ_DESC_TYPE_IMMEDIATE;
	imm->len = 1;
}

static inline void nicvf_sq_doorbell(struct nicvf *nic, struct sk_buff *skb,
				     int sq_num, int desc_cnt)
{
	struct netdev_queue *txq;

	txq = netdev_get_tx_queue(nic->pnicvf->netdev,
				  skb_get_queue_mapping(skb));

	netdev_tx_sent_queue(txq, skb->len);

	/* make sure all memory stores are done before ringing doorbell */
	smp_wmb();

	/* Inform HW to xmit all TSO segments */
	nicvf_queue_reg_write(nic, NIC_QSET_SQ_0_7_DOOR,
			      sq_num, desc_cnt);
}

/* Segment a TSO packet into 'gso_size' segments and append
 * them to SQ for transfer
 */
static int nicvf_sq_append_tso(struct nicvf *nic, struct snd_queue *sq,
			       int sq_num, int qentry, struct sk_buff *skb)
{
	struct tso_t tso;
	int seg_subdescs = 0, desc_cnt = 0;
	int seg_len, total_len, data_left;
	int hdr_qentry = qentry;
	int hdr_len = skb_transport_offset(skb) + tcp_hdrlen(skb);

	tso_start(skb, &tso);
	total_len = skb->len - hdr_len;
	while (total_len > 0) {
		char *hdr;

		/* Save Qentry for adding HDR_SUBDESC at the end */
		hdr_qentry = qentry;

		data_left = min_t(int, skb_shinfo(skb)->gso_size, total_len);
		total_len -= data_left;

		/* Add segment's header */
		qentry = nicvf_get_nxt_sqentry(sq, qentry);
		hdr = sq->tso_hdrs + qentry * TSO_HEADER_SIZE;
		tso_build_hdr(skb, hdr, &tso, data_left, total_len == 0);
		nicvf_sq_add_gather_subdesc(sq, qentry, hdr_len,
					    sq->tso_hdrs_phys +
					    qentry * TSO_HEADER_SIZE);
		/* HDR_SUBDESC + GATHER */
		seg_subdescs = 2;
		seg_len = hdr_len;

		/* Add segment's payload fragments */
		while (data_left > 0) {
			int size;

			size = min_t(int, tso.size, data_left);

			qentry = nicvf_get_nxt_sqentry(sq, qentry);
			nicvf_sq_add_gather_subdesc(sq, qentry, size,
						    virt_to_phys(tso.data));
			seg_subdescs++;
			seg_len += size;

			data_left -= size;
			tso_build_data(skb, &tso, size);
		}
		nicvf_sq_add_hdr_subdesc(nic, sq, hdr_qentry,
					 seg_subdescs - 1, skb, seg_len);
		sq->skbuff[hdr_qentry] = (u64)NULL;
		qentry = nicvf_get_nxt_sqentry(sq, qentry);

		desc_cnt += seg_subdescs;
	}
	/* Save SKB in the last segment for freeing */
	sq->skbuff[hdr_qentry] = (u64)skb;

	nicvf_sq_doorbell(nic, skb, sq_num, desc_cnt);

	this_cpu_inc(nic->pnicvf->drv_stats->tx_tso);
	return 1;
}
| 1275 | |
/* Append an skb to a SQ for packet transfer. */
int nicvf_sq_append_skb(struct nicvf *nic, struct snd_queue *sq,
			struct sk_buff *skb, u8 sq_num)
{
	int i, size;
	int subdesc_cnt, hdr_sqe = 0;
	int qentry;
	u64 dma_addr;

	subdesc_cnt = nicvf_sq_subdesc_required(nic, skb);
	if (subdesc_cnt > atomic_read(&sq->free_cnt))
		goto append_fail;

	qentry = nicvf_get_sq_desc(sq, subdesc_cnt);

	/* Check if it's a TSO packet */
	if (skb_shinfo(skb)->gso_size && !nic->hw_tso)
		return nicvf_sq_append_tso(nic, sq, sq_num, qentry, skb);

	/* Add SQ header subdesc */
	nicvf_sq_add_hdr_subdesc(nic, sq, qentry, subdesc_cnt - 1,
				 skb, skb->len);
	hdr_sqe = qentry;

	/* Add SQ gather subdescs */
	qentry = nicvf_get_nxt_sqentry(sq, qentry);
	size = skb_is_nonlinear(skb) ? skb_headlen(skb) : skb->len;
	/* HW will ensure data coherency, CPU sync not required */
	dma_addr = dma_map_page_attrs(&nic->pdev->dev, virt_to_page(skb->data),
				      offset_in_page(skb->data), size,
				      DMA_TO_DEVICE, DMA_ATTR_SKIP_CPU_SYNC);
	if (dma_mapping_error(&nic->pdev->dev, dma_addr)) {
		nicvf_rollback_sq_desc(sq, qentry, subdesc_cnt);
		return 0;
	}

	nicvf_sq_add_gather_subdesc(sq, qentry, size, dma_addr);

	/* Check for scattered buffer */
	if (!skb_is_nonlinear(skb))
		goto doorbell;

	for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
		const struct skb_frag_struct *frag;

		frag = &skb_shinfo(skb)->frags[i];

		qentry = nicvf_get_nxt_sqentry(sq, qentry);
		size = skb_frag_size(frag);
		dma_addr = dma_map_page_attrs(&nic->pdev->dev,
					      skb_frag_page(frag),
					      frag->page_offset, size,
					      DMA_TO_DEVICE,
					      DMA_ATTR_SKIP_CPU_SYNC);
		if (dma_mapping_error(&nic->pdev->dev, dma_addr)) {
			/* Free the entire chain of buffers mapped so far;
			 * here 'i' = frags mapped above plus the skb->data
			 * mapping done before this loop
			 */
			nicvf_unmap_sndq_buffers(nic, sq, hdr_sqe, i);
			nicvf_rollback_sq_desc(sq, qentry, subdesc_cnt);
			return 0;
		}
		nicvf_sq_add_gather_subdesc(sq, qentry, size, dma_addr);
	}

doorbell:
	if (nic->t88 && skb_shinfo(skb)->gso_size) {
		qentry = nicvf_get_nxt_sqentry(sq, qentry);
		nicvf_sq_add_cqe_subdesc(sq, qentry, hdr_sqe, skb);
	}

	nicvf_sq_doorbell(nic, skb, sq_num, subdesc_cnt);

	return 1;

append_fail:
	/* Use original PCI dev for debug log */
	nic = nic->pnicvf;
	netdev_dbg(nic->netdev, "Not enough SQ descriptors to xmit pkt\n");
	return 0;
}
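/* Added note: subdescriptor accounting in nicvf_sq_append_skb() above,
 * derived from the code (illustrative): a linear skb consumes two SQEs
 * (header + one gather), each page fragment adds one more gather, and on
 * T88 a TSO skb gets one trailing subdesc via nicvf_sq_add_cqe_subdesc();
 * this is why nicvf_sq_subdesc_required() is checked against
 * sq->free_cnt before any descriptors are reserved.
 */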

static inline unsigned frag_num(unsigned i)
{
#ifdef __BIG_ENDIAN
	return (i & ~3) + 3 - (i & 3);
#else
	return i;
#endif
}
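/* Added worked example: on big-endian, the four u16 rb_lens packed into
 * each u64 CQE word sit in reversed lanes, so frag_num() reverses index
 * i within its group of four:
 *
 *   i:           0 1 2 3 4 5 6 7
 *   frag_num(i): 3 2 1 0 7 6 5 4
 */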

/* Returns SKB for a received packet */
struct sk_buff *nicvf_get_rcv_skb(struct nicvf *nic, struct cqe_rx_t *cqe_rx)
{
	int frag;
	int payload_len = 0;
	struct sk_buff *skb = NULL;
	struct page *page;
	int offset;
	u16 *rb_lens = NULL;
	u64 *rb_ptrs = NULL;
	u64 phys_addr;

	rb_lens = (void *)cqe_rx + (3 * sizeof(u64));
	/* On all chips except 88xx pass1, CQE_RX2_S is added to CQE_RX at
	 * word6, hence the buffer pointers move up by one word.
	 *
	 * Reuse the existing 'hw_tso' flag, which is set for all chips
	 * except 88xx pass1, instead of taking an additional cache line
	 * access (or miss) to read the PCI dev's revision.
	 */
	if (!nic->hw_tso)
		rb_ptrs = (void *)cqe_rx + (6 * sizeof(u64));
	else
		rb_ptrs = (void *)cqe_rx + (7 * sizeof(u64));

	for (frag = 0; frag < cqe_rx->rb_cnt; frag++) {
		payload_len = rb_lens[frag_num(frag)];
		phys_addr = nicvf_iova_to_phys(nic, *rb_ptrs);
		if (!phys_addr) {
			if (skb)
				dev_kfree_skb_any(skb);
			return NULL;
		}

		if (!frag) {
			/* First fragment */
			dma_unmap_page_attrs(&nic->pdev->dev,
					     *rb_ptrs - cqe_rx->align_pad,
					     RCV_FRAG_LEN, DMA_FROM_DEVICE,
					     DMA_ATTR_SKIP_CPU_SYNC);
			skb = nicvf_rb_ptr_to_skb(nic,
						  phys_addr - cqe_rx->align_pad,
						  payload_len);
			if (!skb)
				return NULL;
			skb_reserve(skb, cqe_rx->align_pad);
			skb_put(skb, payload_len);
		} else {
			/* Add fragments */
			dma_unmap_page_attrs(&nic->pdev->dev, *rb_ptrs,
					     RCV_FRAG_LEN, DMA_FROM_DEVICE,
					     DMA_ATTR_SKIP_CPU_SYNC);
			page = virt_to_page(phys_to_virt(phys_addr));
			offset = phys_to_virt(phys_addr) - page_address(page);
			skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags, page,
					offset, payload_len, RCV_FRAG_LEN);
		}
		/* Next buffer pointer */
		rb_ptrs++;
	}
	return skb;
}
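/* Added note: the rb_ptrs above hold DMA addresses as seen by the device
 * (IOVAs when an IOMMU is active), so nicvf_iova_to_phys() must translate
 * them before phys_to_virt()/virt_to_page() are applied; a failed (zero)
 * translation is treated as fatal for the whole packet.
 */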

static u64 nicvf_int_type_to_mask(int int_type, int q_idx)
{
	u64 reg_val;

	switch (int_type) {
	case NICVF_INTR_CQ:
		reg_val = ((1ULL << q_idx) << NICVF_INTR_CQ_SHIFT);
		break;
	case NICVF_INTR_SQ:
		reg_val = ((1ULL << q_idx) << NICVF_INTR_SQ_SHIFT);
		break;
	case NICVF_INTR_RBDR:
		reg_val = ((1ULL << q_idx) << NICVF_INTR_RBDR_SHIFT);
		break;
	case NICVF_INTR_PKT_DROP:
		reg_val = (1ULL << NICVF_INTR_PKT_DROP_SHIFT);
		break;
	case NICVF_INTR_TCP_TIMER:
		reg_val = (1ULL << NICVF_INTR_TCP_TIMER_SHIFT);
		break;
	case NICVF_INTR_MBOX:
		reg_val = (1ULL << NICVF_INTR_MBOX_SHIFT);
		break;
	case NICVF_INTR_QS_ERR:
		reg_val = (1ULL << NICVF_INTR_QS_ERR_SHIFT);
		break;
	default:
		reg_val = 0;
	}

	return reg_val;
}
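/* Added example, following the shift macros above: per-queue interrupt
 * sources set bit q_idx within their field, e.g.
 *
 *   nicvf_int_type_to_mask(NICVF_INTR_CQ, 2)
 *       == (1ULL << 2) << NICVF_INTR_CQ_SHIFT
 *
 * while global sources (PKT_DROP, MBOX, QS_ERR, ...) ignore q_idx.
 */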

/* Enable interrupt */
void nicvf_enable_intr(struct nicvf *nic, int int_type, int q_idx)
{
	u64 mask = nicvf_int_type_to_mask(int_type, q_idx);

	if (!mask) {
		netdev_dbg(nic->netdev,
			   "Failed to enable interrupt: unknown type\n");
		return;
	}
	nicvf_reg_write(nic, NIC_VF_ENA_W1S,
			nicvf_reg_read(nic, NIC_VF_ENA_W1S) | mask);
}
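/* Added note (assumption inferred from the register naming):
 * NIC_VF_ENA_W1S / NIC_VF_ENA_W1C appear to be a write-1-to-set /
 * write-1-to-clear pair, which is why nicvf_disable_intr() below can
 * write just the mask without disturbing other enable bits.
 */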

/* Disable interrupt */
void nicvf_disable_intr(struct nicvf *nic, int int_type, int q_idx)
{
	u64 mask = nicvf_int_type_to_mask(int_type, q_idx);

	if (!mask) {
		netdev_dbg(nic->netdev,
			   "Failed to disable interrupt: unknown type\n");
		return;
	}

	nicvf_reg_write(nic, NIC_VF_ENA_W1C, mask);
}

/* Clear interrupt */
void nicvf_clear_intr(struct nicvf *nic, int int_type, int q_idx)
{
	u64 mask = nicvf_int_type_to_mask(int_type, q_idx);

	if (!mask) {
		netdev_dbg(nic->netdev,
			   "Failed to clear interrupt: unknown type\n");
		return;
	}

	nicvf_reg_write(nic, NIC_VF_INT, mask);
}

/* Check if interrupt is enabled */
int nicvf_is_intr_enabled(struct nicvf *nic, int int_type, int q_idx)
{
	u64 mask = nicvf_int_type_to_mask(int_type, q_idx);

	/* If the interrupt type is unknown, treat it as disabled. */
	if (!mask) {
		netdev_dbg(nic->netdev,
			   "Failed to check interrupt enable: unknown type\n");
		return 0;
	}

	return mask & nicvf_reg_read(nic, NIC_VF_ENA_W1S);
}

void nicvf_update_rq_stats(struct nicvf *nic, int rq_idx)
{
	struct rcv_queue *rq;

#define GET_RQ_STATS(reg) \
	nicvf_reg_read(nic, NIC_QSET_RQ_0_7_STAT_0_1 |\
			    (rq_idx << NIC_Q_NUM_SHIFT) | (reg << 3))

	rq = &nic->qs->rq[rq_idx];
	rq->stats.bytes = GET_RQ_STATS(RQ_SQ_STATS_OCTS);
	rq->stats.pkts = GET_RQ_STATS(RQ_SQ_STATS_PKTS);
}

void nicvf_update_sq_stats(struct nicvf *nic, int sq_idx)
{
	struct snd_queue *sq;

#define GET_SQ_STATS(reg) \
	nicvf_reg_read(nic, NIC_QSET_SQ_0_7_STAT_0_1 |\
			    (sq_idx << NIC_Q_NUM_SHIFT) | (reg << 3))

	sq = &nic->qs->sq[sq_idx];
	sq->stats.bytes = GET_SQ_STATS(RQ_SQ_STATS_OCTS);
	sq->stats.pkts = GET_SQ_STATS(RQ_SQ_STATS_PKTS);
}
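/* Added note: the GET_*_STATS() macros above compose a register offset
 * as base | (queue << NIC_Q_NUM_SHIFT) | (stat_index << 3); the '<< 3'
 * reflects the assumption that the per-queue statistic registers are
 * 64-bit, i.e. spaced 8 bytes apart.
 */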

/* Check for errors in the receive completion queue entry */
int nicvf_check_cqe_rx_errs(struct nicvf *nic, struct cqe_rx_t *cqe_rx)
{
	if (!cqe_rx->err_level && !cqe_rx->err_opcode)
		return 0;

	if (netif_msg_rx_err(nic))
		netdev_err(nic->netdev,
			   "%s: RX error CQE err_level 0x%x err_opcode 0x%x\n",
			   nic->netdev->name,
			   cqe_rx->err_level, cqe_rx->err_opcode);

	switch (cqe_rx->err_opcode) {
	case CQ_RX_ERROP_RE_PARTIAL:
		this_cpu_inc(nic->drv_stats->rx_bgx_truncated_pkts);
		break;
	case CQ_RX_ERROP_RE_JABBER:
		this_cpu_inc(nic->drv_stats->rx_jabber_errs);
		break;
	case CQ_RX_ERROP_RE_FCS:
		this_cpu_inc(nic->drv_stats->rx_fcs_errs);
		break;
	case CQ_RX_ERROP_RE_RX_CTL:
		this_cpu_inc(nic->drv_stats->rx_bgx_errs);
		break;
	case CQ_RX_ERROP_PREL2_ERR:
		this_cpu_inc(nic->drv_stats->rx_prel2_errs);
		break;
	case CQ_RX_ERROP_L2_MAL:
		this_cpu_inc(nic->drv_stats->rx_l2_hdr_malformed);
		break;
	case CQ_RX_ERROP_L2_OVERSIZE:
		this_cpu_inc(nic->drv_stats->rx_oversize);
		break;
	case CQ_RX_ERROP_L2_UNDERSIZE:
		this_cpu_inc(nic->drv_stats->rx_undersize);
		break;
	case CQ_RX_ERROP_L2_LENMISM:
		this_cpu_inc(nic->drv_stats->rx_l2_len_mismatch);
		break;
	case CQ_RX_ERROP_L2_PCLP:
		this_cpu_inc(nic->drv_stats->rx_l2_pclp);
		break;
	case CQ_RX_ERROP_IP_NOT:
		this_cpu_inc(nic->drv_stats->rx_ip_ver_errs);
		break;
	case CQ_RX_ERROP_IP_CSUM_ERR:
		this_cpu_inc(nic->drv_stats->rx_ip_csum_errs);
		break;
	case CQ_RX_ERROP_IP_MAL:
		this_cpu_inc(nic->drv_stats->rx_ip_hdr_malformed);
		break;
	case CQ_RX_ERROP_IP_MALD:
		this_cpu_inc(nic->drv_stats->rx_ip_payload_malformed);
		break;
	case CQ_RX_ERROP_IP_HOP:
		this_cpu_inc(nic->drv_stats->rx_ip_ttl_errs);
		break;
	case CQ_RX_ERROP_L3_PCLP:
		this_cpu_inc(nic->drv_stats->rx_l3_pclp);
		break;
	case CQ_RX_ERROP_L4_MAL:
		this_cpu_inc(nic->drv_stats->rx_l4_malformed);
		break;
	case CQ_RX_ERROP_L4_CHK:
		this_cpu_inc(nic->drv_stats->rx_l4_csum_errs);
		break;
	case CQ_RX_ERROP_UDP_LEN:
		this_cpu_inc(nic->drv_stats->rx_udp_len_errs);
		break;
	case CQ_RX_ERROP_L4_PORT:
		this_cpu_inc(nic->drv_stats->rx_l4_port_errs);
		break;
	case CQ_RX_ERROP_TCP_FLAG:
		this_cpu_inc(nic->drv_stats->rx_tcp_flag_errs);
		break;
	case CQ_RX_ERROP_TCP_OFFSET:
		this_cpu_inc(nic->drv_stats->rx_tcp_offset_errs);
		break;
	case CQ_RX_ERROP_L4_PCLP:
		this_cpu_inc(nic->drv_stats->rx_l4_pclp);
		break;
	case CQ_RX_ERROP_RBDR_TRUNC:
		this_cpu_inc(nic->drv_stats->rx_truncated_pkts);
		break;
	}

	return 1;
}
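/* Added note: the RX checker above and the TX checker below share one
 * convention: return 0 for a clean completion, 1 when the entry carried
 * an error; counters are bumped with this_cpu_inc(), updating the
 * per-CPU drv_stats lock-free.
 */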

/* Check for errors in the send completion queue entry */
int nicvf_check_cqe_tx_errs(struct nicvf *nic, struct cqe_send_t *cqe_tx)
{
	switch (cqe_tx->send_status) {
	case CQ_TX_ERROP_GOOD:
		return 0;
	case CQ_TX_ERROP_DESC_FAULT:
		this_cpu_inc(nic->drv_stats->tx_desc_fault);
		break;
	case CQ_TX_ERROP_HDR_CONS_ERR:
		this_cpu_inc(nic->drv_stats->tx_hdr_cons_err);
		break;
	case CQ_TX_ERROP_SUBDC_ERR:
		this_cpu_inc(nic->drv_stats->tx_subdesc_err);
		break;
	case CQ_TX_ERROP_MAX_SIZE_VIOL:
		this_cpu_inc(nic->drv_stats->tx_max_size_exceeded);
		break;
	case CQ_TX_ERROP_IMM_SIZE_OFLOW:
		this_cpu_inc(nic->drv_stats->tx_imm_size_oflow);
		break;
	case CQ_TX_ERROP_DATA_SEQUENCE_ERR:
		this_cpu_inc(nic->drv_stats->tx_data_seq_err);
		break;
	case CQ_TX_ERROP_MEM_SEQUENCE_ERR:
		this_cpu_inc(nic->drv_stats->tx_mem_seq_err);
		break;
	case CQ_TX_ERROP_LOCK_VIOL:
		this_cpu_inc(nic->drv_stats->tx_lock_viol);
		break;
	case CQ_TX_ERROP_DATA_FAULT:
		this_cpu_inc(nic->drv_stats->tx_data_fault);
		break;
	case CQ_TX_ERROP_TSTMP_CONFLICT:
		this_cpu_inc(nic->drv_stats->tx_tstmp_conflict);
		break;
	case CQ_TX_ERROP_TSTMP_TIMEOUT:
		this_cpu_inc(nic->drv_stats->tx_tstmp_timeout);
		break;
	case CQ_TX_ERROP_MEM_FAULT:
		this_cpu_inc(nic->drv_stats->tx_mem_fault);
		break;
	case CQ_TX_ERROP_CK_OVERLAP:
		this_cpu_inc(nic->drv_stats->tx_csum_overlap);
		break;
	case CQ_TX_ERROP_CK_OFLOW:
		this_cpu_inc(nic->drv_stats->tx_csum_overflow);
		break;
	}

	return 1;
}