/*
 * Copyright (C) 2015 Cavium, Inc.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of version 2 of the GNU General Public License
 * as published by the Free Software Foundation.
 */

#include <linux/pci.h>
#include <linux/netdevice.h>
#include <linux/ip.h>
#include <linux/etherdevice.h>
#include <linux/iommu.h>
#include <net/ip.h>
#include <net/tso.h>

#include "nic_reg.h"
#include "nic.h"
#include "q_struct.h"
#include "nicvf_queues.h"

static inline void nicvf_sq_add_gather_subdesc(struct snd_queue *sq, int qentry,
					       int size, u64 data);
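/* Commit the buffer references accumulated in nic->rb_pageref to the
 * current page with a single atomic page_ref_add(), instead of one
 * atomic operation per receive buffer carved from the page.
 */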
static void nicvf_get_page(struct nicvf *nic)
{
	if (!nic->rb_pageref || !nic->rb_page)
		return;

	page_ref_add(nic->rb_page, nic->rb_pageref);
	nic->rb_pageref = 0;
}

/* Poll a register for a specific value */
static int nicvf_poll_reg(struct nicvf *nic, int qidx,
			  u64 reg, int bit_pos, int bits, int val)
{
	u64 bit_mask;
	u64 reg_val;
	int timeout = 10;

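	/* Build a mask of 'bits' ones starting at 'bit_pos' so that only
	 * the field under test is compared against 'val'.
	 */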
	bit_mask = (1ULL << bits) - 1;
	bit_mask = (bit_mask << bit_pos);

	while (timeout) {
		reg_val = nicvf_queue_reg_read(nic, reg, qidx);
		if (((reg_val & bit_mask) >> bit_pos) == val)
			return 0;
		usleep_range(1000, 2000);
		timeout--;
	}
	netdev_err(nic->netdev, "Poll on reg 0x%llx failed\n", reg);
	return 1;
}

/* Allocate memory for a queue's descriptors */
static int nicvf_alloc_q_desc_mem(struct nicvf *nic, struct q_desc_mem *dmem,
				  int q_len, int desc_size, int align_bytes)
{
	dmem->q_len = q_len;
	dmem->size = (desc_size * q_len) + align_bytes;
	/* Save address, need it while freeing */
	dmem->unalign_base = dma_zalloc_coherent(&nic->pdev->dev, dmem->size,
						 &dmem->dma, GFP_KERNEL);
	if (!dmem->unalign_base)
		return -ENOMEM;

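	/* Illustration (addresses assumed): with align_bytes = 128 and a
	 * DMA address of 0x10040, phys_base rounds up to 0x10080 and
	 * 'base' points at the same 0x40-byte offset within the CPU
	 * mapping returned by the allocator.
	 */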
	/* Align memory address to 'align_bytes' */
	dmem->phys_base = NICVF_ALIGNED_ADDR((u64)dmem->dma, align_bytes);
	dmem->base = dmem->unalign_base + (dmem->phys_base - dmem->dma);
	return 0;
}

/* Free queue's descriptor memory */
static void nicvf_free_q_desc_mem(struct nicvf *nic, struct q_desc_mem *dmem)
{
	if (!dmem)
		return;

	dma_free_coherent(&nic->pdev->dev, dmem->size,
			  dmem->unalign_base, dmem->dma);
	dmem->unalign_base = NULL;
	dmem->base = NULL;
}

#define XDP_PAGE_REFCNT_REFILL 256

/* Allocate a new page or recycle one if possible
 *
 * We cannot optimize dma mapping here, since
 * 1. There is only one RBDR ring for 8 Rx queues.
 * 2. CQE_RX gives the address of the buffer where the pkt has been DMA'ed,
 *    not an index into the RBDR ring, so we can't refer to saved info.
 * 3. There are multiple receive buffers per page
 */
static inline struct pgcache *nicvf_alloc_page(struct nicvf *nic,
					       struct rbdr *rbdr, gfp_t gfp)
{
	int ref_count;
	struct page *page = NULL;
	struct pgcache *pgcache, *next;

	/* Check if page is already allocated */
	pgcache = &rbdr->pgcache[rbdr->pgidx];
	page = pgcache->page;
	/* Check if page can be recycled */
	if (page) {
		ref_count = page_ref_count(page);
		/* The page can be recycled if it has been used exactly once,
		 * i.e. 'put_page' was called after packet transmission and
		 * the internal ref_count and the page's ref_count are equal.
		 */
		if (rbdr->is_xdp && (ref_count == pgcache->ref_count))
			pgcache->ref_count--;
		else
			page = NULL;

		/* In non-XDP mode, page's ref_count needs to be '1' for it
		 * to be recycled.
		 */
		if (!rbdr->is_xdp && (ref_count != 1))
			page = NULL;
	}

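	/* No recyclable page found, get a fresh one from the allocator */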
	if (!page) {
		page = alloc_pages(gfp | __GFP_COMP | __GFP_NOWARN, 0);
		if (!page)
			return NULL;

		this_cpu_inc(nic->pnicvf->drv_stats->page_alloc);

		/* Check for space */
		if (rbdr->pgalloc >= rbdr->pgcnt) {
			/* Page can still be used */
			nic->rb_page = page;
			return NULL;
		}

		/* Save the page in page cache */
		pgcache->page = page;
		pgcache->dma_addr = 0;
		pgcache->ref_count = 0;
		rbdr->pgalloc++;
	}

	/* Take additional page references for recycling */
	if (rbdr->is_xdp) {
		/* Since there is a single RBDR (i.e. a single core doing
		 * page recycling) per 8 Rx queues, in XDP mode adjusting
		 * page references atomically is the biggest bottleneck, so
		 * take a bunch of references at a time.
		 *
		 * Hence the reference counts below differ by '1'.
		 */
		if (!pgcache->ref_count) {
			pgcache->ref_count = XDP_PAGE_REFCNT_REFILL;
			page_ref_add(page, XDP_PAGE_REFCNT_REFILL);
		}
	} else {
		/* In the non-XDP case, a single 64K page is divided across
		 * multiple receive buffers, so the cost of recycling is
		 * lower anyway. So we can do with just one extra reference.
		 */
		page_ref_add(page, 1);
	}

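	/* Advance to the next page cache slot; pgcnt is a power of two,
	 * so the AND below simply wraps the index around.
	 */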
	rbdr->pgidx++;
	rbdr->pgidx &= (rbdr->pgcnt - 1);

	/* Prefetch refcount of next page in page cache */
	next = &rbdr->pgcache[rbdr->pgidx];
	page = next->page;
	if (page)
		prefetch(&page->_refcount);

	return pgcache;
}

/* Allocate buffer for packet reception */
static inline int nicvf_alloc_rcv_buffer(struct nicvf *nic, struct rbdr *rbdr,
					 gfp_t gfp, u32 buf_len, u64 *rbuf)
{
	struct pgcache *pgcache = NULL;

	/* Check if request can be accommodated in previously allocated page.
	 * But in XDP mode only one buffer per page is permitted.
	 */
	if (!rbdr->is_xdp && nic->rb_page &&
	    ((nic->rb_page_offset + buf_len) <= PAGE_SIZE)) {
		nic->rb_pageref++;
		goto ret;
	}

	nicvf_get_page(nic);
	nic->rb_page = NULL;

	/* Get new page, either recycled or new one */
	pgcache = nicvf_alloc_page(nic, rbdr, gfp);
	if (!pgcache && !nic->rb_page) {
		this_cpu_inc(nic->pnicvf->drv_stats->rcv_buffer_alloc_failures);
		return -ENOMEM;
	}

	nic->rb_page_offset = 0;

	/* Reserve space for header modifications by BPF program */
	if (rbdr->is_xdp)
		buf_len += XDP_PACKET_HEADROOM;

	/* Check if it's recycled */
	if (pgcache)
		nic->rb_page = pgcache->page;
ret:
	if (rbdr->is_xdp && pgcache && pgcache->dma_addr) {
		*rbuf = pgcache->dma_addr;
	} else {
		/* HW will ensure data coherency, CPU sync not required */
		*rbuf = (u64)dma_map_page_attrs(&nic->pdev->dev, nic->rb_page,
						nic->rb_page_offset, buf_len,
						DMA_FROM_DEVICE,
						DMA_ATTR_SKIP_CPU_SYNC);
		if (dma_mapping_error(&nic->pdev->dev, (dma_addr_t)*rbuf)) {
			if (!nic->rb_page_offset)
				__free_pages(nic->rb_page, 0);
			nic->rb_page = NULL;
			return -ENOMEM;
		}
		if (pgcache)
			pgcache->dma_addr = *rbuf + XDP_PACKET_HEADROOM;
		nic->rb_page_offset += buf_len;
	}

	return 0;
}

/* Build skb around receive buffer */
static struct sk_buff *nicvf_rb_ptr_to_skb(struct nicvf *nic,
					   u64 rb_ptr, int len)
{
	void *data;
	struct sk_buff *skb;

	data = phys_to_virt(rb_ptr);

	/* Now build an skb to give to stack */
	skb = build_skb(data, RCV_FRAG_LEN);
	if (!skb) {
		put_page(virt_to_page(data));
		return NULL;
	}

	prefetch(skb->data);
	return skb;
}

/* Allocate RBDR ring and populate receive buffers */
static int nicvf_init_rbdr(struct nicvf *nic, struct rbdr *rbdr,
			   int ring_len, int buf_size)
{
	int idx;
	u64 rbuf;
	struct rbdr_entry_t *desc;
	int err;

	err = nicvf_alloc_q_desc_mem(nic, &rbdr->dmem, ring_len,
				     sizeof(struct rbdr_entry_t),
				     NICVF_RCV_BUF_ALIGN_BYTES);
	if (err)
		return err;

	rbdr->desc = rbdr->dmem.base;
	/* Buffer size has to be in multiples of 128 bytes */
	rbdr->dma_size = buf_size;
	rbdr->enable = true;
	rbdr->thresh = RBDR_THRESH;
	rbdr->head = 0;
	rbdr->tail = 0;

	/* Initialize page recycling stuff.
	 *
	 * Can't use a single buffer per page, especially with 64K pages.
	 * On embedded platforms i.e. 81xx/83xx available memory itself
	 * is low and the minimum RBDR ring size is 8K, which takes away
	 * lots of memory.
	 *
	 * But for XDP it has to be a single buffer per page.
	 */
	if (!nic->pnicvf->xdp_prog) {
		rbdr->pgcnt = ring_len / (PAGE_SIZE / buf_size);
		rbdr->is_xdp = false;
	} else {
		rbdr->pgcnt = ring_len;
		rbdr->is_xdp = true;
	}
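	/* Round the cache size up to a power of two so the page index
	 * can wrap with a simple mask instead of a modulo.
	 */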
	rbdr->pgcnt = roundup_pow_of_two(rbdr->pgcnt);
	rbdr->pgcache = kzalloc(sizeof(*rbdr->pgcache) *
				rbdr->pgcnt, GFP_KERNEL);
	if (!rbdr->pgcache)
		return -ENOMEM;
	rbdr->pgidx = 0;
	rbdr->pgalloc = 0;

	nic->rb_page = NULL;
	for (idx = 0; idx < ring_len; idx++) {
		err = nicvf_alloc_rcv_buffer(nic, rbdr, GFP_KERNEL,
					     RCV_FRAG_LEN, &rbuf);
		if (err) {
			/* To free already allocated and mapped ones */
			rbdr->tail = idx - 1;
			return err;
		}

		desc = GET_RBDR_DESC(rbdr, idx);
		desc->buf_addr = rbuf & ~(NICVF_RCV_BUF_ALIGN_BYTES - 1);
	}

	nicvf_get_page(nic);

	return 0;
}

/* Free RBDR ring and its receive buffers */
static void nicvf_free_rbdr(struct nicvf *nic, struct rbdr *rbdr)
{
	int head, tail;
	u64 buf_addr, phys_addr;
	struct pgcache *pgcache;
	struct rbdr_entry_t *desc;

	if (!rbdr)
		return;

	rbdr->enable = false;
	if (!rbdr->dmem.base)
		return;

	head = rbdr->head;
	tail = rbdr->tail;

	/* Release page references */
	while (head != tail) {
		desc = GET_RBDR_DESC(rbdr, head);
		buf_addr = desc->buf_addr;
		phys_addr = nicvf_iova_to_phys(nic, buf_addr);
		dma_unmap_page_attrs(&nic->pdev->dev, buf_addr, RCV_FRAG_LEN,
				     DMA_FROM_DEVICE, DMA_ATTR_SKIP_CPU_SYNC);
		if (phys_addr)
			put_page(virt_to_page(phys_to_virt(phys_addr)));
		head++;
		head &= (rbdr->dmem.q_len - 1);
	}
	/* Release buffer of tail desc */
	desc = GET_RBDR_DESC(rbdr, tail);
	buf_addr = desc->buf_addr;
	phys_addr = nicvf_iova_to_phys(nic, buf_addr);
	dma_unmap_page_attrs(&nic->pdev->dev, buf_addr, RCV_FRAG_LEN,
			     DMA_FROM_DEVICE, DMA_ATTR_SKIP_CPU_SYNC);
	if (phys_addr)
		put_page(virt_to_page(phys_to_virt(phys_addr)));

	/* Sync page cache info */
	smp_rmb();

	/* Release additional page references held for recycling */
	head = 0;
	while (head < rbdr->pgcnt) {
		pgcache = &rbdr->pgcache[head];
		if (pgcache->page && page_ref_count(pgcache->page) != 0) {
			if (rbdr->is_xdp)
				page_ref_sub(pgcache->page,
					     pgcache->ref_count - 1);
			put_page(pgcache->page);
		}
		head++;
	}

	/* Free RBDR ring */
	nicvf_free_q_desc_mem(nic, &rbdr->dmem);
}

/* Refill receive buffer descriptors with new buffers */
static void nicvf_refill_rbdr(struct nicvf *nic, gfp_t gfp)
{
	struct queue_set *qs = nic->qs;
	int rbdr_idx = qs->rbdr_cnt;
	int tail, qcount;
	int refill_rb_cnt;
	struct rbdr *rbdr;
	struct rbdr_entry_t *desc;
	u64 rbuf;
	int new_rb = 0;

refill:
	if (!rbdr_idx)
		return;
	rbdr_idx--;
	rbdr = &qs->rbdr[rbdr_idx];
	/* Check if it's enabled */
	if (!rbdr->enable)
		goto next_rbdr;

	/* Get the number of descriptors to be refilled */
	qcount = nicvf_queue_reg_read(nic, NIC_QSET_RBDR_0_1_STATUS0, rbdr_idx);
	qcount &= 0x7FFFF;
	/* Doorbell can be rung with a max of ring size minus 1 */
	if (qcount >= (qs->rbdr_len - 1))
		goto next_rbdr;
	else
		refill_rb_cnt = qs->rbdr_len - qcount - 1;
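	/* Illustration (ring size assumed): with rbdr_len 8192 and a
	 * current qcount of 8000, refill_rb_cnt is 8192 - 8000 - 1 = 191,
	 * keeping the ring one short of completely full.
	 */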

	/* Sync page cache info */
	smp_rmb();

	/* Start filling descs from tail */
	tail = nicvf_queue_reg_read(nic, NIC_QSET_RBDR_0_1_TAIL, rbdr_idx) >> 3;
	while (refill_rb_cnt) {
		tail++;
		tail &= (rbdr->dmem.q_len - 1);

		if (nicvf_alloc_rcv_buffer(nic, rbdr, gfp, RCV_FRAG_LEN, &rbuf))
			break;

		desc = GET_RBDR_DESC(rbdr, tail);
		desc->buf_addr = rbuf & ~(NICVF_RCV_BUF_ALIGN_BYTES - 1);
		refill_rb_cnt--;
		new_rb++;
	}

	nicvf_get_page(nic);

	/* make sure all memory stores are done before ringing doorbell */
	smp_wmb();

	/* Check if buffer allocation failed */
	if (refill_rb_cnt)
		nic->rb_alloc_fail = true;
	else
		nic->rb_alloc_fail = false;

	/* Notify HW */
	nicvf_queue_reg_write(nic, NIC_QSET_RBDR_0_1_DOOR,
			      rbdr_idx, new_rb);
next_rbdr:
	/* Re-enable RBDR interrupts only if buffer allocation succeeded */
	if (!nic->rb_alloc_fail && rbdr->enable &&
	    netif_running(nic->pnicvf->netdev))
		nicvf_enable_intr(nic, NICVF_INTR_RBDR, rbdr_idx);

	if (rbdr_idx)
		goto refill;
}

/* Alloc rcv buffers in non-atomic mode for better success */
void nicvf_rbdr_work(struct work_struct *work)
{
	struct nicvf *nic = container_of(work, struct nicvf, rbdr_work.work);

	nicvf_refill_rbdr(nic, GFP_KERNEL);
	if (nic->rb_alloc_fail)
		schedule_delayed_work(&nic->rbdr_work, msecs_to_jiffies(10));
	else
		nic->rb_work_scheduled = false;
}

/* In Softirq context, alloc rcv buffers in atomic mode */
void nicvf_rbdr_task(unsigned long data)
{
	struct nicvf *nic = (struct nicvf *)data;

	nicvf_refill_rbdr(nic, GFP_ATOMIC);
	if (nic->rb_alloc_fail) {
		nic->rb_work_scheduled = true;
		schedule_delayed_work(&nic->rbdr_work, msecs_to_jiffies(10));
	}
}

/* Initialize completion queue */
static int nicvf_init_cmp_queue(struct nicvf *nic,
				struct cmp_queue *cq, int q_len)
{
	int err;

	err = nicvf_alloc_q_desc_mem(nic, &cq->dmem, q_len, CMP_QUEUE_DESC_SIZE,
				     NICVF_CQ_BASE_ALIGN_BYTES);
	if (err)
		return err;

	cq->desc = cq->dmem.base;
	cq->thresh = pass1_silicon(nic->pdev) ? 0 : CMP_QUEUE_CQE_THRESH;
	nic->cq_coalesce_usecs = (CMP_QUEUE_TIMER_THRESH * 0.05) - 1;

	return 0;
}

static void nicvf_free_cmp_queue(struct nicvf *nic, struct cmp_queue *cq)
{
	if (!cq)
		return;
	if (!cq->dmem.base)
		return;

	nicvf_free_q_desc_mem(nic, &cq->dmem);
}

/* Initialize transmit queue */
static int nicvf_init_snd_queue(struct nicvf *nic,
				struct snd_queue *sq, int q_len, int qidx)
{
	int err;

	err = nicvf_alloc_q_desc_mem(nic, &sq->dmem, q_len, SND_QUEUE_DESC_SIZE,
				     NICVF_SQ_BASE_ALIGN_BYTES);
	if (err)
		return err;

	sq->desc = sq->dmem.base;
	sq->skbuff = kcalloc(q_len, sizeof(u64), GFP_KERNEL);
	if (!sq->skbuff)
		return -ENOMEM;

	sq->head = 0;
	sq->tail = 0;
	sq->thresh = SND_QUEUE_THRESH;

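	/* For secondary Qsets the global TX queue index is offset by the
	 * Qset number, e.g. (assuming MAX_SND_QUEUES_PER_QS is 8) queue 3
	 * of the first secondary Qset maps to global index 11.
	 */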
	/* Check if this SQ is an XDP TX queue */
	if (nic->sqs_mode)
		qidx += ((nic->sqs_id + 1) * MAX_SND_QUEUES_PER_QS);
	if (qidx < nic->pnicvf->xdp_tx_queues) {
		/* Alloc memory to save page pointers for XDP_TX */
		sq->xdp_page = kcalloc(q_len, sizeof(u64), GFP_KERNEL);
		if (!sq->xdp_page)
			return -ENOMEM;
		sq->xdp_desc_cnt = 0;
		sq->xdp_free_cnt = q_len - 1;
		sq->is_xdp = true;
	} else {
		sq->xdp_page = NULL;
		sq->xdp_desc_cnt = 0;
		sq->xdp_free_cnt = 0;
		sq->is_xdp = false;

		atomic_set(&sq->free_cnt, q_len - 1);

		/* Preallocate memory for TSO segment headers */
		sq->tso_hdrs = dma_alloc_coherent(&nic->pdev->dev,
						  q_len * TSO_HEADER_SIZE,
						  &sq->tso_hdrs_phys,
						  GFP_KERNEL);
		if (!sq->tso_hdrs)
			return -ENOMEM;
	}

	return 0;
}

void nicvf_unmap_sndq_buffers(struct nicvf *nic, struct snd_queue *sq,
			      int hdr_sqe, u8 subdesc_cnt)
{
	u8 idx;
	struct sq_gather_subdesc *gather;

	/* Unmap DMA mapped skb data buffers */
	for (idx = 0; idx < subdesc_cnt; idx++) {
		hdr_sqe++;
		hdr_sqe &= (sq->dmem.q_len - 1);
		gather = (struct sq_gather_subdesc *)GET_SQ_DESC(sq, hdr_sqe);
		/* HW will ensure data coherency, CPU sync not required */
		dma_unmap_page_attrs(&nic->pdev->dev, gather->addr,
				     gather->size, DMA_TO_DEVICE,
				     DMA_ATTR_SKIP_CPU_SYNC);
	}
}

static void nicvf_free_snd_queue(struct nicvf *nic, struct snd_queue *sq)
{
	struct sk_buff *skb;
	struct page *page;
	struct sq_hdr_subdesc *hdr;
	struct sq_hdr_subdesc *tso_sqe;

	if (!sq)
		return;
	if (!sq->dmem.base)
		return;

	if (sq->tso_hdrs)
		dma_free_coherent(&nic->pdev->dev,
				  sq->dmem.q_len * TSO_HEADER_SIZE,
				  sq->tso_hdrs, sq->tso_hdrs_phys);

	/* Free pending skbs in the queue */
	smp_rmb();
	while (sq->head != sq->tail) {
		skb = (struct sk_buff *)sq->skbuff[sq->head];
		if (!skb || !sq->xdp_page)
			goto next;

		page = (struct page *)sq->xdp_page[sq->head];
		if (!page)
			goto next;
		else
			put_page(page);

		hdr = (struct sq_hdr_subdesc *)GET_SQ_DESC(sq, sq->head);
		/* Check for dummy descriptor used for HW TSO offload on 88xx */
		if (hdr->dont_send) {
			/* Get actual TSO descriptors and unmap them */
			tso_sqe =
			 (struct sq_hdr_subdesc *)GET_SQ_DESC(sq, hdr->rsvd2);
			nicvf_unmap_sndq_buffers(nic, sq, hdr->rsvd2,
						 tso_sqe->subdesc_cnt);
		} else {
			nicvf_unmap_sndq_buffers(nic, sq, sq->head,
						 hdr->subdesc_cnt);
		}
		if (skb)
			dev_kfree_skb_any(skb);
next:
		sq->head++;
		sq->head &= (sq->dmem.q_len - 1);
	}
	kfree(sq->skbuff);
	kfree(sq->xdp_page);
	nicvf_free_q_desc_mem(nic, &sq->dmem);
}

static void nicvf_reclaim_snd_queue(struct nicvf *nic,
				    struct queue_set *qs, int qidx)
{
	/* Disable send queue */
	nicvf_queue_reg_write(nic, NIC_QSET_SQ_0_7_CFG, qidx, 0);
	/* Check if SQ is stopped */
	if (nicvf_poll_reg(nic, qidx, NIC_QSET_SQ_0_7_STATUS, 21, 1, 0x01))
		return;
	/* Reset send queue */
	nicvf_queue_reg_write(nic, NIC_QSET_SQ_0_7_CFG, qidx, NICVF_SQ_RESET);
}

static void nicvf_reclaim_rcv_queue(struct nicvf *nic,
				    struct queue_set *qs, int qidx)
{
	union nic_mbx mbx = {};

	/* Make sure all packets in the pipeline are written back into mem */
	mbx.msg.msg = NIC_MBOX_MSG_RQ_SW_SYNC;
	nicvf_send_msg_to_pf(nic, &mbx);
}

static void nicvf_reclaim_cmp_queue(struct nicvf *nic,
				    struct queue_set *qs, int qidx)
{
	/* Disable timer threshold (doesn't get reset upon CQ reset) */
	nicvf_queue_reg_write(nic, NIC_QSET_CQ_0_7_CFG2, qidx, 0);
	/* Disable completion queue */
	nicvf_queue_reg_write(nic, NIC_QSET_CQ_0_7_CFG, qidx, 0);
	/* Reset completion queue */
	nicvf_queue_reg_write(nic, NIC_QSET_CQ_0_7_CFG, qidx, NICVF_CQ_RESET);
}

static void nicvf_reclaim_rbdr(struct nicvf *nic,
			       struct rbdr *rbdr, int qidx)
{
	u64 tmp, fifo_state;
	int timeout = 10;

	/* Save head and tail pointers for freeing up buffers */
	rbdr->head = nicvf_queue_reg_read(nic,
					  NIC_QSET_RBDR_0_1_HEAD,
					  qidx) >> 3;
	rbdr->tail = nicvf_queue_reg_read(nic,
					  NIC_QSET_RBDR_0_1_TAIL,
					  qidx) >> 3;

	/* If RBDR FIFO is in 'FAIL' state then do a reset first
	 * before reclaiming.
	 */
	fifo_state = nicvf_queue_reg_read(nic, NIC_QSET_RBDR_0_1_STATUS0, qidx);
	if (((fifo_state >> 62) & 0x03) == 0x3)
		nicvf_queue_reg_write(nic, NIC_QSET_RBDR_0_1_CFG,
				      qidx, NICVF_RBDR_RESET);

	/* Disable RBDR */
	nicvf_queue_reg_write(nic, NIC_QSET_RBDR_0_1_CFG, qidx, 0);
	if (nicvf_poll_reg(nic, qidx, NIC_QSET_RBDR_0_1_STATUS0, 62, 2, 0x00))
		return;
	while (1) {
		tmp = nicvf_queue_reg_read(nic,
					   NIC_QSET_RBDR_0_1_PREFETCH_STATUS,
					   qidx);
		if ((tmp & 0xFFFFFFFF) == ((tmp >> 32) & 0xFFFFFFFF))
			break;
		usleep_range(1000, 2000);
		timeout--;
		if (!timeout) {
			netdev_err(nic->netdev,
				   "Failed polling on prefetch status\n");
			return;
		}
	}
	nicvf_queue_reg_write(nic, NIC_QSET_RBDR_0_1_CFG,
			      qidx, NICVF_RBDR_RESET);

	if (nicvf_poll_reg(nic, qidx, NIC_QSET_RBDR_0_1_STATUS0, 62, 2, 0x02))
		return;
	nicvf_queue_reg_write(nic, NIC_QSET_RBDR_0_1_CFG, qidx, 0x00);
	if (nicvf_poll_reg(nic, qidx, NIC_QSET_RBDR_0_1_STATUS0, 62, 2, 0x00))
		return;
}

void nicvf_config_vlan_stripping(struct nicvf *nic, netdev_features_t features)
{
	u64 rq_cfg;
	int sqs;

	rq_cfg = nicvf_queue_reg_read(nic, NIC_QSET_RQ_GEN_CFG, 0);

	/* Enable first VLAN stripping */
	if (features & NETIF_F_HW_VLAN_CTAG_RX)
		rq_cfg |= (1ULL << 25);
	else
		rq_cfg &= ~(1ULL << 25);
	nicvf_queue_reg_write(nic, NIC_QSET_RQ_GEN_CFG, 0, rq_cfg);

	/* Configure Secondary Qsets, if any */
	for (sqs = 0; sqs < nic->sqs_count; sqs++)
		if (nic->snicvf[sqs])
			nicvf_queue_reg_write(nic->snicvf[sqs],
					      NIC_QSET_RQ_GEN_CFG, 0, rq_cfg);
}

static void nicvf_reset_rcv_queue_stats(struct nicvf *nic)
{
	union nic_mbx mbx = {};

	/* Reset all RQ/SQ and VF stats */
	mbx.reset_stat.msg = NIC_MBOX_MSG_RESET_STAT_COUNTER;
	mbx.reset_stat.rx_stat_mask = 0x3FFF;
	mbx.reset_stat.tx_stat_mask = 0x1F;
	mbx.reset_stat.rq_stat_mask = 0xFFFF;
	mbx.reset_stat.sq_stat_mask = 0xFFFF;
	nicvf_send_msg_to_pf(nic, &mbx);
}

/* Configures receive queue */
static void nicvf_rcv_queue_config(struct nicvf *nic, struct queue_set *qs,
				   int qidx, bool enable)
{
	union nic_mbx mbx = {};
	struct rcv_queue *rq;
	struct rq_cfg rq_cfg;

	rq = &qs->rq[qidx];
	rq->enable = enable;

	/* Disable receive queue */
	nicvf_queue_reg_write(nic, NIC_QSET_RQ_0_7_CFG, qidx, 0);

	if (!rq->enable) {
		nicvf_reclaim_rcv_queue(nic, qs, qidx);
		return;
	}

	rq->cq_qs = qs->vnic_id;
	rq->cq_idx = qidx;
	rq->start_rbdr_qs = qs->vnic_id;
	rq->start_qs_rbdr_idx = qs->rbdr_cnt - 1;
	rq->cont_rbdr_qs = qs->vnic_id;
	rq->cont_qs_rbdr_idx = qs->rbdr_cnt - 1;
	/* All writes of RBDR data to be loaded into L2 cache as well */
	rq->caching = 1;

	/* Send a mailbox msg to PF to config RQ */
	mbx.rq.msg = NIC_MBOX_MSG_RQ_CFG;
	mbx.rq.qs_num = qs->vnic_id;
	mbx.rq.rq_num = qidx;
	mbx.rq.cfg = (rq->caching << 26) | (rq->cq_qs << 19) |
		     (rq->cq_idx << 16) | (rq->cont_rbdr_qs << 9) |
		     (rq->cont_qs_rbdr_idx << 8) |
		     (rq->start_rbdr_qs << 1) | (rq->start_qs_rbdr_idx);
	nicvf_send_msg_to_pf(nic, &mbx);

	mbx.rq.msg = NIC_MBOX_MSG_RQ_BP_CFG;
	mbx.rq.cfg = BIT_ULL(63) | BIT_ULL(62) |
		     (RQ_PASS_RBDR_LVL << 16) | (RQ_PASS_CQ_LVL << 8) |
		     (qs->vnic_id << 0);
	nicvf_send_msg_to_pf(nic, &mbx);

	/* RQ drop config
	 * Enable CQ drop to reserve sufficient CQEs for all tx packets
	 */
	mbx.rq.msg = NIC_MBOX_MSG_RQ_DROP_CFG;
	mbx.rq.cfg = BIT_ULL(63) | BIT_ULL(62) |
		     (RQ_PASS_RBDR_LVL << 40) | (RQ_DROP_RBDR_LVL << 32) |
		     (RQ_PASS_CQ_LVL << 16) | (RQ_DROP_CQ_LVL << 8);
	nicvf_send_msg_to_pf(nic, &mbx);

	if (!nic->sqs_mode && (qidx == 0)) {
		/* Enable checking of L3/L4 length and TCP/UDP checksums.
		 * Also allow IPv6 pkts with zero UDP checksum.
		 */
		nicvf_queue_reg_write(nic, NIC_QSET_RQ_GEN_CFG, 0,
				      (BIT(24) | BIT(23) | BIT(21) | BIT(20)));
		nicvf_config_vlan_stripping(nic, nic->netdev->features);
	}

	/* Enable Receive queue */
	memset(&rq_cfg, 0, sizeof(struct rq_cfg));
	rq_cfg.ena = 1;
	rq_cfg.tcp_ena = 0;
	nicvf_queue_reg_write(nic, NIC_QSET_RQ_0_7_CFG, qidx, *(u64 *)&rq_cfg);
}

/* Configures completion queue */
void nicvf_cmp_queue_config(struct nicvf *nic, struct queue_set *qs,
			    int qidx, bool enable)
{
	struct cmp_queue *cq;
	struct cq_cfg cq_cfg;

	cq = &qs->cq[qidx];
	cq->enable = enable;

	if (!cq->enable) {
		nicvf_reclaim_cmp_queue(nic, qs, qidx);
		return;
	}

	/* Reset completion queue */
	nicvf_queue_reg_write(nic, NIC_QSET_CQ_0_7_CFG, qidx, NICVF_CQ_RESET);

	if (!cq->enable)
		return;

	spin_lock_init(&cq->lock);
	/* Set completion queue base address */
	nicvf_queue_reg_write(nic, NIC_QSET_CQ_0_7_BASE,
			      qidx, (u64)(cq->dmem.phys_base));

	/* Enable Completion queue */
	memset(&cq_cfg, 0, sizeof(struct cq_cfg));
	cq_cfg.ena = 1;
	cq_cfg.reset = 0;
	cq_cfg.caching = 0;
	cq_cfg.qsize = ilog2(qs->cq_len >> 10);
	cq_cfg.avg_con = 0;
	nicvf_queue_reg_write(nic, NIC_QSET_CQ_0_7_CFG, qidx, *(u64 *)&cq_cfg);

	/* Set threshold value for interrupt generation */
	nicvf_queue_reg_write(nic, NIC_QSET_CQ_0_7_THRESH, qidx, cq->thresh);
	nicvf_queue_reg_write(nic, NIC_QSET_CQ_0_7_CFG2,
			      qidx, CMP_QUEUE_TIMER_THRESH);
}

/* Configures transmit queue */
static void nicvf_snd_queue_config(struct nicvf *nic, struct queue_set *qs,
				   int qidx, bool enable)
{
	union nic_mbx mbx = {};
	struct snd_queue *sq;
	struct sq_cfg sq_cfg;

	sq = &qs->sq[qidx];
	sq->enable = enable;

	if (!sq->enable) {
		nicvf_reclaim_snd_queue(nic, qs, qidx);
		return;
	}

	/* Reset send queue */
	nicvf_queue_reg_write(nic, NIC_QSET_SQ_0_7_CFG, qidx, NICVF_SQ_RESET);

	sq->cq_qs = qs->vnic_id;
	sq->cq_idx = qidx;

	/* Send a mailbox msg to PF to config SQ */
	mbx.sq.msg = NIC_MBOX_MSG_SQ_CFG;
	mbx.sq.qs_num = qs->vnic_id;
	mbx.sq.sq_num = qidx;
	mbx.sq.sqs_mode = nic->sqs_mode;
	mbx.sq.cfg = (sq->cq_qs << 3) | sq->cq_idx;
	nicvf_send_msg_to_pf(nic, &mbx);

	/* Set queue base address */
	nicvf_queue_reg_write(nic, NIC_QSET_SQ_0_7_BASE,
			      qidx, (u64)(sq->dmem.phys_base));

	/* Enable send queue & set queue size */
	memset(&sq_cfg, 0, sizeof(struct sq_cfg));
	sq_cfg.ena = 1;
	sq_cfg.reset = 0;
	sq_cfg.ldwb = 0;
	sq_cfg.qsize = ilog2(qs->sq_len >> 10);
	sq_cfg.tstmp_bgx_intf = 0;
	/* CQ's level at which HW will stop processing SQEs to avoid
	 * transmitting a pkt with no space in CQ to post CQE_TX.
	 */
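	/* cq_limit is encoded in 256ths of the CQ size, so the value below
	 * reserves roughly CMP_QUEUE_PIPELINE_RSVD CQE slots (the integer
	 * division rounds the level down).
	 */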
	sq_cfg.cq_limit = (CMP_QUEUE_PIPELINE_RSVD * 256) / qs->cq_len;
	nicvf_queue_reg_write(nic, NIC_QSET_SQ_0_7_CFG, qidx, *(u64 *)&sq_cfg);

	/* Set threshold value for interrupt generation */
	nicvf_queue_reg_write(nic, NIC_QSET_SQ_0_7_THRESH, qidx, sq->thresh);

	/* Set queue:cpu affinity for better load distribution */
	if (cpu_online(qidx)) {
		cpumask_set_cpu(qidx, &sq->affinity_mask);
		netif_set_xps_queue(nic->netdev,
				    &sq->affinity_mask, qidx);
	}
}

/* Configures receive buffer descriptor ring */
static void nicvf_rbdr_config(struct nicvf *nic, struct queue_set *qs,
			      int qidx, bool enable)
{
	struct rbdr *rbdr;
	struct rbdr_cfg rbdr_cfg;

	rbdr = &qs->rbdr[qidx];
	nicvf_reclaim_rbdr(nic, rbdr, qidx);
	if (!enable)
		return;

	/* Set descriptor base address */
	nicvf_queue_reg_write(nic, NIC_QSET_RBDR_0_1_BASE,
			      qidx, (u64)(rbdr->dmem.phys_base));

	/* Enable RBDR & set queue size */
	/* Buffer size should be in multiples of 128 bytes */
	memset(&rbdr_cfg, 0, sizeof(struct rbdr_cfg));
	rbdr_cfg.ena = 1;
	rbdr_cfg.reset = 0;
	rbdr_cfg.ldwb = 0;
	rbdr_cfg.qsize = RBDR_SIZE;
	rbdr_cfg.avg_con = 0;
	rbdr_cfg.lines = rbdr->dma_size / 128;
	nicvf_queue_reg_write(nic, NIC_QSET_RBDR_0_1_CFG,
			      qidx, *(u64 *)&rbdr_cfg);

	/* Notify HW */
	nicvf_queue_reg_write(nic, NIC_QSET_RBDR_0_1_DOOR,
			      qidx, qs->rbdr_len - 1);

	/* Set threshold value for interrupt generation */
	nicvf_queue_reg_write(nic, NIC_QSET_RBDR_0_1_THRESH,
			      qidx, rbdr->thresh - 1);
}

/* Requests PF to assign and enable Qset */
void nicvf_qset_config(struct nicvf *nic, bool enable)
{
	union nic_mbx mbx = {};
	struct queue_set *qs = nic->qs;
	struct qs_cfg *qs_cfg;

	if (!qs) {
		netdev_warn(nic->netdev,
			    "Qset is still not allocated, don't init queues\n");
		return;
	}

	qs->enable = enable;
	qs->vnic_id = nic->vf_id;

	/* Send a mailbox msg to PF to config Qset */
	mbx.qs.msg = NIC_MBOX_MSG_QS_CFG;
	mbx.qs.num = qs->vnic_id;
	mbx.qs.sqs_count = nic->sqs_count;

	mbx.qs.cfg = 0;
	qs_cfg = (struct qs_cfg *)&mbx.qs.cfg;
	if (qs->enable) {
		qs_cfg->ena = 1;
#ifdef __BIG_ENDIAN
		qs_cfg->be = 1;
#endif
		qs_cfg->vnic = qs->vnic_id;
	}
	nicvf_send_msg_to_pf(nic, &mbx);
}

static void nicvf_free_resources(struct nicvf *nic)
{
	int qidx;
	struct queue_set *qs = nic->qs;

	/* Free receive buffer descriptor ring */
	for (qidx = 0; qidx < qs->rbdr_cnt; qidx++)
		nicvf_free_rbdr(nic, &qs->rbdr[qidx]);

	/* Free completion queue */
	for (qidx = 0; qidx < qs->cq_cnt; qidx++)
		nicvf_free_cmp_queue(nic, &qs->cq[qidx]);

	/* Free send queue */
	for (qidx = 0; qidx < qs->sq_cnt; qidx++)
		nicvf_free_snd_queue(nic, &qs->sq[qidx]);
}

static int nicvf_alloc_resources(struct nicvf *nic)
{
	int qidx;
	struct queue_set *qs = nic->qs;

	/* Alloc receive buffer descriptor ring */
	for (qidx = 0; qidx < qs->rbdr_cnt; qidx++) {
		if (nicvf_init_rbdr(nic, &qs->rbdr[qidx], qs->rbdr_len,
				    DMA_BUFFER_LEN))
			goto alloc_fail;
	}

	/* Alloc send queue */
	for (qidx = 0; qidx < qs->sq_cnt; qidx++) {
		if (nicvf_init_snd_queue(nic, &qs->sq[qidx], qs->sq_len, qidx))
			goto alloc_fail;
	}

	/* Alloc completion queue */
	for (qidx = 0; qidx < qs->cq_cnt; qidx++) {
		if (nicvf_init_cmp_queue(nic, &qs->cq[qidx], qs->cq_len))
			goto alloc_fail;
	}

	return 0;
alloc_fail:
	nicvf_free_resources(nic);
	return -ENOMEM;
}

int nicvf_set_qset_resources(struct nicvf *nic)
{
	struct queue_set *qs;

	qs = devm_kzalloc(&nic->pdev->dev, sizeof(*qs), GFP_KERNEL);
	if (!qs)
		return -ENOMEM;
	nic->qs = qs;

	/* Set count of each queue */
	qs->rbdr_cnt = DEFAULT_RBDR_CNT;
	qs->rq_cnt = min_t(u8, MAX_RCV_QUEUES_PER_QS, num_online_cpus());
	qs->sq_cnt = min_t(u8, MAX_SND_QUEUES_PER_QS, num_online_cpus());
	qs->cq_cnt = max_t(u8, qs->rq_cnt, qs->sq_cnt);

	/* Set queue lengths */
	qs->rbdr_len = RCV_BUF_COUNT;
	qs->sq_len = SND_QUEUE_LEN;
	qs->cq_len = CMP_QUEUE_LEN;

	nic->rx_queues = qs->rq_cnt;
	nic->tx_queues = qs->sq_cnt;
	nic->xdp_tx_queues = 0;

	return 0;
}

int nicvf_config_data_transfer(struct nicvf *nic, bool enable)
{
	bool disable = false;
	struct queue_set *qs = nic->qs;
	struct queue_set *pqs = nic->pnicvf->qs;
	int qidx;

	if (!qs)
		return 0;

	/* Take primary VF's queue lengths.
	 * This is needed to take queue lengths set from ethtool
	 * into consideration.
	 */
	if (nic->sqs_mode && pqs) {
		qs->cq_len = pqs->cq_len;
		qs->sq_len = pqs->sq_len;
	}

	if (enable) {
		if (nicvf_alloc_resources(nic))
			return -ENOMEM;

		for (qidx = 0; qidx < qs->sq_cnt; qidx++)
			nicvf_snd_queue_config(nic, qs, qidx, enable);
		for (qidx = 0; qidx < qs->cq_cnt; qidx++)
			nicvf_cmp_queue_config(nic, qs, qidx, enable);
		for (qidx = 0; qidx < qs->rbdr_cnt; qidx++)
			nicvf_rbdr_config(nic, qs, qidx, enable);
		for (qidx = 0; qidx < qs->rq_cnt; qidx++)
			nicvf_rcv_queue_config(nic, qs, qidx, enable);
	} else {
		for (qidx = 0; qidx < qs->rq_cnt; qidx++)
			nicvf_rcv_queue_config(nic, qs, qidx, disable);
		for (qidx = 0; qidx < qs->rbdr_cnt; qidx++)
			nicvf_rbdr_config(nic, qs, qidx, disable);
		for (qidx = 0; qidx < qs->sq_cnt; qidx++)
			nicvf_snd_queue_config(nic, qs, qidx, disable);
		for (qidx = 0; qidx < qs->cq_cnt; qidx++)
			nicvf_cmp_queue_config(nic, qs, qidx, disable);

		nicvf_free_resources(nic);
	}

	/* Reset RXQ's stats.
	 * SQ's stats will get reset automatically once SQ is reset.
	 */
	nicvf_reset_rcv_queue_stats(nic);

	return 0;
}

/* Get a free desc from SQ
 * returns descriptor pointer & descriptor number
 */
static inline int nicvf_get_sq_desc(struct snd_queue *sq, int desc_cnt)
{
	int qentry;

	qentry = sq->tail;
	if (!sq->is_xdp)
		atomic_sub(desc_cnt, &sq->free_cnt);
	else
		sq->xdp_free_cnt -= desc_cnt;
	sq->tail += desc_cnt;
	sq->tail &= (sq->dmem.q_len - 1);

	return qentry;
}

/* Rollback to previous tail pointer when descriptors not used */
static inline void nicvf_rollback_sq_desc(struct snd_queue *sq,
					  int qentry, int desc_cnt)
{
	sq->tail = qentry;
	atomic_add(desc_cnt, &sq->free_cnt);
}

/* Free descriptor back to SQ for future use */
void nicvf_put_sq_desc(struct snd_queue *sq, int desc_cnt)
{
	if (!sq->is_xdp)
		atomic_add(desc_cnt, &sq->free_cnt);
	else
		sq->xdp_free_cnt += desc_cnt;
	sq->head += desc_cnt;
	sq->head &= (sq->dmem.q_len - 1);
}

static inline int nicvf_get_nxt_sqentry(struct snd_queue *sq, int qentry)
{
	qentry++;
	qentry &= (sq->dmem.q_len - 1);
	return qentry;
}

void nicvf_sq_enable(struct nicvf *nic, struct snd_queue *sq, int qidx)
{
	u64 sq_cfg;

	sq_cfg = nicvf_queue_reg_read(nic, NIC_QSET_SQ_0_7_CFG, qidx);
	sq_cfg |= NICVF_SQ_EN;
	nicvf_queue_reg_write(nic, NIC_QSET_SQ_0_7_CFG, qidx, sq_cfg);
	/* Ring doorbell so that H/W restarts processing SQEs */
	nicvf_queue_reg_write(nic, NIC_QSET_SQ_0_7_DOOR, qidx, 0);
}

void nicvf_sq_disable(struct nicvf *nic, int qidx)
{
	u64 sq_cfg;

	sq_cfg = nicvf_queue_reg_read(nic, NIC_QSET_SQ_0_7_CFG, qidx);
	sq_cfg &= ~NICVF_SQ_EN;
	nicvf_queue_reg_write(nic, NIC_QSET_SQ_0_7_CFG, qidx, sq_cfg);
}

void nicvf_sq_free_used_descs(struct net_device *netdev, struct snd_queue *sq,
			      int qidx)
{
	u64 head, tail;
	struct sk_buff *skb;
	struct nicvf *nic = netdev_priv(netdev);
	struct sq_hdr_subdesc *hdr;

	head = nicvf_queue_reg_read(nic, NIC_QSET_SQ_0_7_HEAD, qidx) >> 4;
	tail = nicvf_queue_reg_read(nic, NIC_QSET_SQ_0_7_TAIL, qidx) >> 4;
	while (sq->head != head) {
		hdr = (struct sq_hdr_subdesc *)GET_SQ_DESC(sq, sq->head);
		if (hdr->subdesc_type != SQ_DESC_TYPE_HEADER) {
			nicvf_put_sq_desc(sq, 1);
			continue;
		}
		skb = (struct sk_buff *)sq->skbuff[sq->head];
		if (skb)
			dev_kfree_skb_any(skb);
		atomic64_add(1, (atomic64_t *)&netdev->stats.tx_packets);
		atomic64_add(hdr->tot_len,
			     (atomic64_t *)&netdev->stats.tx_bytes);
		nicvf_put_sq_desc(sq, hdr->subdesc_cnt + 1);
	}
}

Sunil Goutham | 16f2bcc | 2017-05-02 18:36:56 +0530 | [diff] [blame] | 1201 | /* XDP Transmit APIs */ |
| 1202 | void nicvf_xdp_sq_doorbell(struct nicvf *nic, |
| 1203 | struct snd_queue *sq, int sq_num) |
| 1204 | { |
| 1205 | if (!sq->xdp_desc_cnt) |
| 1206 | return; |
| 1207 | |
| 1208 | /* make sure all memory stores are done before ringing doorbell */ |
| 1209 | wmb(); |
| 1210 | |
| 1211 | /* Inform HW to xmit all TSO segments */ |
| 1212 | nicvf_queue_reg_write(nic, NIC_QSET_SQ_0_7_DOOR, |
| 1213 | sq_num, sq->xdp_desc_cnt); |
| 1214 | sq->xdp_desc_cnt = 0; |
| 1215 | } |
| 1216 | |
| 1217 | static inline void |
| 1218 | nicvf_xdp_sq_add_hdr_subdesc(struct snd_queue *sq, int qentry, |
| 1219 | int subdesc_cnt, u64 data, int len) |
| 1220 | { |
| 1221 | struct sq_hdr_subdesc *hdr; |
| 1222 | |
| 1223 | hdr = (struct sq_hdr_subdesc *)GET_SQ_DESC(sq, qentry); |
| 1224 | memset(hdr, 0, SND_QUEUE_DESC_SIZE); |
| 1225 | hdr->subdesc_type = SQ_DESC_TYPE_HEADER; |
| 1226 | hdr->subdesc_cnt = subdesc_cnt; |
| 1227 | hdr->tot_len = len; |
| 1228 | hdr->post_cqe = 1; |
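|      | /* Remember the page backing this XDP buffer so the TX-completion
|      |  * path can release its reference once the frame has been sent.
|      |  */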
| 1229 | sq->xdp_page[qentry] = (u64)virt_to_page((void *)data); |
| 1230 | } |
| 1231 | |
| 1232 | int nicvf_xdp_sq_append_pkt(struct nicvf *nic, struct snd_queue *sq, |
| 1233 | u64 bufaddr, u64 dma_addr, u16 len) |
| 1234 | { |
| 1235 | int subdesc_cnt = MIN_SQ_DESC_PER_PKT_XMIT; |
| 1236 | int qentry; |
| 1237 | |
| 1238 | if (subdesc_cnt > sq->xdp_free_cnt) |
| 1239 | return 0; |
| 1240 | |
| 1241 | qentry = nicvf_get_sq_desc(sq, subdesc_cnt); |
| 1242 | |
| 1243 | nicvf_xdp_sq_add_hdr_subdesc(sq, qentry, subdesc_cnt - 1, bufaddr, len); |
| 1244 | |
| 1245 | qentry = nicvf_get_nxt_sqentry(sq, qentry); |
| 1246 | nicvf_sq_add_gather_subdesc(sq, qentry, len, dma_addr); |
| 1247 | |
| 1248 | sq->xdp_desc_cnt += subdesc_cnt; |
| 1249 | |
| 1250 | return 1; |
| 1251 | } |
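|      | /* Illustrative usage (not part of the original source): an XDP_TX
|      |  * action in the RX NAPI loop would queue the frame and then ring the
|      |  * doorbell, roughly:
|      |  *
|      |  *	if (nicvf_xdp_sq_append_pkt(nic, sq, bufaddr, dma_addr, len))
|      |  *		nicvf_xdp_sq_doorbell(nic, sq, sq_num);
|      |  *
|      |  * Deferring the doorbell to the end of the NAPI batch amortizes the
|      |  * MMIO write across several frames.
|      |  */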
| 1252 | |
Sunil Goutham | 4863dea | 2015-05-26 19:20:15 -0700 | [diff] [blame] | 1253 | /* Calculate the number of SQ subdescriptors needed to transmit all
| 1254 | * segments of this TSO packet. |
| 1255 | * Taken from 'Tilera network driver' with a minor modification. |
| 1256 | */ |
| 1257 | static int nicvf_tso_count_subdescs(struct sk_buff *skb) |
| 1258 | { |
| 1259 | struct skb_shared_info *sh = skb_shinfo(skb); |
| 1260 | unsigned int sh_len = skb_transport_offset(skb) + tcp_hdrlen(skb); |
| 1261 | unsigned int data_len = skb->len - sh_len; |
| 1262 | unsigned int p_len = sh->gso_size; |
| 1263 | long f_id = -1; /* id of the current fragment */ |
| 1264 | long f_size = skb_headlen(skb) - sh_len; /* current fragment size */ |
| 1265 | long f_used = 0; /* bytes used from the current fragment */ |
| 1266 | long n; /* size of the current piece of payload */ |
| 1267 | int num_edescs = 0; |
| 1268 | int segment; |
| 1269 | |
| 1270 | for (segment = 0; segment < sh->gso_segs; segment++) { |
| 1271 | unsigned int p_used = 0; |
| 1272 | |
| 1273 | /* One edesc for header and for each piece of the payload. */ |
| 1274 | for (num_edescs++; p_used < p_len; num_edescs++) { |
| 1275 | /* Advance as needed. */ |
| 1276 | while (f_used >= f_size) { |
| 1277 | f_id++; |
| 1278 | f_size = skb_frag_size(&sh->frags[f_id]); |
| 1279 | f_used = 0; |
| 1280 | } |
| 1281 | |
| 1282 | /* Use bytes from the current fragment. */ |
| 1283 | n = p_len - p_used; |
| 1284 | if (n > f_size - f_used) |
| 1285 | n = f_size - f_used; |
| 1286 | f_used += n; |
| 1287 | p_used += n; |
| 1288 | } |
| 1289 | |
| 1290 | /* The last segment may be less than gso_size. */ |
| 1291 | data_len -= p_len; |
| 1292 | if (data_len < p_len) |
| 1293 | p_len = data_len; |
| 1294 | } |
| 1295 | |
| 1296 | /* '+ gso_segs' for SQ_HDR_SUBDESCs for each segment */
| 1297 | return num_edescs + sh->gso_segs; |
| 1298 | } |
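|      | /* Worked example for nicvf_tso_count_subdescs() (illustrative): a fully
|      |  * linear skb with gso_segs = 3 and all payload in the skb head needs,
|      |  * per segment, one edesc for the header and one for the single payload
|      |  * piece, i.e. num_edescs = 6; adding one SQ_HDR_SUBDESC per segment
|      |  * gives 6 + 3 = 9 subdescriptors.
|      |  */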
| 1299 | |
Sunil Goutham | 7ceb8a1 | 2016-08-30 11:36:27 +0530 | [diff] [blame] | 1300 | #define POST_CQE_DESC_COUNT 2 |
| 1301 | |
Sunil Goutham | 4863dea | 2015-05-26 19:20:15 -0700 | [diff] [blame] | 1302 | /* Get the number of SQ descriptors needed to xmit this skb */ |
| 1303 | static int nicvf_sq_subdesc_required(struct nicvf *nic, struct sk_buff *skb) |
| 1304 | { |
| 1305 | int subdesc_cnt = MIN_SQ_DESC_PER_PKT_XMIT; |
| 1306 | |
Sunil Goutham | 40fb5f8 | 2015-12-10 13:25:19 +0530 | [diff] [blame] | 1307 | if (skb_shinfo(skb)->gso_size && !nic->hw_tso) { |
Sunil Goutham | 4863dea | 2015-05-26 19:20:15 -0700 | [diff] [blame] | 1308 | subdesc_cnt = nicvf_tso_count_subdescs(skb); |
| 1309 | return subdesc_cnt; |
| 1310 | } |
| 1311 | |
Sunil Goutham | 7ceb8a1 | 2016-08-30 11:36:27 +0530 | [diff] [blame] | 1312 | /* Dummy descriptors to get TSO pkt completion notification */ |
| 1313 | if (nic->t88 && nic->hw_tso && skb_shinfo(skb)->gso_size) |
| 1314 | subdesc_cnt += POST_CQE_DESC_COUNT; |
| 1315 | |
Sunil Goutham | 4863dea | 2015-05-26 19:20:15 -0700 | [diff] [blame] | 1316 | if (skb_shinfo(skb)->nr_frags) |
| 1317 | subdesc_cnt += skb_shinfo(skb)->nr_frags; |
| 1318 | |
| 1319 | return subdesc_cnt; |
| 1320 | } |
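|      | /* Worked example for nicvf_sq_subdesc_required() (illustrative,
|      |  * assuming MIN_SQ_DESC_PER_PKT_XMIT covers HDR + one GATHER): a
|      |  * non-TSO skb with two page frags needs 2 + 2 = 4 subdescriptors.
|      |  */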
| 1321 | |
| 1322 | /* Add SQ HEADER subdescriptor. |
| 1323 | * First subdescriptor for every send descriptor. |
| 1324 | */ |
| 1325 | static inline void |
Sunil Goutham | 40fb5f8 | 2015-12-10 13:25:19 +0530 | [diff] [blame] | 1326 | nicvf_sq_add_hdr_subdesc(struct nicvf *nic, struct snd_queue *sq, int qentry, |
Sunil Goutham | 4863dea | 2015-05-26 19:20:15 -0700 | [diff] [blame] | 1327 | int subdesc_cnt, struct sk_buff *skb, int len) |
| 1328 | { |
| 1329 | int proto; |
| 1330 | struct sq_hdr_subdesc *hdr; |
Thanneeru Srinivasulu | 3a9024f | 2017-04-06 16:12:26 +0530 | [diff] [blame] | 1331 | union { |
| 1332 | struct iphdr *v4; |
| 1333 | struct ipv6hdr *v6; |
| 1334 | unsigned char *hdr; |
| 1335 | } ip; |
Sunil Goutham | 4863dea | 2015-05-26 19:20:15 -0700 | [diff] [blame] | 1336 | |
Thanneeru Srinivasulu | 3a9024f | 2017-04-06 16:12:26 +0530 | [diff] [blame] | 1337 | ip.hdr = skb_network_header(skb); |
Sunil Goutham | 4863dea | 2015-05-26 19:20:15 -0700 | [diff] [blame] | 1338 | hdr = (struct sq_hdr_subdesc *)GET_SQ_DESC(sq, qentry); |
Sunil Goutham | 4863dea | 2015-05-26 19:20:15 -0700 | [diff] [blame] | 1339 | memset(hdr, 0, SND_QUEUE_DESC_SIZE); |
| 1340 | hdr->subdesc_type = SQ_DESC_TYPE_HEADER; |
Sunil Goutham | 7ceb8a1 | 2016-08-30 11:36:27 +0530 | [diff] [blame] | 1341 | |
| 1342 | if (nic->t88 && nic->hw_tso && skb_shinfo(skb)->gso_size) { |
| 1343 | /* post_cqe = 0, to avoid HW posting a CQE for every TSO |
| 1344 | * segment transmitted on 88xx. |
| 1345 | */ |
| 1346 | hdr->subdesc_cnt = subdesc_cnt - POST_CQE_DESC_COUNT; |
| 1347 | } else { |
| 1348 | sq->skbuff[qentry] = (u64)skb; |
| 1349 | /* Enable notification via CQE after processing SQE */ |
| 1350 | hdr->post_cqe = 1; |
| 1351 | /* Number of subdescriptors following this */
| 1352 | hdr->subdesc_cnt = subdesc_cnt; |
| 1353 | } |
Sunil Goutham | 4863dea | 2015-05-26 19:20:15 -0700 | [diff] [blame] | 1354 | hdr->tot_len = len; |
| 1355 | |
| 1356 | /* Offload checksum calculation to HW */ |
| 1357 | if (skb->ip_summed == CHECKSUM_PARTIAL) { |
Sunil Goutham | 4863dea | 2015-05-26 19:20:15 -0700 | [diff] [blame] | 1358 | hdr->csum_l3 = 1; /* Enable IP csum calculation */ |
| 1359 | hdr->l3_offset = skb_network_offset(skb); |
| 1360 | hdr->l4_offset = skb_transport_offset(skb); |
| 1361 | |
Thanneeru Srinivasulu | 3a9024f | 2017-04-06 16:12:26 +0530 | [diff] [blame] | 1362 | proto = (ip.v4->version == 4) ? ip.v4->protocol : |
| 1363 | ip.v6->nexthdr; |
| 1364 | |
Sunil Goutham | 4863dea | 2015-05-26 19:20:15 -0700 | [diff] [blame] | 1365 | switch (proto) { |
| 1366 | case IPPROTO_TCP: |
| 1367 | hdr->csum_l4 = SEND_L4_CSUM_TCP; |
| 1368 | break; |
| 1369 | case IPPROTO_UDP: |
| 1370 | hdr->csum_l4 = SEND_L4_CSUM_UDP; |
| 1371 | break; |
| 1372 | case IPPROTO_SCTP: |
| 1373 | hdr->csum_l4 = SEND_L4_CSUM_SCTP; |
| 1374 | break; |
| 1375 | } |
| 1376 | } |
Sunil Goutham | 40fb5f8 | 2015-12-10 13:25:19 +0530 | [diff] [blame] | 1377 | |
| 1378 | if (nic->hw_tso && skb_shinfo(skb)->gso_size) { |
| 1379 | hdr->tso = 1; |
| 1380 | hdr->tso_start = skb_transport_offset(skb) + tcp_hdrlen(skb); |
| 1381 | hdr->tso_max_paysize = skb_shinfo(skb)->gso_size; |
| 1382 | /* For non-tunneled pkts, point this to L2 ethertype */ |
| 1383 | hdr->inner_l3_offset = skb_network_offset(skb) - 2; |
Sunil Goutham | 964cb69 | 2016-11-15 17:38:16 +0530 | [diff] [blame] | 1384 | this_cpu_inc(nic->pnicvf->drv_stats->tx_tso); |
Sunil Goutham | 40fb5f8 | 2015-12-10 13:25:19 +0530 | [diff] [blame] | 1385 | } |
Sunil Goutham | 4863dea | 2015-05-26 19:20:15 -0700 | [diff] [blame] | 1386 | } |
| 1387 | |
| 1388 | /* SQ GATHER subdescriptor |
| 1389 | * Must follow HDR descriptor |
| 1390 | */ |
| 1391 | static inline void nicvf_sq_add_gather_subdesc(struct snd_queue *sq, int qentry, |
| 1392 | int size, u64 data) |
| 1393 | { |
| 1394 | struct sq_gather_subdesc *gather; |
| 1395 | |
| 1396 | qentry &= (sq->dmem.q_len - 1); |
| 1397 | gather = (struct sq_gather_subdesc *)GET_SQ_DESC(sq, qentry); |
| 1398 | |
| 1399 | memset(gather, 0, SND_QUEUE_DESC_SIZE); |
| 1400 | gather->subdesc_type = SQ_DESC_TYPE_GATHER; |
Sunil Goutham | 4b561c1 | 2015-07-29 16:49:36 +0300 | [diff] [blame] | 1401 | gather->ld_type = NIC_SEND_LD_TYPE_E_LDD; |
Sunil Goutham | 4863dea | 2015-05-26 19:20:15 -0700 | [diff] [blame] | 1402 | gather->size = size; |
| 1403 | gather->addr = data; |
| 1404 | } |
| 1405 | |
Sunil Goutham | 7ceb8a1 | 2016-08-30 11:36:27 +0530 | [diff] [blame] | 1406 | /* Add HDR + IMMEDIATE subdescriptors right after descriptors of a TSO |
| 1407 | * packet so that a CQE is posted as a notification for transmission
| 1408 | * of the TSO packet.
| 1409 | */ |
| 1410 | static inline void nicvf_sq_add_cqe_subdesc(struct snd_queue *sq, int qentry, |
| 1411 | int tso_sqe, struct sk_buff *skb) |
| 1412 | { |
| 1413 | struct sq_imm_subdesc *imm; |
| 1414 | struct sq_hdr_subdesc *hdr; |
| 1415 | |
| 1416 | sq->skbuff[qentry] = (u64)skb; |
| 1417 | |
| 1418 | hdr = (struct sq_hdr_subdesc *)GET_SQ_DESC(sq, qentry); |
| 1419 | memset(hdr, 0, SND_QUEUE_DESC_SIZE); |
| 1420 | hdr->subdesc_type = SQ_DESC_TYPE_HEADER; |
| 1421 | /* Enable notification via CQE after processing SQE */ |
| 1422 | hdr->post_cqe = 1; |
| 1423 | /* There is no packet to transmit here */ |
| 1424 | hdr->dont_send = 1; |
| 1425 | hdr->subdesc_cnt = POST_CQE_DESC_COUNT - 1; |
| 1426 | hdr->tot_len = 1; |
| 1427 | /* Actual TSO header SQE index, needed for cleanup */ |
| 1428 | hdr->rsvd2 = tso_sqe; |
| 1429 | |
| 1430 | qentry = nicvf_get_nxt_sqentry(sq, qentry); |
| 1431 | imm = (struct sq_imm_subdesc *)GET_SQ_DESC(sq, qentry); |
| 1432 | memset(imm, 0, SND_QUEUE_DESC_SIZE); |
| 1433 | imm->subdesc_type = SQ_DESC_TYPE_IMMEDIATE; |
| 1434 | imm->len = 1; |
| 1435 | } |
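|      | /* Illustrative ring layout for a HW-TSO packet on 88xx (not from the
|      |  * original source):
|      |  *
|      |  *	[HDR post_cqe=0][GATHER]...[GATHER][HDR dont_send post_cqe=1][IMM]
|      |  *
|      |  * The trailing HDR + IMMEDIATE pair exists only so that a single CQE is
|      |  * posted once the whole TSO packet has been transmitted; hdr->rsvd2
|      |  * points back at the real TSO header SQE for cleanup.
|      |  */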
| 1436 | |
Sunil Goutham | 2c204c2 | 2016-09-23 14:42:28 +0530 | [diff] [blame] | 1437 | static inline void nicvf_sq_doorbell(struct nicvf *nic, struct sk_buff *skb, |
| 1438 | int sq_num, int desc_cnt) |
| 1439 | { |
| 1440 | struct netdev_queue *txq; |
| 1441 | |
| 1442 | txq = netdev_get_tx_queue(nic->pnicvf->netdev, |
| 1443 | skb_get_queue_mapping(skb)); |
| 1444 | |
| 1445 | netdev_tx_sent_queue(txq, skb->len); |
| 1446 | |
| 1447 | /* make sure all memory stores are done before ringing doorbell */ |
| 1448 | smp_wmb(); |
| 1449 | |
| 1450 | /* Inform HW to xmit the new SQ descriptors */
| 1451 | nicvf_queue_reg_write(nic, NIC_QSET_SQ_0_7_DOOR, |
| 1452 | sq_num, desc_cnt); |
| 1453 | } |
| 1454 | |
Sunil Goutham | 4863dea | 2015-05-26 19:20:15 -0700 | [diff] [blame] | 1455 | /* Segment a TSO packet into 'gso_size' segments and append |
| 1456 | * them to the SQ for transfer
| 1457 | */ |
| 1458 | static int nicvf_sq_append_tso(struct nicvf *nic, struct snd_queue *sq, |
Sunil Goutham | 92dc876 | 2015-08-30 12:29:15 +0300 | [diff] [blame] | 1459 | int sq_num, int qentry, struct sk_buff *skb) |
Sunil Goutham | 4863dea | 2015-05-26 19:20:15 -0700 | [diff] [blame] | 1460 | { |
| 1461 | struct tso_t tso; |
| 1462 | int seg_subdescs = 0, desc_cnt = 0; |
| 1463 | int seg_len, total_len, data_left; |
| 1464 | int hdr_qentry = qentry; |
| 1465 | int hdr_len = skb_transport_offset(skb) + tcp_hdrlen(skb); |
| 1466 | |
| 1467 | tso_start(skb, &tso); |
| 1468 | total_len = skb->len - hdr_len; |
| 1469 | while (total_len > 0) { |
| 1470 | char *hdr; |
| 1471 | |
| 1472 | /* Save Qentry for adding HDR_SUBDESC at the end */ |
| 1473 | hdr_qentry = qentry; |
| 1474 | |
| 1475 | data_left = min_t(int, skb_shinfo(skb)->gso_size, total_len); |
| 1476 | total_len -= data_left; |
| 1477 | |
| 1478 | /* Add segment's header */ |
| 1479 | qentry = nicvf_get_nxt_sqentry(sq, qentry); |
| 1480 | hdr = sq->tso_hdrs + qentry * TSO_HEADER_SIZE; |
| 1481 | tso_build_hdr(skb, hdr, &tso, data_left, total_len == 0); |
| 1482 | nicvf_sq_add_gather_subdesc(sq, qentry, hdr_len, |
| 1483 | sq->tso_hdrs_phys + |
| 1484 | qentry * TSO_HEADER_SIZE); |
| 1485 | /* HDR_SUBDESC + GATHER */
| 1486 | seg_subdescs = 2; |
| 1487 | seg_len = hdr_len; |
| 1488 | |
| 1489 | /* Add segment's payload fragments */ |
| 1490 | while (data_left > 0) { |
| 1491 | int size; |
| 1492 | |
| 1493 | size = min_t(int, tso.size, data_left); |
| 1494 | |
| 1495 | qentry = nicvf_get_nxt_sqentry(sq, qentry); |
| 1496 | nicvf_sq_add_gather_subdesc(sq, qentry, size, |
| 1497 | virt_to_phys(tso.data)); |
| 1498 | seg_subdescs++; |
| 1499 | seg_len += size; |
| 1500 | |
| 1501 | data_left -= size; |
| 1502 | tso_build_data(skb, &tso, size); |
| 1503 | } |
Sunil Goutham | 40fb5f8 | 2015-12-10 13:25:19 +0530 | [diff] [blame] | 1504 | nicvf_sq_add_hdr_subdesc(nic, sq, hdr_qentry, |
Sunil Goutham | 4863dea | 2015-05-26 19:20:15 -0700 | [diff] [blame] | 1505 | seg_subdescs - 1, skb, seg_len); |
Sunil Goutham | 143ceb0 | 2015-07-29 16:49:37 +0300 | [diff] [blame] | 1506 | sq->skbuff[hdr_qentry] = (u64)NULL; |
Sunil Goutham | 4863dea | 2015-05-26 19:20:15 -0700 | [diff] [blame] | 1507 | qentry = nicvf_get_nxt_sqentry(sq, qentry); |
| 1508 | |
| 1509 | desc_cnt += seg_subdescs; |
| 1510 | } |
| 1511 | /* Save SKB in the last segment for freeing */ |
| 1512 | sq->skbuff[hdr_qentry] = (u64)skb; |
| 1513 | |
Sunil Goutham | 2c204c2 | 2016-09-23 14:42:28 +0530 | [diff] [blame] | 1514 | nicvf_sq_doorbell(nic, skb, sq_num, desc_cnt); |
Sunil Goutham | 4863dea | 2015-05-26 19:20:15 -0700 | [diff] [blame] | 1515 | |
Sunil Goutham | 964cb69 | 2016-11-15 17:38:16 +0530 | [diff] [blame] | 1516 | this_cpu_inc(nic->pnicvf->drv_stats->tx_tso); |
Sunil Goutham | 4863dea | 2015-05-26 19:20:15 -0700 | [diff] [blame] | 1517 | return 1; |
| 1518 | } |
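|      | /* Note on nicvf_sq_append_tso(): for each software-segmented chunk the
|      |  * HDR subdescriptor is written last, into the slot saved in hdr_qentry,
|      |  * once the number of GATHER entries is known:
|      |  *
|      |  *	[HDR][GATHER copied hdr][GATHER payload]...[GATHER payload]
|      |  */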
| 1519 | |
| 1520 | /* Append an skb to a SQ for packet transfer. */ |
Sunil Goutham | bd3ad7d | 2016-12-01 18:24:28 +0530 | [diff] [blame] | 1521 | int nicvf_sq_append_skb(struct nicvf *nic, struct snd_queue *sq, |
| 1522 | struct sk_buff *skb, u8 sq_num) |
Sunil Goutham | 4863dea | 2015-05-26 19:20:15 -0700 | [diff] [blame] | 1523 | { |
| 1524 | int i, size; |
Sunil Goutham | 83abb7d | 2017-03-07 18:09:08 +0530 | [diff] [blame] | 1525 | int subdesc_cnt, hdr_sqe = 0; |
Sunil Goutham | bd3ad7d | 2016-12-01 18:24:28 +0530 | [diff] [blame] | 1526 | int qentry; |
Sunil Goutham | 83abb7d | 2017-03-07 18:09:08 +0530 | [diff] [blame] | 1527 | u64 dma_addr; |
Sunil Goutham | 4863dea | 2015-05-26 19:20:15 -0700 | [diff] [blame] | 1528 | |
| 1529 | subdesc_cnt = nicvf_sq_subdesc_required(nic, skb); |
| 1530 | if (subdesc_cnt > atomic_read(&sq->free_cnt)) |
| 1531 | goto append_fail; |
| 1532 | |
| 1533 | qentry = nicvf_get_sq_desc(sq, subdesc_cnt); |
| 1534 | |
| 1535 | /* Check if it's a TSO packet */
Sunil Goutham | 40fb5f8 | 2015-12-10 13:25:19 +0530 | [diff] [blame] | 1536 | if (skb_shinfo(skb)->gso_size && !nic->hw_tso) |
Sunil Goutham | 92dc876 | 2015-08-30 12:29:15 +0300 | [diff] [blame] | 1537 | return nicvf_sq_append_tso(nic, sq, sq_num, qentry, skb); |
Sunil Goutham | 4863dea | 2015-05-26 19:20:15 -0700 | [diff] [blame] | 1538 | |
| 1539 | /* Add SQ header subdesc */ |
Sunil Goutham | 40fb5f8 | 2015-12-10 13:25:19 +0530 | [diff] [blame] | 1540 | nicvf_sq_add_hdr_subdesc(nic, sq, qentry, subdesc_cnt - 1, |
| 1541 | skb, skb->len); |
Sunil Goutham | 83abb7d | 2017-03-07 18:09:08 +0530 | [diff] [blame] | 1542 | hdr_sqe = qentry; |
Sunil Goutham | 4863dea | 2015-05-26 19:20:15 -0700 | [diff] [blame] | 1543 | |
| 1544 | /* Add SQ gather subdescs */ |
| 1545 | qentry = nicvf_get_nxt_sqentry(sq, qentry); |
| 1546 | size = skb_is_nonlinear(skb) ? skb_headlen(skb) : skb->len; |
Sunil Goutham | 83abb7d | 2017-03-07 18:09:08 +0530 | [diff] [blame] | 1547 | /* HW will ensure data coherency, CPU sync not required */ |
| 1548 | dma_addr = dma_map_page_attrs(&nic->pdev->dev, virt_to_page(skb->data), |
| 1549 | offset_in_page(skb->data), size, |
| 1550 | DMA_TO_DEVICE, DMA_ATTR_SKIP_CPU_SYNC); |
| 1551 | if (dma_mapping_error(&nic->pdev->dev, dma_addr)) { |
| 1552 | nicvf_rollback_sq_desc(sq, qentry, subdesc_cnt); |
| 1553 | return 0; |
| 1554 | } |
| 1555 | |
| 1556 | nicvf_sq_add_gather_subdesc(sq, qentry, size, dma_addr); |
Sunil Goutham | 4863dea | 2015-05-26 19:20:15 -0700 | [diff] [blame] | 1557 | |
| 1558 | /* Check for scattered buffer */ |
| 1559 | if (!skb_is_nonlinear(skb)) |
| 1560 | goto doorbell; |
| 1561 | |
| 1562 | for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) { |
| 1563 | const struct skb_frag_struct *frag; |
| 1564 | |
| 1565 | frag = &skb_shinfo(skb)->frags[i]; |
| 1566 | |
| 1567 | qentry = nicvf_get_nxt_sqentry(sq, qentry); |
| 1568 | size = skb_frag_size(frag); |
Sunil Goutham | 83abb7d | 2017-03-07 18:09:08 +0530 | [diff] [blame] | 1569 | dma_addr = dma_map_page_attrs(&nic->pdev->dev, |
| 1570 | skb_frag_page(frag), |
| 1571 | frag->page_offset, size, |
| 1572 | DMA_TO_DEVICE, |
| 1573 | DMA_ATTR_SKIP_CPU_SYNC); |
| 1574 | if (dma_mapping_error(&nic->pdev->dev, dma_addr)) { |
| 1575 | /* Unmap the entire chain of buffers mapped so far;
| 1576 | * here 'i' = frags mapped + the skb->data mapped above
| 1577 | */ |
| 1578 | nicvf_unmap_sndq_buffers(nic, sq, hdr_sqe, i); |
| 1579 | nicvf_rollback_sq_desc(sq, qentry, subdesc_cnt); |
| 1580 | return 0; |
| 1581 | } |
| 1582 | nicvf_sq_add_gather_subdesc(sq, qentry, size, dma_addr); |
Sunil Goutham | 4863dea | 2015-05-26 19:20:15 -0700 | [diff] [blame] | 1583 | } |
| 1584 | |
| 1585 | doorbell: |
Sunil Goutham | 7ceb8a1 | 2016-08-30 11:36:27 +0530 | [diff] [blame] | 1586 | if (nic->t88 && skb_shinfo(skb)->gso_size) { |
| 1587 | qentry = nicvf_get_nxt_sqentry(sq, qentry); |
Sunil Goutham | 83abb7d | 2017-03-07 18:09:08 +0530 | [diff] [blame] | 1588 | nicvf_sq_add_cqe_subdesc(sq, qentry, hdr_sqe, skb); |
Sunil Goutham | 7ceb8a1 | 2016-08-30 11:36:27 +0530 | [diff] [blame] | 1589 | } |
| 1590 | |
Sunil Goutham | 2c204c2 | 2016-09-23 14:42:28 +0530 | [diff] [blame] | 1591 | nicvf_sq_doorbell(nic, skb, sq_num, subdesc_cnt); |
Sunil Goutham | 4863dea | 2015-05-26 19:20:15 -0700 | [diff] [blame] | 1592 | |
Sunil Goutham | 4863dea | 2015-05-26 19:20:15 -0700 | [diff] [blame] | 1593 | return 1; |
| 1594 | |
| 1595 | append_fail: |
Sunil Goutham | 92dc876 | 2015-08-30 12:29:15 +0300 | [diff] [blame] | 1596 | /* Use original PCI dev for debug log */ |
| 1597 | nic = nic->pnicvf; |
Sunil Goutham | 4863dea | 2015-05-26 19:20:15 -0700 | [diff] [blame] | 1598 | netdev_dbg(nic->netdev, "Not enough SQ descriptors to xmit pkt\n"); |
| 1599 | return 0; |
| 1600 | } |
| 1601 | |
| 1602 | static inline unsigned frag_num(unsigned i) |
| 1603 | { |
| 1604 | #ifdef __BIG_ENDIAN |
| 1605 | return (i & ~3) + 3 - (i & 3); |
| 1606 | #else |
| 1607 | return i; |
| 1608 | #endif |
| 1609 | } |
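|      | /* On big-endian the u16 rb_lens entries within each 64-bit CQE word are
|      |  * laid out in reverse, so indices 0..7 map to 3, 2, 1, 0, 7, 6, 5, 4
|      |  * (e.g. frag_num(4) == 7).
|      |  */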
| 1610 | |
Sunil Goutham | c56d91c | 2017-05-02 18:36:55 +0530 | [diff] [blame] | 1611 | static void nicvf_unmap_rcv_buffer(struct nicvf *nic, u64 dma_addr, |
| 1612 | u64 buf_addr, bool xdp) |
| 1613 | { |
| 1614 | struct page *page = NULL; |
| 1615 | int len = RCV_FRAG_LEN; |
| 1616 | |
| 1617 | if (xdp) { |
| 1618 | page = virt_to_page(phys_to_virt(buf_addr)); |
| 1619 | /* If it's a recycled page, skip the DMA unmap;
| 1620 | * only non-recycled buffers are unmapped here.
| 1621 | *
| 1622 | * A recycled page holds an extra reference.
| 1623 | */ |
| 1624 | if (page_ref_count(page) != 1) |
| 1625 | return; |
Sunil Goutham | e3d06ff | 2017-05-02 18:36:57 +0530 | [diff] [blame] | 1626 | |
| 1627 | len += XDP_PACKET_HEADROOM; |
Sunil Goutham | c56d91c | 2017-05-02 18:36:55 +0530 | [diff] [blame] | 1628 | /* Receive buffers in XDP mode are mapped from page start */ |
| 1629 | dma_addr &= PAGE_MASK; |
| 1630 | } |
| 1631 | dma_unmap_page_attrs(&nic->pdev->dev, dma_addr, len, |
| 1632 | DMA_FROM_DEVICE, DMA_ATTR_SKIP_CPU_SYNC); |
| 1633 | } |
| 1634 | |
Sunil Goutham | 4863dea | 2015-05-26 19:20:15 -0700 | [diff] [blame] | 1635 | /* Returns SKB for a received packet */ |
Sunil Goutham | c56d91c | 2017-05-02 18:36:55 +0530 | [diff] [blame] | 1636 | struct sk_buff *nicvf_get_rcv_skb(struct nicvf *nic, |
| 1637 | struct cqe_rx_t *cqe_rx, bool xdp) |
Sunil Goutham | 4863dea | 2015-05-26 19:20:15 -0700 | [diff] [blame] | 1638 | { |
| 1639 | int frag; |
| 1640 | int payload_len = 0; |
| 1641 | struct sk_buff *skb = NULL; |
Sunil Goutham | a8671ac | 2016-08-12 16:51:37 +0530 | [diff] [blame] | 1642 | struct page *page; |
| 1643 | int offset; |
Sunil Goutham | 4863dea | 2015-05-26 19:20:15 -0700 | [diff] [blame] | 1644 | u16 *rb_lens = NULL; |
| 1645 | u64 *rb_ptrs = NULL; |
Sunil Goutham | 83abb7d | 2017-03-07 18:09:08 +0530 | [diff] [blame] | 1646 | u64 phys_addr; |
Sunil Goutham | 4863dea | 2015-05-26 19:20:15 -0700 | [diff] [blame] | 1647 | |
| 1648 | rb_lens = (void *)cqe_rx + (3 * sizeof(u64)); |
Sunil Goutham | 02a72bd | 2016-08-12 16:51:28 +0530 | [diff] [blame] | 1649 | /* On all chips except 88xx pass1, CQE_RX2_S is added to
| 1650 | * CQE_RX at word6, hence the buffer pointers move by a word.
| 1651 | *
| 1652 | * Use the existing 'hw_tso' flag, which is set for all chips
| 1653 | * except 88xx pass1, instead of an additional cache line
| 1654 | * access (or miss) from reading the PCI dev's revision.
| 1655 | */ |
| 1656 | if (!nic->hw_tso) |
| 1657 | rb_ptrs = (void *)cqe_rx + (6 * sizeof(u64)); |
| 1658 | else |
| 1659 | rb_ptrs = (void *)cqe_rx + (7 * sizeof(u64)); |
Sunil Goutham | 4863dea | 2015-05-26 19:20:15 -0700 | [diff] [blame] | 1660 | |
Sunil Goutham | 4863dea | 2015-05-26 19:20:15 -0700 | [diff] [blame] | 1661 | for (frag = 0; frag < cqe_rx->rb_cnt; frag++) { |
| 1662 | payload_len = rb_lens[frag_num(frag)]; |
Sunil Goutham | 83abb7d | 2017-03-07 18:09:08 +0530 | [diff] [blame] | 1663 | phys_addr = nicvf_iova_to_phys(nic, *rb_ptrs); |
| 1664 | if (!phys_addr) { |
| 1665 | if (skb) |
| 1666 | dev_kfree_skb_any(skb); |
| 1667 | return NULL; |
| 1668 | } |
| 1669 | |
Sunil Goutham | 4863dea | 2015-05-26 19:20:15 -0700 | [diff] [blame] | 1670 | if (!frag) { |
| 1671 | /* First fragment */ |
Sunil Goutham | c56d91c | 2017-05-02 18:36:55 +0530 | [diff] [blame] | 1672 | nicvf_unmap_rcv_buffer(nic, |
| 1673 | *rb_ptrs - cqe_rx->align_pad, |
| 1674 | phys_addr, xdp); |
Sunil Goutham | 4863dea | 2015-05-26 19:20:15 -0700 | [diff] [blame] | 1675 | skb = nicvf_rb_ptr_to_skb(nic, |
Sunil Goutham | 83abb7d | 2017-03-07 18:09:08 +0530 | [diff] [blame] | 1676 | phys_addr - cqe_rx->align_pad, |
Sunil Goutham | 4863dea | 2015-05-26 19:20:15 -0700 | [diff] [blame] | 1677 | payload_len); |
| 1678 | if (!skb) |
| 1679 | return NULL; |
| 1680 | skb_reserve(skb, cqe_rx->align_pad); |
| 1681 | skb_put(skb, payload_len); |
| 1682 | } else { |
| 1683 | /* Add fragments */ |
Sunil Goutham | c56d91c | 2017-05-02 18:36:55 +0530 | [diff] [blame] | 1684 | nicvf_unmap_rcv_buffer(nic, *rb_ptrs, phys_addr, xdp); |
Sunil Goutham | 83abb7d | 2017-03-07 18:09:08 +0530 | [diff] [blame] | 1685 | page = virt_to_page(phys_to_virt(phys_addr)); |
| 1686 | offset = phys_to_virt(phys_addr) - page_address(page); |
Sunil Goutham | a8671ac | 2016-08-12 16:51:37 +0530 | [diff] [blame] | 1687 | skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags, page, |
| 1688 | offset, payload_len, RCV_FRAG_LEN); |
Sunil Goutham | 4863dea | 2015-05-26 19:20:15 -0700 | [diff] [blame] | 1689 | } |
| 1690 | /* Next buffer pointer */ |
| 1691 | rb_ptrs++; |
| 1692 | } |
| 1693 | return skb; |
| 1694 | } |
| 1695 | |
Yury Norov | b45ceb4 | 2015-12-07 10:30:32 +0530 | [diff] [blame] | 1696 | static u64 nicvf_int_type_to_mask(int int_type, int q_idx) |
Sunil Goutham | 4863dea | 2015-05-26 19:20:15 -0700 | [diff] [blame] | 1697 | { |
| 1698 | u64 reg_val; |
| 1699 | |
Sunil Goutham | 4863dea | 2015-05-26 19:20:15 -0700 | [diff] [blame] | 1700 | switch (int_type) { |
| 1701 | case NICVF_INTR_CQ: |
| 1702 | reg_val = ((1ULL << q_idx) << NICVF_INTR_CQ_SHIFT); |
| 1703 | break; |
| 1704 | case NICVF_INTR_SQ: |
| 1705 | reg_val = ((1ULL << q_idx) << NICVF_INTR_SQ_SHIFT); |
| 1706 | break; |
| 1707 | case NICVF_INTR_RBDR: |
| 1708 | reg_val = ((1ULL << q_idx) << NICVF_INTR_RBDR_SHIFT); |
| 1709 | break; |
| 1710 | case NICVF_INTR_PKT_DROP: |
| 1711 | reg_val = (1ULL << NICVF_INTR_PKT_DROP_SHIFT); |
| 1712 | break; |
| 1713 | case NICVF_INTR_TCP_TIMER: |
| 1714 | reg_val = (1ULL << NICVF_INTR_TCP_TIMER_SHIFT); |
| 1715 | break; |
| 1716 | case NICVF_INTR_MBOX: |
| 1717 | reg_val = (1ULL << NICVF_INTR_MBOX_SHIFT); |
| 1718 | break; |
| 1719 | case NICVF_INTR_QS_ERR: |
Yury Norov | b45ceb4 | 2015-12-07 10:30:32 +0530 | [diff] [blame] | 1720 | reg_val = (1ULL << NICVF_INTR_QS_ERR_SHIFT); |
Sunil Goutham | 4863dea | 2015-05-26 19:20:15 -0700 | [diff] [blame] | 1721 | break; |
| 1722 | default: |
Yury Norov | b45ceb4 | 2015-12-07 10:30:32 +0530 | [diff] [blame] | 1723 | reg_val = 0; |
Sunil Goutham | 4863dea | 2015-05-26 19:20:15 -0700 | [diff] [blame] | 1724 | } |
| 1725 | |
Yury Norov | b45ceb4 | 2015-12-07 10:30:32 +0530 | [diff] [blame] | 1726 | return reg_val; |
| 1727 | } |
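|      | /* For illustration (the actual shift values are defined elsewhere in the
|      |  * driver): if NICVF_INTR_SQ_SHIFT were 8, then
|      |  * nicvf_int_type_to_mask(NICVF_INTR_SQ, 2) would return 1ULL << 10,
|      |  * i.e. the bit controlling SQ2's interrupt.
|      |  */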
| 1728 | |
| 1729 | /* Enable interrupt */ |
| 1730 | void nicvf_enable_intr(struct nicvf *nic, int int_type, int q_idx) |
| 1731 | { |
| 1732 | u64 mask = nicvf_int_type_to_mask(int_type, q_idx); |
| 1733 | |
| 1734 | if (!mask) { |
| 1735 | netdev_dbg(nic->netdev, |
| 1736 | "Failed to enable interrupt: unknown type\n"); |
| 1737 | return; |
| 1738 | } |
| 1739 | nicvf_reg_write(nic, NIC_VF_ENA_W1S, |
| 1740 | nicvf_reg_read(nic, NIC_VF_ENA_W1S) | mask); |
| 1741 | } |
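|      | /* NIC_VF_ENA_W1S/W1C appear to follow the usual write-1-to-set /
|      |  * write-1-to-clear convention, so OR-ing the mask into the current
|      |  * W1S value above only ever sets bits; already-enabled interrupts
|      |  * are left untouched.
|      |  */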
| 1742 | |
| 1743 | /* Disable interrupt */ |
| 1744 | void nicvf_disable_intr(struct nicvf *nic, int int_type, int q_idx) |
| 1745 | { |
| 1746 | u64 mask = nicvf_int_type_to_mask(int_type, q_idx); |
| 1747 | |
| 1748 | if (!mask) { |
| 1749 | netdev_dbg(nic->netdev, |
| 1750 | "Failed to disable interrupt: unknown type\n"); |
| 1751 | return; |
| 1752 | } |
| 1753 | |
| 1754 | nicvf_reg_write(nic, NIC_VF_ENA_W1C, mask); |
| 1755 | } |
| 1756 | |
| 1757 | /* Clear interrupt */ |
| 1758 | void nicvf_clear_intr(struct nicvf *nic, int int_type, int q_idx) |
| 1759 | { |
| 1760 | u64 mask = nicvf_int_type_to_mask(int_type, q_idx); |
| 1761 | |
| 1762 | if (!mask) { |
| 1763 | netdev_dbg(nic->netdev, |
| 1764 | "Failed to clear interrupt: unknown type\n"); |
| 1765 | return; |
| 1766 | } |
| 1767 | |
| 1768 | nicvf_reg_write(nic, NIC_VF_INT, mask); |
Sunil Goutham | 4863dea | 2015-05-26 19:20:15 -0700 | [diff] [blame] | 1769 | } |
| 1770 | |
| 1771 | /* Check if interrupt is enabled */ |
| 1772 | int nicvf_is_intr_enabled(struct nicvf *nic, int int_type, int q_idx) |
| 1773 | { |
Yury Norov | b45ceb4 | 2015-12-07 10:30:32 +0530 | [diff] [blame] | 1774 | u64 mask = nicvf_int_type_to_mask(int_type, q_idx); |
| 1775 | /* If the interrupt type is unknown, treat it as disabled. */
| 1776 | if (!mask) { |
| 1777 | netdev_dbg(nic->netdev, |
Sunil Goutham | 4863dea | 2015-05-26 19:20:15 -0700 | [diff] [blame] | 1778 | "Failed to check interrupt enable: unknown type\n"); |
Yury Norov | b45ceb4 | 2015-12-07 10:30:32 +0530 | [diff] [blame] | 1779 | return 0; |
Sunil Goutham | 4863dea | 2015-05-26 19:20:15 -0700 | [diff] [blame] | 1780 | } |
| 1781 | |
Yury Norov | b45ceb4 | 2015-12-07 10:30:32 +0530 | [diff] [blame] | 1782 | return mask & nicvf_reg_read(nic, NIC_VF_ENA_W1S); |
Sunil Goutham | 4863dea | 2015-05-26 19:20:15 -0700 | [diff] [blame] | 1783 | } |
| 1784 | |
| 1785 | void nicvf_update_rq_stats(struct nicvf *nic, int rq_idx) |
| 1786 | { |
| 1787 | struct rcv_queue *rq; |
| 1788 | |
| 1789 | #define GET_RQ_STATS(reg) \ |
| 1790 | nicvf_reg_read(nic, NIC_QSET_RQ_0_7_STAT_0_1 |\ |
| 1791 | (rq_idx << NIC_Q_NUM_SHIFT) | (reg << 3)) |
| 1792 | |
| 1793 | rq = &nic->qs->rq[rq_idx]; |
| 1794 | rq->stats.bytes = GET_RQ_STATS(RQ_SQ_STATS_OCTS); |
| 1795 | rq->stats.pkts = GET_RQ_STATS(RQ_SQ_STATS_PKTS); |
| 1796 | } |
| 1797 | |
| 1798 | void nicvf_update_sq_stats(struct nicvf *nic, int sq_idx) |
| 1799 | { |
| 1800 | struct snd_queue *sq; |
| 1801 | |
| 1802 | #define GET_SQ_STATS(reg) \ |
| 1803 | nicvf_reg_read(nic, NIC_QSET_SQ_0_7_STAT_0_1 |\ |
| 1804 | (sq_idx << NIC_Q_NUM_SHIFT) | (reg << 3)) |
| 1805 | |
| 1806 | sq = &nic->qs->sq[sq_idx]; |
| 1807 | sq->stats.bytes = GET_SQ_STATS(RQ_SQ_STATS_OCTS); |
| 1808 | sq->stats.pkts = GET_SQ_STATS(RQ_SQ_STATS_PKTS); |
| 1809 | } |
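|      | /* In both stats macros above, the register address encodes the queue
|      |  * index at NIC_Q_NUM_SHIFT and the statistic selector (octets vs.
|      |  * packets) shifted left by 3, so each counter is a single register read.
|      |  */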
| 1810 | |
| 1811 | /* Check for errors in the receive completion queue entry */
Sunil Goutham | ad2eceb | 2016-02-16 16:29:51 +0530 | [diff] [blame] | 1812 | int nicvf_check_cqe_rx_errs(struct nicvf *nic, struct cqe_rx_t *cqe_rx) |
Sunil Goutham | 4863dea | 2015-05-26 19:20:15 -0700 | [diff] [blame] | 1813 | { |
Joe Perches | bf24e13 | 2017-06-27 03:56:54 -0700 | [diff] [blame] | 1814 | netif_err(nic, rx_err, nic->netdev, |
| 1815 | "RX error CQE err_level 0x%x err_opcode 0x%x\n", |
| 1816 | cqe_rx->err_level, cqe_rx->err_opcode); |
Sunil Goutham | 4863dea | 2015-05-26 19:20:15 -0700 | [diff] [blame] | 1817 | |
Sunil Goutham | 4863dea | 2015-05-26 19:20:15 -0700 | [diff] [blame] | 1818 | switch (cqe_rx->err_opcode) { |
| 1819 | case CQ_RX_ERROP_RE_PARTIAL: |
Sunil Goutham | 964cb69 | 2016-11-15 17:38:16 +0530 | [diff] [blame] | 1820 | this_cpu_inc(nic->drv_stats->rx_bgx_truncated_pkts); |
Sunil Goutham | 4863dea | 2015-05-26 19:20:15 -0700 | [diff] [blame] | 1821 | break; |
| 1822 | case CQ_RX_ERROP_RE_JABBER: |
Sunil Goutham | 964cb69 | 2016-11-15 17:38:16 +0530 | [diff] [blame] | 1823 | this_cpu_inc(nic->drv_stats->rx_jabber_errs); |
Sunil Goutham | 4863dea | 2015-05-26 19:20:15 -0700 | [diff] [blame] | 1824 | break; |
| 1825 | case CQ_RX_ERROP_RE_FCS: |
Sunil Goutham | 964cb69 | 2016-11-15 17:38:16 +0530 | [diff] [blame] | 1826 | this_cpu_inc(nic->drv_stats->rx_fcs_errs); |
Sunil Goutham | 4863dea | 2015-05-26 19:20:15 -0700 | [diff] [blame] | 1827 | break; |
| 1828 | case CQ_RX_ERROP_RE_RX_CTL: |
Sunil Goutham | 964cb69 | 2016-11-15 17:38:16 +0530 | [diff] [blame] | 1829 | this_cpu_inc(nic->drv_stats->rx_bgx_errs); |
Sunil Goutham | 4863dea | 2015-05-26 19:20:15 -0700 | [diff] [blame] | 1830 | break; |
| 1831 | case CQ_RX_ERROP_PREL2_ERR: |
Sunil Goutham | 964cb69 | 2016-11-15 17:38:16 +0530 | [diff] [blame] | 1832 | this_cpu_inc(nic->drv_stats->rx_prel2_errs); |
Sunil Goutham | 4863dea | 2015-05-26 19:20:15 -0700 | [diff] [blame] | 1833 | break; |
| 1834 | case CQ_RX_ERROP_L2_MAL: |
Sunil Goutham | 964cb69 | 2016-11-15 17:38:16 +0530 | [diff] [blame] | 1835 | this_cpu_inc(nic->drv_stats->rx_l2_hdr_malformed); |
Sunil Goutham | 4863dea | 2015-05-26 19:20:15 -0700 | [diff] [blame] | 1836 | break; |
| 1837 | case CQ_RX_ERROP_L2_OVERSIZE: |
Sunil Goutham | 964cb69 | 2016-11-15 17:38:16 +0530 | [diff] [blame] | 1838 | this_cpu_inc(nic->drv_stats->rx_oversize); |
Sunil Goutham | 4863dea | 2015-05-26 19:20:15 -0700 | [diff] [blame] | 1839 | break; |
| 1840 | case CQ_RX_ERROP_L2_UNDERSIZE: |
Sunil Goutham | 964cb69 | 2016-11-15 17:38:16 +0530 | [diff] [blame] | 1841 | this_cpu_inc(nic->drv_stats->rx_undersize); |
Sunil Goutham | 4863dea | 2015-05-26 19:20:15 -0700 | [diff] [blame] | 1842 | break; |
| 1843 | case CQ_RX_ERROP_L2_LENMISM: |
Sunil Goutham | 964cb69 | 2016-11-15 17:38:16 +0530 | [diff] [blame] | 1844 | this_cpu_inc(nic->drv_stats->rx_l2_len_mismatch); |
Sunil Goutham | 4863dea | 2015-05-26 19:20:15 -0700 | [diff] [blame] | 1845 | break; |
| 1846 | case CQ_RX_ERROP_L2_PCLP: |
Sunil Goutham | 964cb69 | 2016-11-15 17:38:16 +0530 | [diff] [blame] | 1847 | this_cpu_inc(nic->drv_stats->rx_l2_pclp); |
Sunil Goutham | 4863dea | 2015-05-26 19:20:15 -0700 | [diff] [blame] | 1848 | break; |
| 1849 | case CQ_RX_ERROP_IP_NOT: |
Sunil Goutham | 964cb69 | 2016-11-15 17:38:16 +0530 | [diff] [blame] | 1850 | this_cpu_inc(nic->drv_stats->rx_ip_ver_errs); |
Sunil Goutham | 4863dea | 2015-05-26 19:20:15 -0700 | [diff] [blame] | 1851 | break; |
| 1852 | case CQ_RX_ERROP_IP_CSUM_ERR: |
Sunil Goutham | 964cb69 | 2016-11-15 17:38:16 +0530 | [diff] [blame] | 1853 | this_cpu_inc(nic->drv_stats->rx_ip_csum_errs); |
Sunil Goutham | 4863dea | 2015-05-26 19:20:15 -0700 | [diff] [blame] | 1854 | break; |
| 1855 | case CQ_RX_ERROP_IP_MAL: |
Sunil Goutham | 964cb69 | 2016-11-15 17:38:16 +0530 | [diff] [blame] | 1856 | this_cpu_inc(nic->drv_stats->rx_ip_hdr_malformed); |
Sunil Goutham | 4863dea | 2015-05-26 19:20:15 -0700 | [diff] [blame] | 1857 | break; |
| 1858 | case CQ_RX_ERROP_IP_MALD: |
Sunil Goutham | 964cb69 | 2016-11-15 17:38:16 +0530 | [diff] [blame] | 1859 | this_cpu_inc(nic->drv_stats->rx_ip_payload_malformed); |
Sunil Goutham | 4863dea | 2015-05-26 19:20:15 -0700 | [diff] [blame] | 1860 | break; |
| 1861 | case CQ_RX_ERROP_IP_HOP: |
Sunil Goutham | 964cb69 | 2016-11-15 17:38:16 +0530 | [diff] [blame] | 1862 | this_cpu_inc(nic->drv_stats->rx_ip_ttl_errs); |
Sunil Goutham | 4863dea | 2015-05-26 19:20:15 -0700 | [diff] [blame] | 1863 | break; |
| 1864 | case CQ_RX_ERROP_L3_PCLP: |
Sunil Goutham | 964cb69 | 2016-11-15 17:38:16 +0530 | [diff] [blame] | 1865 | this_cpu_inc(nic->drv_stats->rx_l3_pclp); |
Sunil Goutham | 4863dea | 2015-05-26 19:20:15 -0700 | [diff] [blame] | 1866 | break; |
| 1867 | case CQ_RX_ERROP_L4_MAL: |
Sunil Goutham | 964cb69 | 2016-11-15 17:38:16 +0530 | [diff] [blame] | 1868 | this_cpu_inc(nic->drv_stats->rx_l4_malformed); |
Sunil Goutham | 4863dea | 2015-05-26 19:20:15 -0700 | [diff] [blame] | 1869 | break; |
| 1870 | case CQ_RX_ERROP_L4_CHK: |
Sunil Goutham | 964cb69 | 2016-11-15 17:38:16 +0530 | [diff] [blame] | 1871 | this_cpu_inc(nic->drv_stats->rx_l4_csum_errs); |
Sunil Goutham | 4863dea | 2015-05-26 19:20:15 -0700 | [diff] [blame] | 1872 | break; |
| 1873 | case CQ_RX_ERROP_UDP_LEN: |
Sunil Goutham | 964cb69 | 2016-11-15 17:38:16 +0530 | [diff] [blame] | 1874 | this_cpu_inc(nic->drv_stats->rx_udp_len_errs); |
Sunil Goutham | 4863dea | 2015-05-26 19:20:15 -0700 | [diff] [blame] | 1875 | break; |
| 1876 | case CQ_RX_ERROP_L4_PORT: |
Sunil Goutham | 964cb69 | 2016-11-15 17:38:16 +0530 | [diff] [blame] | 1877 | this_cpu_inc(nic->drv_stats->rx_l4_port_errs); |
Sunil Goutham | 4863dea | 2015-05-26 19:20:15 -0700 | [diff] [blame] | 1878 | break; |
| 1879 | case CQ_RX_ERROP_TCP_FLAG: |
Sunil Goutham | 964cb69 | 2016-11-15 17:38:16 +0530 | [diff] [blame] | 1880 | this_cpu_inc(nic->drv_stats->rx_tcp_flag_errs); |
Sunil Goutham | 4863dea | 2015-05-26 19:20:15 -0700 | [diff] [blame] | 1881 | break; |
| 1882 | case CQ_RX_ERROP_TCP_OFFSET: |
Sunil Goutham | 964cb69 | 2016-11-15 17:38:16 +0530 | [diff] [blame] | 1883 | this_cpu_inc(nic->drv_stats->rx_tcp_offset_errs); |
Sunil Goutham | 4863dea | 2015-05-26 19:20:15 -0700 | [diff] [blame] | 1884 | break; |
| 1885 | case CQ_RX_ERROP_L4_PCLP: |
Sunil Goutham | 964cb69 | 2016-11-15 17:38:16 +0530 | [diff] [blame] | 1886 | this_cpu_inc(nic->drv_stats->rx_l4_pclp); |
Sunil Goutham | 4863dea | 2015-05-26 19:20:15 -0700 | [diff] [blame] | 1887 | break; |
| 1888 | case CQ_RX_ERROP_RBDR_TRUNC: |
Sunil Goutham | 964cb69 | 2016-11-15 17:38:16 +0530 | [diff] [blame] | 1889 | this_cpu_inc(nic->drv_stats->rx_truncated_pkts); |
Sunil Goutham | 4863dea | 2015-05-26 19:20:15 -0700 | [diff] [blame] | 1890 | break; |
| 1891 | } |
| 1892 | |
| 1893 | return 1; |
| 1894 | } |
| 1895 | |
| 1896 | /* Check for errors in the send completion queue entry */
Sunil Goutham | 964cb69 | 2016-11-15 17:38:16 +0530 | [diff] [blame] | 1897 | int nicvf_check_cqe_tx_errs(struct nicvf *nic, struct cqe_send_t *cqe_tx) |
Sunil Goutham | 4863dea | 2015-05-26 19:20:15 -0700 | [diff] [blame] | 1898 | { |
Sunil Goutham | 4863dea | 2015-05-26 19:20:15 -0700 | [diff] [blame] | 1899 | switch (cqe_tx->send_status) { |
Sunil Goutham | 4863dea | 2015-05-26 19:20:15 -0700 | [diff] [blame] | 1900 | case CQ_TX_ERROP_DESC_FAULT: |
Sunil Goutham | 964cb69 | 2016-11-15 17:38:16 +0530 | [diff] [blame] | 1901 | this_cpu_inc(nic->drv_stats->tx_desc_fault); |
Sunil Goutham | 4863dea | 2015-05-26 19:20:15 -0700 | [diff] [blame] | 1902 | break; |
| 1903 | case CQ_TX_ERROP_HDR_CONS_ERR: |
Sunil Goutham | 964cb69 | 2016-11-15 17:38:16 +0530 | [diff] [blame] | 1904 | this_cpu_inc(nic->drv_stats->tx_hdr_cons_err); |
Sunil Goutham | 4863dea | 2015-05-26 19:20:15 -0700 | [diff] [blame] | 1905 | break; |
| 1906 | case CQ_TX_ERROP_SUBDC_ERR: |
Sunil Goutham | 964cb69 | 2016-11-15 17:38:16 +0530 | [diff] [blame] | 1907 | this_cpu_inc(nic->drv_stats->tx_subdesc_err); |
Sunil Goutham | 4863dea | 2015-05-26 19:20:15 -0700 | [diff] [blame] | 1908 | break; |
Sunil Goutham | 712c318 | 2016-11-15 17:37:36 +0530 | [diff] [blame] | 1909 | case CQ_TX_ERROP_MAX_SIZE_VIOL: |
Sunil Goutham | 964cb69 | 2016-11-15 17:38:16 +0530 | [diff] [blame] | 1910 | this_cpu_inc(nic->drv_stats->tx_max_size_exceeded); |
Sunil Goutham | 712c318 | 2016-11-15 17:37:36 +0530 | [diff] [blame] | 1911 | break; |
Sunil Goutham | 4863dea | 2015-05-26 19:20:15 -0700 | [diff] [blame] | 1912 | case CQ_TX_ERROP_IMM_SIZE_OFLOW: |
Sunil Goutham | 964cb69 | 2016-11-15 17:38:16 +0530 | [diff] [blame] | 1913 | this_cpu_inc(nic->drv_stats->tx_imm_size_oflow); |
Sunil Goutham | 4863dea | 2015-05-26 19:20:15 -0700 | [diff] [blame] | 1914 | break; |
| 1915 | case CQ_TX_ERROP_DATA_SEQUENCE_ERR: |
Sunil Goutham | 964cb69 | 2016-11-15 17:38:16 +0530 | [diff] [blame] | 1916 | this_cpu_inc(nic->drv_stats->tx_data_seq_err); |
Sunil Goutham | 4863dea | 2015-05-26 19:20:15 -0700 | [diff] [blame] | 1917 | break; |
| 1918 | case CQ_TX_ERROP_MEM_SEQUENCE_ERR: |
Sunil Goutham | 964cb69 | 2016-11-15 17:38:16 +0530 | [diff] [blame] | 1919 | this_cpu_inc(nic->drv_stats->tx_mem_seq_err); |
Sunil Goutham | 4863dea | 2015-05-26 19:20:15 -0700 | [diff] [blame] | 1920 | break; |
| 1921 | case CQ_TX_ERROP_LOCK_VIOL: |
Sunil Goutham | 964cb69 | 2016-11-15 17:38:16 +0530 | [diff] [blame] | 1922 | this_cpu_inc(nic->drv_stats->tx_lock_viol); |
Sunil Goutham | 4863dea | 2015-05-26 19:20:15 -0700 | [diff] [blame] | 1923 | break; |
| 1924 | case CQ_TX_ERROP_DATA_FAULT: |
Sunil Goutham | 964cb69 | 2016-11-15 17:38:16 +0530 | [diff] [blame] | 1925 | this_cpu_inc(nic->drv_stats->tx_data_fault); |
Sunil Goutham | 4863dea | 2015-05-26 19:20:15 -0700 | [diff] [blame] | 1926 | break; |
| 1927 | case CQ_TX_ERROP_TSTMP_CONFLICT: |
Sunil Goutham | 964cb69 | 2016-11-15 17:38:16 +0530 | [diff] [blame] | 1928 | this_cpu_inc(nic->drv_stats->tx_tstmp_conflict); |
Sunil Goutham | 4863dea | 2015-05-26 19:20:15 -0700 | [diff] [blame] | 1929 | break; |
| 1930 | case CQ_TX_ERROP_TSTMP_TIMEOUT: |
Sunil Goutham | 964cb69 | 2016-11-15 17:38:16 +0530 | [diff] [blame] | 1931 | this_cpu_inc(nic->drv_stats->tx_tstmp_timeout); |
Sunil Goutham | 4863dea | 2015-05-26 19:20:15 -0700 | [diff] [blame] | 1932 | break; |
| 1933 | case CQ_TX_ERROP_MEM_FAULT: |
Sunil Goutham | 964cb69 | 2016-11-15 17:38:16 +0530 | [diff] [blame] | 1934 | this_cpu_inc(nic->drv_stats->tx_mem_fault); |
Sunil Goutham | 4863dea | 2015-05-26 19:20:15 -0700 | [diff] [blame] | 1935 | break; |
| 1936 | case CQ_TX_ERROP_CK_OVERLAP: |
Sunil Goutham | 964cb69 | 2016-11-15 17:38:16 +0530 | [diff] [blame] | 1937 | this_cpu_inc(nic->drv_stats->tx_csum_overlap); |
Sunil Goutham | 4863dea | 2015-05-26 19:20:15 -0700 | [diff] [blame] | 1938 | break; |
| 1939 | case CQ_TX_ERROP_CK_OFLOW: |
Sunil Goutham | 964cb69 | 2016-11-15 17:38:16 +0530 | [diff] [blame] | 1940 | this_cpu_inc(nic->drv_stats->tx_csum_overflow); |
Sunil Goutham | 4863dea | 2015-05-26 19:20:15 -0700 | [diff] [blame] | 1941 | break; |
| 1942 | } |
| 1943 | |
| 1944 | return 1; |
| 1945 | } |