/*
 * Copyright (c) 2007 Mellanox Technologies. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 *
 */

#include <net/busy_poll.h>
#include <linux/mlx4/cq.h>
#include <linux/slab.h>
#include <linux/mlx4/qp.h>
#include <linux/skbuff.h>
#include <linux/rculist.h>
#include <linux/if_ether.h>
#include <linux/if_vlan.h>
#include <linux/vmalloc.h>
#include <linux/irq.h>

#if IS_ENABLED(CONFIG_IPV6)
#include <net/ip6_checksum.h>
#endif

#include "mlx4_en.h"

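/* RX fragments are carved out of compound pages.  mlx4_alloc_pages() grabs
 * the largest page it can (starting at MLX4_EN_ALLOC_PREFER_ORDER and
 * falling back to lower orders), maps it for DMA, and takes all the page
 * references the page will ever need up front, so the fast path never has
 * to call get_page() per fragment.
 */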
static int mlx4_alloc_pages(struct mlx4_en_priv *priv,
			    struct mlx4_en_rx_alloc *page_alloc,
			    const struct mlx4_en_frag_info *frag_info,
			    gfp_t _gfp)
{
	int order;
	struct page *page;
	dma_addr_t dma;

	for (order = MLX4_EN_ALLOC_PREFER_ORDER; ;) {
		gfp_t gfp = _gfp;

		if (order)
			gfp |= __GFP_COMP | __GFP_NOWARN;
		page = alloc_pages(gfp, order);
		if (likely(page))
			break;
		if (--order < 0 ||
		    ((PAGE_SIZE << order) < frag_info->frag_size))
			return -ENOMEM;
	}
	dma = dma_map_page(priv->ddev, page, 0, PAGE_SIZE << order,
			   PCI_DMA_FROMDEVICE);
	if (dma_mapping_error(priv->ddev, dma)) {
		put_page(page);
		return -ENOMEM;
	}
	page_alloc->page_size = PAGE_SIZE << order;
	page_alloc->page = page;
	page_alloc->dma = dma;
	page_alloc->page_offset = 0;
	/* Not doing get_page() for each frag is a big win
	 * on asymmetric workloads. Note we cannot use atomic_set().
	 */
	page_ref_add(page, page_alloc->page_size / frag_info->frag_stride - 1);
	return 0;
}

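/* Advance each per-ring allocator by one stride and hand the previous slot
 * to the RX descriptor.  A new page is allocated only when the current one
 * has no room left for another stride; on failure, any page freshly
 * allocated in this call (i.e. one that differs from the ring's current
 * page) is unmapped and released again.
 */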
static int mlx4_en_alloc_frags(struct mlx4_en_priv *priv,
			       struct mlx4_en_rx_desc *rx_desc,
			       struct mlx4_en_rx_alloc *frags,
			       struct mlx4_en_rx_alloc *ring_alloc,
			       gfp_t gfp)
{
	struct mlx4_en_rx_alloc page_alloc[MLX4_EN_MAX_RX_FRAGS];
	const struct mlx4_en_frag_info *frag_info;
	struct page *page;
	dma_addr_t dma;
	int i;

	for (i = 0; i < priv->num_frags; i++) {
		frag_info = &priv->frag_info[i];
		page_alloc[i] = ring_alloc[i];
		page_alloc[i].page_offset += frag_info->frag_stride;

		if (page_alloc[i].page_offset + frag_info->frag_stride <=
		    ring_alloc[i].page_size)
			continue;

		if (mlx4_alloc_pages(priv, &page_alloc[i], frag_info, gfp))
			goto out;
	}

	for (i = 0; i < priv->num_frags; i++) {
		frags[i] = ring_alloc[i];
		dma = ring_alloc[i].dma + ring_alloc[i].page_offset;
		ring_alloc[i] = page_alloc[i];
		rx_desc->data[i].addr = cpu_to_be64(dma);
	}

	return 0;

out:
	while (i--) {
		if (page_alloc[i].page != ring_alloc[i].page) {
			dma_unmap_page(priv->ddev, page_alloc[i].dma,
				page_alloc[i].page_size, PCI_DMA_FROMDEVICE);
			page = page_alloc[i].page;
			set_page_count(page, 1);
			put_page(page);
		}
	}
	return -ENOMEM;
}

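/* Release one fragment.  The backing page is DMA-unmapped only when this
 * fragment occupies the last stride in the page (no further stride would
 * fit), since earlier strides still reference the same mapping.
 */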
static void mlx4_en_free_frag(struct mlx4_en_priv *priv,
			      struct mlx4_en_rx_alloc *frags,
			      int i)
{
	const struct mlx4_en_frag_info *frag_info = &priv->frag_info[i];
	u32 next_frag_end = frags[i].page_offset + 2 * frag_info->frag_stride;

	if (next_frag_end > frags[i].page_size)
		dma_unmap_page(priv->ddev, frags[i].dma, frags[i].page_size,
			       PCI_DMA_FROMDEVICE);

	if (frags[i].page)
		put_page(frags[i].page);
}

static int mlx4_en_init_allocator(struct mlx4_en_priv *priv,
				  struct mlx4_en_rx_ring *ring)
{
	int i;
	struct mlx4_en_rx_alloc *page_alloc;

	for (i = 0; i < priv->num_frags; i++) {
		const struct mlx4_en_frag_info *frag_info = &priv->frag_info[i];

		if (mlx4_alloc_pages(priv, &ring->page_alloc[i],
				     frag_info, GFP_KERNEL | __GFP_COLD))
			goto out;

		en_dbg(DRV, priv, " frag %d allocator: - size:%d frags:%d\n",
		       i, ring->page_alloc[i].page_size,
		       page_ref_count(ring->page_alloc[i].page));
	}
	return 0;

out:
	while (i--) {
		struct page *page;

		page_alloc = &ring->page_alloc[i];
		dma_unmap_page(priv->ddev, page_alloc->dma,
			       page_alloc->page_size, PCI_DMA_FROMDEVICE);
		page = page_alloc->page;
		set_page_count(page, 1);
		put_page(page);
		page_alloc->page = NULL;
	}
	return -ENOMEM;
}

static void mlx4_en_destroy_allocator(struct mlx4_en_priv *priv,
				      struct mlx4_en_rx_ring *ring)
{
	struct mlx4_en_rx_alloc *page_alloc;
	int i;

	for (i = 0; i < priv->num_frags; i++) {
		const struct mlx4_en_frag_info *frag_info = &priv->frag_info[i];

		page_alloc = &ring->page_alloc[i];
		en_dbg(DRV, priv, "Freeing allocator:%d count:%d\n",
		       i, page_count(page_alloc->page));

		dma_unmap_page(priv->ddev, page_alloc->dma,
			       page_alloc->page_size, PCI_DMA_FROMDEVICE);
		while (page_alloc->page_offset + frag_info->frag_stride <
		       page_alloc->page_size) {
			put_page(page_alloc->page);
			page_alloc->page_offset += frag_info->frag_stride;
		}
		page_alloc->page = NULL;
	}
}

static void mlx4_en_init_rx_desc(struct mlx4_en_priv *priv,
				 struct mlx4_en_rx_ring *ring, int index)
{
	struct mlx4_en_rx_desc *rx_desc = ring->buf + ring->stride * index;
	int possible_frags;
	int i;

	/* Set size and memtype fields */
	for (i = 0; i < priv->num_frags; i++) {
		rx_desc->data[i].byte_count =
			cpu_to_be32(priv->frag_info[i].frag_size);
		rx_desc->data[i].lkey = cpu_to_be32(priv->mdev->mr.key);
	}

	/* If the number of used fragments does not fill up the ring stride,
	 * remaining (unused) fragments must be padded with null address/size
	 * and a special memory key */
	possible_frags = (ring->stride - sizeof(struct mlx4_en_rx_desc)) / DS_SIZE;
	for (i = priv->num_frags; i < possible_frags; i++) {
		rx_desc->data[i].byte_count = 0;
		rx_desc->data[i].lkey = cpu_to_be32(MLX4_EN_MEMTYPE_PAD);
		rx_desc->data[i].addr = 0;
	}
}

static int mlx4_en_prepare_rx_desc(struct mlx4_en_priv *priv,
				   struct mlx4_en_rx_ring *ring, int index,
				   gfp_t gfp)
{
	struct mlx4_en_rx_desc *rx_desc = ring->buf + (index * ring->stride);
	struct mlx4_en_rx_alloc *frags = ring->rx_info +
					(index << priv->log_rx_info);

	return mlx4_en_alloc_frags(priv, rx_desc, frags, ring->page_alloc, gfp);
}

static inline bool mlx4_en_is_ring_empty(struct mlx4_en_rx_ring *ring)
{
	return ring->prod == ring->cons;
}

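/* The doorbell record holds the low 16 bits of the producer counter; the
 * HW reads it to learn how many descriptors have been posted.
 */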
static inline void mlx4_en_update_rx_prod_db(struct mlx4_en_rx_ring *ring)
{
	*ring->wqres.db.db = cpu_to_be32(ring->prod & 0xffff);
}

static void mlx4_en_free_rx_desc(struct mlx4_en_priv *priv,
				 struct mlx4_en_rx_ring *ring,
				 int index)
{
	struct mlx4_en_rx_alloc *frags;
	int nr;

	frags = ring->rx_info + (index << priv->log_rx_info);
	for (nr = 0; nr < priv->num_frags; nr++) {
		en_dbg(DRV, priv, "Freeing fragment:%d\n", nr);
		mlx4_en_free_frag(priv, frags, nr);
	}
}

static int mlx4_en_fill_rx_buffers(struct mlx4_en_priv *priv)
{
	struct mlx4_en_rx_ring *ring;
	int ring_ind;
	int buf_ind;
	int new_size;

	for (buf_ind = 0; buf_ind < priv->prof->rx_ring_size; buf_ind++) {
		for (ring_ind = 0; ring_ind < priv->rx_ring_num; ring_ind++) {
			ring = priv->rx_ring[ring_ind];

			if (mlx4_en_prepare_rx_desc(priv, ring,
						    ring->actual_size,
						    GFP_KERNEL | __GFP_COLD)) {
				if (ring->actual_size < MLX4_EN_MIN_RX_SIZE) {
					en_err(priv, "Failed to allocate enough rx buffers\n");
					return -ENOMEM;
				} else {
					new_size = rounddown_pow_of_two(ring->actual_size);
					en_warn(priv, "Only %d buffers allocated reducing ring size to %d\n",
						ring->actual_size, new_size);
					goto reduce_rings;
				}
			}
			ring->actual_size++;
			ring->prod++;
		}
	}
	return 0;

reduce_rings:
	for (ring_ind = 0; ring_ind < priv->rx_ring_num; ring_ind++) {
		ring = priv->rx_ring[ring_ind];
		while (ring->actual_size > new_size) {
			ring->actual_size--;
			ring->prod--;
			mlx4_en_free_rx_desc(priv, ring, ring->actual_size);
		}
	}

	return 0;
}

static void mlx4_en_free_rx_buf(struct mlx4_en_priv *priv,
				struct mlx4_en_rx_ring *ring)
{
	int index;

	en_dbg(DRV, priv, "Freeing Rx buf - cons:%d prod:%d\n",
	       ring->cons, ring->prod);

	/* Unmap and free Rx buffers */
	while (!mlx4_en_is_ring_empty(ring)) {
		index = ring->cons & ring->size_mask;
		en_dbg(DRV, priv, "Processing descriptor:%d\n", index);
		mlx4_en_free_rx_desc(priv, ring, index);
		++ring->cons;
	}
}

void mlx4_en_set_num_rx_rings(struct mlx4_en_dev *mdev)
{
	int i;
	int num_of_eqs;
	int num_rx_rings;
	struct mlx4_dev *dev = mdev->dev;

	mlx4_foreach_port(i, dev, MLX4_PORT_TYPE_ETH) {
		num_of_eqs = max_t(int, MIN_RX_RINGS,
				   min_t(int,
					 mlx4_get_eqs_per_port(mdev->dev, i),
					 DEF_RX_RINGS));

		num_rx_rings = mlx4_low_memory_profile() ? MIN_RX_RINGS :
			min_t(int, num_of_eqs,
			      netif_get_num_default_rss_queues());
		mdev->profile.prof[i].rx_ring_num =
			rounddown_pow_of_two(num_rx_rings);
	}
}

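/* Ring structures and the rx_info array are allocated NUMA-aware: first on
 * the node given by the caller, then falling back to any node.  set_dev_node()
 * is toggled around mlx4_alloc_hwq_res() so the HW queue memory also lands
 * on the requested node.
 */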
int mlx4_en_create_rx_ring(struct mlx4_en_priv *priv,
			   struct mlx4_en_rx_ring **pring,
			   u32 size, u16 stride, int node)
{
	struct mlx4_en_dev *mdev = priv->mdev;
	struct mlx4_en_rx_ring *ring;
	int err = -ENOMEM;
	int tmp;

	ring = kzalloc_node(sizeof(*ring), GFP_KERNEL, node);
	if (!ring) {
		ring = kzalloc(sizeof(*ring), GFP_KERNEL);
		if (!ring) {
			en_err(priv, "Failed to allocate RX ring structure\n");
			return -ENOMEM;
		}
	}

	ring->prod = 0;
	ring->cons = 0;
	ring->size = size;
	ring->size_mask = size - 1;
	ring->stride = stride;
	ring->log_stride = ffs(ring->stride) - 1;
	ring->buf_size = ring->size * ring->stride + TXBB_SIZE;

	tmp = size * roundup_pow_of_two(MLX4_EN_MAX_RX_FRAGS *
					sizeof(struct mlx4_en_rx_alloc));
	ring->rx_info = vmalloc_node(tmp, node);
	if (!ring->rx_info) {
		ring->rx_info = vmalloc(tmp);
		if (!ring->rx_info) {
			err = -ENOMEM;
			goto err_ring;
		}
	}

	en_dbg(DRV, priv, "Allocated rx_info ring at addr:%p size:%d\n",
	       ring->rx_info, tmp);

	/* Allocate HW buffers on provided NUMA node */
	set_dev_node(&mdev->dev->persist->pdev->dev, node);
	err = mlx4_alloc_hwq_res(mdev->dev, &ring->wqres,
				 ring->buf_size, 2 * PAGE_SIZE);
	set_dev_node(&mdev->dev->persist->pdev->dev, mdev->dev->numa_node);
	if (err)
		goto err_info;

	err = mlx4_en_map_buffer(&ring->wqres.buf);
	if (err) {
		en_err(priv, "Failed to map RX buffer\n");
		goto err_hwq;
	}
	ring->buf = ring->wqres.buf.direct.buf;

	ring->hwtstamp_rx_filter = priv->hwtstamp_config.rx_filter;

	*pring = ring;
	return 0;

err_hwq:
	mlx4_free_hwq_res(mdev->dev, &ring->wqres, ring->buf_size);
err_info:
	vfree(ring->rx_info);
	ring->rx_info = NULL;
err_ring:
	kfree(ring);
	*pring = NULL;

	return err;
}

int mlx4_en_activate_rx_rings(struct mlx4_en_priv *priv)
{
	struct mlx4_en_rx_ring *ring;
	int i;
	int ring_ind;
	int err;
	int stride = roundup_pow_of_two(sizeof(struct mlx4_en_rx_desc) +
					DS_SIZE * priv->num_frags);

	for (ring_ind = 0; ring_ind < priv->rx_ring_num; ring_ind++) {
		ring = priv->rx_ring[ring_ind];

		ring->prod = 0;
		ring->cons = 0;
		ring->actual_size = 0;
		ring->cqn = priv->rx_cq[ring_ind]->mcq.cqn;

		ring->stride = stride;
		if (ring->stride <= TXBB_SIZE)
			ring->buf += TXBB_SIZE;

		ring->log_stride = ffs(ring->stride) - 1;
		ring->buf_size = ring->size * ring->stride;

		memset(ring->buf, 0, ring->buf_size);
		mlx4_en_update_rx_prod_db(ring);

		/* Initialize all descriptors */
		for (i = 0; i < ring->size; i++)
			mlx4_en_init_rx_desc(priv, ring, i);

		/* Initialize page allocators */
		err = mlx4_en_init_allocator(priv, ring);
		if (err) {
			en_err(priv, "Failed initializing ring allocator\n");
			if (ring->stride <= TXBB_SIZE)
				ring->buf -= TXBB_SIZE;
			ring_ind--;
			goto err_allocator;
		}
	}
	err = mlx4_en_fill_rx_buffers(priv);
	if (err)
		goto err_buffers;

	for (ring_ind = 0; ring_ind < priv->rx_ring_num; ring_ind++) {
		ring = priv->rx_ring[ring_ind];

		ring->size_mask = ring->actual_size - 1;
		mlx4_en_update_rx_prod_db(ring);
	}

	return 0;

err_buffers:
	for (ring_ind = 0; ring_ind < priv->rx_ring_num; ring_ind++)
		mlx4_en_free_rx_buf(priv, priv->rx_ring[ring_ind]);

	ring_ind = priv->rx_ring_num - 1;
err_allocator:
	while (ring_ind >= 0) {
		if (priv->rx_ring[ring_ind]->stride <= TXBB_SIZE)
			priv->rx_ring[ring_ind]->buf -= TXBB_SIZE;
		mlx4_en_destroy_allocator(priv, priv->rx_ring[ring_ind]);
		ring_ind--;
	}
	return err;
}

/* We recover from out of memory by scheduling our napi poll
 * function (mlx4_en_process_cq), which tries to allocate
 * all missing RX buffers (call to mlx4_en_refill_rx_buffers).
 */
void mlx4_en_recover_from_oom(struct mlx4_en_priv *priv)
{
	int ring;

	if (!priv->port_up)
		return;

	for (ring = 0; ring < priv->rx_ring_num; ring++) {
		if (mlx4_en_is_ring_empty(priv->rx_ring[ring]))
			napi_reschedule(&priv->rx_cq[ring]->napi);
	}
}

void mlx4_en_destroy_rx_ring(struct mlx4_en_priv *priv,
			     struct mlx4_en_rx_ring **pring,
			     u32 size, u16 stride)
{
	struct mlx4_en_dev *mdev = priv->mdev;
	struct mlx4_en_rx_ring *ring = *pring;

	mlx4_en_unmap_buffer(&ring->wqres.buf);
	mlx4_free_hwq_res(mdev->dev, &ring->wqres, size * stride + TXBB_SIZE);
	vfree(ring->rx_info);
	ring->rx_info = NULL;
	kfree(ring);
	*pring = NULL;
#ifdef CONFIG_RFS_ACCEL
	mlx4_en_cleanup_filters(priv);
#endif
}

void mlx4_en_deactivate_rx_ring(struct mlx4_en_priv *priv,
				struct mlx4_en_rx_ring *ring)
{
	mlx4_en_free_rx_buf(priv, ring);
	if (ring->stride <= TXBB_SIZE)
		ring->buf -= TXBB_SIZE;
	mlx4_en_destroy_allocator(priv, ring);
}


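/* Move the used fragments of one RX descriptor into the given skb's frag
 * array, syncing each fragment for CPU access.  The last fragment is
 * trimmed to the actual packet length.  Returns the number of fragments
 * used, or 0 on failure (after dropping any page references already taken).
 */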
static int mlx4_en_complete_rx_desc(struct mlx4_en_priv *priv,
				    struct mlx4_en_rx_desc *rx_desc,
				    struct mlx4_en_rx_alloc *frags,
				    struct sk_buff *skb,
				    int length)
{
	struct skb_frag_struct *skb_frags_rx = skb_shinfo(skb)->frags;
	struct mlx4_en_frag_info *frag_info;
	int nr;
	dma_addr_t dma;

	/* Collect used fragments while replacing them in the HW descriptors */
	for (nr = 0; nr < priv->num_frags; nr++) {
		frag_info = &priv->frag_info[nr];
		if (length <= frag_info->frag_prefix_size)
			break;
		if (!frags[nr].page)
			goto fail;

		dma = be64_to_cpu(rx_desc->data[nr].addr);
		dma_sync_single_for_cpu(priv->ddev, dma, frag_info->frag_size,
					DMA_FROM_DEVICE);

		/* Save page reference in skb */
		__skb_frag_set_page(&skb_frags_rx[nr], frags[nr].page);
		skb_frag_size_set(&skb_frags_rx[nr], frag_info->frag_size);
		skb_frags_rx[nr].page_offset = frags[nr].page_offset;
		skb->truesize += frag_info->frag_stride;
		frags[nr].page = NULL;
	}
	/* Adjust size of last fragment to match actual length */
	if (nr > 0)
		skb_frag_size_set(&skb_frags_rx[nr - 1],
			length - priv->frag_info[nr - 1].frag_prefix_size);
	return nr;

fail:
	while (nr > 0) {
		nr--;
		__skb_frag_unref(&skb_frags_rx[nr]);
	}
	return 0;
}


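/* Build an skb for a completed descriptor.  Packets up to
 * SMALL_PACKET_SIZE are copied entirely into the linear part; larger
 * packets keep their page fragments, and only the headers (as sized by
 * eth_get_headlen()) are pulled into the linear part.
 */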
static struct sk_buff *mlx4_en_rx_skb(struct mlx4_en_priv *priv,
				      struct mlx4_en_rx_desc *rx_desc,
				      struct mlx4_en_rx_alloc *frags,
				      unsigned int length)
{
	struct sk_buff *skb;
	void *va;
	int used_frags;
	dma_addr_t dma;

	skb = netdev_alloc_skb(priv->dev, SMALL_PACKET_SIZE + NET_IP_ALIGN);
	if (!skb) {
		en_dbg(RX_ERR, priv, "Failed allocating skb\n");
		return NULL;
	}
	skb_reserve(skb, NET_IP_ALIGN);
	skb->len = length;

	/* Get pointer to first fragment so we can copy the headers into the
	 * (linear part of the) skb */
	va = page_address(frags[0].page) + frags[0].page_offset;

	if (length <= SMALL_PACKET_SIZE) {
		/* We are copying all relevant data to the skb - temporarily
		 * sync buffers for the copy */
		dma = be64_to_cpu(rx_desc->data[0].addr);
		dma_sync_single_for_cpu(priv->ddev, dma, length,
					DMA_FROM_DEVICE);
		skb_copy_to_linear_data(skb, va, length);
		skb->tail += length;
	} else {
		unsigned int pull_len;

		/* Move relevant fragments to skb */
		used_frags = mlx4_en_complete_rx_desc(priv, rx_desc, frags,
						      skb, length);
		if (unlikely(!used_frags)) {
			kfree_skb(skb);
			return NULL;
		}
		skb_shinfo(skb)->nr_frags = used_frags;

		pull_len = eth_get_headlen(va, SMALL_PACKET_SIZE);
		/* Copy headers into the skb linear buffer */
		memcpy(skb->data, va, pull_len);
		skb->tail += pull_len;

		/* Skip headers in first fragment */
		skb_shinfo(skb)->frags[0].page_offset += pull_len;

		/* Adjust size of first fragment */
		skb_frag_size_sub(&skb_shinfo(skb)->frags[0], pull_len);
		skb->data_len = length - pull_len;
	}
	return skb;
}

static void validate_loopback(struct mlx4_en_priv *priv, struct sk_buff *skb)
{
	int i;
	int offset = ETH_HLEN;

	for (i = 0; i < MLX4_LOOPBACK_TEST_PAYLOAD; i++, offset++) {
		if (*(skb->data + offset) != (unsigned char) (i & 0xff))
			goto out_loopback;
	}
	/* Loopback found */
	priv->loopback_ok = 1;

out_loopback:
	dev_kfree_skb_any(skb);
}

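/* Repost descriptors until the ring holds actual_size buffers again.  This
 * runs in the napi poll path, hence GFP_ATOMIC; if allocation fails the
 * ring simply stays short for now (mlx4_en_recover_from_oom() above
 * handles the fully-empty case by rescheduling napi).
 */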
static void mlx4_en_refill_rx_buffers(struct mlx4_en_priv *priv,
				      struct mlx4_en_rx_ring *ring)
{
	int index = ring->prod & ring->size_mask;

	while ((u32) (ring->prod - ring->cons) < ring->actual_size) {
		if (mlx4_en_prepare_rx_desc(priv, ring, index,
					    GFP_ATOMIC | __GFP_COLD))
			break;
		ring->prod++;
		index = ring->prod & ring->size_mask;
	}
}

/* When hardware doesn't strip the vlan, we need to calculate the checksum
 * over it and add it to the hardware's checksum calculation
 */
static inline __wsum get_fixed_vlan_csum(__wsum hw_checksum,
					 struct vlan_hdr *vlanh)
{
	return csum_add(hw_checksum, *(__wsum *)vlanh);
}

/* Although the stack expects checksum which doesn't include the pseudo
 * header, the HW adds it. To address that, we are subtracting the pseudo
 * header checksum from the checksum value provided by the HW.
 */
static void get_fixed_ipv4_csum(__wsum hw_checksum, struct sk_buff *skb,
				struct iphdr *iph)
{
	__u16 length_for_csum = 0;
	__wsum csum_pseudo_header = 0;

	length_for_csum = (be16_to_cpu(iph->tot_len) - (iph->ihl << 2));
	csum_pseudo_header = csum_tcpudp_nofold(iph->saddr, iph->daddr,
						length_for_csum, iph->protocol, 0);
	skb->csum = csum_sub(hw_checksum, csum_pseudo_header);
}

#if IS_ENABLED(CONFIG_IPV6)
/* In IPv6 packets, besides subtracting the pseudo header checksum,
 * we also compute/add the IP header checksum which
 * is not added by the HW.
 */
static int get_fixed_ipv6_csum(__wsum hw_checksum, struct sk_buff *skb,
			       struct ipv6hdr *ipv6h)
{
	__wsum csum_pseudo_hdr = 0;

	if (ipv6h->nexthdr == IPPROTO_FRAGMENT || ipv6h->nexthdr == IPPROTO_HOPOPTS)
		return -1;
	hw_checksum = csum_add(hw_checksum, (__force __wsum)(ipv6h->nexthdr << 8));

	csum_pseudo_hdr = csum_partial(&ipv6h->saddr,
				       sizeof(ipv6h->saddr) + sizeof(ipv6h->daddr), 0);
	csum_pseudo_hdr = csum_add(csum_pseudo_hdr, (__force __wsum)ipv6h->payload_len);
	csum_pseudo_hdr = csum_add(csum_pseudo_hdr, (__force __wsum)ntohs(ipv6h->nexthdr));

	skb->csum = csum_sub(hw_checksum, csum_pseudo_hdr);
	skb->csum = csum_add(skb->csum, csum_partial(ipv6h, sizeof(struct ipv6hdr), 0));
	return 0;
}
#endif
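/* Fix up a CHECKSUM_COMPLETE value: fold a non-stripped VLAN header into
 * the HW checksum when needed, then remove the pseudo-header contribution
 * (IPv4) or additionally account for the IPv6 header.  Returns 0 on
 * success, -1 for packets the fixup cannot handle.
 */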
static int check_csum(struct mlx4_cqe *cqe, struct sk_buff *skb, void *va,
		      netdev_features_t dev_features)
{
	__wsum hw_checksum = 0;

	void *hdr = (u8 *)va + sizeof(struct ethhdr);

	hw_checksum = csum_unfold((__force __sum16)cqe->checksum);

	if (cqe->vlan_my_qpn & cpu_to_be32(MLX4_CQE_CVLAN_PRESENT_MASK) &&
	    !(dev_features & NETIF_F_HW_VLAN_CTAG_RX)) {
		hw_checksum = get_fixed_vlan_csum(hw_checksum, hdr);
		hdr += sizeof(struct vlan_hdr);
	}

	if (cqe->status & cpu_to_be16(MLX4_CQE_STATUS_IPV4))
		get_fixed_ipv4_csum(hw_checksum, skb, hdr);
#if IS_ENABLED(CONFIG_IPV6)
	else if (cqe->status & cpu_to_be16(MLX4_CQE_STATUS_IPV6))
		if (get_fixed_ipv6_csum(hw_checksum, skb, hdr))
			return -1;
#endif
	return 0;
}

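/* napi poll handler for one RX completion queue.  Walks the CQ by
 * ownership bit, translating each completion directly to a ring index
 * (CQEs and RX descriptors are mapped 1:1), and feeds packets either to
 * the GRO frags path or to a freshly built skb.
 */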
int mlx4_en_process_rx_cq(struct net_device *dev, struct mlx4_en_cq *cq, int budget)
{
	struct mlx4_en_priv *priv = netdev_priv(dev);
	struct mlx4_en_dev *mdev = priv->mdev;
	struct mlx4_cqe *cqe;
	struct mlx4_en_rx_ring *ring = priv->rx_ring[cq->ring];
	struct mlx4_en_rx_alloc *frags;
	struct mlx4_en_rx_desc *rx_desc;
	struct sk_buff *skb;
	int index;
	int nr;
	unsigned int length;
	int polled = 0;
	int ip_summed;
	int factor = priv->cqe_factor;
	u64 timestamp;
	bool l2_tunnel;

	if (!priv->port_up)
		return 0;

	if (budget <= 0)
		return polled;

	/* We assume a 1:1 mapping between CQEs and Rx descriptors, so Rx
	 * descriptor offset can be deduced from the CQE index instead of
	 * reading 'cqe->index' */
	index = cq->mcq.cons_index & ring->size_mask;
	cqe = mlx4_en_get_cqe(cq->buf, index, priv->cqe_size) + factor;

	/* Process all completed CQEs */
	while (XNOR(cqe->owner_sr_opcode & MLX4_CQE_OWNER_MASK,
		    cq->mcq.cons_index & cq->size)) {

		frags = ring->rx_info + (index << priv->log_rx_info);
		rx_desc = ring->buf + (index << ring->log_stride);

		/*
		 * make sure we read the CQE after we read the ownership bit
		 */
		dma_rmb();

		/* Drop packet on bad receive or bad checksum */
		if (unlikely((cqe->owner_sr_opcode & MLX4_CQE_OPCODE_MASK) ==
						MLX4_CQE_OPCODE_ERROR)) {
			en_err(priv, "CQE completed in error - vendor syndrome:%d syndrome:%d\n",
			       ((struct mlx4_err_cqe *)cqe)->vendor_err_syndrome,
			       ((struct mlx4_err_cqe *)cqe)->syndrome);
			goto next;
		}
		if (unlikely(cqe->badfcs_enc & MLX4_CQE_BAD_FCS)) {
			en_dbg(RX_ERR, priv, "Accepted frame with bad FCS\n");
			goto next;
		}

		/* Check if we need to drop the packet if SRIOV is not enabled
		 * and not performing the selftest or flb disabled
		 */
		if (priv->flags & MLX4_EN_FLAG_RX_FILTER_NEEDED) {
			struct ethhdr *ethh;
			dma_addr_t dma;
			/* Get pointer to first fragment since we don't
			 * have an skb yet; cast it to an ethhdr struct
			 */
			dma = be64_to_cpu(rx_desc->data[0].addr);
			dma_sync_single_for_cpu(priv->ddev, dma, sizeof(*ethh),
						DMA_FROM_DEVICE);
			ethh = (struct ethhdr *)(page_address(frags[0].page) +
						 frags[0].page_offset);

			if (is_multicast_ether_addr(ethh->h_dest)) {
				struct mlx4_mac_entry *entry;
				struct hlist_head *bucket;
				unsigned int mac_hash;

				/* Drop the packet, since HW looped it back */
				mac_hash = ethh->h_source[MLX4_EN_MAC_HASH_IDX];
				bucket = &priv->mac_hash[mac_hash];
				rcu_read_lock();
				hlist_for_each_entry_rcu(entry, bucket, hlist) {
					if (ether_addr_equal_64bits(entry->mac,
								    ethh->h_source)) {
						rcu_read_unlock();
						goto next;
					}
				}
				rcu_read_unlock();
			}
		}

Yevgeny Petrilin | c27a02c | 2008-10-22 15:47:49 -0700 | [diff] [blame] | 833 | /* |
| 834 | * Packet is OK - process it. |
| 835 | */ |
| 836 | length = be32_to_cpu(cqe->byte_cnt); |
Yevgeny Petrilin | 4a5f4dd | 2011-11-14 14:25:36 -0500 | [diff] [blame] | 837 | length -= ring->fcs_del; |
Yevgeny Petrilin | c27a02c | 2008-10-22 15:47:49 -0700 | [diff] [blame] | 838 | ring->bytes += length; |
| 839 | ring->packets++; |
Or Gerlitz | 837052d | 2013-12-23 16:09:44 +0200 | [diff] [blame] | 840 | l2_tunnel = (dev->hw_enc_features & NETIF_F_RXCSUM) && |
| 841 | (cqe->vlan_my_qpn & cpu_to_be32(MLX4_CQE_L2_TUNNEL)); |
Yevgeny Petrilin | c27a02c | 2008-10-22 15:47:49 -0700 | [diff] [blame] | 842 | |
Michał Mirosław | c8c64cf | 2011-04-15 04:50:49 +0000 | [diff] [blame] | 843 | if (likely(dev->features & NETIF_F_RXCSUM)) { |
Shani Michaeli | f8c6455 | 2014-11-09 13:51:53 +0200 | [diff] [blame] | 844 | if (cqe->status & cpu_to_be16(MLX4_CQE_STATUS_TCP | |
| 845 | MLX4_CQE_STATUS_UDP)) { |
| 846 | if ((cqe->status & cpu_to_be16(MLX4_CQE_STATUS_IPOK)) && |
| 847 | cqe->checksum == cpu_to_be16(0xffff)) { |
| 848 | ip_summed = CHECKSUM_UNNECESSARY; |
| 849 | ring->csum_ok++; |
| 850 | } else { |
| 851 | ip_summed = CHECKSUM_NONE; |
| 852 | ring->csum_none++; |
| 853 | } |
Yevgeny Petrilin | c27a02c | 2008-10-22 15:47:49 -0700 | [diff] [blame] | 854 | } else { |
Shani Michaeli | f8c6455 | 2014-11-09 13:51:53 +0200 | [diff] [blame] | 855 | if (priv->flags & MLX4_EN_FLAG_RX_CSUM_NON_TCP_UDP && |
| 856 | (cqe->status & cpu_to_be16(MLX4_CQE_STATUS_IPV4 | |
| 857 | MLX4_CQE_STATUS_IPV6))) { |
| 858 | ip_summed = CHECKSUM_COMPLETE; |
| 859 | ring->csum_complete++; |
| 860 | } else { |
| 861 | ip_summed = CHECKSUM_NONE; |
| 862 | ring->csum_none++; |
| 863 | } |
Yevgeny Petrilin | c27a02c | 2008-10-22 15:47:49 -0700 | [diff] [blame] | 864 | } |
| 865 | } else { |
| 866 | ip_summed = CHECKSUM_NONE; |
Yevgeny Petrilin | ad04378 | 2011-10-18 01:50:56 +0000 | [diff] [blame] | 867 | ring->csum_none++; |
Yevgeny Petrilin | c27a02c | 2008-10-22 15:47:49 -0700 | [diff] [blame] | 868 | } |
| 869 | |
Shani Michaeli | dd65bea | 2014-11-09 13:51:52 +0200 | [diff] [blame] | 870 | /* This packet is eligible for GRO if it is: |
| 871 | * - DIX Ethernet (type interpretation) |
| 872 | * - TCP/IP (v4) |
| 873 | * - without IP options |
| 874 | * - not an IP fragment |
Shani Michaeli | dd65bea | 2014-11-09 13:51:52 +0200 | [diff] [blame] | 875 | */ |
Eric Dumazet | 868fdb0 | 2015-11-18 06:30:58 -0800 | [diff] [blame] | 876 | if (dev->features & NETIF_F_GRO) { |
Shani Michaeli | dd65bea | 2014-11-09 13:51:52 +0200 | [diff] [blame] | 877 | struct sk_buff *gro_skb = napi_get_frags(&cq->napi); |
| 878 | if (!gro_skb) |
| 879 | goto next; |
| 880 | |
| 881 | nr = mlx4_en_complete_rx_desc(priv, |
| 882 | rx_desc, frags, gro_skb, |
| 883 | length); |
| 884 | if (!nr) |
| 885 | goto next; |
| 886 | |
Shani Michaeli | f8c6455 | 2014-11-09 13:51:53 +0200 | [diff] [blame] | 887 | if (ip_summed == CHECKSUM_COMPLETE) { |
| 888 | void *va = skb_frag_address(skb_shinfo(gro_skb)->frags); |
Ido Shamay | 79a2585 | 2015-06-25 11:29:43 +0300 | [diff] [blame] | 889 | if (check_csum(cqe, gro_skb, va, |
| 890 | dev->features)) { |
Shani Michaeli | f8c6455 | 2014-11-09 13:51:53 +0200 | [diff] [blame] | 891 | ip_summed = CHECKSUM_NONE; |
| 892 | ring->csum_none++; |
| 893 | ring->csum_complete--; |
| 894 | } |
| 895 | } |
| 896 | |
Shani Michaeli | dd65bea | 2014-11-09 13:51:52 +0200 | [diff] [blame] | 897 | skb_shinfo(gro_skb)->nr_frags = nr; |
| 898 | gro_skb->len = length; |
| 899 | gro_skb->data_len = length; |
| 900 | gro_skb->ip_summed = ip_summed; |
| 901 | |
| 902 | if (l2_tunnel && ip_summed == CHECKSUM_UNNECESSARY) |
Or Gerlitz | c58942f | 2014-12-11 10:57:51 +0200 | [diff] [blame] | 903 | gro_skb->csum_level = 1; |
| 904 | |
Shani Michaeli | dd65bea | 2014-11-09 13:51:52 +0200 | [diff] [blame] | 905 | if ((cqe->vlan_my_qpn & |
Hadar Hen Zion | e802f8e | 2015-07-27 14:46:33 +0300 | [diff] [blame] | 906 | cpu_to_be32(MLX4_CQE_CVLAN_PRESENT_MASK)) && |
Shani Michaeli | dd65bea | 2014-11-09 13:51:52 +0200 | [diff] [blame] | 907 | (dev->features & NETIF_F_HW_VLAN_CTAG_RX)) { |
| 908 | u16 vid = be16_to_cpu(cqe->sl_vid); |
| 909 | |
| 910 | __vlan_hwaccel_put_tag(gro_skb, htons(ETH_P_8021Q), vid); |
Hadar Hen Zion | e38af4f | 2015-07-27 14:46:34 +0300 | [diff] [blame] | 911 | } else if ((be32_to_cpu(cqe->vlan_my_qpn) & |
| 912 | MLX4_CQE_SVLAN_PRESENT_MASK) && |
| 913 | (dev->features & NETIF_F_HW_VLAN_STAG_RX)) { |
| 914 | __vlan_hwaccel_put_tag(gro_skb, |
| 915 | htons(ETH_P_8021AD), |
| 916 | be16_to_cpu(cqe->sl_vid)); |
Shani Michaeli | dd65bea | 2014-11-09 13:51:52 +0200 | [diff] [blame] | 917 | } |
| 918 | |
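 | | /* Report an L4 hash type only when the HW also validated the L4 |
 | | * checksum; otherwise the reported RSS hash may cover L3 only. |
 | | */ |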
| 919 | if (dev->features & NETIF_F_RXHASH) |
| 920 | skb_set_hash(gro_skb, |
| 921 | be32_to_cpu(cqe->immed_rss_invalid), |
Eric Dumazet | 0a6d424 | 2015-07-02 13:24:44 +0200 | [diff] [blame] | 922 | (ip_summed == CHECKSUM_UNNECESSARY) ? |
| 923 | PKT_HASH_TYPE_L4 : |
| 924 | PKT_HASH_TYPE_L3); |
Shani Michaeli | dd65bea | 2014-11-09 13:51:52 +0200 | [diff] [blame] | 925 | |
| 926 | skb_record_rx_queue(gro_skb, cq->ring); |
Shani Michaeli | dd65bea | 2014-11-09 13:51:52 +0200 | [diff] [blame] | 927 | |
| 928 | if (ring->hwtstamp_rx_filter == HWTSTAMP_FILTER_ALL) { |
| 929 | timestamp = mlx4_en_get_cqe_ts(cqe); |
| 930 | mlx4_en_fill_hwtstamps(mdev, |
| 931 | skb_hwtstamps(gro_skb), |
| 932 | timestamp); |
| 933 | } |
| 934 | |
| 935 | napi_gro_frags(&cq->napi); |
| 936 | goto next; |
| 937 | } |
| 938 | |
| 939 | /* GRO not possible, complete processing here */ |
Thadeu Lima de Souza Cascardo | 4cce66c | 2012-07-16 07:01:53 +0000 | [diff] [blame] | 940 | skb = mlx4_en_rx_skb(priv, rx_desc, frags, length); |
Yevgeny Petrilin | c27a02c | 2008-10-22 15:47:49 -0700 | [diff] [blame] | 941 | if (!skb) { |
| 942 | priv->stats.rx_dropped++; |
| 943 | goto next; |
| 944 | } |
| 945 | |
Yevgeny Petrilin | e7c1c2c4 | 2010-08-24 03:46:18 +0000 | [diff] [blame] | 946 | if (unlikely(priv->validate_loopback)) { |
| 947 | validate_loopback(priv, skb); |
| 948 | goto next; |
| 949 | } |
| 950 | |
Shani Michaeli | f8c6455 | 2014-11-09 13:51:53 +0200 | [diff] [blame] | 951 | if (ip_summed == CHECKSUM_COMPLETE) { |
Ido Shamay | 79a2585 | 2015-06-25 11:29:43 +0300 | [diff] [blame] | 952 | if (check_csum(cqe, skb, skb->data, dev->features)) { |
Shani Michaeli | f8c6455 | 2014-11-09 13:51:53 +0200 | [diff] [blame] | 953 | ip_summed = CHECKSUM_NONE; |
| 954 | ring->csum_complete--; |
| 955 | ring->csum_none++; |
| 956 | } |
| 957 | } |
| 958 | |
Yevgeny Petrilin | c27a02c | 2008-10-22 15:47:49 -0700 | [diff] [blame] | 959 | skb->ip_summed = ip_summed; |
| 960 | skb->protocol = eth_type_trans(skb, dev); |
David S. Miller | 0c8dfc8 | 2009-01-27 16:22:32 -0800 | [diff] [blame] | 961 | skb_record_rx_queue(skb, cq->ring); |
Yevgeny Petrilin | c27a02c | 2008-10-22 15:47:49 -0700 | [diff] [blame] | 962 | |
Tom Herbert | 9ca8600 | 2014-08-27 21:27:53 -0700 | [diff] [blame] | 963 | if (l2_tunnel && ip_summed == CHECKSUM_UNNECESSARY) |
| 964 | skb->csum_level = 1; |
Or Gerlitz | 837052d | 2013-12-23 16:09:44 +0200 | [diff] [blame] | 965 | |
Yevgeny Petrilin | ad86107 | 2011-10-18 01:51:24 +0000 | [diff] [blame] | 966 | if (dev->features & NETIF_F_RXHASH) |
Tom Herbert | 6917441 | 2013-12-17 23:31:23 -0800 | [diff] [blame] | 967 | skb_set_hash(skb, |
| 968 | be32_to_cpu(cqe->immed_rss_invalid), |
Eric Dumazet | 0a6d424 | 2015-07-02 13:24:44 +0200 | [diff] [blame] | 969 | (ip_summed == CHECKSUM_UNNECESSARY) ? |
| 970 | PKT_HASH_TYPE_L4 : |
| 971 | PKT_HASH_TYPE_L3); |
Yevgeny Petrilin | ad86107 | 2011-10-18 01:51:24 +0000 | [diff] [blame] | 972 | |
Amir Vadai | ec693d4 | 2013-04-23 06:06:49 +0000 | [diff] [blame] | 973 | if ((be32_to_cpu(cqe->vlan_my_qpn) & |
Hadar Hen Zion | e802f8e | 2015-07-27 14:46:33 +0300 | [diff] [blame] | 974 | MLX4_CQE_CVLAN_PRESENT_MASK) && |
Amir Vadai | ec693d4 | 2013-04-23 06:06:49 +0000 | [diff] [blame] | 975 | (dev->features & NETIF_F_HW_VLAN_CTAG_RX)) |
Patrick McHardy | 86a9bad | 2013-04-19 02:04:30 +0000 | [diff] [blame] | 976 | __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), be16_to_cpu(cqe->sl_vid)); |
Hadar Hen Zion | e38af4f | 2015-07-27 14:46:34 +0300 | [diff] [blame] | 977 | else if ((be32_to_cpu(cqe->vlan_my_qpn) & |
| 978 | MLX4_CQE_SVLAN_PRESENT_MASK) && |
| 979 | (dev->features & NETIF_F_HW_VLAN_STAG_RX)) |
| 980 | __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021AD), |
| 981 | be16_to_cpu(cqe->sl_vid)); |
Jiri Pirko | f1b553f | 2011-07-20 04:54:22 +0000 | [diff] [blame] | 982 | |
Amir Vadai | ec693d4 | 2013-04-23 06:06:49 +0000 | [diff] [blame] | 983 | if (ring->hwtstamp_rx_filter == HWTSTAMP_FILTER_ALL) { |
| 984 | timestamp = mlx4_en_get_cqe_ts(cqe); |
| 985 | mlx4_en_fill_hwtstamps(mdev, skb_hwtstamps(skb), |
| 986 | timestamp); |
| 987 | } |
| 988 | |
Eric Dumazet | 868fdb0 | 2015-11-18 06:30:58 -0800 | [diff] [blame] | 989 | napi_gro_receive(&cq->napi, skb); |
Yevgeny Petrilin | c27a02c | 2008-10-22 15:47:49 -0700 | [diff] [blame] | 990 | next: |
Thadeu Lima de Souza Cascardo | 4cce66c | 2012-07-16 07:01:53 +0000 | [diff] [blame] | 991 | for (nr = 0; nr < priv->num_frags; nr++) |
| 992 | mlx4_en_free_frag(priv, frags, nr); |
| 993 | |
Yevgeny Petrilin | c27a02c | 2008-10-22 15:47:49 -0700 | [diff] [blame] | 994 | ++cq->mcq.cons_index; |
| 995 | index = (cq->mcq.cons_index) & ring->size_mask; |
Ido Shamay | b1b6b4d | 2014-09-18 11:51:01 +0300 | [diff] [blame] | 996 | cqe = mlx4_en_get_cqe(cq->buf, index, priv->cqe_size) + factor; |
Ben Hutchings | f1d29a3 | 2012-11-16 12:44:56 +0000 | [diff] [blame] | 997 | if (++polled == budget) |
Yevgeny Petrilin | c27a02c | 2008-10-22 15:47:49 -0700 | [diff] [blame] | 998 | goto out; |
Yevgeny Petrilin | c27a02c | 2008-10-22 15:47:49 -0700 | [diff] [blame] | 999 | } |
| 1000 | |
Yevgeny Petrilin | c27a02c | 2008-10-22 15:47:49 -0700 | [diff] [blame] | 1001 | out: |
| 1002 | AVG_PERF_COUNTER(priv->pstats.rx_coal_avg, polled); |
| 1003 | mlx4_cq_set_ci(&cq->mcq); |
| 1004 | wmb(); /* ensure HW sees CQ consumer before we post new buffers */ |
| 1005 | ring->cons = cq->mcq.cons_index; |
Thadeu Lima de Souza Cascardo | 4cce66c | 2012-07-16 07:01:53 +0000 | [diff] [blame] | 1006 | mlx4_en_refill_rx_buffers(priv, ring); |
Yevgeny Petrilin | c27a02c | 2008-10-22 15:47:49 -0700 | [diff] [blame] | 1007 | mlx4_en_update_rx_prod_db(ring); |
| 1008 | return polled; |
| 1009 | } |
| 1010 | |
| 1011 | |
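 | | /* RX completion event handler, called in hard-IRQ context each time |
 | | * an armed CQ fires. While the port is up, completions are handed to |
 | | * NAPI via napi_schedule_irqoff() (we already run with IRQs |
 | | * disabled); otherwise the CQ is simply re-armed. |
 | | */ |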
| 1012 | void mlx4_en_rx_irq(struct mlx4_cq *mcq) |
| 1013 | { |
| 1014 | struct mlx4_en_cq *cq = container_of(mcq, struct mlx4_en_cq, mcq); |
| 1015 | struct mlx4_en_priv *priv = netdev_priv(cq->dev); |
| 1016 | |
Eric Dumazet | 477b35b | 2014-10-29 16:54:45 -0700 | [diff] [blame] | 1017 | if (likely(priv->port_up)) |
| 1018 | napi_schedule_irqoff(&cq->napi); |
Yevgeny Petrilin | c27a02c | 2008-10-22 15:47:49 -0700 | [diff] [blame] | 1019 | else |
| 1020 | mlx4_en_arm_cq(priv, cq); |
| 1021 | } |
| 1022 | |
| 1023 | /* Rx CQ polling - called by NAPI */ |
| 1024 | int mlx4_en_poll_rx_cq(struct napi_struct *napi, int budget) |
| 1025 | { |
| 1026 | struct mlx4_en_cq *cq = container_of(napi, struct mlx4_en_cq, napi); |
| 1027 | struct net_device *dev = cq->dev; |
| 1028 | struct mlx4_en_priv *priv = netdev_priv(dev); |
| 1029 | int done; |
| 1030 | |
| 1031 | done = mlx4_en_process_rx_cq(dev, cq, budget); |
| 1032 | |
| 1033 | /* If we used up all the quota - we're probably not done yet... */ |
Yuval Atias | 2eacc23 | 2014-05-14 12:15:10 +0300 | [diff] [blame] | 1034 | if (done == budget) { |
Amir Vadai | 35f6f45 | 2014-06-29 11:54:55 +0300 | [diff] [blame] | 1035 | const struct cpumask *aff; |
Thomas Gleixner | dc2ec62 | 2015-09-15 13:34:05 +0200 | [diff] [blame] | 1036 | struct irq_data *idata; |
| 1037 | int cpu_curr; |
Amir Vadai | 35f6f45 | 2014-06-29 11:54:55 +0300 | [diff] [blame] | 1038 | |
Yevgeny Petrilin | c27a02c | 2008-10-22 15:47:49 -0700 | [diff] [blame] | 1039 | INC_PERF_COUNTER(priv->pstats.napi_quota); |
Amir Vadai | 35f6f45 | 2014-06-29 11:54:55 +0300 | [diff] [blame] | 1040 | |
| 1041 | cpu_curr = smp_processor_id(); |
Thomas Gleixner | dc2ec62 | 2015-09-15 13:34:05 +0200 | [diff] [blame] | 1042 | idata = irq_desc_get_irq_data(cq->irq_desc); |
| 1043 | aff = irq_data_get_affinity_mask(idata); |
Amir Vadai | 35f6f45 | 2014-06-29 11:54:55 +0300 | [diff] [blame] | 1044 | |
Eric Dumazet | 2e1af7d | 2014-11-10 14:07:20 -0800 | [diff] [blame] | 1045 | if (likely(cpumask_test_cpu(cpu_curr, aff))) |
| 1046 | return budget; |
| 1047 | |
 | 1048 | /* The current CPU is no longer in this IRQ's affinity mask - |
 | 1049 | * the affinity has probably changed. Complete this poll with |
 | 1050 | * done = 0 so NAPI can be rescheduled on the right CPU once |
 | 1051 | * the CQ is re-armed below. |
 | | */ |
| 1052 | done = 0; |
Yevgeny Petrilin | c27a02c | 2008-10-22 15:47:49 -0700 | [diff] [blame] | 1053 | } |
Eric Dumazet | 1a28817 | 2014-11-06 21:10:11 -0800 | [diff] [blame] | 1054 | /* Done for now */ |
| 1055 | napi_complete_done(napi, done); |
| 1056 | mlx4_en_arm_cq(priv, cq); |
Yevgeny Petrilin | c27a02c | 2008-10-22 15:47:49 -0700 | [diff] [blame] | 1057 | return done; |
| 1058 | } |
| 1059 | |
Eric Dumazet | 51151a1 | 2013-06-23 08:17:56 -0700 | [diff] [blame] | 1060 | static const int frag_sizes[] = { |
Yevgeny Petrilin | c27a02c | 2008-10-22 15:47:49 -0700 | [diff] [blame] | 1061 | FRAG_SZ0, |
| 1062 | FRAG_SZ1, |
| 1063 | FRAG_SZ2, |
| 1064 | FRAG_SZ3 |
| 1065 | }; |
| 1066 | |
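 | | /* Split the effective MTU across the RX scatter-list fragments. |
 | | * Worked example (values assumed from mlx4_en.h of this era: |
 | | * FRAG_SZ0 = 1536 - NET_IP_ALIGN, FRAG_SZ1 = FRAG_SZ2 = 4096, with |
 | | * NET_IP_ALIGN == 2): an MTU of 9000 gives |
 | | * eff_mtu = 9000 + 14 + 8 = 9022, split into fragments of 1534, |
 | | * 4096 and 3392 bytes, so num_frags = 3. |
 | | */ |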
| 1067 | void mlx4_en_calc_rx_buf(struct net_device *dev) |
| 1068 | { |
| 1069 | struct mlx4_en_priv *priv = netdev_priv(dev); |
Hadar Hen Zion | e38af4f | 2015-07-27 14:46:34 +0300 | [diff] [blame] | 1070 | /* VLAN_HLEN is added twice, to support skbs VLAN tagged with multiple |
 | 1071 | * headers (for example: ETH_P_8021Q and ETH_P_8021AD). |
| 1072 | */ |
| 1073 | int eff_mtu = dev->mtu + ETH_HLEN + (2 * VLAN_HLEN); |
Yevgeny Petrilin | c27a02c | 2008-10-22 15:47:49 -0700 | [diff] [blame] | 1074 | int buf_size = 0; |
| 1075 | int i = 0; |
| 1076 | |
| 1077 | while (buf_size < eff_mtu) { |
| 1078 | priv->frag_info[i].frag_size = |
| 1079 | (eff_mtu > buf_size + frag_sizes[i]) ? |
| 1080 | frag_sizes[i] : eff_mtu - buf_size; |
| 1081 | priv->frag_info[i].frag_prefix_size = buf_size; |
Ido Shamay | e8e7f01 | 2015-02-03 17:57:20 +0200 | [diff] [blame] | 1082 | priv->frag_info[i].frag_stride = |
| 1083 | ALIGN(priv->frag_info[i].frag_size, |
| 1084 | SMP_CACHE_BYTES); |
Yevgeny Petrilin | c27a02c | 2008-10-22 15:47:49 -0700 | [diff] [blame] | 1085 | buf_size += priv->frag_info[i].frag_size; |
| 1086 | i++; |
| 1087 | } |
| 1088 | |
| 1089 | priv->num_frags = i; |
| 1090 | priv->rx_skb_size = eff_mtu; |
Thadeu Lima de Souza Cascardo | 4cce66c | 2012-07-16 07:01:53 +0000 | [diff] [blame] | 1091 | priv->log_rx_info = ROUNDUP_LOG2(i * sizeof(struct mlx4_en_rx_alloc)); |
Yevgeny Petrilin | c27a02c | 2008-10-22 15:47:49 -0700 | [diff] [blame] | 1092 | |
Joe Perches | 1a91de2 | 2014-05-07 12:52:57 -0700 | [diff] [blame] | 1093 | en_dbg(DRV, priv, "Rx buffer scatter-list (effective-mtu:%d num_frags:%d):\n", |
| 1094 | eff_mtu, priv->num_frags); |
Yevgeny Petrilin | c27a02c | 2008-10-22 15:47:49 -0700 | [diff] [blame] | 1095 | for (i = 0; i < priv->num_frags; i++) { |
Eric Dumazet | 51151a1 | 2013-06-23 08:17:56 -0700 | [diff] [blame] | 1096 | en_dbg(DRV, priv, |
Ido Shamay | 5f6e980 | 2014-11-02 16:26:15 +0200 | [diff] [blame] | 1097 | " frag:%d - size:%d prefix:%d stride:%d\n", |
Eric Dumazet | 51151a1 | 2013-06-23 08:17:56 -0700 | [diff] [blame] | 1098 | i, |
| 1099 | priv->frag_info[i].frag_size, |
| 1100 | priv->frag_info[i].frag_prefix_size, |
Eric Dumazet | 51151a1 | 2013-06-23 08:17:56 -0700 | [diff] [blame] | 1101 | priv->frag_info[i].frag_stride); |
Yevgeny Petrilin | c27a02c | 2008-10-22 15:47:49 -0700 | [diff] [blame] | 1102 | } |
| 1103 | } |
| 1104 | |
| 1105 | /* RSS related functions */ |
| 1106 | |
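 | | /* Create one RSS QP per RX ring and bring it to the ready state, |
 | | * bound to the ring's CQ. FCS stripping is also decided here: when |
 | | * the FW supports MLX4_DEV_CAP_FLAG_FCS_KEEP, removal is cancelled |
 | | * and ring->fcs_del tells the RX path how many trailing bytes to |
 | | * trim in software. |
 | | */ |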
Yevgeny Petrilin | 9f519f6 | 2009-08-06 19:28:18 -0700 | [diff] [blame] | 1107 | static int mlx4_en_config_rss_qp(struct mlx4_en_priv *priv, int qpn, |
| 1108 | struct mlx4_en_rx_ring *ring, |
Yevgeny Petrilin | c27a02c | 2008-10-22 15:47:49 -0700 | [diff] [blame] | 1109 | enum mlx4_qp_state *state, |
| 1110 | struct mlx4_qp *qp) |
| 1111 | { |
| 1112 | struct mlx4_en_dev *mdev = priv->mdev; |
| 1113 | struct mlx4_qp_context *context; |
| 1114 | int err = 0; |
| 1115 | |
Joe Perches | 14f8dc4 | 2013-02-07 11:46:27 +0000 | [diff] [blame] | 1116 | context = kmalloc(sizeof(*context), GFP_KERNEL); |
| 1117 | if (!context) |
Yevgeny Petrilin | c27a02c | 2008-10-22 15:47:49 -0700 | [diff] [blame] | 1118 | return -ENOMEM; |
Yevgeny Petrilin | c27a02c | 2008-10-22 15:47:49 -0700 | [diff] [blame] | 1119 | |
Jiri Kosina | 40f2287 | 2014-05-11 15:15:12 +0300 | [diff] [blame] | 1120 | err = mlx4_qp_alloc(mdev->dev, qpn, qp, GFP_KERNEL); |
Yevgeny Petrilin | c27a02c | 2008-10-22 15:47:49 -0700 | [diff] [blame] | 1121 | if (err) { |
Yevgeny Petrilin | 453a608 | 2009-06-01 20:27:13 +0000 | [diff] [blame] | 1122 | en_err(priv, "Failed to allocate qp #%x\n", qpn); |
Yevgeny Petrilin | c27a02c | 2008-10-22 15:47:49 -0700 | [diff] [blame] | 1123 | goto out; |
Yevgeny Petrilin | c27a02c | 2008-10-22 15:47:49 -0700 | [diff] [blame] | 1124 | } |
| 1125 | qp->event = mlx4_en_sqp_event; |
| 1126 | |
 | 1127 | memset(context, 0, sizeof(*context)); |
Yevgeny Petrilin | 00d7d7b | 2010-08-24 03:45:20 +0000 | [diff] [blame] | 1128 | mlx4_en_fill_qp_context(priv, ring->actual_size, ring->stride, 0, 0, |
Amir Vadai | 0e98b52 | 2012-04-04 21:33:24 +0000 | [diff] [blame] | 1129 | qpn, ring->cqn, -1, context); |
Yevgeny Petrilin | 9f519f6 | 2009-08-06 19:28:18 -0700 | [diff] [blame] | 1130 | context->db_rec_addr = cpu_to_be64(ring->wqres.db.dma); |
Yevgeny Petrilin | c27a02c | 2008-10-22 15:47:49 -0700 | [diff] [blame] | 1131 | |
Yevgeny Petrilin | f3a9d1f | 2011-10-18 01:50:42 +0000 | [diff] [blame] | 1132 | /* Cancel FCS removal if FW allows */ |
Yevgeny Petrilin | 4a5f4dd | 2011-11-14 14:25:36 -0500 | [diff] [blame] | 1133 | if (mdev->dev->caps.flags & MLX4_DEV_CAP_FLAG_FCS_KEEP) { |
Yevgeny Petrilin | f3a9d1f | 2011-10-18 01:50:42 +0000 | [diff] [blame] | 1134 | context->param3 |= cpu_to_be32(1 << 29); |
Muhammad Mahajna | f0df350 | 2015-04-02 16:31:21 +0300 | [diff] [blame] | 1135 | if (priv->dev->features & NETIF_F_RXFCS) |
| 1136 | ring->fcs_del = 0; |
| 1137 | else |
| 1138 | ring->fcs_del = ETH_FCS_LEN; |
Yevgeny Petrilin | 4a5f4dd | 2011-11-14 14:25:36 -0500 | [diff] [blame] | 1139 | } else { |
 | 1140 | ring->fcs_del = 0; |
 | | } |
Yevgeny Petrilin | f3a9d1f | 2011-10-18 01:50:42 +0000 | [diff] [blame] | 1141 | |
Yevgeny Petrilin | 9f519f6 | 2009-08-06 19:28:18 -0700 | [diff] [blame] | 1142 | err = mlx4_qp_to_ready(mdev->dev, &ring->wqres.mtt, context, qp, state); |
Yevgeny Petrilin | c27a02c | 2008-10-22 15:47:49 -0700 | [diff] [blame] | 1143 | if (err) { |
| 1144 | mlx4_qp_remove(mdev->dev, qp); |
| 1145 | mlx4_qp_free(mdev->dev, qp); |
| 1146 | } |
Yevgeny Petrilin | 9f519f6 | 2009-08-06 19:28:18 -0700 | [diff] [blame] | 1147 | mlx4_en_update_rx_prod_db(ring); |
Yevgeny Petrilin | c27a02c | 2008-10-22 15:47:49 -0700 | [diff] [blame] | 1148 | out: |
| 1149 | kfree(context); |
| 1150 | return err; |
| 1151 | } |
| 1152 | |
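 | | /* The drop QP is allocated but never attached to a CQ or given |
 | | * buffers; steering rules that should discard traffic (e.g. ethtool |
 | | * N-tuple drop rules) can point at it, so matching packets are |
 | | * silently dropped by the HW. |
 | | */ |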
Hadar Hen Zion | cabdc8ee | 2012-07-05 04:03:50 +0000 | [diff] [blame] | 1153 | int mlx4_en_create_drop_qp(struct mlx4_en_priv *priv) |
| 1154 | { |
| 1155 | int err; |
| 1156 | u32 qpn; |
| 1157 | |
Matan Barak | d57febe | 2014-12-11 10:57:57 +0200 | [diff] [blame] | 1158 | err = mlx4_qp_reserve_range(priv->mdev->dev, 1, 1, &qpn, |
| 1159 | MLX4_RESERVE_A0_QP); |
Hadar Hen Zion | cabdc8ee | 2012-07-05 04:03:50 +0000 | [diff] [blame] | 1160 | if (err) { |
| 1161 | en_err(priv, "Failed reserving drop qpn\n"); |
| 1162 | return err; |
| 1163 | } |
Jiri Kosina | 40f2287 | 2014-05-11 15:15:12 +0300 | [diff] [blame] | 1164 | err = mlx4_qp_alloc(priv->mdev->dev, qpn, &priv->drop_qp, GFP_KERNEL); |
Hadar Hen Zion | cabdc8ee | 2012-07-05 04:03:50 +0000 | [diff] [blame] | 1165 | if (err) { |
| 1166 | en_err(priv, "Failed allocating drop qp\n"); |
| 1167 | mlx4_qp_release_range(priv->mdev->dev, qpn, 1); |
| 1168 | return err; |
| 1169 | } |
| 1170 | |
| 1171 | return 0; |
| 1172 | } |
| 1173 | |
| 1174 | void mlx4_en_destroy_drop_qp(struct mlx4_en_priv *priv) |
| 1175 | { |
| 1176 | u32 qpn; |
| 1177 | |
| 1178 | qpn = priv->drop_qp.qpn; |
| 1179 | mlx4_qp_remove(priv->mdev->dev, &priv->drop_qp); |
| 1180 | mlx4_qp_free(priv->mdev->dev, &priv->drop_qp); |
| 1181 | mlx4_qp_release_range(priv->mdev->dev, qpn, 1); |
| 1182 | } |
| 1183 | |
Yevgeny Petrilin | c27a02c | 2008-10-22 15:47:49 -0700 | [diff] [blame] | 1184 | /* Allocate RX QPs and configure them according to the RSS map */ |
| 1185 | int mlx4_en_config_rss_steer(struct mlx4_en_priv *priv) |
| 1186 | { |
| 1187 | struct mlx4_en_dev *mdev = priv->mdev; |
| 1188 | struct mlx4_en_rss_map *rss_map = &priv->rss_map; |
| 1189 | struct mlx4_qp_context context; |
Or Gerlitz | 876f6e6 | 2011-11-26 19:54:58 +0000 | [diff] [blame] | 1190 | struct mlx4_rss_context *rss_context; |
Yevgeny Petrilin | 93d3e36 | 2012-01-17 22:54:55 +0000 | [diff] [blame] | 1191 | int rss_rings; |
Yevgeny Petrilin | c27a02c | 2008-10-22 15:47:49 -0700 | [diff] [blame] | 1192 | void *ptr; |
Or Gerlitz | 876f6e6 | 2011-11-26 19:54:58 +0000 | [diff] [blame] | 1193 | u8 rss_mask = (MLX4_RSS_IPV4 | MLX4_RSS_TCP_IPV4 | MLX4_RSS_IPV6 | |
Or Gerlitz | 1202d46 | 2011-11-26 19:55:02 +0000 | [diff] [blame] | 1194 | MLX4_RSS_TCP_IPV6); |
Yevgeny Petrilin | 9f519f6 | 2009-08-06 19:28:18 -0700 | [diff] [blame] | 1195 | int i, qpn; |
Yevgeny Petrilin | c27a02c | 2008-10-22 15:47:49 -0700 | [diff] [blame] | 1196 | int err = 0; |
| 1197 | int good_qps = 0; |
| 1198 | |
Yevgeny Petrilin | 453a608 | 2009-06-01 20:27:13 +0000 | [diff] [blame] | 1199 | en_dbg(DRV, priv, "Configuring rss steering\n"); |
Yevgeny Petrilin | b6b912e | 2009-08-06 19:27:51 -0700 | [diff] [blame] | 1200 | err = mlx4_qp_reserve_range(mdev->dev, priv->rx_ring_num, |
| 1201 | priv->rx_ring_num, |
Eugenia Emantayev | ddae034 | 2014-12-11 10:57:54 +0200 | [diff] [blame] | 1202 | &rss_map->base_qpn, 0); |
Yevgeny Petrilin | c27a02c | 2008-10-22 15:47:49 -0700 | [diff] [blame] | 1203 | if (err) { |
Yevgeny Petrilin | b6b912e | 2009-08-06 19:27:51 -0700 | [diff] [blame] | 1204 | en_err(priv, "Failed reserving %d qps\n", priv->rx_ring_num); |
Yevgeny Petrilin | c27a02c | 2008-10-22 15:47:49 -0700 | [diff] [blame] | 1205 | return err; |
| 1206 | } |
| 1207 | |
Yevgeny Petrilin | b6b912e | 2009-08-06 19:27:51 -0700 | [diff] [blame] | 1208 | for (i = 0; i < priv->rx_ring_num; i++) { |
Yevgeny Petrilin | c27a02c | 2008-10-22 15:47:49 -0700 | [diff] [blame] | 1209 | qpn = rss_map->base_qpn + i; |
Eugenia Emantayev | 41d942d | 2013-11-07 12:19:52 +0200 | [diff] [blame] | 1210 | err = mlx4_en_config_rss_qp(priv, qpn, priv->rx_ring[i], |
Yevgeny Petrilin | c27a02c | 2008-10-22 15:47:49 -0700 | [diff] [blame] | 1211 | &rss_map->state[i], |
| 1212 | &rss_map->qps[i]); |
| 1213 | if (err) |
| 1214 | goto rss_err; |
| 1215 | |
| 1216 | ++good_qps; |
| 1217 | } |
| 1218 | |
| 1219 | /* Configure RSS indirection qp */ |
Jiri Kosina | 40f2287 | 2014-05-11 15:15:12 +0300 | [diff] [blame] | 1220 | err = mlx4_qp_alloc(mdev->dev, priv->base_qpn, &rss_map->indir_qp, GFP_KERNEL); |
Yevgeny Petrilin | c27a02c | 2008-10-22 15:47:49 -0700 | [diff] [blame] | 1221 | if (err) { |
Yevgeny Petrilin | 453a608 | 2009-06-01 20:27:13 +0000 | [diff] [blame] | 1222 | en_err(priv, "Failed to allocate RSS indirection QP\n"); |
Yevgeny Petrilin | 1679200 | 2011-03-22 22:38:31 +0000 | [diff] [blame] | 1223 | goto rss_err; |
Yevgeny Petrilin | c27a02c | 2008-10-22 15:47:49 -0700 | [diff] [blame] | 1224 | } |
| 1225 | rss_map->indir_qp.event = mlx4_en_sqp_event; |
| 1226 | mlx4_en_fill_qp_context(priv, 0, 0, 0, 1, priv->base_qpn, |
Eugenia Emantayev | 41d942d | 2013-11-07 12:19:52 +0200 | [diff] [blame] | 1227 | priv->rx_ring[0]->cqn, -1, &context); |
Yevgeny Petrilin | c27a02c | 2008-10-22 15:47:49 -0700 | [diff] [blame] | 1228 | |
Yevgeny Petrilin | 93d3e36 | 2012-01-17 22:54:55 +0000 | [diff] [blame] | 1229 | if (!priv->prof->rss_rings || priv->prof->rss_rings > priv->rx_ring_num) |
| 1230 | rss_rings = priv->rx_ring_num; |
| 1231 | else |
| 1232 | rss_rings = priv->prof->rss_rings; |
| 1233 | |
Or Gerlitz | 876f6e6 | 2011-11-26 19:54:58 +0000 | [diff] [blame] | 1234 | ptr = ((void *) &context) + offsetof(struct mlx4_qp_context, pri_path) |
| 1235 | + MLX4_RSS_OFFSET_IN_QPC_PRI_PATH; |
Joe Perches | 43d620c | 2011-06-16 19:08:06 +0000 | [diff] [blame] | 1236 | rss_context = ptr; |
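 | | /* The top byte of base_qpn carries log2 of the RSS table size, so a |
 | | * non-power-of-two rss_rings would effectively be rounded down by |
 | | * ilog2() here. |
 | | */ |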
Yevgeny Petrilin | 93d3e36 | 2012-01-17 22:54:55 +0000 | [diff] [blame] | 1237 | rss_context->base_qpn = cpu_to_be32(ilog2(rss_rings) << 24 | |
Yevgeny Petrilin | c27a02c | 2008-10-22 15:47:49 -0700 | [diff] [blame] | 1238 | (rss_map->base_qpn)); |
Yevgeny Petrilin | 89efea2 | 2011-12-19 21:53:38 +0000 | [diff] [blame] | 1239 | rss_context->default_qpn = cpu_to_be32(rss_map->base_qpn); |
Or Gerlitz | 1202d46 | 2011-11-26 19:55:02 +0000 | [diff] [blame] | 1240 | if (priv->mdev->profile.udp_rss) { |
| 1241 | rss_mask |= MLX4_RSS_UDP_IPV4 | MLX4_RSS_UDP_IPV6; |
| 1242 | rss_context->base_qpn_udp = rss_context->default_qpn; |
| 1243 | } |
Or Gerlitz | 837052d | 2013-12-23 16:09:44 +0200 | [diff] [blame] | 1244 | |
| 1245 | if (mdev->dev->caps.tunnel_offload_mode == MLX4_TUNNEL_OFFLOAD_MODE_VXLAN) { |
| 1246 | en_info(priv, "Setting RSS context tunnel type to RSS on inner headers\n"); |
| 1247 | rss_mask |= MLX4_RSS_BY_INNER_HEADERS; |
| 1248 | } |
| 1249 | |
Yevgeny Petrilin | 0533943 | 2010-08-24 03:46:42 +0000 | [diff] [blame] | 1250 | rss_context->flags = rss_mask; |
Or Gerlitz | 876f6e6 | 2011-11-26 19:54:58 +0000 | [diff] [blame] | 1251 | rss_context->hash_fn = MLX4_RSS_HASH_TOP; |
Eyal Perry | 947cbb0 | 2014-12-02 18:12:11 +0200 | [diff] [blame] | 1252 | if (priv->rss_hash_fn == ETH_RSS_HASH_XOR) { |
| 1253 | rss_context->hash_fn = MLX4_RSS_HASH_XOR; |
| 1254 | } else if (priv->rss_hash_fn == ETH_RSS_HASH_TOP) { |
| 1255 | rss_context->hash_fn = MLX4_RSS_HASH_TOP; |
| 1256 | memcpy(rss_context->rss_key, priv->rss_key, |
| 1257 | MLX4_EN_RSS_KEY_SIZE); |
Eyal Perry | 947cbb0 | 2014-12-02 18:12:11 +0200 | [diff] [blame] | 1258 | } else { |
| 1259 | en_err(priv, "Unknown RSS hash function requested\n"); |
| 1260 | err = -EINVAL; |
| 1261 | goto indir_err; |
| 1262 | } |
Yevgeny Petrilin | c27a02c | 2008-10-22 15:47:49 -0700 | [diff] [blame] | 1263 | err = mlx4_qp_to_ready(mdev->dev, &priv->res.mtt, &context, |
| 1264 | &rss_map->indir_qp, &rss_map->indir_state); |
| 1265 | if (err) |
| 1266 | goto indir_err; |
| 1267 | |
| 1268 | return 0; |
| 1269 | |
| 1270 | indir_err: |
| 1271 | mlx4_qp_modify(mdev->dev, NULL, rss_map->indir_state, |
| 1272 | MLX4_QP_STATE_RST, NULL, 0, 0, &rss_map->indir_qp); |
| 1273 | mlx4_qp_remove(mdev->dev, &rss_map->indir_qp); |
| 1274 | mlx4_qp_free(mdev->dev, &rss_map->indir_qp); |
Yevgeny Petrilin | c27a02c | 2008-10-22 15:47:49 -0700 | [diff] [blame] | 1275 | rss_err: |
| 1276 | for (i = 0; i < good_qps; i++) { |
| 1277 | mlx4_qp_modify(mdev->dev, NULL, rss_map->state[i], |
| 1278 | MLX4_QP_STATE_RST, NULL, 0, 0, &rss_map->qps[i]); |
| 1279 | mlx4_qp_remove(mdev->dev, &rss_map->qps[i]); |
| 1280 | mlx4_qp_free(mdev->dev, &rss_map->qps[i]); |
| 1281 | } |
Yevgeny Petrilin | b6b912e | 2009-08-06 19:27:51 -0700 | [diff] [blame] | 1282 | mlx4_qp_release_range(mdev->dev, rss_map->base_qpn, priv->rx_ring_num); |
Yevgeny Petrilin | c27a02c | 2008-10-22 15:47:49 -0700 | [diff] [blame] | 1283 | return err; |
| 1284 | } |
| 1285 | |
| 1286 | void mlx4_en_release_rss_steer(struct mlx4_en_priv *priv) |
| 1287 | { |
| 1288 | struct mlx4_en_dev *mdev = priv->mdev; |
| 1289 | struct mlx4_en_rss_map *rss_map = &priv->rss_map; |
| 1290 | int i; |
| 1291 | |
| 1292 | mlx4_qp_modify(mdev->dev, NULL, rss_map->indir_state, |
| 1293 | MLX4_QP_STATE_RST, NULL, 0, 0, &rss_map->indir_qp); |
| 1294 | mlx4_qp_remove(mdev->dev, &rss_map->indir_qp); |
| 1295 | mlx4_qp_free(mdev->dev, &rss_map->indir_qp); |
Yevgeny Petrilin | c27a02c | 2008-10-22 15:47:49 -0700 | [diff] [blame] | 1296 | |
Yevgeny Petrilin | b6b912e | 2009-08-06 19:27:51 -0700 | [diff] [blame] | 1297 | for (i = 0; i < priv->rx_ring_num; i++) { |
Yevgeny Petrilin | c27a02c | 2008-10-22 15:47:49 -0700 | [diff] [blame] | 1298 | mlx4_qp_modify(mdev->dev, NULL, rss_map->state[i], |
| 1299 | MLX4_QP_STATE_RST, NULL, 0, 0, &rss_map->qps[i]); |
| 1300 | mlx4_qp_remove(mdev->dev, &rss_map->qps[i]); |
| 1301 | mlx4_qp_free(mdev->dev, &rss_map->qps[i]); |
| 1302 | } |
Yevgeny Petrilin | b6b912e | 2009-08-06 19:27:51 -0700 | [diff] [blame] | 1303 | mlx4_qp_release_range(mdev->dev, rss_map->base_qpn, priv->rx_ring_num); |
Yevgeny Petrilin | c27a02c | 2008-10-22 15:47:49 -0700 | [diff] [blame] | 1304 | } |