/*
 * Copyright (c) 2007 Mellanox Technologies. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 *
 */

#include <linux/mlx4/cq.h>
#include <linux/slab.h>
#include <linux/mlx4/qp.h>
#include <linux/skbuff.h>
#include <linux/if_ether.h>
#include <linux/if_vlan.h>
#include <linux/vmalloc.h>

#include "mlx4_en.h"

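/*
 * Hand out one receive fragment from the per-ring page allocator: the
 * allocator's current page/offset is recorded in skb_frags[i], DMA-mapped
 * and written into slot 'i' of the RX descriptor.  A new page is allocated
 * (and the old one handed out without an extra reference) only once the
 * current page is exhausted; otherwise the page refcount is bumped and
 * the offset advanced by one stride.
 */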
static int mlx4_en_alloc_frag(struct mlx4_en_priv *priv,
			      struct mlx4_en_rx_desc *rx_desc,
			      struct page_frag *skb_frags,
			      struct mlx4_en_rx_alloc *ring_alloc,
			      int i)
{
	struct mlx4_en_dev *mdev = priv->mdev;
	struct mlx4_en_frag_info *frag_info = &priv->frag_info[i];
	struct mlx4_en_rx_alloc *page_alloc = &ring_alloc[i];
	struct page *page;
	dma_addr_t dma;

	if (page_alloc->offset == frag_info->last_offset) {
		/* Allocate new page */
		page = alloc_pages(GFP_ATOMIC | __GFP_COMP, MLX4_EN_ALLOC_ORDER);
		if (!page)
			return -ENOMEM;

		skb_frags[i].page = page_alloc->page;
		skb_frags[i].offset = page_alloc->offset;
		page_alloc->page = page;
		page_alloc->offset = frag_info->frag_align;
	} else {
		page = page_alloc->page;
		get_page(page);

		skb_frags[i].page = page;
		skb_frags[i].offset = page_alloc->offset;
		page_alloc->offset += frag_info->frag_stride;
	}
	dma = pci_map_single(mdev->pdev, page_address(skb_frags[i].page) +
			     skb_frags[i].offset, frag_info->frag_size,
			     PCI_DMA_FROMDEVICE);
	rx_desc->data[i].addr = cpu_to_be64(dma);
	return 0;
}

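/*
 * Seed each of the ring's per-fragment page allocators with an initial
 * page; on failure, release the pages already obtained.
 */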
static int mlx4_en_init_allocator(struct mlx4_en_priv *priv,
				  struct mlx4_en_rx_ring *ring)
{
	struct mlx4_en_rx_alloc *page_alloc;
	int i;

	for (i = 0; i < priv->num_frags; i++) {
		page_alloc = &ring->page_alloc[i];
		page_alloc->page = alloc_pages(GFP_ATOMIC | __GFP_COMP,
					       MLX4_EN_ALLOC_ORDER);
		if (!page_alloc->page)
			goto out;

		page_alloc->offset = priv->frag_info[i].frag_align;
		en_dbg(DRV, priv, "Initialized allocator:%d with page:%p\n",
		       i, page_alloc->page);
	}
	return 0;

out:
	while (i--) {
		page_alloc = &ring->page_alloc[i];
		put_page(page_alloc->page);
		page_alloc->page = NULL;
	}
	return -ENOMEM;
}

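/* Drop the allocators' remaining page references when the ring goes down. */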
static void mlx4_en_destroy_allocator(struct mlx4_en_priv *priv,
				      struct mlx4_en_rx_ring *ring)
{
	struct mlx4_en_rx_alloc *page_alloc;
	int i;

	for (i = 0; i < priv->num_frags; i++) {
		page_alloc = &ring->page_alloc[i];
		en_dbg(DRV, priv, "Freeing allocator:%d count:%d\n",
		       i, page_count(page_alloc->page));

		put_page(page_alloc->page);
		page_alloc->page = NULL;
	}
}

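/*
 * Write the constant parts of one RX descriptor: per-fragment byte counts
 * and memory key, with any scatter entries beyond num_frags padded using
 * the special NULL-descriptor memory key.
 */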
static void mlx4_en_init_rx_desc(struct mlx4_en_priv *priv,
				 struct mlx4_en_rx_ring *ring, int index)
{
	struct mlx4_en_rx_desc *rx_desc = ring->buf + ring->stride * index;
	struct skb_frag_struct *skb_frags = ring->rx_info +
					    (index << priv->log_rx_info);
	int possible_frags;
	int i;

	/* Set size and memtype fields */
	for (i = 0; i < priv->num_frags; i++) {
		skb_frag_size_set(&skb_frags[i], priv->frag_info[i].frag_size);
		rx_desc->data[i].byte_count =
			cpu_to_be32(priv->frag_info[i].frag_size);
		rx_desc->data[i].lkey = cpu_to_be32(priv->mdev->mr.key);
	}

	/* If the number of used fragments does not fill up the ring stride,
	 * remaining (unused) fragments must be padded with null address/size
	 * and a special memory key */
	possible_frags = (ring->stride - sizeof(struct mlx4_en_rx_desc)) / DS_SIZE;
	for (i = priv->num_frags; i < possible_frags; i++) {
		rx_desc->data[i].byte_count = 0;
		rx_desc->data[i].lkey = cpu_to_be32(MLX4_EN_MEMTYPE_PAD);
		rx_desc->data[i].addr = 0;
	}
}

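/* Populate all fragments of one RX descriptor, unwinding on failure. */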
static int mlx4_en_prepare_rx_desc(struct mlx4_en_priv *priv,
				   struct mlx4_en_rx_ring *ring, int index)
{
	struct mlx4_en_rx_desc *rx_desc = ring->buf + (index * ring->stride);
	struct page_frag *skb_frags = ring->rx_info +
				      (index << priv->log_rx_info);
	int i;

	for (i = 0; i < priv->num_frags; i++)
		if (mlx4_en_alloc_frag(priv, rx_desc, skb_frags, ring->page_alloc, i))
			goto err;

	return 0;

err:
	while (i--)
		put_page(skb_frags[i].page);
	return -ENOMEM;
}

static inline void mlx4_en_update_rx_prod_db(struct mlx4_en_rx_ring *ring)
{
	*ring->wqres.db.db = cpu_to_be32(ring->prod & 0xffff);
}

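/* Unmap and release every fragment still attached to one RX descriptor. */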
static void mlx4_en_free_rx_desc(struct mlx4_en_priv *priv,
				 struct mlx4_en_rx_ring *ring,
				 int index)
{
	struct mlx4_en_dev *mdev = priv->mdev;
	struct page_frag *skb_frags;
	struct mlx4_en_rx_desc *rx_desc = ring->buf + (index << ring->log_stride);
	dma_addr_t dma;
	int nr;

	skb_frags = ring->rx_info + (index << priv->log_rx_info);
	for (nr = 0; nr < priv->num_frags; nr++) {
		en_dbg(DRV, priv, "Freeing fragment:%d\n", nr);
		dma = be64_to_cpu(rx_desc->data[nr].addr);

		en_dbg(DRV, priv, "Unmapping buffer at dma:0x%llx\n", (u64) dma);
		pci_unmap_single(mdev->pdev, dma, skb_frags[nr].size,
				 PCI_DMA_FROMDEVICE);
		put_page(skb_frags[nr].page);
	}
}

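/*
 * Post receive buffers on every ring until each holds rx_ring_size
 * descriptors.  If allocation stalls above the minimum ring size, all
 * rings are trimmed to the largest power of two actually filled.
 */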
static int mlx4_en_fill_rx_buffers(struct mlx4_en_priv *priv)
{
	struct mlx4_en_rx_ring *ring;
	int ring_ind;
	int buf_ind;
	int new_size;

	for (buf_ind = 0; buf_ind < priv->prof->rx_ring_size; buf_ind++) {
		for (ring_ind = 0; ring_ind < priv->rx_ring_num; ring_ind++) {
			ring = &priv->rx_ring[ring_ind];

			if (mlx4_en_prepare_rx_desc(priv, ring,
						    ring->actual_size)) {
				if (ring->actual_size < MLX4_EN_MIN_RX_SIZE) {
					en_err(priv, "Failed to allocate "
						     "enough rx buffers\n");
					return -ENOMEM;
				} else {
					new_size = rounddown_pow_of_two(ring->actual_size);
					en_warn(priv, "Only %d buffers allocated, "
						      "reducing ring size to %d\n",
						ring->actual_size, new_size);
					goto reduce_rings;
				}
			}
			ring->actual_size++;
			ring->prod++;
		}
	}
	return 0;

reduce_rings:
	for (ring_ind = 0; ring_ind < priv->rx_ring_num; ring_ind++) {
		ring = &priv->rx_ring[ring_ind];
		while (ring->actual_size > new_size) {
			ring->actual_size--;
			ring->prod--;
			mlx4_en_free_rx_desc(priv, ring, ring->actual_size);
		}
	}

	return 0;
}

static void mlx4_en_free_rx_buf(struct mlx4_en_priv *priv,
				struct mlx4_en_rx_ring *ring)
{
	int index;

	en_dbg(DRV, priv, "Freeing Rx buf - cons:%d prod:%d\n",
	       ring->cons, ring->prod);

	/* Unmap and free Rx buffers */
	BUG_ON((u32) (ring->prod - ring->cons) > ring->actual_size);
	while (ring->cons != ring->prod) {
		index = ring->cons & ring->size_mask;
		en_dbg(DRV, priv, "Processing descriptor:%d\n", index);
		mlx4_en_free_rx_desc(priv, ring, index);
		++ring->cons;
	}
}

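/*
 * Allocate the per-ring software state (the rx_info array shadowing the
 * pages behind each descriptor) and the hardware work-queue resources.
 * The buffer is sized with one extra TXBB, which activate/deactivate skip
 * over when the descriptor stride is small (see ring->buf += TXBB_SIZE).
 */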
int mlx4_en_create_rx_ring(struct mlx4_en_priv *priv,
			   struct mlx4_en_rx_ring *ring, u32 size, u16 stride)
{
	struct mlx4_en_dev *mdev = priv->mdev;
	int err;
	int tmp;

	ring->prod = 0;
	ring->cons = 0;
	ring->size = size;
	ring->size_mask = size - 1;
	ring->stride = stride;
	ring->log_stride = ffs(ring->stride) - 1;
	ring->buf_size = ring->size * ring->stride + TXBB_SIZE;

	tmp = size * roundup_pow_of_two(MLX4_EN_MAX_RX_FRAGS *
					sizeof(struct skb_frag_struct));
	ring->rx_info = vmalloc(tmp);
	if (!ring->rx_info) {
		en_err(priv, "Failed allocating rx_info ring\n");
		return -ENOMEM;
	}
	en_dbg(DRV, priv, "Allocated rx_info ring at addr:%p size:%d\n",
	       ring->rx_info, tmp);

	err = mlx4_alloc_hwq_res(mdev->dev, &ring->wqres,
				 ring->buf_size, 2 * PAGE_SIZE);
	if (err)
		goto err_ring;

	err = mlx4_en_map_buffer(&ring->wqres.buf);
	if (err) {
		en_err(priv, "Failed to map RX buffer\n");
		goto err_hwq;
	}
	ring->buf = ring->wqres.buf.direct.buf;

	return 0;

err_hwq:
	mlx4_free_hwq_res(mdev->dev, &ring->wqres, ring->buf_size);
err_ring:
	vfree(ring->rx_info);
	ring->rx_info = NULL;
	return err;
}

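/*
 * Bring all RX rings to an operational state: reset indices, initialize
 * every descriptor and page allocator, pre-fill the rings with buffers
 * and publish the producer index to hardware.  On failure the rings that
 * were already set up are unwound in reverse order.
 */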
int mlx4_en_activate_rx_rings(struct mlx4_en_priv *priv)
{
	struct mlx4_en_rx_ring *ring;
	int i;
	int ring_ind;
	int err;
	int stride = roundup_pow_of_two(sizeof(struct mlx4_en_rx_desc) +
					DS_SIZE * priv->num_frags);

	for (ring_ind = 0; ring_ind < priv->rx_ring_num; ring_ind++) {
		ring = &priv->rx_ring[ring_ind];

		ring->prod = 0;
		ring->cons = 0;
		ring->actual_size = 0;
		ring->cqn = priv->rx_cq[ring_ind].mcq.cqn;

		ring->stride = stride;
		if (ring->stride <= TXBB_SIZE)
			ring->buf += TXBB_SIZE;

		ring->log_stride = ffs(ring->stride) - 1;
		ring->buf_size = ring->size * ring->stride;

		memset(ring->buf, 0, ring->buf_size);
		mlx4_en_update_rx_prod_db(ring);

		/* Initialize all descriptors */
		for (i = 0; i < ring->size; i++)
			mlx4_en_init_rx_desc(priv, ring, i);

		/* Initialize page allocators */
		err = mlx4_en_init_allocator(priv, ring);
		if (err) {
			en_err(priv, "Failed initializing ring allocator\n");
			if (ring->stride <= TXBB_SIZE)
				ring->buf -= TXBB_SIZE;
			ring_ind--;
			goto err_allocator;
		}
	}
	err = mlx4_en_fill_rx_buffers(priv);
	if (err)
		goto err_buffers;

	for (ring_ind = 0; ring_ind < priv->rx_ring_num; ring_ind++) {
		ring = &priv->rx_ring[ring_ind];

		ring->size_mask = ring->actual_size - 1;
		mlx4_en_update_rx_prod_db(ring);
	}

	return 0;

err_buffers:
	for (ring_ind = 0; ring_ind < priv->rx_ring_num; ring_ind++)
		mlx4_en_free_rx_buf(priv, &priv->rx_ring[ring_ind]);

	ring_ind = priv->rx_ring_num - 1;
err_allocator:
	while (ring_ind >= 0) {
		if (priv->rx_ring[ring_ind].stride <= TXBB_SIZE)
			priv->rx_ring[ring_ind].buf -= TXBB_SIZE;
		mlx4_en_destroy_allocator(priv, &priv->rx_ring[ring_ind]);
		ring_ind--;
	}
	return err;
}

void mlx4_en_destroy_rx_ring(struct mlx4_en_priv *priv,
			     struct mlx4_en_rx_ring *ring)
{
	struct mlx4_en_dev *mdev = priv->mdev;

	mlx4_en_unmap_buffer(&ring->wqres.buf);
	mlx4_free_hwq_res(mdev->dev, &ring->wqres, ring->buf_size + TXBB_SIZE);
	vfree(ring->rx_info);
	ring->rx_info = NULL;
}

void mlx4_en_deactivate_rx_ring(struct mlx4_en_priv *priv,
				struct mlx4_en_rx_ring *ring)
{
	mlx4_en_free_rx_buf(priv, ring);
	if (ring->stride <= TXBB_SIZE)
		ring->buf -= TXBB_SIZE;
	mlx4_en_destroy_allocator(priv, ring);
}

/* Unmap a completed descriptor and free unused pages */
static int mlx4_en_complete_rx_desc(struct mlx4_en_priv *priv,
				    struct mlx4_en_rx_desc *rx_desc,
				    struct page_frag *skb_frags,
				    struct sk_buff *skb,
				    struct mlx4_en_rx_alloc *page_alloc,
				    int length)
{
	struct skb_frag_struct *skb_frags_rx = skb_shinfo(skb)->frags;
	struct mlx4_en_dev *mdev = priv->mdev;
	struct mlx4_en_frag_info *frag_info;
	int nr;
	dma_addr_t dma;

	/* Collect used fragments while replacing them in the HW descriptors */
	for (nr = 0; nr < priv->num_frags; nr++) {
		frag_info = &priv->frag_info[nr];
		if (length <= frag_info->frag_prefix_size)
			break;

		/* Save page reference in skb */
		__skb_frag_set_page(&skb_frags_rx[nr], skb_frags[nr].page);
		skb_frag_size_set(&skb_frags_rx[nr], skb_frags[nr].size);
		skb_frags_rx[nr].page_offset = skb_frags[nr].offset;
		skb->truesize += frag_info->frag_stride;
		dma = be64_to_cpu(rx_desc->data[nr].addr);

		/* Allocate a replacement page */
		if (mlx4_en_alloc_frag(priv, rx_desc, skb_frags, page_alloc, nr))
			goto fail;

		/* Unmap buffer */
		pci_unmap_single(mdev->pdev, dma, skb_frag_size(&skb_frags_rx[nr]),
				 PCI_DMA_FROMDEVICE);
	}
	/* Adjust size of last fragment to match actual length */
	if (nr > 0)
		skb_frag_size_set(&skb_frags_rx[nr - 1],
			length - priv->frag_info[nr - 1].frag_prefix_size);
	return nr;

fail:
	/* Drop all accumulated fragments (which have already been replaced in
	 * the descriptor) of this packet; remaining fragments are reused... */
	while (nr > 0) {
		nr--;
		__skb_frag_unref(&skb_frags_rx[nr]);
	}
	return 0;
}

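/*
 * Build an skb for a completed descriptor.  Short packets (up to
 * SMALL_PACKET_SIZE) are copied whole into the linear area so their pages
 * stay with the ring; longer packets get their headers copied
 * (HEADER_COPY_SIZE bytes) and the remaining data attached as page
 * fragments via mlx4_en_complete_rx_desc().
 */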
static struct sk_buff *mlx4_en_rx_skb(struct mlx4_en_priv *priv,
				      struct mlx4_en_rx_desc *rx_desc,
				      struct page_frag *skb_frags,
				      struct mlx4_en_rx_alloc *page_alloc,
				      unsigned int length)
{
	struct mlx4_en_dev *mdev = priv->mdev;
	struct sk_buff *skb;
	void *va;
	int used_frags;
	dma_addr_t dma;

	skb = dev_alloc_skb(SMALL_PACKET_SIZE + NET_IP_ALIGN);
	if (!skb) {
		en_dbg(RX_ERR, priv, "Failed allocating skb\n");
		return NULL;
	}
	skb->dev = priv->dev;
	skb_reserve(skb, NET_IP_ALIGN);
	skb->len = length;

	/* Get pointer to first fragment so we could copy the headers into the
	 * (linear part of the) skb */
	va = page_address(skb_frags[0].page) + skb_frags[0].offset;

	if (length <= SMALL_PACKET_SIZE) {
		/* We are copying all relevant data to the skb - temporarily
		 * sync buffers for the copy */
		dma = be64_to_cpu(rx_desc->data[0].addr);
		dma_sync_single_for_cpu(&mdev->pdev->dev, dma, length,
					DMA_FROM_DEVICE);
		skb_copy_to_linear_data(skb, va, length);
		dma_sync_single_for_device(&mdev->pdev->dev, dma, length,
					   DMA_FROM_DEVICE);
		skb->tail += length;
	} else {

		/* Move relevant fragments to skb */
		used_frags = mlx4_en_complete_rx_desc(priv, rx_desc, skb_frags,
						      skb, page_alloc, length);
		if (unlikely(!used_frags)) {
			kfree_skb(skb);
			return NULL;
		}
		skb_shinfo(skb)->nr_frags = used_frags;

		/* Copy headers into the skb linear buffer */
		memcpy(skb->data, va, HEADER_COPY_SIZE);
		skb->tail += HEADER_COPY_SIZE;

		/* Skip headers in first fragment */
		skb_shinfo(skb)->frags[0].page_offset += HEADER_COPY_SIZE;

		/* Adjust size of first fragment */
		skb_frag_size_sub(&skb_shinfo(skb)->frags[0], HEADER_COPY_SIZE);
		skb->data_len = length - HEADER_COPY_SIZE;
	}
	return skb;
}

static void validate_loopback(struct mlx4_en_priv *priv, struct sk_buff *skb)
{
	int i;
	int offset = ETH_HLEN;

	for (i = 0; i < MLX4_LOOPBACK_TEST_PAYLOAD; i++, offset++) {
		if (*(skb->data + offset) != (unsigned char) (i & 0xff))
			goto out_loopback;
	}
	/* Loopback found */
	priv->loopback_ok = 1;

out_loopback:
	dev_kfree_skb_any(skb);
}

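/*
 * NAPI receive processing: walk the completion queue up to 'budget' CQEs,
 * validate each completion, and deliver packets either through
 * napi_gro_frags() (checksum already verified by HW) or netif_receive_skb().
 * Descriptors are refilled in place, so the producer index simply advances
 * by the number of polled completions.
 */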
int mlx4_en_process_rx_cq(struct net_device *dev, struct mlx4_en_cq *cq, int budget)
{
	struct mlx4_en_priv *priv = netdev_priv(dev);
	struct mlx4_cqe *cqe;
	struct mlx4_en_rx_ring *ring = &priv->rx_ring[cq->ring];
	struct page_frag *skb_frags;
	struct mlx4_en_rx_desc *rx_desc;
	struct sk_buff *skb;
	int index;
	int nr;
	unsigned int length;
	int polled = 0;
	int ip_summed;

	if (!priv->port_up)
		return 0;

	/* We assume a 1:1 mapping between CQEs and Rx descriptors, so Rx
	 * descriptor offset can be deduced from the CQE index instead of
	 * reading 'cqe->index' */
	index = cq->mcq.cons_index & ring->size_mask;
	cqe = &cq->buf[index];

	/* Process all completed CQEs */
	while (XNOR(cqe->owner_sr_opcode & MLX4_CQE_OWNER_MASK,
		    cq->mcq.cons_index & cq->size)) {

		skb_frags = ring->rx_info + (index << priv->log_rx_info);
		rx_desc = ring->buf + (index << ring->log_stride);

		/*
		 * make sure we read the CQE after we read the ownership bit
		 */
		rmb();

		/* Drop packet on bad receive or bad checksum */
		if (unlikely((cqe->owner_sr_opcode & MLX4_CQE_OPCODE_MASK) ==
						MLX4_CQE_OPCODE_ERROR)) {
			en_err(priv, "CQE completed in error - vendor "
				     "syndrome:%d syndrome:%d\n",
			       ((struct mlx4_err_cqe *) cqe)->vendor_err_syndrome,
			       ((struct mlx4_err_cqe *) cqe)->syndrome);
			goto next;
		}
		if (unlikely(cqe->badfcs_enc & MLX4_CQE_BAD_FCS)) {
			en_dbg(RX_ERR, priv, "Accepted frame with bad FCS\n");
			goto next;
		}

		/*
		 * Packet is OK - process it.
		 */
		length = be32_to_cpu(cqe->byte_cnt);
		length -= ring->fcs_del;
		ring->bytes += length;
		ring->packets++;

		if (likely(dev->features & NETIF_F_RXCSUM)) {
			if ((cqe->status & cpu_to_be16(MLX4_CQE_STATUS_IPOK)) &&
			    (cqe->checksum == cpu_to_be16(0xffff))) {
				ring->csum_ok++;
				/* This packet is eligible for GRO if it is:
				 * - DIX Ethernet (type interpretation)
				 * - TCP/IP (v4)
				 * - without IP options
				 * - not an IP fragment */
				if (dev->features & NETIF_F_GRO) {
					struct sk_buff *gro_skb = napi_get_frags(&cq->napi);
					if (!gro_skb)
						goto next;

					nr = mlx4_en_complete_rx_desc(
						priv, rx_desc,
						skb_frags, gro_skb,
						ring->page_alloc, length);
					if (!nr)
						goto next;

					skb_shinfo(gro_skb)->nr_frags = nr;
					gro_skb->len = length;
					gro_skb->data_len = length;
					gro_skb->ip_summed = CHECKSUM_UNNECESSARY;

					if (cqe->vlan_my_qpn &
					    cpu_to_be32(MLX4_CQE_VLAN_PRESENT_MASK)) {
						u16 vid = be16_to_cpu(cqe->sl_vid);

						__vlan_hwaccel_put_tag(gro_skb, vid);
					}

					if (dev->features & NETIF_F_RXHASH)
						gro_skb->rxhash = be32_to_cpu(cqe->immed_rss_invalid);

					skb_record_rx_queue(gro_skb, cq->ring);
					napi_gro_frags(&cq->napi);

					goto next;
				}

				/* GRO not possible, complete processing here */
				ip_summed = CHECKSUM_UNNECESSARY;
			} else {
				ip_summed = CHECKSUM_NONE;
				ring->csum_none++;
			}
		} else {
			ip_summed = CHECKSUM_NONE;
			ring->csum_none++;
		}

		skb = mlx4_en_rx_skb(priv, rx_desc, skb_frags,
				     ring->page_alloc, length);
		if (!skb) {
			priv->stats.rx_dropped++;
			goto next;
		}

		if (unlikely(priv->validate_loopback)) {
			validate_loopback(priv, skb);
			goto next;
		}

		skb->ip_summed = ip_summed;
		skb->protocol = eth_type_trans(skb, dev);
		skb_record_rx_queue(skb, cq->ring);

		if (dev->features & NETIF_F_RXHASH)
			skb->rxhash = be32_to_cpu(cqe->immed_rss_invalid);

		if (be32_to_cpu(cqe->vlan_my_qpn) &
		    MLX4_CQE_VLAN_PRESENT_MASK)
			__vlan_hwaccel_put_tag(skb, be16_to_cpu(cqe->sl_vid));

		/* Push it up the stack */
		netif_receive_skb(skb);

next:
		++cq->mcq.cons_index;
		index = (cq->mcq.cons_index) & ring->size_mask;
		cqe = &cq->buf[index];
		if (++polled == budget) {
			/* We are here because we reached the NAPI budget -
			 * flush only pending GRO sessions */
			goto out;
		}
	}

out:
	AVG_PERF_COUNTER(priv->pstats.rx_coal_avg, polled);
	mlx4_cq_set_ci(&cq->mcq);
	wmb(); /* ensure HW sees CQ consumer before we post new buffers */
	ring->cons = cq->mcq.cons_index;
	ring->prod += polled; /* Polled descriptors were reallocated in place */
	mlx4_en_update_rx_prod_db(ring);
	return polled;
}

void mlx4_en_rx_irq(struct mlx4_cq *mcq)
{
	struct mlx4_en_cq *cq = container_of(mcq, struct mlx4_en_cq, mcq);
	struct mlx4_en_priv *priv = netdev_priv(cq->dev);

	if (priv->port_up)
		napi_schedule(&cq->napi);
	else
		mlx4_en_arm_cq(priv, cq);
}

/* Rx CQ polling - called by NAPI */
int mlx4_en_poll_rx_cq(struct napi_struct *napi, int budget)
{
	struct mlx4_en_cq *cq = container_of(napi, struct mlx4_en_cq, napi);
	struct net_device *dev = cq->dev;
	struct mlx4_en_priv *priv = netdev_priv(dev);
	int done;

	done = mlx4_en_process_rx_cq(dev, cq, budget);

	/* If we used up all the quota - we're probably not done yet... */
	if (done == budget)
		INC_PERF_COUNTER(priv->pstats.napi_quota);
	else {
		/* Done for now */
		napi_complete(napi);
		mlx4_en_arm_cq(priv, cq);
	}
	return done;
}

/* Calculate the last offset position that accommodates a full fragment
 * (assuming fragment size = stride-align) */
static int mlx4_en_last_alloc_offset(struct mlx4_en_priv *priv, u16 stride, u16 align)
{
	u16 res = MLX4_EN_ALLOC_SIZE % stride;
	u16 offset = MLX4_EN_ALLOC_SIZE - stride - res + align;

	en_dbg(DRV, priv, "Calculated last offset for stride:%d align:%d "
			  "res:%d offset:%d\n", stride, align, res, offset);
	return offset;
}

734
735static int frag_sizes[] = {
736 FRAG_SZ0,
737 FRAG_SZ1,
738 FRAG_SZ2,
739 FRAG_SZ3
740};
741
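/*
 * Lay out the receive scatter list for the current MTU.  The effective
 * frame size adds Ethernet, VLAN and LLC/SNAP overhead (14 + 4 + 8 bytes,
 * so a 1500-byte MTU yields eff_mtu = 1526), which is then split across
 * fragments of at most FRAG_SZ0..FRAG_SZ3 bytes.  Only the first fragment
 * is offset by NET_IP_ALIGN so the IP header lands aligned.
 */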
void mlx4_en_calc_rx_buf(struct net_device *dev)
{
	struct mlx4_en_priv *priv = netdev_priv(dev);
	int eff_mtu = dev->mtu + ETH_HLEN + VLAN_HLEN + ETH_LLC_SNAP_SIZE;
	int buf_size = 0;
	int i = 0;

	while (buf_size < eff_mtu) {
		priv->frag_info[i].frag_size =
			(eff_mtu > buf_size + frag_sizes[i]) ?
				frag_sizes[i] : eff_mtu - buf_size;
		priv->frag_info[i].frag_prefix_size = buf_size;
		if (!i)	{
			priv->frag_info[i].frag_align = NET_IP_ALIGN;
			priv->frag_info[i].frag_stride =
				ALIGN(frag_sizes[i] + NET_IP_ALIGN, SMP_CACHE_BYTES);
		} else {
			priv->frag_info[i].frag_align = 0;
			priv->frag_info[i].frag_stride =
				ALIGN(frag_sizes[i], SMP_CACHE_BYTES);
		}
		priv->frag_info[i].last_offset = mlx4_en_last_alloc_offset(
						priv, priv->frag_info[i].frag_stride,
						priv->frag_info[i].frag_align);
		buf_size += priv->frag_info[i].frag_size;
		i++;
	}

	priv->num_frags = i;
	priv->rx_skb_size = eff_mtu;
	priv->log_rx_info = ROUNDUP_LOG2(i * sizeof(struct skb_frag_struct));

	en_dbg(DRV, priv, "Rx buffer scatter-list (effective-mtu:%d "
			  "num_frags:%d):\n", eff_mtu, priv->num_frags);
	for (i = 0; i < priv->num_frags; i++) {
		en_dbg(DRV, priv, "  frag:%d - size:%d prefix:%d align:%d "
				  "stride:%d last_offset:%d\n", i,
		       priv->frag_info[i].frag_size,
		       priv->frag_info[i].frag_prefix_size,
		       priv->frag_info[i].frag_align,
		       priv->frag_info[i].frag_stride,
		       priv->frag_info[i].last_offset);
	}
}

/* RSS related functions */

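/*
 * Create one RSS destination QP, attach it to its ring's CQ and bring it
 * to the ready state.  FCS stripping is disabled when the firmware allows
 * keeping it, and ring->fcs_del records how many bytes to trim per packet.
 */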
static int mlx4_en_config_rss_qp(struct mlx4_en_priv *priv, int qpn,
				 struct mlx4_en_rx_ring *ring,
				 enum mlx4_qp_state *state,
				 struct mlx4_qp *qp)
{
	struct mlx4_en_dev *mdev = priv->mdev;
	struct mlx4_qp_context *context;
	int err = 0;

	context = kmalloc(sizeof *context, GFP_KERNEL);
	if (!context) {
		en_err(priv, "Failed to allocate qp context\n");
		return -ENOMEM;
	}

	err = mlx4_qp_alloc(mdev->dev, qpn, qp);
	if (err) {
		en_err(priv, "Failed to allocate qp #%x\n", qpn);
		goto out;
	}
	qp->event = mlx4_en_sqp_event;

	memset(context, 0, sizeof *context);
	mlx4_en_fill_qp_context(priv, ring->actual_size, ring->stride, 0, 0,
				qpn, ring->cqn, context);
	context->db_rec_addr = cpu_to_be64(ring->wqres.db.dma);

	/* Cancel FCS removal if FW allows */
	if (mdev->dev->caps.flags & MLX4_DEV_CAP_FLAG_FCS_KEEP) {
		context->param3 |= cpu_to_be32(1 << 29);
		ring->fcs_del = ETH_FCS_LEN;
	} else
		ring->fcs_del = 0;

	err = mlx4_qp_to_ready(mdev->dev, &ring->wqres.mtt, context, qp, state);
	if (err) {
		mlx4_qp_remove(mdev->dev, qp);
		mlx4_qp_free(mdev->dev, qp);
	}
	mlx4_en_update_rx_prod_db(ring);
out:
	kfree(context);
	return err;
}

/* Allocate RX QPs and configure them according to the RSS map */
int mlx4_en_config_rss_steer(struct mlx4_en_priv *priv)
{
	struct mlx4_en_dev *mdev = priv->mdev;
	struct mlx4_en_rss_map *rss_map = &priv->rss_map;
	struct mlx4_qp_context context;
	struct mlx4_en_rss_context *rss_context;
	void *ptr;
	u8 rss_mask = 0x3f;
	int i, qpn;
	int err = 0;
	int good_qps = 0;
	static const u32 rsskey[10] = { 0xD181C62C, 0xF7F4DB5B, 0x1983A2FC,
				0x943E1ADB, 0xD9389E6B, 0xD1039C2C, 0xA74499AD,
				0x593D56D9, 0xF3253C06, 0x2ADC1FFC};

	en_dbg(DRV, priv, "Configuring rss steering\n");
	err = mlx4_qp_reserve_range(mdev->dev, priv->rx_ring_num,
				    priv->rx_ring_num,
				    &rss_map->base_qpn);
	if (err) {
		en_err(priv, "Failed reserving %d qps\n", priv->rx_ring_num);
		return err;
	}

	for (i = 0; i < priv->rx_ring_num; i++) {
		qpn = rss_map->base_qpn + i;
		err = mlx4_en_config_rss_qp(priv, qpn, &priv->rx_ring[i],
					    &rss_map->state[i],
					    &rss_map->qps[i]);
		if (err)
			goto rss_err;

		++good_qps;
	}

	/* Configure RSS indirection qp */
	err = mlx4_qp_alloc(mdev->dev, priv->base_qpn, &rss_map->indir_qp);
	if (err) {
		en_err(priv, "Failed to allocate RSS indirection QP\n");
		goto rss_err;
	}
	rss_map->indir_qp.event = mlx4_en_sqp_event;
	mlx4_en_fill_qp_context(priv, 0, 0, 0, 1, priv->base_qpn,
				priv->rx_ring[0].cqn, &context);

	ptr = ((void *) &context) + 0x3c;
	rss_context = ptr;
	rss_context->base_qpn = cpu_to_be32(ilog2(priv->rx_ring_num) << 24 |
					    (rss_map->base_qpn));
	rss_context->default_qpn = cpu_to_be32(rss_map->base_qpn);
	rss_context->flags = rss_mask;
	rss_context->hash_fn = 1;
	for (i = 0; i < 10; i++)
		rss_context->rss_key[i] = rsskey[i];

	if (priv->mdev->profile.udp_rss)
		rss_context->base_qpn_udp = rss_context->default_qpn;
	err = mlx4_qp_to_ready(mdev->dev, &priv->res.mtt, &context,
			       &rss_map->indir_qp, &rss_map->indir_state);
	if (err)
		goto indir_err;

	return 0;

indir_err:
	mlx4_qp_modify(mdev->dev, NULL, rss_map->indir_state,
		       MLX4_QP_STATE_RST, NULL, 0, 0, &rss_map->indir_qp);
	mlx4_qp_remove(mdev->dev, &rss_map->indir_qp);
	mlx4_qp_free(mdev->dev, &rss_map->indir_qp);
rss_err:
	for (i = 0; i < good_qps; i++) {
		mlx4_qp_modify(mdev->dev, NULL, rss_map->state[i],
			       MLX4_QP_STATE_RST, NULL, 0, 0, &rss_map->qps[i]);
		mlx4_qp_remove(mdev->dev, &rss_map->qps[i]);
		mlx4_qp_free(mdev->dev, &rss_map->qps[i]);
	}
	mlx4_qp_release_range(mdev->dev, rss_map->base_qpn, priv->rx_ring_num);
	return err;
}

void mlx4_en_release_rss_steer(struct mlx4_en_priv *priv)
{
	struct mlx4_en_dev *mdev = priv->mdev;
	struct mlx4_en_rss_map *rss_map = &priv->rss_map;
	int i;

	mlx4_qp_modify(mdev->dev, NULL, rss_map->indir_state,
		       MLX4_QP_STATE_RST, NULL, 0, 0, &rss_map->indir_qp);
	mlx4_qp_remove(mdev->dev, &rss_map->indir_qp);
	mlx4_qp_free(mdev->dev, &rss_map->indir_qp);

	for (i = 0; i < priv->rx_ring_num; i++) {
		mlx4_qp_modify(mdev->dev, NULL, rss_map->state[i],
			       MLX4_QP_STATE_RST, NULL, 0, 0, &rss_map->qps[i]);
		mlx4_qp_remove(mdev->dev, &rss_map->qps[i]);
		mlx4_qp_free(mdev->dev, &rss_map->qps[i]);
	}
	mlx4_qp_release_range(mdev->dev, rss_map->base_qpn, priv->rx_ring_num);
}