/****************************************************************************
 * Driver for Solarflare network controllers and boards
 * Copyright 2005-2006 Fen Systems Ltd.
 * Copyright 2005-2013 Solarflare Communications Inc.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 as published
 * by the Free Software Foundation, incorporated herein by reference.
 */

#include <linux/socket.h>
#include <linux/in.h>
#include <linux/slab.h>
#include <linux/ip.h>
#include <linux/ipv6.h>
#include <linux/tcp.h>
#include <linux/udp.h>
#include <linux/prefetch.h>
#include <linux/moduleparam.h>
#include <linux/iommu.h>
#include <net/ip.h>
#include <net/checksum.h>
#include "net_driver.h"
#include "efx.h"
#include "filter.h"
#include "nic.h"
#include "selftest.h"
#include "workarounds.h"

/* Preferred number of descriptors to fill at once */
#define EFX_RX_PREFERRED_BATCH 8U

/* Number of RX buffers to recycle pages for.  When creating the RX page recycle
 * ring, this number is divided by the number of buffers per page to calculate
 * the number of pages to store in the RX page recycle ring.
 */
#define EFX_RECYCLE_RING_SIZE_IOMMU 4096
#define EFX_RECYCLE_RING_SIZE_NOIOMMU (2 * EFX_RX_PREFERRED_BATCH)

/* Size of buffer allocated for skb header area. */
#define EFX_SKB_HEADERS  128u

/* This is the percentage fill level below which new RX descriptors
 * will be added to the RX descriptor ring.
 */
static unsigned int rx_refill_threshold;

/* Each packet can consume up to ceil(max_frame_len / buffer_size) buffers */
#define EFX_RX_MAX_FRAGS DIV_ROUND_UP(EFX_MAX_FRAME_LEN(EFX_MAX_MTU), \
				      EFX_RX_USR_BUF_SIZE)

/*
 * RX maximum head room required.
 *
 * This must be at least 1 to prevent overflow, plus one packet-worth
 * to allow pipelined receives.
 */
#define EFX_RXD_HEAD_ROOM (1 + EFX_RX_MAX_FRAGS)

static inline u8 *efx_rx_buf_va(struct efx_rx_buffer *buf)
{
	return page_address(buf->page) + buf->page_offset;
}

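/* Read the 32-bit little-endian RX flow hash from the packet prefix at
 * rx_packet_hash_offset.  On architectures without efficient unaligned
 * loads the value is assembled byte by byte instead.
 */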
static inline u32 efx_rx_buf_hash(struct efx_nic *efx, const u8 *eh)
{
#if defined(CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS)
	return __le32_to_cpup((const __le32 *)(eh + efx->rx_packet_hash_offset));
#else
	const u8 *data = eh + efx->rx_packet_hash_offset;
	return (u32)data[0]	  |
	       (u32)data[1] << 8  |
	       (u32)data[2] << 16 |
	       (u32)data[3] << 24;
#endif
}

static inline struct efx_rx_buffer *
efx_rx_buf_next(struct efx_rx_queue *rx_queue, struct efx_rx_buffer *rx_buf)
{
	if (unlikely(rx_buf == efx_rx_buffer(rx_queue, rx_queue->ptr_mask)))
		return efx_rx_buffer(rx_queue, 0);
	else
		return rx_buf + 1;
}

static inline void efx_sync_rx_buffer(struct efx_nic *efx,
				      struct efx_rx_buffer *rx_buf,
				      unsigned int len)
{
	dma_sync_single_for_cpu(&efx->pci_dev->dev, rx_buf->dma_addr, len,
				DMA_FROM_DEVICE);
}

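/* Work out how the payload area of each page is divided into RX buffers.
 * As a rough illustration (assuming 4KiB pages, order-0 allocation and a
 * scatter DMA length of around 1.8KiB): two buffers fit per page after the
 * efx_rx_page_state header, rx_buffer_truesize then comes to 2KiB, and the
 * preferred batch of 8 buffers needs 4 pages.
 */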
void efx_rx_config_page_split(struct efx_nic *efx)
{
	efx->rx_page_buf_step = ALIGN(efx->rx_dma_len + efx->rx_ip_align,
				      EFX_RX_BUF_ALIGNMENT);
	efx->rx_bufs_per_page = efx->rx_buffer_order ? 1 :
		((PAGE_SIZE - sizeof(struct efx_rx_page_state)) /
		 efx->rx_page_buf_step);
	efx->rx_buffer_truesize = (PAGE_SIZE << efx->rx_buffer_order) /
		efx->rx_bufs_per_page;
	efx->rx_pages_per_batch = DIV_ROUND_UP(EFX_RX_PREFERRED_BATCH,
					       efx->rx_bufs_per_page);
}

/* Check the RX page recycle ring for a page that can be reused. */
static struct page *efx_reuse_page(struct efx_rx_queue *rx_queue)
{
	struct efx_nic *efx = rx_queue->efx;
	struct page *page;
	struct efx_rx_page_state *state;
	unsigned index;

	index = rx_queue->page_remove & rx_queue->page_ptr_mask;
	page = rx_queue->page_ring[index];
	if (page == NULL)
		return NULL;

	rx_queue->page_ring[index] = NULL;
	/* page_remove cannot exceed page_add. */
	if (rx_queue->page_remove != rx_queue->page_add)
		++rx_queue->page_remove;

	/* If page_count is 1 then we hold the only reference to this page. */
	if (page_count(page) == 1) {
		++rx_queue->page_recycle_count;
		return page;
	} else {
		state = page_address(page);
		dma_unmap_page(&efx->pci_dev->dev, state->dma_addr,
			       PAGE_SIZE << efx->rx_buffer_order,
			       DMA_FROM_DEVICE);
		put_page(page);
		++rx_queue->page_recycle_failed;
	}

	return NULL;
}

/**
 * efx_init_rx_buffers - create a batch of page-based RX buffers
 * @rx_queue:		Efx RX queue
 * @atomic:		Perform atomic allocations (for use in NAPI context)
 *
 * This allocates a batch of pages, maps them for DMA, and populates
 * struct efx_rx_buffer for each one.  Returns a negative error code or
 * 0 on success.  If a single page can be used for multiple buffers,
 * then the page will either be inserted fully, or not at all.
 */
static int efx_init_rx_buffers(struct efx_rx_queue *rx_queue, bool atomic)
{
	struct efx_nic *efx = rx_queue->efx;
	struct efx_rx_buffer *rx_buf;
	struct page *page;
	unsigned int page_offset;
	struct efx_rx_page_state *state;
	dma_addr_t dma_addr;
	unsigned index, count;

	count = 0;
	do {
		page = efx_reuse_page(rx_queue);
		if (page == NULL) {
			page = alloc_pages(__GFP_COLD | __GFP_COMP |
					   (atomic ? GFP_ATOMIC : GFP_KERNEL),
					   efx->rx_buffer_order);
			if (unlikely(page == NULL))
				return -ENOMEM;
			dma_addr =
				dma_map_page(&efx->pci_dev->dev, page, 0,
					     PAGE_SIZE << efx->rx_buffer_order,
					     DMA_FROM_DEVICE);
			if (unlikely(dma_mapping_error(&efx->pci_dev->dev,
						       dma_addr))) {
				__free_pages(page, efx->rx_buffer_order);
				return -EIO;
			}
			state = page_address(page);
			state->dma_addr = dma_addr;
		} else {
			state = page_address(page);
			dma_addr = state->dma_addr;
		}

		dma_addr += sizeof(struct efx_rx_page_state);
		page_offset = sizeof(struct efx_rx_page_state);

		do {
			index = rx_queue->added_count & rx_queue->ptr_mask;
			rx_buf = efx_rx_buffer(rx_queue, index);
			rx_buf->dma_addr = dma_addr + efx->rx_ip_align;
			rx_buf->page = page;
			rx_buf->page_offset = page_offset + efx->rx_ip_align;
			rx_buf->len = efx->rx_dma_len;
			rx_buf->flags = 0;
			++rx_queue->added_count;
			get_page(page);
			dma_addr += efx->rx_page_buf_step;
			page_offset += efx->rx_page_buf_step;
		} while (page_offset + efx->rx_page_buf_step <= PAGE_SIZE);

		rx_buf->flags = EFX_RX_BUF_LAST_IN_PAGE;
	} while (++count < efx->rx_pages_per_batch);

	return 0;
}

/* Unmap a DMA-mapped page.  This function is only called for the final RX
 * buffer in a page.
 */
static void efx_unmap_rx_buffer(struct efx_nic *efx,
				struct efx_rx_buffer *rx_buf)
{
	struct page *page = rx_buf->page;

	if (page) {
		struct efx_rx_page_state *state = page_address(page);
		dma_unmap_page(&efx->pci_dev->dev,
			       state->dma_addr,
			       PAGE_SIZE << efx->rx_buffer_order,
			       DMA_FROM_DEVICE);
	}
}

static void efx_free_rx_buffer(struct efx_rx_buffer *rx_buf)
{
	if (rx_buf->page) {
		put_page(rx_buf->page);
		rx_buf->page = NULL;
	}
}

/* Attempt to recycle the page if there is an RX recycle ring; the page can
 * only be added if this is the final RX buffer, to prevent pages being used in
 * the descriptor ring and appearing in the recycle ring simultaneously.
 */
static void efx_recycle_rx_page(struct efx_channel *channel,
				struct efx_rx_buffer *rx_buf)
{
	struct page *page = rx_buf->page;
	struct efx_rx_queue *rx_queue = efx_channel_get_rx_queue(channel);
	struct efx_nic *efx = rx_queue->efx;
	unsigned index;

	/* Only recycle the page after processing the final buffer. */
	if (!(rx_buf->flags & EFX_RX_BUF_LAST_IN_PAGE))
		return;

	index = rx_queue->page_add & rx_queue->page_ptr_mask;
	if (rx_queue->page_ring[index] == NULL) {
		unsigned read_index = rx_queue->page_remove &
			rx_queue->page_ptr_mask;

		/* The next slot in the recycle ring is available, but
		 * increment page_remove if the read pointer currently
		 * points here.
		 */
		if (read_index == index)
			++rx_queue->page_remove;
		rx_queue->page_ring[index] = page;
		++rx_queue->page_add;
		return;
	}
	++rx_queue->page_recycle_full;
	efx_unmap_rx_buffer(efx, rx_buf);
	put_page(rx_buf->page);
}

static void efx_fini_rx_buffer(struct efx_rx_queue *rx_queue,
			       struct efx_rx_buffer *rx_buf)
{
	/* Release the page reference we hold for the buffer. */
	if (rx_buf->page)
		put_page(rx_buf->page);

	/* If this is the last buffer in a page, unmap and free it. */
	if (rx_buf->flags & EFX_RX_BUF_LAST_IN_PAGE) {
		efx_unmap_rx_buffer(rx_queue->efx, rx_buf);
		efx_free_rx_buffer(rx_buf);
	}
	rx_buf->page = NULL;
}

/* Recycle the pages that are used by buffers that have just been received. */
static void efx_recycle_rx_pages(struct efx_channel *channel,
				 struct efx_rx_buffer *rx_buf,
				 unsigned int n_frags)
{
	struct efx_rx_queue *rx_queue = efx_channel_get_rx_queue(channel);

	do {
		efx_recycle_rx_page(channel, rx_buf);
		rx_buf = efx_rx_buf_next(rx_queue, rx_buf);
	} while (--n_frags);
}

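/* Discard the buffers holding a received packet: return their pages to the
 * recycle ring where possible, then drop the remaining page references.
 */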
static void efx_discard_rx_packet(struct efx_channel *channel,
				  struct efx_rx_buffer *rx_buf,
				  unsigned int n_frags)
{
	struct efx_rx_queue *rx_queue = efx_channel_get_rx_queue(channel);

	efx_recycle_rx_pages(channel, rx_buf, n_frags);

	do {
		efx_free_rx_buffer(rx_buf);
		rx_buf = efx_rx_buf_next(rx_queue, rx_buf);
	} while (--n_frags);
}

/**
 * efx_fast_push_rx_descriptors - push new RX descriptors quickly
 * @rx_queue:		RX descriptor queue
 * @atomic:		Use atomic memory allocations for the refill
 *
 * This will aim to fill the RX descriptor queue up to
 * @rx_queue->max_fill.  If there is insufficient memory to do so,
 * a slow fill will be scheduled.
 *
 * The caller must provide serialisation (none is used here).  In practice,
 * this means this function must run from the NAPI handler, or be called
 * when NAPI is disabled.
 */
void efx_fast_push_rx_descriptors(struct efx_rx_queue *rx_queue, bool atomic)
{
	struct efx_nic *efx = rx_queue->efx;
	unsigned int fill_level, batch_size;
	int space, rc = 0;

	if (!rx_queue->refill_enabled)
		return;

	/* Calculate current fill level, and exit if we don't need to fill */
	fill_level = (rx_queue->added_count - rx_queue->removed_count);
	EFX_BUG_ON_PARANOID(fill_level > rx_queue->efx->rxq_entries);
	if (fill_level >= rx_queue->fast_fill_trigger)
		goto out;

	/* Record minimum fill level */
	if (unlikely(fill_level < rx_queue->min_fill)) {
		if (fill_level)
			rx_queue->min_fill = fill_level;
	}

	batch_size = efx->rx_pages_per_batch * efx->rx_bufs_per_page;
	space = rx_queue->max_fill - fill_level;
	EFX_BUG_ON_PARANOID(space < batch_size);

	netif_vdbg(rx_queue->efx, rx_status, rx_queue->efx->net_dev,
		   "RX queue %d fast-filling descriptor ring from"
		   " level %d to level %d\n",
		   efx_rx_queue_index(rx_queue), fill_level,
		   rx_queue->max_fill);

	do {
		rc = efx_init_rx_buffers(rx_queue, atomic);
		if (unlikely(rc)) {
			/* Ensure that we don't leave the rx queue empty */
			if (rx_queue->added_count == rx_queue->removed_count)
				efx_schedule_slow_fill(rx_queue);
			goto out;
		}
	} while ((space -= batch_size) >= batch_size);

	netif_vdbg(rx_queue->efx, rx_status, rx_queue->efx->net_dev,
		   "RX queue %d fast-filled descriptor ring "
		   "to level %d\n", efx_rx_queue_index(rx_queue),
		   rx_queue->added_count - rx_queue->removed_count);

 out:
	if (rx_queue->notified_count != rx_queue->added_count)
		efx_nic_notify_rx_desc(rx_queue);
}

void efx_rx_slow_fill(unsigned long context)
{
	struct efx_rx_queue *rx_queue = (struct efx_rx_queue *)context;

	/* Post an event to cause NAPI to run and refill the queue */
	efx_nic_generate_fill_event(rx_queue);
	++rx_queue->slow_fill_count;
}

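/* Check that the completed length fits within the buffer.  If it does not,
 * mark the packet for discard; a seriously overlength receive on hardware
 * affected by workaround 8071 additionally triggers an RX recovery reset.
 */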
static void efx_rx_packet__check_len(struct efx_rx_queue *rx_queue,
				     struct efx_rx_buffer *rx_buf,
				     int len)
{
	struct efx_nic *efx = rx_queue->efx;
	unsigned max_len = rx_buf->len - efx->type->rx_buffer_padding;

	if (likely(len <= max_len))
		return;

	/* The packet must be discarded, but this is only a fatal error
	 * if the caller indicated it was
	 */
	rx_buf->flags |= EFX_RX_PKT_DISCARD;

	if ((len > rx_buf->len) && EFX_WORKAROUND_8071(efx)) {
		if (net_ratelimit())
			netif_err(efx, rx_err, efx->net_dev,
				  " RX queue %d seriously overlength "
				  "RX event (0x%x > 0x%x+0x%x). Leaking\n",
				  efx_rx_queue_index(rx_queue), len, max_len,
				  efx->type->rx_buffer_padding);
		efx_schedule_reset(efx, RESET_TYPE_RX_RECOVERY);
	} else {
		if (net_ratelimit())
			netif_err(efx, rx_err, efx->net_dev,
				  " RX queue %d overlength RX event "
				  "(0x%x > 0x%x)\n",
				  efx_rx_queue_index(rx_queue), len, max_len);
	}

	efx_rx_queue_channel(rx_queue)->n_rx_overlength++;
}

/* Pass a received packet up through GRO.  GRO can handle pages
 * regardless of checksum state and skbs with a good checksum.
 */
static void
efx_rx_packet_gro(struct efx_channel *channel, struct efx_rx_buffer *rx_buf,
		  unsigned int n_frags, u8 *eh)
{
	struct napi_struct *napi = &channel->napi_str;
	gro_result_t gro_result;
	struct efx_nic *efx = channel->efx;
	struct sk_buff *skb;

	skb = napi_get_frags(napi);
	if (unlikely(!skb)) {
		while (n_frags--) {
			put_page(rx_buf->page);
			rx_buf->page = NULL;
			rx_buf = efx_rx_buf_next(&channel->rx_queue, rx_buf);
		}
		return;
	}

	if (efx->net_dev->features & NETIF_F_RXHASH)
		skb_set_hash(skb, efx_rx_buf_hash(efx, eh),
			     PKT_HASH_TYPE_L3);
	skb->ip_summed = ((rx_buf->flags & EFX_RX_PKT_CSUMMED) ?
			  CHECKSUM_UNNECESSARY : CHECKSUM_NONE);

	for (;;) {
		skb_fill_page_desc(skb, skb_shinfo(skb)->nr_frags,
				   rx_buf->page, rx_buf->page_offset,
				   rx_buf->len);
		rx_buf->page = NULL;
		skb->len += rx_buf->len;
		if (skb_shinfo(skb)->nr_frags == n_frags)
			break;

		rx_buf = efx_rx_buf_next(&channel->rx_queue, rx_buf);
	}

	skb->data_len = skb->len;
	skb->truesize += n_frags * efx->rx_buffer_truesize;

	skb_record_rx_queue(skb, channel->rx_queue.core_index);

	gro_result = napi_gro_frags(napi);
	if (gro_result != GRO_DROP)
		channel->irq_mod_score += 2;
}

/* Allocate and construct an SKB around page fragments */
static struct sk_buff *efx_rx_mk_skb(struct efx_channel *channel,
				     struct efx_rx_buffer *rx_buf,
				     unsigned int n_frags,
				     u8 *eh, int hdr_len)
{
	struct efx_nic *efx = channel->efx;
	struct sk_buff *skb;

	/* Allocate an SKB to store the headers */
	skb = netdev_alloc_skb(efx->net_dev,
			       efx->rx_ip_align + efx->rx_prefix_size +
			       hdr_len);
	if (unlikely(skb == NULL))
		return NULL;

	EFX_BUG_ON_PARANOID(rx_buf->len < hdr_len);

	memcpy(skb->data + efx->rx_ip_align, eh - efx->rx_prefix_size,
	       efx->rx_prefix_size + hdr_len);
	skb_reserve(skb, efx->rx_ip_align + efx->rx_prefix_size);
	__skb_put(skb, hdr_len);

	/* Append the remaining page(s) onto the frag list */
	if (rx_buf->len > hdr_len) {
		rx_buf->page_offset += hdr_len;
		rx_buf->len -= hdr_len;

		for (;;) {
			skb_fill_page_desc(skb, skb_shinfo(skb)->nr_frags,
					   rx_buf->page, rx_buf->page_offset,
					   rx_buf->len);
			rx_buf->page = NULL;
			skb->len += rx_buf->len;
			skb->data_len += rx_buf->len;
			if (skb_shinfo(skb)->nr_frags == n_frags)
				break;

			rx_buf = efx_rx_buf_next(&channel->rx_queue, rx_buf);
		}
	} else {
		__free_pages(rx_buf->page, efx->rx_buffer_order);
		rx_buf->page = NULL;
		n_frags = 0;
	}

	skb->truesize += n_frags * efx->rx_buffer_truesize;

	/* Move past the ethernet header */
	skb->protocol = eth_type_trans(skb, efx->net_dev);

	return skb;
}

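/* Handle a received packet.  First half: validates the completion, syncs
 * the DMA mappings and recycles the pages, but does not touch the payload;
 * that is deferred to __efx_rx_packet() so that receives can be pipelined.
 */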
void efx_rx_packet(struct efx_rx_queue *rx_queue, unsigned int index,
		   unsigned int n_frags, unsigned int len, u16 flags)
{
	struct efx_nic *efx = rx_queue->efx;
	struct efx_channel *channel = efx_rx_queue_channel(rx_queue);
	struct efx_rx_buffer *rx_buf;

	rx_buf = efx_rx_buffer(rx_queue, index);
	rx_buf->flags |= flags;

	/* Validate the number of fragments and completed length */
	if (n_frags == 1) {
		if (!(flags & EFX_RX_PKT_PREFIX_LEN))
			efx_rx_packet__check_len(rx_queue, rx_buf, len);
	} else if (unlikely(n_frags > EFX_RX_MAX_FRAGS) ||
		   unlikely(len <= (n_frags - 1) * efx->rx_dma_len) ||
		   unlikely(len > n_frags * efx->rx_dma_len) ||
		   unlikely(!efx->rx_scatter)) {
		/* If this isn't an explicit discard request, either
		 * the hardware or the driver is broken.
		 */
		WARN_ON(!(len == 0 && rx_buf->flags & EFX_RX_PKT_DISCARD));
		rx_buf->flags |= EFX_RX_PKT_DISCARD;
	}

	netif_vdbg(efx, rx_status, efx->net_dev,
		   "RX queue %d received ids %x-%x len %d %s%s\n",
		   efx_rx_queue_index(rx_queue), index,
		   (index + n_frags - 1) & rx_queue->ptr_mask, len,
		   (rx_buf->flags & EFX_RX_PKT_CSUMMED) ? " [SUMMED]" : "",
		   (rx_buf->flags & EFX_RX_PKT_DISCARD) ? " [DISCARD]" : "");

	/* Discard packet, if instructed to do so.  Process the
	 * previous receive first.
	 */
	if (unlikely(rx_buf->flags & EFX_RX_PKT_DISCARD)) {
		efx_rx_flush_packet(channel);
		efx_discard_rx_packet(channel, rx_buf, n_frags);
		return;
	}

	if (n_frags == 1 && !(flags & EFX_RX_PKT_PREFIX_LEN))
		rx_buf->len = len;

	/* Release and/or sync the DMA mapping - assumes all RX buffers
	 * consumed in-order per RX queue.
	 */
	efx_sync_rx_buffer(efx, rx_buf, rx_buf->len);

	/* Prefetch nice and early so data will (hopefully) be in cache by
	 * the time we look at it.
	 */
	prefetch(efx_rx_buf_va(rx_buf));

	rx_buf->page_offset += efx->rx_prefix_size;
	rx_buf->len -= efx->rx_prefix_size;

	if (n_frags > 1) {
		/* Release/sync DMA mapping for additional fragments.
		 * Fix length for last fragment.
		 */
		unsigned int tail_frags = n_frags - 1;

		for (;;) {
			rx_buf = efx_rx_buf_next(rx_queue, rx_buf);
			if (--tail_frags == 0)
				break;
			efx_sync_rx_buffer(efx, rx_buf, efx->rx_dma_len);
		}
		rx_buf->len = len - (n_frags - 1) * efx->rx_dma_len;
		efx_sync_rx_buffer(efx, rx_buf, rx_buf->len);
	}

	/* All fragments have been DMA-synced, so recycle pages. */
	rx_buf = efx_rx_buffer(rx_queue, index);
	efx_recycle_rx_pages(channel, rx_buf, n_frags);

	/* Pipeline receives so that we give time for packet headers to be
	 * prefetched into cache.
	 */
	efx_rx_flush_packet(channel);
	channel->rx_pkt_n_frags = n_frags;
	channel->rx_pkt_index = index;
}

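/* Build an skb around the received fragments and pass it up the stack, or
 * hand it to the channel's special receive_skb handler if one is installed.
 */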
static void efx_rx_deliver(struct efx_channel *channel, u8 *eh,
			   struct efx_rx_buffer *rx_buf,
			   unsigned int n_frags)
{
	struct sk_buff *skb;
	u16 hdr_len = min_t(u16, rx_buf->len, EFX_SKB_HEADERS);

	skb = efx_rx_mk_skb(channel, rx_buf, n_frags, eh, hdr_len);
	if (unlikely(skb == NULL)) {
		efx_free_rx_buffer(rx_buf);
		return;
	}
	skb_record_rx_queue(skb, channel->rx_queue.core_index);

	/* Set the SKB flags */
	skb_checksum_none_assert(skb);
	if (likely(rx_buf->flags & EFX_RX_PKT_CSUMMED))
		skb->ip_summed = CHECKSUM_UNNECESSARY;

	efx_rx_skb_attach_timestamp(channel, skb);

	if (channel->type->receive_skb)
		if (channel->type->receive_skb(channel, skb))
			return;

	/* Pass the packet up */
	netif_receive_skb(skb);
}

/* Handle a received packet.  Second half: Touches packet payload. */
void __efx_rx_packet(struct efx_channel *channel)
{
	struct efx_nic *efx = channel->efx;
	struct efx_rx_buffer *rx_buf =
		efx_rx_buffer(&channel->rx_queue, channel->rx_pkt_index);
	u8 *eh = efx_rx_buf_va(rx_buf);

	/* Read length from the prefix if necessary.  This already
	 * excludes the length of the prefix itself.
	 */
	if (rx_buf->flags & EFX_RX_PKT_PREFIX_LEN)
		rx_buf->len = le16_to_cpup((__le16 *)
					   (eh + efx->rx_packet_len_offset));

	/* If we're in loopback test, then pass the packet directly to the
	 * loopback layer, and free the rx_buf here
	 */
	if (unlikely(efx->loopback_selftest)) {
		efx_loopback_rx_packet(efx, eh, rx_buf->len);
		efx_free_rx_buffer(rx_buf);
		goto out;
	}

	if (unlikely(!(efx->net_dev->features & NETIF_F_RXCSUM)))
		rx_buf->flags &= ~EFX_RX_PKT_CSUMMED;

	if ((rx_buf->flags & EFX_RX_PKT_TCP) && !channel->type->receive_skb)
		efx_rx_packet_gro(channel, rx_buf, channel->rx_pkt_n_frags, eh);
	else
		efx_rx_deliver(channel, eh, rx_buf, channel->rx_pkt_n_frags);
out:
	channel->rx_pkt_n_frags = 0;
}

int efx_probe_rx_queue(struct efx_rx_queue *rx_queue)
{
	struct efx_nic *efx = rx_queue->efx;
	unsigned int entries;
	int rc;

	/* Create the smallest power-of-two aligned ring */
	entries = max(roundup_pow_of_two(efx->rxq_entries), EFX_MIN_DMAQ_SIZE);
	EFX_BUG_ON_PARANOID(entries > EFX_MAX_DMAQ_SIZE);
	rx_queue->ptr_mask = entries - 1;

	netif_dbg(efx, probe, efx->net_dev,
		  "creating RX queue %d size %#x mask %#x\n",
		  efx_rx_queue_index(rx_queue), efx->rxq_entries,
		  rx_queue->ptr_mask);

	/* Allocate RX buffers */
	rx_queue->buffer = kcalloc(entries, sizeof(*rx_queue->buffer),
				   GFP_KERNEL);
	if (!rx_queue->buffer)
		return -ENOMEM;

	rc = efx_nic_probe_rx(rx_queue);
	if (rc) {
		kfree(rx_queue->buffer);
		rx_queue->buffer = NULL;
	}

	return rc;
}

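/* Size and allocate the page recycle ring.  A much larger ring is used when
 * an IOMMU is present, since page recycling then avoids repeated DMA
 * map/unmap operations, which are comparatively expensive.  The buffer count
 * is converted to pages and rounded up to a power of two.
 */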
static void efx_init_rx_recycle_ring(struct efx_nic *efx,
				     struct efx_rx_queue *rx_queue)
{
	unsigned int bufs_in_recycle_ring, page_ring_size;

	/* Set the RX recycle ring size */
#ifdef CONFIG_PPC64
	bufs_in_recycle_ring = EFX_RECYCLE_RING_SIZE_IOMMU;
#else
	if (iommu_present(&pci_bus_type))
		bufs_in_recycle_ring = EFX_RECYCLE_RING_SIZE_IOMMU;
	else
		bufs_in_recycle_ring = EFX_RECYCLE_RING_SIZE_NOIOMMU;
#endif /* CONFIG_PPC64 */

	page_ring_size = roundup_pow_of_two(bufs_in_recycle_ring /
					    efx->rx_bufs_per_page);
	rx_queue->page_ring = kcalloc(page_ring_size,
				      sizeof(*rx_queue->page_ring), GFP_KERNEL);
	rx_queue->page_ptr_mask = page_ring_size - 1;
}

void efx_init_rx_queue(struct efx_rx_queue *rx_queue)
{
	struct efx_nic *efx = rx_queue->efx;
	unsigned int max_fill, trigger, max_trigger;

	netif_dbg(rx_queue->efx, drv, rx_queue->efx->net_dev,
		  "initialising RX queue %d\n", efx_rx_queue_index(rx_queue));

	/* Initialise ptr fields */
	rx_queue->added_count = 0;
	rx_queue->notified_count = 0;
	rx_queue->removed_count = 0;
	rx_queue->min_fill = -1U;
	efx_init_rx_recycle_ring(efx, rx_queue);

	rx_queue->page_remove = 0;
	rx_queue->page_add = rx_queue->page_ptr_mask + 1;
	rx_queue->page_recycle_count = 0;
	rx_queue->page_recycle_failed = 0;
	rx_queue->page_recycle_full = 0;

	/* Initialise limit fields */
	max_fill = efx->rxq_entries - EFX_RXD_HEAD_ROOM;
	max_trigger =
		max_fill - efx->rx_pages_per_batch * efx->rx_bufs_per_page;
	if (rx_refill_threshold != 0) {
		trigger = max_fill * min(rx_refill_threshold, 100U) / 100U;
		if (trigger > max_trigger)
			trigger = max_trigger;
	} else {
		trigger = max_trigger;
	}

	rx_queue->max_fill = max_fill;
	rx_queue->fast_fill_trigger = trigger;
	rx_queue->refill_enabled = true;

	/* Set up RX descriptor ring */
	efx_nic_init_rx(rx_queue);
}

void efx_fini_rx_queue(struct efx_rx_queue *rx_queue)
{
	int i;
	struct efx_nic *efx = rx_queue->efx;
	struct efx_rx_buffer *rx_buf;

	netif_dbg(rx_queue->efx, drv, rx_queue->efx->net_dev,
		  "shutting down RX queue %d\n", efx_rx_queue_index(rx_queue));

	del_timer_sync(&rx_queue->slow_fill);

	/* Release RX buffers from the current read ptr to the write ptr */
	if (rx_queue->buffer) {
		for (i = rx_queue->removed_count; i < rx_queue->added_count;
		     i++) {
			unsigned index = i & rx_queue->ptr_mask;
			rx_buf = efx_rx_buffer(rx_queue, index);
			efx_fini_rx_buffer(rx_queue, rx_buf);
		}
	}

	/* Unmap and release the pages in the recycle ring.  Remove the ring. */
	for (i = 0; i <= rx_queue->page_ptr_mask; i++) {
		struct page *page = rx_queue->page_ring[i];
		struct efx_rx_page_state *state;

		if (page == NULL)
			continue;

		state = page_address(page);
		dma_unmap_page(&efx->pci_dev->dev, state->dma_addr,
			       PAGE_SIZE << efx->rx_buffer_order,
			       DMA_FROM_DEVICE);
		put_page(page);
	}
	kfree(rx_queue->page_ring);
	rx_queue->page_ring = NULL;
}

void efx_remove_rx_queue(struct efx_rx_queue *rx_queue)
{
	netif_dbg(rx_queue->efx, drv, rx_queue->efx->net_dev,
		  "destroying RX queue %d\n", efx_rx_queue_index(rx_queue));

	efx_nic_remove_rx(rx_queue);

	kfree(rx_queue->buffer);
	rx_queue->buffer = NULL;
}

module_param(rx_refill_threshold, uint, 0444);
MODULE_PARM_DESC(rx_refill_threshold,
		 "RX descriptor ring refill threshold (%)");

#ifdef CONFIG_RFS_ACCEL

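/* Accelerated RFS filter insertion, called via the ndo_rx_flow_steer hook.
 * Builds an IP 5-tuple filter spec from @skb and asks the NIC-type code to
 * insert it so that the flow is steered to RX queue @rxq_index.  Returns the
 * filter index used (also recorded against @flow_id for later expiry) or a
 * negative error code.
 */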
824int efx_filter_rfs(struct net_device *net_dev, const struct sk_buff *skb,
825 u16 rxq_index, u32 flow_id)
826{
827 struct efx_nic *efx = netdev_priv(net_dev);
828 struct efx_channel *channel;
829 struct efx_filter_spec spec;
Ben Hutchingsadd72472012-11-08 01:46:53 +0000830 const __be16 *ports;
Ben Hutchingsc47b2d92013-09-03 17:22:23 +0100831 __be16 ether_type;
Ben Hutchingsadd72472012-11-08 01:46:53 +0000832 int nhoff;
833 int rc;
834
Ben Hutchingsc47b2d92013-09-03 17:22:23 +0100835 /* The core RPS/RFS code has already parsed and validated
836 * VLAN, IP and transport headers. We assume they are in the
837 * header area.
838 */
Ben Hutchingsadd72472012-11-08 01:46:53 +0000839
840 if (skb->protocol == htons(ETH_P_8021Q)) {
Ben Hutchingsc47b2d92013-09-03 17:22:23 +0100841 const struct vlan_hdr *vh =
842 (const struct vlan_hdr *)skb->data;
Ben Hutchingsadd72472012-11-08 01:46:53 +0000843
Ben Hutchingsc47b2d92013-09-03 17:22:23 +0100844 /* We can't filter on the IP 5-tuple and the vlan
845 * together, so just strip the vlan header and filter
846 * on the IP part.
Ben Hutchingsadd72472012-11-08 01:46:53 +0000847 */
Ben Hutchingsc47b2d92013-09-03 17:22:23 +0100848 EFX_BUG_ON_PARANOID(skb_headlen(skb) < sizeof(*vh));
849 ether_type = vh->h_vlan_encapsulated_proto;
850 nhoff = sizeof(struct vlan_hdr);
851 } else {
852 ether_type = skb->protocol;
853 nhoff = 0;
Ben Hutchingsadd72472012-11-08 01:46:53 +0000854 }
855
Ben Hutchingsc47b2d92013-09-03 17:22:23 +0100856 if (ether_type != htons(ETH_P_IP) && ether_type != htons(ETH_P_IPV6))
Ben Hutchingsadd72472012-11-08 01:46:53 +0000857 return -EPROTONOSUPPORT;
Ben Hutchingsadd72472012-11-08 01:46:53 +0000858
859 efx_filter_init_rx(&spec, EFX_FILTER_PRI_HINT,
860 efx->rx_scatter ? EFX_FILTER_FLAG_RX_SCATTER : 0,
861 rxq_index);
Ben Hutchingsc47b2d92013-09-03 17:22:23 +0100862 spec.match_flags =
863 EFX_FILTER_MATCH_ETHER_TYPE | EFX_FILTER_MATCH_IP_PROTO |
864 EFX_FILTER_MATCH_LOC_HOST | EFX_FILTER_MATCH_LOC_PORT |
865 EFX_FILTER_MATCH_REM_HOST | EFX_FILTER_MATCH_REM_PORT;
866 spec.ether_type = ether_type;
867
868 if (ether_type == htons(ETH_P_IP)) {
869 const struct iphdr *ip =
870 (const struct iphdr *)(skb->data + nhoff);
871
872 EFX_BUG_ON_PARANOID(skb_headlen(skb) < nhoff + sizeof(*ip));
873 if (ip_is_fragment(ip))
874 return -EPROTONOSUPPORT;
875 spec.ip_proto = ip->protocol;
876 spec.rem_host[0] = ip->saddr;
877 spec.loc_host[0] = ip->daddr;
878 EFX_BUG_ON_PARANOID(skb_headlen(skb) < nhoff + 4 * ip->ihl + 4);
879 ports = (const __be16 *)(skb->data + nhoff + 4 * ip->ihl);
880 } else {
881 const struct ipv6hdr *ip6 =
882 (const struct ipv6hdr *)(skb->data + nhoff);
883
884 EFX_BUG_ON_PARANOID(skb_headlen(skb) <
885 nhoff + sizeof(*ip6) + 4);
886 spec.ip_proto = ip6->nexthdr;
887 memcpy(spec.rem_host, &ip6->saddr, sizeof(ip6->saddr));
888 memcpy(spec.loc_host, &ip6->daddr, sizeof(ip6->daddr));
889 ports = (const __be16 *)(ip6 + 1);
890 }
891
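	/* For both TCP and UDP the source and destination ports are the
	 * first two 16-bit fields of the transport header.
	 */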
	spec.rem_port = ports[0];
	spec.loc_port = ports[1];

	rc = efx->type->filter_rfs_insert(efx, &spec);
	if (rc < 0)
		return rc;

	/* Remember this so we can check whether to expire the filter later */
	efx->rps_flow_id[rc] = flow_id;
	channel = efx_get_channel(efx, skb_get_rx_queue(skb));
	++channel->rfs_filters_added;

	if (ether_type == htons(ETH_P_IP))
		netif_info(efx, rx_status, efx->net_dev,
			   "steering %s %pI4:%u:%pI4:%u to queue %u [flow %u filter %d]\n",
			   (spec.ip_proto == IPPROTO_TCP) ? "TCP" : "UDP",
			   spec.rem_host, ntohs(ports[0]), spec.loc_host,
			   ntohs(ports[1]), rxq_index, flow_id, rc);
	else
		netif_info(efx, rx_status, efx->net_dev,
			   "steering %s [%pI6]:%u:[%pI6]:%u to queue %u [flow %u filter %d]\n",
			   (spec.ip_proto == IPPROTO_TCP) ? "TCP" : "UDP",
			   spec.rem_host, ntohs(ports[0]), spec.loc_host,
			   ntohs(ports[1]), rxq_index, flow_id, rc);

	return rc;
}

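/* Scan up to @quota RFS filter entries, starting from the last expiry
 * position, and ask the NIC-type code to expire any that are now idle.
 * Returns false without scanning if the filter lock is contended.
 */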
bool __efx_filter_rfs_expire(struct efx_nic *efx, unsigned int quota)
{
	bool (*expire_one)(struct efx_nic *efx, u32 flow_id, unsigned int index);
	unsigned int index, size;
	u32 flow_id;

	if (!spin_trylock_bh(&efx->filter_lock))
		return false;

	expire_one = efx->type->filter_rfs_expire_one;
	index = efx->rps_expire_index;
	size = efx->type->max_rx_ip_filters;
	while (quota--) {
		flow_id = efx->rps_flow_id[index];
		if (expire_one(efx, flow_id, index))
			netif_info(efx, rx_status, efx->net_dev,
				   "expired filter %d [flow %u]\n",
				   index, flow_id);
		if (++index == size)
			index = 0;
	}
	efx->rps_expire_index = index;

	spin_unlock_bh(&efx->filter_lock);
	return true;
}

#endif /* CONFIG_RFS_ACCEL */

/**
 * efx_filter_is_mc_recipient - test whether spec is a multicast recipient
 * @spec: Specification to test
 *
 * Return: %true if the specification is a non-drop RX filter that
 * matches a local MAC address I/G bit value of 1 or matches a local
 * IPv4 or IPv6 address value in the respective multicast address
 * range.  Otherwise %false.
 */
bool efx_filter_is_mc_recipient(const struct efx_filter_spec *spec)
{
	if (!(spec->flags & EFX_FILTER_FLAG_RX) ||
	    spec->dmaq_id == EFX_FILTER_RX_DMAQ_ID_DROP)
		return false;

	if (spec->match_flags &
	    (EFX_FILTER_MATCH_LOC_MAC | EFX_FILTER_MATCH_LOC_MAC_IG) &&
	    is_multicast_ether_addr(spec->loc_mac))
		return true;

	if ((spec->match_flags &
	     (EFX_FILTER_MATCH_ETHER_TYPE | EFX_FILTER_MATCH_LOC_HOST)) ==
	    (EFX_FILTER_MATCH_ETHER_TYPE | EFX_FILTER_MATCH_LOC_HOST)) {
		if (spec->ether_type == htons(ETH_P_IP) &&
		    ipv4_is_multicast(spec->loc_host[0]))
			return true;
		if (spec->ether_type == htons(ETH_P_IPV6) &&
		    ((const u8 *)spec->loc_host)[0] == 0xff)
			return true;
	}

	return false;
}