Ben Hutchings8ceee662008-04-27 12:55:59 +01001/****************************************************************************
Ben Hutchingsf7a6d2c2013-08-29 23:32:48 +01002 * Driver for Solarflare network controllers and boards
Ben Hutchings8ceee662008-04-27 12:55:59 +01003 * Copyright 2005-2006 Fen Systems Ltd.
Ben Hutchingsf7a6d2c2013-08-29 23:32:48 +01004 * Copyright 2005-2013 Solarflare Communications Inc.
Ben Hutchings8ceee662008-04-27 12:55:59 +01005 *
6 * This program is free software; you can redistribute it and/or modify it
7 * under the terms of the GNU General Public License version 2 as published
8 * by the Free Software Foundation, incorporated herein by reference.
9 */
10
11#include <linux/pci.h>
12#include <linux/tcp.h>
13#include <linux/ip.h>
14#include <linux/in.h>
Ben Hutchings738a8f42009-11-29 15:16:05 +000015#include <linux/ipv6.h>
Tejun Heo5a0e3ad2010-03-24 17:04:11 +090016#include <linux/slab.h>
Ben Hutchings738a8f42009-11-29 15:16:05 +000017#include <net/ipv6.h>
Ben Hutchings8ceee662008-04-27 12:55:59 +010018#include <linux/if_ether.h>
19#include <linux/highmem.h>
Ben Hutchings183233b2013-06-28 21:47:12 +010020#include <linux/cache.h>
Ben Hutchings8ceee662008-04-27 12:55:59 +010021#include "net_driver.h"
Ben Hutchings8ceee662008-04-27 12:55:59 +010022#include "efx.h"
Ben Hutchings183233b2013-06-28 21:47:12 +010023#include "io.h"
Ben Hutchings744093c2009-11-29 15:12:08 +000024#include "nic.h"
Ben Hutchings8ceee662008-04-27 12:55:59 +010025#include "workarounds.h"
Ben Hutchingsdfa50be2013-03-08 21:20:09 +000026#include "ef10_regs.h"
Ben Hutchings8ceee662008-04-27 12:55:59 +010027
Ben Hutchings183233b2013-06-28 21:47:12 +010028#ifdef EFX_USE_PIO
29
30#define EFX_PIOBUF_SIZE_MAX ER_DZ_TX_PIOBUF_SIZE
31#define EFX_PIOBUF_SIZE_DEF ALIGN(256, L1_CACHE_BYTES)
32unsigned int efx_piobuf_size __read_mostly = EFX_PIOBUF_SIZE_DEF;
33
34#endif /* EFX_USE_PIO */
35
Ben Hutchings0fe55652013-06-28 21:47:15 +010036static inline unsigned int
37efx_tx_queue_get_insert_index(const struct efx_tx_queue *tx_queue)
38{
39 return tx_queue->insert_count & tx_queue->ptr_mask;
40}
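/* Illustrative note (not in the upstream source): the ring size is a power
 * of two, so ptr_mask == ring_size - 1 and the free-running insert_count
 * only needs masking when used as an array index.  For example, with a
 * 512-entry ring (ptr_mask == 0x1ff), insert_count == 515 maps to buffer
 * index 3.
 */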
41
42static inline struct efx_tx_buffer *
43__efx_tx_queue_get_insert_buffer(const struct efx_tx_queue *tx_queue)
44{
45 return &tx_queue->buffer[efx_tx_queue_get_insert_index(tx_queue)];
46}
47
48static inline struct efx_tx_buffer *
49efx_tx_queue_get_insert_buffer(const struct efx_tx_queue *tx_queue)
50{
51 struct efx_tx_buffer *buffer =
52 __efx_tx_queue_get_insert_buffer(tx_queue);
53
54 EFX_BUG_ON_PARANOID(buffer->len);
55 EFX_BUG_ON_PARANOID(buffer->flags);
56 EFX_BUG_ON_PARANOID(buffer->unmap_len);
57
58 return buffer;
59}
60
Ben Hutchings4d566062008-09-01 12:47:12 +010061static void efx_dequeue_buffer(struct efx_tx_queue *tx_queue,
Tom Herbertc3940992011-11-28 16:33:43 +000062 struct efx_tx_buffer *buffer,
63 unsigned int *pkts_compl,
64 unsigned int *bytes_compl)
Ben Hutchings8ceee662008-04-27 12:55:59 +010065{
66 if (buffer->unmap_len) {
Ben Hutchings0e33d872012-05-17 17:46:55 +010067 struct device *dma_dev = &tx_queue->efx->pci_dev->dev;
Alexandre Rames2acdb922013-10-31 12:42:32 +000068 dma_addr_t unmap_addr = buffer->dma_addr - buffer->dma_offset;
Ben Hutchings7668ff92012-05-17 20:52:20 +010069 if (buffer->flags & EFX_TX_BUF_MAP_SINGLE)
Ben Hutchings0e33d872012-05-17 17:46:55 +010070 dma_unmap_single(dma_dev, unmap_addr, buffer->unmap_len,
71 DMA_TO_DEVICE);
Ben Hutchings8ceee662008-04-27 12:55:59 +010072 else
Ben Hutchings0e33d872012-05-17 17:46:55 +010073 dma_unmap_page(dma_dev, unmap_addr, buffer->unmap_len,
74 DMA_TO_DEVICE);
Ben Hutchings8ceee662008-04-27 12:55:59 +010075 buffer->unmap_len = 0;
Ben Hutchings8ceee662008-04-27 12:55:59 +010076 }
77
Ben Hutchings7668ff92012-05-17 20:52:20 +010078 if (buffer->flags & EFX_TX_BUF_SKB) {
Tom Herbertc3940992011-11-28 16:33:43 +000079 (*pkts_compl)++;
80 (*bytes_compl) += buffer->skb->len;
Ben Hutchings8ceee662008-04-27 12:55:59 +010081 dev_kfree_skb_any((struct sk_buff *) buffer->skb);
Ben Hutchings62776d02010-06-23 11:30:07 +000082 netif_vdbg(tx_queue->efx, tx_done, tx_queue->efx->net_dev,
83 "TX queue %d transmission id %x complete\n",
84 tx_queue->queue, tx_queue->read_count);
Ben Hutchingsf7251a92012-05-17 18:40:54 +010085 } else if (buffer->flags & EFX_TX_BUF_HEAP) {
86 kfree(buffer->heap_buf);
Ben Hutchings8ceee662008-04-27 12:55:59 +010087 }
Ben Hutchings7668ff92012-05-17 20:52:20 +010088
Ben Hutchingsf7251a92012-05-17 18:40:54 +010089 buffer->len = 0;
90 buffer->flags = 0;
Ben Hutchings8ceee662008-04-27 12:55:59 +010091}
92
Ben Hutchingsb9b39b62008-05-07 12:51:12 +010093static int efx_enqueue_skb_tso(struct efx_tx_queue *tx_queue,
Ben Hutchings740847d2008-09-01 12:48:23 +010094 struct sk_buff *skb);
Ben Hutchings8ceee662008-04-27 12:55:59 +010095
Ben Hutchings63f19882009-10-23 08:31:20 +000096static inline unsigned
97efx_max_tx_len(struct efx_nic *efx, dma_addr_t dma_addr)
98{
99 /* Depending on the NIC revision, we can use descriptor
100 * lengths up to 8K or 8K-1. However, since PCI Express
101 * devices must split read requests at 4K boundaries, there is
102 * little benefit from using descriptors that cross those
103 * boundaries and we keep things simple by not doing so.
104 */
Ben Hutchings5b6262d2012-02-02 21:21:15 +0000105 unsigned len = (~dma_addr & (EFX_PAGE_SIZE - 1)) + 1;
Ben Hutchings63f19882009-10-23 08:31:20 +0000106
107 /* Work around hardware bug for unaligned buffers. */
108 if (EFX_WORKAROUND_5391(efx) && (dma_addr & 0xf))
109 len = min_t(unsigned, len, 512 - (dma_addr & 0xf));
110
111 return len;
112}
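/* Worked example (illustrative, not upstream code), assuming EFX_PAGE_SIZE
 * is 4096: a buffer mapped at dma_addr == 0x2f80 within its page gives
 * (~0x2f80 & 0xfff) + 1 == 0x80, i.e. a 128-byte descriptor that ends
 * exactly on the next 4K boundary.  The 5391 workaround then shortens the
 * descriptor further only when the address is not 16-byte aligned.
 */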
113
Ben Hutchings7e6d06f2012-07-30 15:57:44 +0000114unsigned int efx_tx_max_skb_descs(struct efx_nic *efx)
115{
116 /* Header and payload descriptor for each output segment, plus
117 * one for every input fragment boundary within a segment
118 */
119 unsigned int max_descs = EFX_TSO_MAX_SEGS * 2 + MAX_SKB_FRAGS;
120
Ben Hutchingsdfa50be2013-03-08 21:20:09 +0000121 /* Possibly one more per segment for the alignment workaround,
122 * or for option descriptors
123 */
124 if (EFX_WORKAROUND_5391(efx) || efx_nic_rev(efx) >= EFX_REV_HUNT_A0)
Ben Hutchings7e6d06f2012-07-30 15:57:44 +0000125 max_descs += EFX_TSO_MAX_SEGS;
126
127 /* Possibly more for PCIe page boundaries within input fragments */
128 if (PAGE_SIZE > EFX_PAGE_SIZE)
129 max_descs += max_t(unsigned int, MAX_SKB_FRAGS,
130 DIV_ROUND_UP(GSO_MAX_SIZE, EFX_PAGE_SIZE));
131
132 return max_descs;
133}
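/* Worked example (illustrative, not upstream code), assuming
 * EFX_TSO_MAX_SEGS == 100, MAX_SKB_FRAGS == 17, EFX_PAGE_SIZE == 4096 and
 * GSO_MAX_SIZE == 65536:
 *
 *   header + payload descriptors:   100 * 2 + 17            = 217
 *   workaround / option descriptors: + 100                  = 317
 *   PCIe 4K boundaries (PAGE_SIZE > 4K only): + max(17, 16) = 334
 *
 * The exact constants depend on the kernel configuration; the value is a
 * worst-case bound, not a typical per-skb cost.
 */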
134
Ben Hutchings14bf7182012-05-22 01:27:58 +0100135/* Get partner of a TX queue, seen as part of the same net core queue */
136static struct efx_tx_queue *efx_tx_queue_partner(struct efx_tx_queue *tx_queue)
137{
138 if (tx_queue->queue & EFX_TXQ_TYPE_OFFLOAD)
139 return tx_queue - EFX_TXQ_TYPE_OFFLOAD;
140 else
141 return tx_queue + EFX_TXQ_TYPE_OFFLOAD;
142}
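/* Illustrative sketch (not part of the upstream driver), assuming
 * EFX_TXQ_TYPE_OFFLOAD is bit 0 of the queue number: offload and
 * non-offload queues pair up as {0,1}, {2,3}, ... so the add/subtract
 * above is equivalent to toggling that bit.
 */
#if 0
static unsigned int example_partner_queue(unsigned int queue)
{
	/* e.g. queue 2 -> 3 and queue 3 -> 2 */
	return queue ^ EFX_TXQ_TYPE_OFFLOAD;
}
#endif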
143
144static void efx_tx_maybe_stop_queue(struct efx_tx_queue *txq1)
145{
146 /* We need to consider both queues that the net core sees as one */
147 struct efx_tx_queue *txq2 = efx_tx_queue_partner(txq1);
148 struct efx_nic *efx = txq1->efx;
149 unsigned int fill_level;
150
151 fill_level = max(txq1->insert_count - txq1->old_read_count,
152 txq2->insert_count - txq2->old_read_count);
153 if (likely(fill_level < efx->txq_stop_thresh))
154 return;
155
156 /* We used the stale old_read_count above, which gives us a
157 * pessimistic estimate of the fill level (which may even
158 * validly be >= efx->txq_entries). Now try again using
159 * read_count (more likely to be a cache miss).
160 *
161 * If we read read_count and then conditionally stop the
162 * queue, it is possible for the completion path to race with
163 * us and complete all outstanding descriptors in the middle,
164 * after which there will be no more completions to wake it.
165 * Therefore we stop the queue first, then read read_count
166 * (with a memory barrier to ensure the ordering), then
167 * restart the queue if the fill level turns out to be low
168 * enough.
169 */
170 netif_tx_stop_queue(txq1->core_txq);
171 smp_mb();
172 txq1->old_read_count = ACCESS_ONCE(txq1->read_count);
173 txq2->old_read_count = ACCESS_ONCE(txq2->read_count);
174
175 fill_level = max(txq1->insert_count - txq1->old_read_count,
176 txq2->insert_count - txq2->old_read_count);
177 EFX_BUG_ON_PARANOID(fill_level >= efx->txq_entries);
178 if (likely(fill_level < efx->txq_stop_thresh)) {
179 smp_mb();
180 if (likely(!efx->loopback_selftest))
181 netif_tx_start_queue(txq1->core_txq);
182 }
183}
184
Jon Cooperee45fd92013-09-02 18:24:29 +0100185#ifdef EFX_USE_PIO
186
187struct efx_short_copy_buffer {
188 int used;
189 u8 buf[L1_CACHE_BYTES];
190};
191
Jon Cooperdaf37b52014-06-11 14:33:08 +0100192/* Copy in explicit 64-bit writes. */
193static void efx_memcpy_64(void __iomem *dest, void *src, size_t len)
194{
195 u64 *src64 = src;
196 u64 __iomem *dest64 = dest;
197 size_t l64 = len / 8;
198 size_t i;
199
200 for (i = 0; i < l64; i++)
201 writeq(src64[i], &dest64[i]);
202}
203
Jon Cooperee45fd92013-09-02 18:24:29 +0100204/* Copy to PIO, respecting that writes to PIO buffers must be dword aligned.
205 * Advances piobuf pointer. Leaves additional data in the copy buffer.
206 */
207static void efx_memcpy_toio_aligned(struct efx_nic *efx, u8 __iomem **piobuf,
208 u8 *data, int len,
209 struct efx_short_copy_buffer *copy_buf)
210{
211 int block_len = len & ~(sizeof(copy_buf->buf) - 1);
212
Jon Cooperdaf37b52014-06-11 14:33:08 +0100213 efx_memcpy_64(*piobuf, data, block_len);
Jon Cooperee45fd92013-09-02 18:24:29 +0100214 *piobuf += block_len;
215 len -= block_len;
216
217 if (len) {
218 data += block_len;
219 BUG_ON(copy_buf->used);
220 BUG_ON(len > sizeof(copy_buf->buf));
221 memcpy(copy_buf->buf, data, len);
222 copy_buf->used = len;
223 }
224}
225
226/* Copy to PIO, respecting dword alignment, popping data from copy buffer first.
227 * Advances piobuf pointer. Leaves additional data in the copy buffer.
228 */
229static void efx_memcpy_toio_aligned_cb(struct efx_nic *efx, u8 __iomem **piobuf,
230 u8 *data, int len,
231 struct efx_short_copy_buffer *copy_buf)
232{
233 if (copy_buf->used) {
234 /* if the copy buffer is partially full, fill it up and write */
235 int copy_to_buf =
236 min_t(int, sizeof(copy_buf->buf) - copy_buf->used, len);
237
238 memcpy(copy_buf->buf + copy_buf->used, data, copy_to_buf);
239 copy_buf->used += copy_to_buf;
240
241 /* if we didn't fill it up then we're done for now */
242 if (copy_buf->used < sizeof(copy_buf->buf))
243 return;
244
Jon Cooperdaf37b52014-06-11 14:33:08 +0100245 efx_memcpy_64(*piobuf, copy_buf->buf, sizeof(copy_buf->buf));
Jon Cooperee45fd92013-09-02 18:24:29 +0100246 *piobuf += sizeof(copy_buf->buf);
247 data += copy_to_buf;
248 len -= copy_to_buf;
249 copy_buf->used = 0;
250 }
251
252 efx_memcpy_toio_aligned(efx, piobuf, data, len, copy_buf);
253}
254
255static void efx_flush_copy_buffer(struct efx_nic *efx, u8 __iomem *piobuf,
256 struct efx_short_copy_buffer *copy_buf)
257{
258 /* if there's anything in it, write the whole buffer, including junk */
259 if (copy_buf->used)
Jon Cooperdaf37b52014-06-11 14:33:08 +0100260 efx_memcpy_64(piobuf, copy_buf->buf, sizeof(copy_buf->buf));
Jon Cooperee45fd92013-09-02 18:24:29 +0100261}
262
263/* Traverse skb structure and copy fragments into PIO buffer.
264 * Advances piobuf pointer.
265 */
266static void efx_skb_copy_bits_to_pio(struct efx_nic *efx, struct sk_buff *skb,
267 u8 __iomem **piobuf,
268 struct efx_short_copy_buffer *copy_buf)
269{
270 int i;
271
272 efx_memcpy_toio_aligned(efx, piobuf, skb->data, skb_headlen(skb),
273 copy_buf);
274
275 for (i = 0; i < skb_shinfo(skb)->nr_frags; ++i) {
276 skb_frag_t *f = &skb_shinfo(skb)->frags[i];
277 u8 *vaddr;
278
279 vaddr = kmap_atomic(skb_frag_page(f));
280
281 efx_memcpy_toio_aligned_cb(efx, piobuf, vaddr + f->page_offset,
282 skb_frag_size(f), copy_buf);
283 kunmap_atomic(vaddr);
284 }
285
286 EFX_BUG_ON_PARANOID(skb_shinfo(skb)->frag_list);
287}
288
289static struct efx_tx_buffer *
290efx_enqueue_skb_pio(struct efx_tx_queue *tx_queue, struct sk_buff *skb)
291{
292 struct efx_tx_buffer *buffer =
293 efx_tx_queue_get_insert_buffer(tx_queue);
294 u8 __iomem *piobuf = tx_queue->piobuf;
295
296 /* Copy to PIO buffer. Ensure the writes are padded to the end
297 * of a cache line, as this is required for write-combining to be
298 * effective on at least x86.
299 */
300
301 if (skb_shinfo(skb)->nr_frags) {
302 /* The size of the copy buffer will ensure all writes
303 * are the size of a cache line.
304 */
305 struct efx_short_copy_buffer copy_buf;
306
307 copy_buf.used = 0;
308
309 efx_skb_copy_bits_to_pio(tx_queue->efx, skb,
310 &piobuf, &copy_buf);
311 efx_flush_copy_buffer(tx_queue->efx, piobuf, &copy_buf);
312 } else {
313 /* Pad the write to the size of a cache line.
314 * We can do this because we know the skb_shared_info struct is
315 * after the source, and the destination buffer is big enough.
316 */
317 BUILD_BUG_ON(L1_CACHE_BYTES >
318 SKB_DATA_ALIGN(sizeof(struct skb_shared_info)));
Jon Cooperdaf37b52014-06-11 14:33:08 +0100319 efx_memcpy_64(tx_queue->piobuf, skb->data,
320 ALIGN(skb->len, L1_CACHE_BYTES));
Jon Cooperee45fd92013-09-02 18:24:29 +0100321 }
322
323 EFX_POPULATE_QWORD_5(buffer->option,
324 ESF_DZ_TX_DESC_IS_OPT, 1,
325 ESF_DZ_TX_OPTION_TYPE, ESE_DZ_TX_OPTION_DESC_PIO,
326 ESF_DZ_TX_PIO_CONT, 0,
327 ESF_DZ_TX_PIO_BYTE_CNT, skb->len,
328 ESF_DZ_TX_PIO_BUF_ADDR,
329 tx_queue->piobuf_offset);
330 ++tx_queue->pio_packets;
331 ++tx_queue->insert_count;
332 return buffer;
333}
334#endif /* EFX_USE_PIO */
335
Ben Hutchings8ceee662008-04-27 12:55:59 +0100336/*
337 * Add a socket buffer to a TX queue
338 *
339 * This maps all fragments of a socket buffer for DMA and adds them to
340 * the TX queue. The queue's insert pointer will be incremented by
341 * the number of fragments in the socket buffer.
342 *
343 * If any DMA mapping fails, any mapped fragments will be unmapped,
344 * and the queue's insert pointer will be restored to its original value.
345 *
Ben Hutchings497f5ba2009-11-23 16:07:05 +0000346 * This function is split out from efx_hard_start_xmit to allow the
347 * loopback test to direct packets via specific TX queues.
348 *
Ben Hutchings14bf7182012-05-22 01:27:58 +0100349 * Returns NETDEV_TX_OK.
Ben Hutchings8ceee662008-04-27 12:55:59 +0100350 * You must hold netif_tx_lock() to call this function.
351 */
Ben Hutchings497f5ba2009-11-23 16:07:05 +0000352netdev_tx_t efx_enqueue_skb(struct efx_tx_queue *tx_queue, struct sk_buff *skb)
Ben Hutchings8ceee662008-04-27 12:55:59 +0100353{
354 struct efx_nic *efx = tx_queue->efx;
Ben Hutchings0e33d872012-05-17 17:46:55 +0100355 struct device *dma_dev = &efx->pci_dev->dev;
Ben Hutchings8ceee662008-04-27 12:55:59 +0100356 struct efx_tx_buffer *buffer;
357 skb_frag_t *fragment;
Ben Hutchings0fe55652013-06-28 21:47:15 +0100358 unsigned int len, unmap_len = 0;
Ben Hutchings8ceee662008-04-27 12:55:59 +0100359 dma_addr_t dma_addr, unmap_addr = 0;
360 unsigned int dma_len;
Ben Hutchings7668ff92012-05-17 20:52:20 +0100361 unsigned short dma_flags;
Ben Hutchings14bf7182012-05-22 01:27:58 +0100362 int i = 0;
Ben Hutchings8ceee662008-04-27 12:55:59 +0100363
364 EFX_BUG_ON_PARANOID(tx_queue->write_count != tx_queue->insert_count);
365
Ben Hutchings9bc183d2009-11-23 16:06:47 +0000366 if (skb_shinfo(skb)->gso_size)
Ben Hutchingsb9b39b62008-05-07 12:51:12 +0100367 return efx_enqueue_skb_tso(tx_queue, skb);
368
Ben Hutchings8ceee662008-04-27 12:55:59 +0100369 /* Get size of the initial fragment */
370 len = skb_headlen(skb);
371
Ben Hutchingsbb145a92009-03-20 13:25:39 +0000372 /* Pad if necessary */
373 if (EFX_WORKAROUND_15592(efx) && skb->len <= 32) {
374 EFX_BUG_ON_PARANOID(skb->data_len);
375 len = 32 + 1;
376 if (skb_pad(skb, len - skb->len))
377 return NETDEV_TX_OK;
378 }
379
Jon Cooperee45fd92013-09-02 18:24:29 +0100380 /* Consider using PIO for short packets */
381#ifdef EFX_USE_PIO
382 if (skb->len <= efx_piobuf_size && tx_queue->piobuf &&
383 efx_nic_tx_is_empty(tx_queue) &&
384 efx_nic_tx_is_empty(efx_tx_queue_partner(tx_queue))) {
385 buffer = efx_enqueue_skb_pio(tx_queue, skb);
386 dma_flags = EFX_TX_BUF_OPTION;
387 goto finish_packet;
388 }
389#endif
390
Ben Hutchings0e33d872012-05-17 17:46:55 +0100391 /* Map for DMA. Use dma_map_single rather than dma_map_page
Ben Hutchings8ceee662008-04-27 12:55:59 +0100392 * since this is more efficient on machines with sparse
393 * memory.
394 */
Ben Hutchings7668ff92012-05-17 20:52:20 +0100395 dma_flags = EFX_TX_BUF_MAP_SINGLE;
Ben Hutchings0e33d872012-05-17 17:46:55 +0100396 dma_addr = dma_map_single(dma_dev, skb->data, len, PCI_DMA_TODEVICE);
Ben Hutchings8ceee662008-04-27 12:55:59 +0100397
398 /* Process all fragments */
399 while (1) {
Ben Hutchings0e33d872012-05-17 17:46:55 +0100400 if (unlikely(dma_mapping_error(dma_dev, dma_addr)))
401 goto dma_err;
Ben Hutchings8ceee662008-04-27 12:55:59 +0100402
403 /* Store fields for marking in the per-fragment final
404 * descriptor */
405 unmap_len = len;
406 unmap_addr = dma_addr;
407
408 /* Add to TX queue, splitting across DMA boundaries */
409 do {
Ben Hutchings0fe55652013-06-28 21:47:15 +0100410 buffer = efx_tx_queue_get_insert_buffer(tx_queue);
Ben Hutchings8ceee662008-04-27 12:55:59 +0100411
Ben Hutchings63f19882009-10-23 08:31:20 +0000412 dma_len = efx_max_tx_len(efx, dma_addr);
413 if (likely(dma_len >= len))
Ben Hutchings8ceee662008-04-27 12:55:59 +0100414 dma_len = len;
415
Ben Hutchings8ceee662008-04-27 12:55:59 +0100416 /* Fill out per descriptor fields */
417 buffer->len = dma_len;
418 buffer->dma_addr = dma_addr;
Ben Hutchings7668ff92012-05-17 20:52:20 +0100419 buffer->flags = EFX_TX_BUF_CONT;
Ben Hutchings8ceee662008-04-27 12:55:59 +0100420 len -= dma_len;
421 dma_addr += dma_len;
422 ++tx_queue->insert_count;
423 } while (len);
424
425 /* Transfer ownership of the unmapping to the final buffer */
Ben Hutchings7668ff92012-05-17 20:52:20 +0100426 buffer->flags = EFX_TX_BUF_CONT | dma_flags;
Ben Hutchings8ceee662008-04-27 12:55:59 +0100427 buffer->unmap_len = unmap_len;
Alexandre Rames2acdb922013-10-31 12:42:32 +0000428 buffer->dma_offset = buffer->dma_addr - unmap_addr;
Ben Hutchings8ceee662008-04-27 12:55:59 +0100429 unmap_len = 0;
430
431 /* Get address and size of next fragment */
432 if (i >= skb_shinfo(skb)->nr_frags)
433 break;
434 fragment = &skb_shinfo(skb)->frags[i];
Eric Dumazet9e903e02011-10-18 21:00:24 +0000435 len = skb_frag_size(fragment);
Ben Hutchings8ceee662008-04-27 12:55:59 +0100436 i++;
437 /* Map for DMA */
Ben Hutchings7668ff92012-05-17 20:52:20 +0100438 dma_flags = 0;
Ben Hutchings0e33d872012-05-17 17:46:55 +0100439 dma_addr = skb_frag_dma_map(dma_dev, fragment, 0, len,
Ian Campbell5d6bcdf2011-10-06 11:10:48 +0100440 DMA_TO_DEVICE);
Ben Hutchings8ceee662008-04-27 12:55:59 +0100441 }
442
443 /* Transfer ownership of the skb to the final buffer */
Paul Gortmaker440b87e2014-02-06 11:45:12 -0500444#ifdef EFX_USE_PIO
Jon Cooperee45fd92013-09-02 18:24:29 +0100445finish_packet:
Paul Gortmaker440b87e2014-02-06 11:45:12 -0500446#endif
Ben Hutchings8ceee662008-04-27 12:55:59 +0100447 buffer->skb = skb;
Ben Hutchings7668ff92012-05-17 20:52:20 +0100448 buffer->flags = EFX_TX_BUF_SKB | dma_flags;
Ben Hutchings8ceee662008-04-27 12:55:59 +0100449
Tom Herbertc3940992011-11-28 16:33:43 +0000450 netdev_tx_sent_queue(tx_queue->core_txq, skb->len);
451
Ben Hutchings8ceee662008-04-27 12:55:59 +0100452 /* Pass off to hardware */
Ben Hutchings152b6a62009-11-29 03:43:56 +0000453 efx_nic_push_buffers(tx_queue);
Ben Hutchings8ceee662008-04-27 12:55:59 +0100454
Ben Hutchings14bf7182012-05-22 01:27:58 +0100455 efx_tx_maybe_stop_queue(tx_queue);
456
Ben Hutchings8ceee662008-04-27 12:55:59 +0100457 return NETDEV_TX_OK;
458
Ben Hutchings0e33d872012-05-17 17:46:55 +0100459 dma_err:
Ben Hutchings62776d02010-06-23 11:30:07 +0000460 netif_err(efx, tx_err, efx->net_dev,
461 " TX queue %d could not map skb with %d bytes %d "
462 "fragments for DMA\n", tx_queue->queue, skb->len,
463 skb_shinfo(skb)->nr_frags + 1);
Ben Hutchings8ceee662008-04-27 12:55:59 +0100464
465 /* Mark the packet as transmitted, and free the SKB ourselves */
Ben Hutchings9bc183d2009-11-23 16:06:47 +0000466 dev_kfree_skb_any(skb);
Ben Hutchings8ceee662008-04-27 12:55:59 +0100467
Ben Hutchings8ceee662008-04-27 12:55:59 +0100468 /* Work backwards until we hit the original insert pointer value */
469 while (tx_queue->insert_count != tx_queue->write_count) {
Tom Herbertc3940992011-11-28 16:33:43 +0000470 unsigned int pkts_compl = 0, bytes_compl = 0;
Ben Hutchings8ceee662008-04-27 12:55:59 +0100471 --tx_queue->insert_count;
Ben Hutchings0fe55652013-06-28 21:47:15 +0100472 buffer = __efx_tx_queue_get_insert_buffer(tx_queue);
Tom Herbertc3940992011-11-28 16:33:43 +0000473 efx_dequeue_buffer(tx_queue, buffer, &pkts_compl, &bytes_compl);
Ben Hutchings8ceee662008-04-27 12:55:59 +0100474 }
475
476 /* Free the fragment we were mid-way through pushing */
Ben Hutchingsecbd95c2008-09-01 12:46:40 +0100477 if (unmap_len) {
Ben Hutchings7668ff92012-05-17 20:52:20 +0100478 if (dma_flags & EFX_TX_BUF_MAP_SINGLE)
Ben Hutchings0e33d872012-05-17 17:46:55 +0100479 dma_unmap_single(dma_dev, unmap_addr, unmap_len,
480 DMA_TO_DEVICE);
Ben Hutchingsecbd95c2008-09-01 12:46:40 +0100481 else
Ben Hutchings0e33d872012-05-17 17:46:55 +0100482 dma_unmap_page(dma_dev, unmap_addr, unmap_len,
483 DMA_TO_DEVICE);
Ben Hutchingsecbd95c2008-09-01 12:46:40 +0100484 }
Ben Hutchings8ceee662008-04-27 12:55:59 +0100485
Ben Hutchings14bf7182012-05-22 01:27:58 +0100486 return NETDEV_TX_OK;
Ben Hutchings8ceee662008-04-27 12:55:59 +0100487}
488
489/* Remove packets from the TX queue
490 *
491 * This removes packets from the TX queue, up to and including the
492 * specified index.
493 */
Ben Hutchings4d566062008-09-01 12:47:12 +0100494static void efx_dequeue_buffers(struct efx_tx_queue *tx_queue,
Tom Herbertc3940992011-11-28 16:33:43 +0000495 unsigned int index,
496 unsigned int *pkts_compl,
497 unsigned int *bytes_compl)
Ben Hutchings8ceee662008-04-27 12:55:59 +0100498{
499 struct efx_nic *efx = tx_queue->efx;
500 unsigned int stop_index, read_ptr;
Ben Hutchings8ceee662008-04-27 12:55:59 +0100501
Steve Hodgsonecc910f2010-09-10 06:42:22 +0000502 stop_index = (index + 1) & tx_queue->ptr_mask;
503 read_ptr = tx_queue->read_count & tx_queue->ptr_mask;
Ben Hutchings8ceee662008-04-27 12:55:59 +0100504
505 while (read_ptr != stop_index) {
506 struct efx_tx_buffer *buffer = &tx_queue->buffer[read_ptr];
Ben Hutchingsba8977b2013-01-08 23:43:19 +0000507
508 if (!(buffer->flags & EFX_TX_BUF_OPTION) &&
509 unlikely(buffer->len == 0)) {
Ben Hutchings62776d02010-06-23 11:30:07 +0000510 netif_err(efx, tx_err, efx->net_dev,
511 "TX queue %d spurious TX completion id %x\n",
512 tx_queue->queue, read_ptr);
Ben Hutchings8ceee662008-04-27 12:55:59 +0100513 efx_schedule_reset(efx, RESET_TYPE_TX_SKIP);
514 return;
515 }
516
Tom Herbertc3940992011-11-28 16:33:43 +0000517 efx_dequeue_buffer(tx_queue, buffer, pkts_compl, bytes_compl);
Ben Hutchings8ceee662008-04-27 12:55:59 +0100518
519 ++tx_queue->read_count;
Steve Hodgsonecc910f2010-09-10 06:42:22 +0000520 read_ptr = tx_queue->read_count & tx_queue->ptr_mask;
Ben Hutchings8ceee662008-04-27 12:55:59 +0100521 }
522}
523
Ben Hutchings8ceee662008-04-27 12:55:59 +0100524/* Initiate a packet transmission. We use one channel per CPU
525 * (sharing when we have more CPUs than channels). On Falcon, the TX
526 * completion events will be directed back to the CPU that transmitted
527 * the packet, which should be cache-efficient.
528 *
529 * Context: non-blocking.
530 * Note that returning anything other than NETDEV_TX_OK will cause the
531 * OS to free the skb.
532 */
Stephen Hemminger613573252009-08-31 19:50:58 +0000533netdev_tx_t efx_hard_start_xmit(struct sk_buff *skb,
Ben Hutchings2d0cc562012-02-17 00:10:45 +0000534 struct net_device *net_dev)
Ben Hutchings8ceee662008-04-27 12:55:59 +0100535{
Ben Hutchings767e4682008-09-01 12:43:14 +0100536 struct efx_nic *efx = netdev_priv(net_dev);
Ben Hutchings60ac1062008-09-01 12:44:59 +0100537 struct efx_tx_queue *tx_queue;
Ben Hutchings94b274b2011-01-10 21:18:20 +0000538 unsigned index, type;
Ben Hutchings60ac1062008-09-01 12:44:59 +0100539
Ben Hutchingse4abce82011-05-16 18:51:24 +0100540 EFX_WARN_ON_PARANOID(!netif_device_present(net_dev));
Ben Hutchingsa7ef5932009-03-04 09:52:37 +0000541
Stuart Hodgson7c236c42012-09-03 11:09:36 +0100542 /* PTP "event" packet */
543 if (unlikely(efx_xmit_with_hwtstamp(skb)) &&
544 unlikely(efx_ptp_is_ptp_tx(efx, skb))) {
545 return efx_ptp_tx(efx, skb);
546 }
547
Ben Hutchings94b274b2011-01-10 21:18:20 +0000548 index = skb_get_queue_mapping(skb);
549 type = skb->ip_summed == CHECKSUM_PARTIAL ? EFX_TXQ_TYPE_OFFLOAD : 0;
550 if (index >= efx->n_tx_channels) {
551 index -= efx->n_tx_channels;
552 type |= EFX_TXQ_TYPE_HIGHPRI;
553 }
554 tx_queue = efx_get_tx_queue(efx, index, type);
Ben Hutchings60ac1062008-09-01 12:44:59 +0100555
Ben Hutchings497f5ba2009-11-23 16:07:05 +0000556 return efx_enqueue_skb(tx_queue, skb);
Ben Hutchings8ceee662008-04-27 12:55:59 +0100557}
558
Ben Hutchings60031fc2011-01-12 18:39:40 +0000559void efx_init_tx_queue_core_txq(struct efx_tx_queue *tx_queue)
560{
Ben Hutchings94b274b2011-01-10 21:18:20 +0000561 struct efx_nic *efx = tx_queue->efx;
562
Ben Hutchings60031fc2011-01-12 18:39:40 +0000563 /* Must be inverse of queue lookup in efx_hard_start_xmit() */
Ben Hutchings94b274b2011-01-10 21:18:20 +0000564 tx_queue->core_txq =
565 netdev_get_tx_queue(efx->net_dev,
566 tx_queue->queue / EFX_TXQ_TYPES +
567 ((tx_queue->queue & EFX_TXQ_TYPE_HIGHPRI) ?
568 efx->n_tx_channels : 0));
569}
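/* Illustrative note (not in the upstream source), assuming EFX_TXQ_TYPES
 * is 4 and n_tx_channels is 4: hardware queue 6 (channel 1, HIGHPRI type)
 * maps to core txq 6 / 4 + 4 == 5, so the high-priority queues occupy a
 * second block of net-core queues after the normal-priority ones.
 */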
570
571int efx_setup_tc(struct net_device *net_dev, u8 num_tc)
572{
573 struct efx_nic *efx = netdev_priv(net_dev);
574 struct efx_channel *channel;
575 struct efx_tx_queue *tx_queue;
576 unsigned tc;
577 int rc;
578
579 if (efx_nic_rev(efx) < EFX_REV_FALCON_B0 || num_tc > EFX_MAX_TX_TC)
580 return -EINVAL;
581
582 if (num_tc == net_dev->num_tc)
583 return 0;
584
585 for (tc = 0; tc < num_tc; tc++) {
586 net_dev->tc_to_txq[tc].offset = tc * efx->n_tx_channels;
587 net_dev->tc_to_txq[tc].count = efx->n_tx_channels;
588 }
589
590 if (num_tc > net_dev->num_tc) {
591 /* Initialise high-priority queues as necessary */
592 efx_for_each_channel(channel, efx) {
593 efx_for_each_possible_channel_tx_queue(tx_queue,
594 channel) {
595 if (!(tx_queue->queue & EFX_TXQ_TYPE_HIGHPRI))
596 continue;
597 if (!tx_queue->buffer) {
598 rc = efx_probe_tx_queue(tx_queue);
599 if (rc)
600 return rc;
601 }
602 if (!tx_queue->initialised)
603 efx_init_tx_queue(tx_queue);
604 efx_init_tx_queue_core_txq(tx_queue);
605 }
606 }
607 } else {
608 /* Reduce number of classes before number of queues */
609 net_dev->num_tc = num_tc;
610 }
611
612 rc = netif_set_real_num_tx_queues(net_dev,
613 max_t(int, num_tc, 1) *
614 efx->n_tx_channels);
615 if (rc)
616 return rc;
617
618 /* Do not destroy high-priority queues when they become
619 * unused. We would have to flush them first, and it is
620 * fairly difficult to flush a subset of TX queues. Leave
621 * it to efx_fini_channels().
622 */
623
624 net_dev->num_tc = num_tc;
625 return 0;
Ben Hutchings60031fc2011-01-12 18:39:40 +0000626}
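/* Illustrative note (not in the upstream source): with n_tx_channels == 4
 * and num_tc == 2, the table above becomes
 *   tc 0 -> txq offset 0, count 4   (normal-priority queues)
 *   tc 1 -> txq offset 4, count 4   (high-priority queues)
 * and netif_set_real_num_tx_queues() is asked for 8 queues, matching the
 * queue selection in efx_hard_start_xmit().
 */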
627
Ben Hutchings8ceee662008-04-27 12:55:59 +0100628void efx_xmit_done(struct efx_tx_queue *tx_queue, unsigned int index)
629{
630 unsigned fill_level;
631 struct efx_nic *efx = tx_queue->efx;
Ben Hutchings14bf7182012-05-22 01:27:58 +0100632 struct efx_tx_queue *txq2;
Tom Herbertc3940992011-11-28 16:33:43 +0000633 unsigned int pkts_compl = 0, bytes_compl = 0;
Ben Hutchings8ceee662008-04-27 12:55:59 +0100634
Steve Hodgsonecc910f2010-09-10 06:42:22 +0000635 EFX_BUG_ON_PARANOID(index > tx_queue->ptr_mask);
Ben Hutchings8ceee662008-04-27 12:55:59 +0100636
Tom Herbertc3940992011-11-28 16:33:43 +0000637 efx_dequeue_buffers(tx_queue, index, &pkts_compl, &bytes_compl);
638 netdev_tx_completed_queue(tx_queue->core_txq, pkts_compl, bytes_compl);
Ben Hutchings8ceee662008-04-27 12:55:59 +0100639
Ben Hutchings02e12162013-04-27 01:55:21 +0100640 if (pkts_compl > 1)
641 ++tx_queue->merge_events;
642
Ben Hutchings14bf7182012-05-22 01:27:58 +0100643 /* See if we need to restart the netif queue. This memory
644 * barrier ensures that we write read_count (inside
645 * efx_dequeue_buffers()) before reading the queue status.
646 */
Ben Hutchings8ceee662008-04-27 12:55:59 +0100647 smp_mb();
Ben Hutchingsc04bfc62010-12-10 01:24:16 +0000648 if (unlikely(netif_tx_queue_stopped(tx_queue->core_txq)) &&
Neil Turton9d1aea62011-04-04 13:46:23 +0100649 likely(efx->port_enabled) &&
Ben Hutchingse4abce82011-05-16 18:51:24 +0100650 likely(netif_device_present(efx->net_dev))) {
Ben Hutchings14bf7182012-05-22 01:27:58 +0100651 txq2 = efx_tx_queue_partner(tx_queue);
652 fill_level = max(tx_queue->insert_count - tx_queue->read_count,
653 txq2->insert_count - txq2->read_count);
654 if (fill_level <= efx->txq_wake_thresh)
Ben Hutchingsc04bfc62010-12-10 01:24:16 +0000655 netif_tx_wake_queue(tx_queue->core_txq);
Ben Hutchings8ceee662008-04-27 12:55:59 +0100656 }
Ben Hutchingscd385572010-11-15 23:53:11 +0000657
658 /* Check whether the hardware queue is now empty */
659 if ((int)(tx_queue->read_count - tx_queue->old_write_count) >= 0) {
660 tx_queue->old_write_count = ACCESS_ONCE(tx_queue->write_count);
661 if (tx_queue->read_count == tx_queue->old_write_count) {
662 smp_mb();
663 tx_queue->empty_read_count =
664 tx_queue->read_count | EFX_EMPTY_COUNT_VALID;
665 }
666 }
Ben Hutchings8ceee662008-04-27 12:55:59 +0100667}
668
Ben Hutchingsf7251a92012-05-17 18:40:54 +0100669/* Size of page-based TSO header buffers. Larger blocks must be
670 * allocated from the heap.
671 */
672#define TSOH_STD_SIZE 128
673#define TSOH_PER_PAGE (PAGE_SIZE / TSOH_STD_SIZE)
674
675/* At most half the descriptors in the queue at any time will refer to
676 * a TSO header buffer, since they must always be followed by a
677 * payload descriptor referring to an skb.
678 */
679static unsigned int efx_tsoh_page_count(struct efx_tx_queue *tx_queue)
680{
681 return DIV_ROUND_UP(tx_queue->ptr_mask + 1, 2 * TSOH_PER_PAGE);
682}
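/* Worked example (illustrative, not upstream code), assuming 4K pages:
 * TSOH_PER_PAGE == 4096 / 128 == 32, so a 1024-entry TX ring needs
 * DIV_ROUND_UP(1024, 2 * 32) == 16 pages of standard-size TSO headers.
 */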
683
Ben Hutchings8ceee662008-04-27 12:55:59 +0100684int efx_probe_tx_queue(struct efx_tx_queue *tx_queue)
685{
686 struct efx_nic *efx = tx_queue->efx;
Steve Hodgsonecc910f2010-09-10 06:42:22 +0000687 unsigned int entries;
Ben Hutchings7668ff92012-05-17 20:52:20 +0100688 int rc;
Ben Hutchings8ceee662008-04-27 12:55:59 +0100689
Steve Hodgsonecc910f2010-09-10 06:42:22 +0000690 /* Create the smallest power-of-two aligned ring */
691 entries = max(roundup_pow_of_two(efx->txq_entries), EFX_MIN_DMAQ_SIZE);
692 EFX_BUG_ON_PARANOID(entries > EFX_MAX_DMAQ_SIZE);
693 tx_queue->ptr_mask = entries - 1;
694
695 netif_dbg(efx, probe, efx->net_dev,
696 "creating TX queue %d size %#x mask %#x\n",
697 tx_queue->queue, efx->txq_entries, tx_queue->ptr_mask);
Ben Hutchings8ceee662008-04-27 12:55:59 +0100698
699 /* Allocate software ring */
Thomas Meyerc2e4e252011-12-02 12:36:13 +0000700 tx_queue->buffer = kcalloc(entries, sizeof(*tx_queue->buffer),
Steve Hodgsonecc910f2010-09-10 06:42:22 +0000701 GFP_KERNEL);
Ben Hutchings60ac1062008-09-01 12:44:59 +0100702 if (!tx_queue->buffer)
703 return -ENOMEM;
Ben Hutchings8ceee662008-04-27 12:55:59 +0100704
Ben Hutchingsf7251a92012-05-17 18:40:54 +0100705 if (tx_queue->queue & EFX_TXQ_TYPE_OFFLOAD) {
706 tx_queue->tsoh_page =
707 kcalloc(efx_tsoh_page_count(tx_queue),
708 sizeof(tx_queue->tsoh_page[0]), GFP_KERNEL);
709 if (!tx_queue->tsoh_page) {
710 rc = -ENOMEM;
711 goto fail1;
712 }
713 }
714
Ben Hutchings8ceee662008-04-27 12:55:59 +0100715 /* Allocate hardware ring */
Ben Hutchings152b6a62009-11-29 03:43:56 +0000716 rc = efx_nic_probe_tx(tx_queue);
Ben Hutchings8ceee662008-04-27 12:55:59 +0100717 if (rc)
Ben Hutchingsf7251a92012-05-17 18:40:54 +0100718 goto fail2;
Ben Hutchings8ceee662008-04-27 12:55:59 +0100719
720 return 0;
721
Ben Hutchingsf7251a92012-05-17 18:40:54 +0100722fail2:
723 kfree(tx_queue->tsoh_page);
724 tx_queue->tsoh_page = NULL;
725fail1:
Ben Hutchings8ceee662008-04-27 12:55:59 +0100726 kfree(tx_queue->buffer);
727 tx_queue->buffer = NULL;
Ben Hutchings8ceee662008-04-27 12:55:59 +0100728 return rc;
729}
730
Ben Hutchingsbc3c90a2008-09-01 12:48:46 +0100731void efx_init_tx_queue(struct efx_tx_queue *tx_queue)
Ben Hutchings8ceee662008-04-27 12:55:59 +0100732{
Ben Hutchings62776d02010-06-23 11:30:07 +0000733 netif_dbg(tx_queue->efx, drv, tx_queue->efx->net_dev,
734 "initialising TX queue %d\n", tx_queue->queue);
Ben Hutchings8ceee662008-04-27 12:55:59 +0100735
736 tx_queue->insert_count = 0;
737 tx_queue->write_count = 0;
Ben Hutchingscd385572010-11-15 23:53:11 +0000738 tx_queue->old_write_count = 0;
Ben Hutchings8ceee662008-04-27 12:55:59 +0100739 tx_queue->read_count = 0;
740 tx_queue->old_read_count = 0;
Ben Hutchingscd385572010-11-15 23:53:11 +0000741 tx_queue->empty_read_count = 0 | EFX_EMPTY_COUNT_VALID;
Ben Hutchings8ceee662008-04-27 12:55:59 +0100742
743 /* Set up TX descriptor ring */
Ben Hutchings152b6a62009-11-29 03:43:56 +0000744 efx_nic_init_tx(tx_queue);
Ben Hutchings94b274b2011-01-10 21:18:20 +0000745
746 tx_queue->initialised = true;
Ben Hutchings8ceee662008-04-27 12:55:59 +0100747}
748
Ben Hutchingse42c3d82013-05-27 16:52:54 +0100749void efx_fini_tx_queue(struct efx_tx_queue *tx_queue)
Ben Hutchings8ceee662008-04-27 12:55:59 +0100750{
751 struct efx_tx_buffer *buffer;
752
Ben Hutchingse42c3d82013-05-27 16:52:54 +0100753 netif_dbg(tx_queue->efx, drv, tx_queue->efx->net_dev,
754 "shutting down TX queue %d\n", tx_queue->queue);
755
Ben Hutchings8ceee662008-04-27 12:55:59 +0100756 if (!tx_queue->buffer)
757 return;
758
759 /* Free any buffers left in the ring */
760 while (tx_queue->read_count != tx_queue->write_count) {
Tom Herbertc3940992011-11-28 16:33:43 +0000761 unsigned int pkts_compl = 0, bytes_compl = 0;
Steve Hodgsonecc910f2010-09-10 06:42:22 +0000762 buffer = &tx_queue->buffer[tx_queue->read_count & tx_queue->ptr_mask];
Tom Herbertc3940992011-11-28 16:33:43 +0000763 efx_dequeue_buffer(tx_queue, buffer, &pkts_compl, &bytes_compl);
Ben Hutchings8ceee662008-04-27 12:55:59 +0100764
765 ++tx_queue->read_count;
766 }
Tom Herbertc3940992011-11-28 16:33:43 +0000767 netdev_tx_reset_queue(tx_queue->core_txq);
Ben Hutchings8ceee662008-04-27 12:55:59 +0100768}
769
Ben Hutchings8ceee662008-04-27 12:55:59 +0100770void efx_remove_tx_queue(struct efx_tx_queue *tx_queue)
771{
Ben Hutchingsf7251a92012-05-17 18:40:54 +0100772 int i;
773
Ben Hutchings94b274b2011-01-10 21:18:20 +0000774 if (!tx_queue->buffer)
775 return;
776
Ben Hutchings62776d02010-06-23 11:30:07 +0000777 netif_dbg(tx_queue->efx, drv, tx_queue->efx->net_dev,
778 "destroying TX queue %d\n", tx_queue->queue);
Ben Hutchings152b6a62009-11-29 03:43:56 +0000779 efx_nic_remove_tx(tx_queue);
Ben Hutchings8ceee662008-04-27 12:55:59 +0100780
Ben Hutchingsf7251a92012-05-17 18:40:54 +0100781 if (tx_queue->tsoh_page) {
782 for (i = 0; i < efx_tsoh_page_count(tx_queue); i++)
783 efx_nic_free_buffer(tx_queue->efx,
784 &tx_queue->tsoh_page[i]);
785 kfree(tx_queue->tsoh_page);
786 tx_queue->tsoh_page = NULL;
787 }
788
Ben Hutchings8ceee662008-04-27 12:55:59 +0100789 kfree(tx_queue->buffer);
790 tx_queue->buffer = NULL;
Ben Hutchings8ceee662008-04-27 12:55:59 +0100791}
792
793
Ben Hutchingsb9b39b62008-05-07 12:51:12 +0100794/* Efx TCP segmentation acceleration.
795 *
796 * Why? Because by doing it here in the driver we can go significantly
797 * faster than the kernel's GSO.
798 *
799 * Requires TX checksum offload support.
800 */
801
Ben Hutchingsb9b39b62008-05-07 12:51:12 +0100802#define PTR_DIFF(p1, p2) ((u8 *)(p1) - (u8 *)(p2))
Ben Hutchingsb9b39b62008-05-07 12:51:12 +0100803
804/**
805 * struct tso_state - TSO state for an SKB
Ben Hutchings23d9e602008-09-01 12:47:02 +0100806 * @out_len: Remaining length in current segment
Ben Hutchingsb9b39b62008-05-07 12:51:12 +0100807 * @seqnum: Current sequence number
Ben Hutchings23d9e602008-09-01 12:47:02 +0100808 * @ipv4_id: Current IPv4 ID, host endian
Ben Hutchingsb9b39b62008-05-07 12:51:12 +0100809 * @packet_space: Remaining space in current packet
Ben Hutchings23d9e602008-09-01 12:47:02 +0100810 * @dma_addr: DMA address of current position
811 * @in_len: Remaining length in current SKB fragment
812 * @unmap_len: Length of SKB fragment
813 * @unmap_addr: DMA address of SKB fragment
Ben Hutchings7668ff92012-05-17 20:52:20 +0100814 * @dma_flags: TX buffer flags for DMA mapping - %EFX_TX_BUF_MAP_SINGLE or 0
Ben Hutchings738a8f42009-11-29 15:16:05 +0000815 * @protocol: Network protocol (after any VLAN header)
Ben Hutchings97142842012-06-22 02:44:01 +0100816 * @ip_off: Offset of IP header
817 * @tcp_off: Offset of TCP header
Ben Hutchings23d9e602008-09-01 12:47:02 +0100818 * @header_len: Number of bytes of header
Ben Hutchings53cb13c2012-06-19 20:03:41 +0100819 * @ip_base_len: IPv4 tot_len or IPv6 payload_len, before TCP payload
Ben Hutchingsdfa50be2013-03-08 21:20:09 +0000820 * @header_dma_addr: Header DMA address, when using option descriptors
821 * @header_unmap_len: Header DMA mapped length, or 0 if not using option
822 * descriptors
Ben Hutchingsb9b39b62008-05-07 12:51:12 +0100823 *
824 * The state used during segmentation. It is put into this data structure
825 * just to make it easy to pass into inline functions.
826 */
827struct tso_state {
Ben Hutchings23d9e602008-09-01 12:47:02 +0100828 /* Output position */
829 unsigned out_len;
Ben Hutchingsb9b39b62008-05-07 12:51:12 +0100830 unsigned seqnum;
Ben Hutchingsdfa50be2013-03-08 21:20:09 +0000831 u16 ipv4_id;
Ben Hutchingsb9b39b62008-05-07 12:51:12 +0100832 unsigned packet_space;
833
Ben Hutchings23d9e602008-09-01 12:47:02 +0100834 /* Input position */
835 dma_addr_t dma_addr;
836 unsigned in_len;
837 unsigned unmap_len;
838 dma_addr_t unmap_addr;
Ben Hutchings7668ff92012-05-17 20:52:20 +0100839 unsigned short dma_flags;
Ben Hutchingsb9b39b62008-05-07 12:51:12 +0100840
Ben Hutchings738a8f42009-11-29 15:16:05 +0000841 __be16 protocol;
Ben Hutchings97142842012-06-22 02:44:01 +0100842 unsigned int ip_off;
843 unsigned int tcp_off;
Ben Hutchings23d9e602008-09-01 12:47:02 +0100844 unsigned header_len;
Ben Hutchings53cb13c2012-06-19 20:03:41 +0100845 unsigned int ip_base_len;
Ben Hutchingsdfa50be2013-03-08 21:20:09 +0000846 dma_addr_t header_dma_addr;
847 unsigned int header_unmap_len;
Ben Hutchingsb9b39b62008-05-07 12:51:12 +0100848};
849
850
851/*
852 * Verify that our various assumptions about sk_buffs and the conditions
Ben Hutchings738a8f42009-11-29 15:16:05 +0000853 * under which TSO will be attempted hold true. Return the protocol number.
Ben Hutchingsb9b39b62008-05-07 12:51:12 +0100854 */
Ben Hutchings738a8f42009-11-29 15:16:05 +0000855static __be16 efx_tso_check_protocol(struct sk_buff *skb)
Ben Hutchingsb9b39b62008-05-07 12:51:12 +0100856{
Ben Hutchings740847d2008-09-01 12:48:23 +0100857 __be16 protocol = skb->protocol;
858
Ben Hutchingsb9b39b62008-05-07 12:51:12 +0100859 EFX_BUG_ON_PARANOID(((struct ethhdr *)skb->data)->h_proto !=
Ben Hutchings740847d2008-09-01 12:48:23 +0100860 protocol);
861 if (protocol == htons(ETH_P_8021Q)) {
Ben Hutchings740847d2008-09-01 12:48:23 +0100862 struct vlan_ethhdr *veh = (struct vlan_ethhdr *)skb->data;
863 protocol = veh->h_vlan_encapsulated_proto;
Ben Hutchings740847d2008-09-01 12:48:23 +0100864 }
865
Ben Hutchings738a8f42009-11-29 15:16:05 +0000866 if (protocol == htons(ETH_P_IP)) {
867 EFX_BUG_ON_PARANOID(ip_hdr(skb)->protocol != IPPROTO_TCP);
868 } else {
869 EFX_BUG_ON_PARANOID(protocol != htons(ETH_P_IPV6));
870 EFX_BUG_ON_PARANOID(ipv6_hdr(skb)->nexthdr != NEXTHDR_TCP);
871 }
Ben Hutchingsb9b39b62008-05-07 12:51:12 +0100872 EFX_BUG_ON_PARANOID((PTR_DIFF(tcp_hdr(skb), skb->data)
873 + (tcp_hdr(skb)->doff << 2u)) >
874 skb_headlen(skb));
Ben Hutchings738a8f42009-11-29 15:16:05 +0000875
876 return protocol;
Ben Hutchingsb9b39b62008-05-07 12:51:12 +0100877}
878
Ben Hutchingsf7251a92012-05-17 18:40:54 +0100879static u8 *efx_tsoh_get_buffer(struct efx_tx_queue *tx_queue,
880 struct efx_tx_buffer *buffer, unsigned int len)
Ben Hutchingsb9b39b62008-05-07 12:51:12 +0100881{
Ben Hutchingsf7251a92012-05-17 18:40:54 +0100882 u8 *result;
Ben Hutchingsb9b39b62008-05-07 12:51:12 +0100883
Ben Hutchingsf7251a92012-05-17 18:40:54 +0100884 EFX_BUG_ON_PARANOID(buffer->len);
885 EFX_BUG_ON_PARANOID(buffer->flags);
886 EFX_BUG_ON_PARANOID(buffer->unmap_len);
887
Ben Hutchings0bdadad2014-02-12 18:58:57 +0000888 if (likely(len <= TSOH_STD_SIZE - NET_IP_ALIGN)) {
Ben Hutchingsf7251a92012-05-17 18:40:54 +0100889 unsigned index =
890 (tx_queue->insert_count & tx_queue->ptr_mask) / 2;
891 struct efx_buffer *page_buf =
892 &tx_queue->tsoh_page[index / TSOH_PER_PAGE];
893 unsigned offset =
Ben Hutchings0bdadad2014-02-12 18:58:57 +0000894 TSOH_STD_SIZE * (index % TSOH_PER_PAGE) + NET_IP_ALIGN;
Ben Hutchingsf7251a92012-05-17 18:40:54 +0100895
896 if (unlikely(!page_buf->addr) &&
Ben Hutchings0d19a542012-09-18 21:59:52 +0100897 efx_nic_alloc_buffer(tx_queue->efx, page_buf, PAGE_SIZE,
898 GFP_ATOMIC))
Ben Hutchingsf7251a92012-05-17 18:40:54 +0100899 return NULL;
900
901 result = (u8 *)page_buf->addr + offset;
902 buffer->dma_addr = page_buf->dma_addr + offset;
903 buffer->flags = EFX_TX_BUF_CONT;
904 } else {
905 tx_queue->tso_long_headers++;
906
Ben Hutchings0bdadad2014-02-12 18:58:57 +0000907 buffer->heap_buf = kmalloc(NET_IP_ALIGN + len, GFP_ATOMIC);
Ben Hutchingsf7251a92012-05-17 18:40:54 +0100908 if (unlikely(!buffer->heap_buf))
909 return NULL;
Ben Hutchings0bdadad2014-02-12 18:58:57 +0000910 result = (u8 *)buffer->heap_buf + NET_IP_ALIGN;
Ben Hutchingsf7251a92012-05-17 18:40:54 +0100911 buffer->flags = EFX_TX_BUF_CONT | EFX_TX_BUF_HEAP;
Ben Hutchingsb9b39b62008-05-07 12:51:12 +0100912 }
913
Ben Hutchingsf7251a92012-05-17 18:40:54 +0100914 buffer->len = len;
Ben Hutchingsb9b39b62008-05-07 12:51:12 +0100915
Ben Hutchingsf7251a92012-05-17 18:40:54 +0100916 return result;
Ben Hutchingsb9b39b62008-05-07 12:51:12 +0100917}
918
919/**
920 * efx_tx_queue_insert - push descriptors onto the TX queue
921 * @tx_queue: Efx TX queue
922 * @dma_addr: DMA address of fragment
923 * @len: Length of fragment
Ben Hutchingsecbd95c2008-09-01 12:46:40 +0100924 * @final_buffer: The final buffer inserted into the queue
Ben Hutchingsb9b39b62008-05-07 12:51:12 +0100925 *
Ben Hutchings14bf7182012-05-22 01:27:58 +0100926 * Push descriptors onto the TX queue.
Ben Hutchingsb9b39b62008-05-07 12:51:12 +0100927 */
Ben Hutchings14bf7182012-05-22 01:27:58 +0100928static void efx_tx_queue_insert(struct efx_tx_queue *tx_queue,
929 dma_addr_t dma_addr, unsigned len,
930 struct efx_tx_buffer **final_buffer)
Ben Hutchingsb9b39b62008-05-07 12:51:12 +0100931{
932 struct efx_tx_buffer *buffer;
933 struct efx_nic *efx = tx_queue->efx;
Ben Hutchings0fe55652013-06-28 21:47:15 +0100934 unsigned dma_len;
Ben Hutchingsb9b39b62008-05-07 12:51:12 +0100935
936 EFX_BUG_ON_PARANOID(len <= 0);
937
Ben Hutchingsb9b39b62008-05-07 12:51:12 +0100938 while (1) {
Ben Hutchings0fe55652013-06-28 21:47:15 +0100939 buffer = efx_tx_queue_get_insert_buffer(tx_queue);
Ben Hutchingsb9b39b62008-05-07 12:51:12 +0100940 ++tx_queue->insert_count;
941
942 EFX_BUG_ON_PARANOID(tx_queue->insert_count -
Steve Hodgsonecc910f2010-09-10 06:42:22 +0000943 tx_queue->read_count >=
944 efx->txq_entries);
Ben Hutchingsb9b39b62008-05-07 12:51:12 +0100945
Ben Hutchingsb9b39b62008-05-07 12:51:12 +0100946 buffer->dma_addr = dma_addr;
947
Ben Hutchings63f19882009-10-23 08:31:20 +0000948 dma_len = efx_max_tx_len(efx, dma_addr);
Ben Hutchingsb9b39b62008-05-07 12:51:12 +0100949
950 /* If there is enough space to send then do so */
951 if (dma_len >= len)
952 break;
953
Ben Hutchings7668ff92012-05-17 20:52:20 +0100954 buffer->len = dma_len;
955 buffer->flags = EFX_TX_BUF_CONT;
Ben Hutchingsb9b39b62008-05-07 12:51:12 +0100956 dma_addr += dma_len;
957 len -= dma_len;
958 }
959
960 EFX_BUG_ON_PARANOID(!len);
961 buffer->len = len;
Ben Hutchingsecbd95c2008-09-01 12:46:40 +0100962 *final_buffer = buffer;
Ben Hutchingsb9b39b62008-05-07 12:51:12 +0100963}
964
965
966/*
967 * Put a TSO header into the TX queue.
968 *
969 * This is special-cased because we know that it is small enough to fit in
970 * a single fragment, and we know it doesn't cross a page boundary. It
971 * also allows us to not worry about end-of-packet etc.
972 */
Ben Hutchingsf7251a92012-05-17 18:40:54 +0100973static int efx_tso_put_header(struct efx_tx_queue *tx_queue,
974 struct efx_tx_buffer *buffer, u8 *header)
Ben Hutchingsb9b39b62008-05-07 12:51:12 +0100975{
Ben Hutchingsf7251a92012-05-17 18:40:54 +0100976 if (unlikely(buffer->flags & EFX_TX_BUF_HEAP)) {
977 buffer->dma_addr = dma_map_single(&tx_queue->efx->pci_dev->dev,
978 header, buffer->len,
979 DMA_TO_DEVICE);
980 if (unlikely(dma_mapping_error(&tx_queue->efx->pci_dev->dev,
981 buffer->dma_addr))) {
982 kfree(buffer->heap_buf);
983 buffer->len = 0;
984 buffer->flags = 0;
985 return -ENOMEM;
986 }
987 buffer->unmap_len = buffer->len;
Alexandre Rames2acdb922013-10-31 12:42:32 +0000988 buffer->dma_offset = 0;
Ben Hutchingsf7251a92012-05-17 18:40:54 +0100989 buffer->flags |= EFX_TX_BUF_MAP_SINGLE;
990 }
Ben Hutchingsb9b39b62008-05-07 12:51:12 +0100991
992 ++tx_queue->insert_count;
Ben Hutchingsf7251a92012-05-17 18:40:54 +0100993 return 0;
Ben Hutchingsb9b39b62008-05-07 12:51:12 +0100994}
995
996
Ben Hutchingsf7251a92012-05-17 18:40:54 +0100997/* Remove buffers put into a tx_queue. None of the buffers may have
998 * an skb attached.
999 */
Ben Hutchingsb9b39b62008-05-07 12:51:12 +01001000static void efx_enqueue_unwind(struct efx_tx_queue *tx_queue)
1001{
1002 struct efx_tx_buffer *buffer;
1003
1004 /* Work backwards until we hit the original insert pointer value */
1005 while (tx_queue->insert_count != tx_queue->write_count) {
1006 --tx_queue->insert_count;
Ben Hutchings0fe55652013-06-28 21:47:15 +01001007 buffer = __efx_tx_queue_get_insert_buffer(tx_queue);
Ben Hutchingsf7251a92012-05-17 18:40:54 +01001008 efx_dequeue_buffer(tx_queue, buffer, NULL, NULL);
Ben Hutchingsb9b39b62008-05-07 12:51:12 +01001009 }
1010}
1011
1012
1013/* Parse the SKB header and initialise state. */
Ben Hutchingsc78c39e2013-03-08 20:03:17 +00001014static int tso_start(struct tso_state *st, struct efx_nic *efx,
1015 const struct sk_buff *skb)
Ben Hutchingsb9b39b62008-05-07 12:51:12 +01001016{
Ben Hutchings93413f52014-02-12 18:59:41 +00001017 bool use_opt_desc = efx_nic_rev(efx) >= EFX_REV_HUNT_A0;
Ben Hutchingsdfa50be2013-03-08 21:20:09 +00001018 struct device *dma_dev = &efx->pci_dev->dev;
Ben Hutchingsc78c39e2013-03-08 20:03:17 +00001019 unsigned int header_len, in_len;
Ben Hutchingsdfa50be2013-03-08 21:20:09 +00001020 dma_addr_t dma_addr;
Ben Hutchingsc78c39e2013-03-08 20:03:17 +00001021
Ben Hutchings97142842012-06-22 02:44:01 +01001022 st->ip_off = skb_network_header(skb) - skb->data;
1023 st->tcp_off = skb_transport_header(skb) - skb->data;
Ben Hutchingsc78c39e2013-03-08 20:03:17 +00001024 header_len = st->tcp_off + (tcp_hdr(skb)->doff << 2u);
1025 in_len = skb_headlen(skb) - header_len;
1026 st->header_len = header_len;
1027 st->in_len = in_len;
Ben Hutchings53cb13c2012-06-19 20:03:41 +01001028 if (st->protocol == htons(ETH_P_IP)) {
Ben Hutchings97142842012-06-22 02:44:01 +01001029 st->ip_base_len = st->header_len - st->ip_off;
Ben Hutchings738a8f42009-11-29 15:16:05 +00001030 st->ipv4_id = ntohs(ip_hdr(skb)->id);
Ben Hutchings53cb13c2012-06-19 20:03:41 +01001031 } else {
Ben Hutchings97142842012-06-22 02:44:01 +01001032 st->ip_base_len = st->header_len - st->tcp_off;
Ben Hutchings738a8f42009-11-29 15:16:05 +00001033 st->ipv4_id = 0;
Ben Hutchings53cb13c2012-06-19 20:03:41 +01001034 }
Ben Hutchingsb9b39b62008-05-07 12:51:12 +01001035 st->seqnum = ntohl(tcp_hdr(skb)->seq);
1036
1037 EFX_BUG_ON_PARANOID(tcp_hdr(skb)->urg);
1038 EFX_BUG_ON_PARANOID(tcp_hdr(skb)->syn);
1039 EFX_BUG_ON_PARANOID(tcp_hdr(skb)->rst);
1040
Ben Hutchingsc78c39e2013-03-08 20:03:17 +00001041 st->out_len = skb->len - header_len;
1042
Ben Hutchings93413f52014-02-12 18:59:41 +00001043 if (!use_opt_desc) {
Ben Hutchingsdfa50be2013-03-08 21:20:09 +00001044 st->header_unmap_len = 0;
1045
1046 if (likely(in_len == 0)) {
1047 st->dma_flags = 0;
1048 st->unmap_len = 0;
1049 return 0;
1050 }
1051
1052 dma_addr = dma_map_single(dma_dev, skb->data + header_len,
1053 in_len, DMA_TO_DEVICE);
1054 st->dma_flags = EFX_TX_BUF_MAP_SINGLE;
1055 st->dma_addr = dma_addr;
1056 st->unmap_addr = dma_addr;
1057 st->unmap_len = in_len;
1058 } else {
1059 dma_addr = dma_map_single(dma_dev, skb->data,
1060 skb_headlen(skb), DMA_TO_DEVICE);
1061 st->header_dma_addr = dma_addr;
1062 st->header_unmap_len = skb_headlen(skb);
Ben Hutchingsc78c39e2013-03-08 20:03:17 +00001063 st->dma_flags = 0;
Ben Hutchingsdfa50be2013-03-08 21:20:09 +00001064 st->dma_addr = dma_addr + header_len;
1065 st->unmap_len = 0;
Ben Hutchingsc78c39e2013-03-08 20:03:17 +00001066 }
1067
Ben Hutchingsdfa50be2013-03-08 21:20:09 +00001068 return unlikely(dma_mapping_error(dma_dev, dma_addr)) ? -ENOMEM : 0;
Ben Hutchingsb9b39b62008-05-07 12:51:12 +01001069}
1070
Ben Hutchings4d566062008-09-01 12:47:12 +01001071static int tso_get_fragment(struct tso_state *st, struct efx_nic *efx,
1072 skb_frag_t *frag)
Ben Hutchingsb9b39b62008-05-07 12:51:12 +01001073{
Ian Campbell4a22c4c2011-09-21 21:53:16 +00001074 st->unmap_addr = skb_frag_dma_map(&efx->pci_dev->dev, frag, 0,
Eric Dumazet9e903e02011-10-18 21:00:24 +00001075 skb_frag_size(frag), DMA_TO_DEVICE);
Ian Campbell5d6bcdf2011-10-06 11:10:48 +01001076 if (likely(!dma_mapping_error(&efx->pci_dev->dev, st->unmap_addr))) {
Ben Hutchings7668ff92012-05-17 20:52:20 +01001077 st->dma_flags = 0;
Eric Dumazet9e903e02011-10-18 21:00:24 +00001078 st->unmap_len = skb_frag_size(frag);
1079 st->in_len = skb_frag_size(frag);
Ben Hutchings23d9e602008-09-01 12:47:02 +01001080 st->dma_addr = st->unmap_addr;
Ben Hutchingsecbd95c2008-09-01 12:46:40 +01001081 return 0;
1082 }
1083 return -ENOMEM;
1084}
1085
Ben Hutchingsb9b39b62008-05-07 12:51:12 +01001086
1087/**
1088 * tso_fill_packet_with_fragment - form descriptors for the current fragment
1089 * @tx_queue: Efx TX queue
1090 * @skb: Socket buffer
1091 * @st: TSO state
1092 *
1093 * Form descriptors for the current fragment, until we reach the end
Ben Hutchings14bf7182012-05-22 01:27:58 +01001094 * of fragment or end-of-packet.
Ben Hutchingsb9b39b62008-05-07 12:51:12 +01001095 */
Ben Hutchings14bf7182012-05-22 01:27:58 +01001096static void tso_fill_packet_with_fragment(struct efx_tx_queue *tx_queue,
1097 const struct sk_buff *skb,
1098 struct tso_state *st)
Ben Hutchingsb9b39b62008-05-07 12:51:12 +01001099{
Ben Hutchingsecbd95c2008-09-01 12:46:40 +01001100 struct efx_tx_buffer *buffer;
Ben Hutchings14bf7182012-05-22 01:27:58 +01001101 int n;
Ben Hutchingsb9b39b62008-05-07 12:51:12 +01001102
Ben Hutchings23d9e602008-09-01 12:47:02 +01001103 if (st->in_len == 0)
Ben Hutchings14bf7182012-05-22 01:27:58 +01001104 return;
Ben Hutchingsb9b39b62008-05-07 12:51:12 +01001105 if (st->packet_space == 0)
Ben Hutchings14bf7182012-05-22 01:27:58 +01001106 return;
Ben Hutchingsb9b39b62008-05-07 12:51:12 +01001107
Ben Hutchings23d9e602008-09-01 12:47:02 +01001108 EFX_BUG_ON_PARANOID(st->in_len <= 0);
Ben Hutchingsb9b39b62008-05-07 12:51:12 +01001109 EFX_BUG_ON_PARANOID(st->packet_space <= 0);
1110
Ben Hutchings23d9e602008-09-01 12:47:02 +01001111 n = min(st->in_len, st->packet_space);
Ben Hutchingsb9b39b62008-05-07 12:51:12 +01001112
1113 st->packet_space -= n;
Ben Hutchings23d9e602008-09-01 12:47:02 +01001114 st->out_len -= n;
1115 st->in_len -= n;
Ben Hutchingsb9b39b62008-05-07 12:51:12 +01001116
Ben Hutchings14bf7182012-05-22 01:27:58 +01001117 efx_tx_queue_insert(tx_queue, st->dma_addr, n, &buffer);
Ben Hutchingsecbd95c2008-09-01 12:46:40 +01001118
Ben Hutchings14bf7182012-05-22 01:27:58 +01001119 if (st->out_len == 0) {
1120 /* Transfer ownership of the skb */
1121 buffer->skb = skb;
1122 buffer->flags = EFX_TX_BUF_SKB;
1123 } else if (st->packet_space != 0) {
1124 buffer->flags = EFX_TX_BUF_CONT;
1125 }
1126
1127 if (st->in_len == 0) {
1128 /* Transfer ownership of the DMA mapping */
1129 buffer->unmap_len = st->unmap_len;
Alexandre Rames2acdb922013-10-31 12:42:32 +00001130 buffer->dma_offset = buffer->unmap_len - buffer->len;
Ben Hutchings14bf7182012-05-22 01:27:58 +01001131 buffer->flags |= st->dma_flags;
1132 st->unmap_len = 0;
Ben Hutchingsecbd95c2008-09-01 12:46:40 +01001133 }
Ben Hutchingsb9b39b62008-05-07 12:51:12 +01001134
Ben Hutchings23d9e602008-09-01 12:47:02 +01001135 st->dma_addr += n;
Ben Hutchingsb9b39b62008-05-07 12:51:12 +01001136}
1137
1138
1139/**
1140 * tso_start_new_packet - generate a new header and prepare for the new packet
1141 * @tx_queue: Efx TX queue
1142 * @skb: Socket buffer
1143 * @st: TSO state
1144 *
1145 * Generate a new header and prepare for the new packet. Return 0 on
Ben Hutchingsf7251a92012-05-17 18:40:54 +01001146 * success, or -%ENOMEM if header allocation fails.
Ben Hutchingsb9b39b62008-05-07 12:51:12 +01001147 */
Ben Hutchings4d566062008-09-01 12:47:12 +01001148static int tso_start_new_packet(struct efx_tx_queue *tx_queue,
1149 const struct sk_buff *skb,
1150 struct tso_state *st)
Ben Hutchingsb9b39b62008-05-07 12:51:12 +01001151{
Ben Hutchingsf7251a92012-05-17 18:40:54 +01001152 struct efx_tx_buffer *buffer =
Ben Hutchings0fe55652013-06-28 21:47:15 +01001153 efx_tx_queue_get_insert_buffer(tx_queue);
Ben Hutchingsdfa50be2013-03-08 21:20:09 +00001154 bool is_last = st->out_len <= skb_shinfo(skb)->gso_size;
1155 u8 tcp_flags_clear;
Ben Hutchingsb9b39b62008-05-07 12:51:12 +01001156
Ben Hutchingsdfa50be2013-03-08 21:20:09 +00001157 if (!is_last) {
Ben Hutchings53cb13c2012-06-19 20:03:41 +01001158 st->packet_space = skb_shinfo(skb)->gso_size;
Ben Hutchingsdfa50be2013-03-08 21:20:09 +00001159 tcp_flags_clear = 0x09; /* mask out FIN and PSH */
Ben Hutchingsb9b39b62008-05-07 12:51:12 +01001160 } else {
Ben Hutchings53cb13c2012-06-19 20:03:41 +01001161 st->packet_space = st->out_len;
Ben Hutchingsdfa50be2013-03-08 21:20:09 +00001162 tcp_flags_clear = 0x00;
Ben Hutchingsb9b39b62008-05-07 12:51:12 +01001163 }
Ben Hutchingsb9b39b62008-05-07 12:51:12 +01001164
Ben Hutchingsdfa50be2013-03-08 21:20:09 +00001165 if (!st->header_unmap_len) {
1166 /* Allocate and insert a DMA-mapped header buffer. */
1167 struct tcphdr *tsoh_th;
1168 unsigned ip_length;
1169 u8 *header;
1170 int rc;
Ben Hutchings738a8f42009-11-29 15:16:05 +00001171
Ben Hutchingsdfa50be2013-03-08 21:20:09 +00001172 header = efx_tsoh_get_buffer(tx_queue, buffer, st->header_len);
1173 if (!header)
1174 return -ENOMEM;
Ben Hutchings738a8f42009-11-29 15:16:05 +00001175
Ben Hutchingsdfa50be2013-03-08 21:20:09 +00001176 tsoh_th = (struct tcphdr *)(header + st->tcp_off);
1177
1178 /* Copy and update the headers. */
1179 memcpy(header, skb->data, st->header_len);
1180
1181 tsoh_th->seq = htonl(st->seqnum);
1182 ((u8 *)tsoh_th)[13] &= ~tcp_flags_clear;
1183
1184 ip_length = st->ip_base_len + st->packet_space;
1185
1186 if (st->protocol == htons(ETH_P_IP)) {
1187 struct iphdr *tsoh_iph =
1188 (struct iphdr *)(header + st->ip_off);
1189
1190 tsoh_iph->tot_len = htons(ip_length);
1191 tsoh_iph->id = htons(st->ipv4_id);
1192 } else {
1193 struct ipv6hdr *tsoh_iph =
1194 (struct ipv6hdr *)(header + st->ip_off);
1195
1196 tsoh_iph->payload_len = htons(ip_length);
1197 }
1198
1199 rc = efx_tso_put_header(tx_queue, buffer, header);
1200 if (unlikely(rc))
1201 return rc;
Ben Hutchings738a8f42009-11-29 15:16:05 +00001202 } else {
Ben Hutchingsdfa50be2013-03-08 21:20:09 +00001203 /* Send the original headers with a TSO option descriptor
1204 * in front
1205 */
1206 u8 tcp_flags = ((u8 *)tcp_hdr(skb))[13] & ~tcp_flags_clear;
Ben Hutchings738a8f42009-11-29 15:16:05 +00001207
Ben Hutchingsdfa50be2013-03-08 21:20:09 +00001208 buffer->flags = EFX_TX_BUF_OPTION;
1209 buffer->len = 0;
1210 buffer->unmap_len = 0;
1211 EFX_POPULATE_QWORD_5(buffer->option,
1212 ESF_DZ_TX_DESC_IS_OPT, 1,
1213 ESF_DZ_TX_OPTION_TYPE,
1214 ESE_DZ_TX_OPTION_DESC_TSO,
1215 ESF_DZ_TX_TSO_TCP_FLAGS, tcp_flags,
1216 ESF_DZ_TX_TSO_IP_ID, st->ipv4_id,
1217 ESF_DZ_TX_TSO_TCP_SEQNO, st->seqnum);
1218 ++tx_queue->insert_count;
1219
1220 /* We mapped the headers in tso_start(). Unmap them
1221 * when the last segment is completed.
1222 */
Ben Hutchings0fe55652013-06-28 21:47:15 +01001223 buffer = efx_tx_queue_get_insert_buffer(tx_queue);
Ben Hutchingsdfa50be2013-03-08 21:20:09 +00001224 buffer->dma_addr = st->header_dma_addr;
1225 buffer->len = st->header_len;
1226 if (is_last) {
1227 buffer->flags = EFX_TX_BUF_CONT | EFX_TX_BUF_MAP_SINGLE;
1228 buffer->unmap_len = st->header_unmap_len;
Alexandre Rames2acdb922013-10-31 12:42:32 +00001229 buffer->dma_offset = 0;
Ben Hutchingsdfa50be2013-03-08 21:20:09 +00001230 /* Ensure we only unmap them once in case of a
1231 * later DMA mapping error and rollback
1232 */
1233 st->header_unmap_len = 0;
1234 } else {
1235 buffer->flags = EFX_TX_BUF_CONT;
1236 buffer->unmap_len = 0;
1237 }
1238 ++tx_queue->insert_count;
Ben Hutchings738a8f42009-11-29 15:16:05 +00001239 }
Ben Hutchingsb9b39b62008-05-07 12:51:12 +01001240
Ben Hutchingsdfa50be2013-03-08 21:20:09 +00001241 st->seqnum += skb_shinfo(skb)->gso_size;
1242
1243 /* Linux leaves suitable gaps in the IP ID space for us to fill. */
1244 ++st->ipv4_id;
Ben Hutchingsf7251a92012-05-17 18:40:54 +01001245
Ben Hutchingsb9b39b62008-05-07 12:51:12 +01001246 ++tx_queue->tso_packets;
1247
Ben Hutchingsb9b39b62008-05-07 12:51:12 +01001248 return 0;
1249}
1250
1251
1252/**
1253 * efx_enqueue_skb_tso - segment and transmit a TSO socket buffer
1254 * @tx_queue: Efx TX queue
1255 * @skb: Socket buffer
1256 *
1257 * Context: You must hold netif_tx_lock() to call this function.
1258 *
1259 * Add socket buffer @skb to @tx_queue, doing TSO. In all cases @skb is
1260 * consumed. Return
Ben Hutchings14bf7182012-05-22 01:27:58 +01001261 * %NETDEV_TX_OK.
Ben Hutchingsb9b39b62008-05-07 12:51:12 +01001262 */
1263static int efx_enqueue_skb_tso(struct efx_tx_queue *tx_queue,
Ben Hutchings740847d2008-09-01 12:48:23 +01001264 struct sk_buff *skb)
Ben Hutchingsb9b39b62008-05-07 12:51:12 +01001265{
Ben Hutchingsecbd95c2008-09-01 12:46:40 +01001266 struct efx_nic *efx = tx_queue->efx;
Ben Hutchings14bf7182012-05-22 01:27:58 +01001267 int frag_i, rc;
Ben Hutchingsb9b39b62008-05-07 12:51:12 +01001268 struct tso_state state;
Ben Hutchingsb9b39b62008-05-07 12:51:12 +01001269
Ben Hutchings738a8f42009-11-29 15:16:05 +00001270 /* Find the packet protocol and sanity-check it */
1271 state.protocol = efx_tso_check_protocol(skb);
Ben Hutchingsb9b39b62008-05-07 12:51:12 +01001272
1273 EFX_BUG_ON_PARANOID(tx_queue->write_count != tx_queue->insert_count);
1274
Ben Hutchingsc78c39e2013-03-08 20:03:17 +00001275 rc = tso_start(&state, efx, skb);
1276 if (rc)
1277 goto mem_err;
Ben Hutchingsb9b39b62008-05-07 12:51:12 +01001278
Ben Hutchingsc78c39e2013-03-08 20:03:17 +00001279 if (likely(state.in_len == 0)) {
Ben Hutchingsb9b39b62008-05-07 12:51:12 +01001280 /* Grab the first payload fragment. */
1281 EFX_BUG_ON_PARANOID(skb_shinfo(skb)->nr_frags < 1);
1282 frag_i = 0;
Ben Hutchingsecbd95c2008-09-01 12:46:40 +01001283 rc = tso_get_fragment(&state, efx,
1284 skb_shinfo(skb)->frags + frag_i);
Ben Hutchingsb9b39b62008-05-07 12:51:12 +01001285 if (rc)
1286 goto mem_err;
1287 } else {
Ben Hutchingsc78c39e2013-03-08 20:03:17 +00001288 /* Payload starts in the header area. */
Ben Hutchingsb9b39b62008-05-07 12:51:12 +01001289 frag_i = -1;
1290 }
1291
1292 if (tso_start_new_packet(tx_queue, skb, &state) < 0)
1293 goto mem_err;
1294
1295 while (1) {
Ben Hutchings14bf7182012-05-22 01:27:58 +01001296 tso_fill_packet_with_fragment(tx_queue, skb, &state);
Ben Hutchingsb9b39b62008-05-07 12:51:12 +01001297
1298 /* Move onto the next fragment? */
Ben Hutchings23d9e602008-09-01 12:47:02 +01001299 if (state.in_len == 0) {
Ben Hutchingsb9b39b62008-05-07 12:51:12 +01001300 if (++frag_i >= skb_shinfo(skb)->nr_frags)
1301 /* End of payload reached. */
1302 break;
Ben Hutchingsecbd95c2008-09-01 12:46:40 +01001303 rc = tso_get_fragment(&state, efx,
1304 skb_shinfo(skb)->frags + frag_i);
Ben Hutchingsb9b39b62008-05-07 12:51:12 +01001305 if (rc)
1306 goto mem_err;
1307 }
1308
1309 /* Start at new packet? */
1310 if (state.packet_space == 0 &&
1311 tso_start_new_packet(tx_queue, skb, &state) < 0)
1312 goto mem_err;
1313 }
1314
Eric Dumazet449fa022011-11-30 17:12:27 -05001315 netdev_tx_sent_queue(tx_queue->core_txq, skb->len);
1316
Ben Hutchingsb9b39b62008-05-07 12:51:12 +01001317 /* Pass off to hardware */
Ben Hutchings152b6a62009-11-29 03:43:56 +00001318 efx_nic_push_buffers(tx_queue);
Ben Hutchingsb9b39b62008-05-07 12:51:12 +01001319
Ben Hutchings14bf7182012-05-22 01:27:58 +01001320 efx_tx_maybe_stop_queue(tx_queue);
1321
Ben Hutchingsb9b39b62008-05-07 12:51:12 +01001322 tx_queue->tso_bursts++;
1323 return NETDEV_TX_OK;
1324
1325 mem_err:
Ben Hutchings62776d02010-06-23 11:30:07 +00001326 netif_err(efx, tx_err, efx->net_dev,
Ben Hutchings0e33d872012-05-17 17:46:55 +01001327 "Out of memory for TSO headers, or DMA mapping error\n");
Ben Hutchings9bc183d2009-11-23 16:06:47 +00001328 dev_kfree_skb_any(skb);
Ben Hutchingsb9b39b62008-05-07 12:51:12 +01001329
Ben Hutchings5988b632008-09-01 12:46:36 +01001330 /* Free the DMA mapping we were in the process of writing out */
Ben Hutchings23d9e602008-09-01 12:47:02 +01001331 if (state.unmap_len) {
Ben Hutchings7668ff92012-05-17 20:52:20 +01001332 if (state.dma_flags & EFX_TX_BUF_MAP_SINGLE)
Ben Hutchings0e33d872012-05-17 17:46:55 +01001333 dma_unmap_single(&efx->pci_dev->dev, state.unmap_addr,
1334 state.unmap_len, DMA_TO_DEVICE);
Ben Hutchingsecbd95c2008-09-01 12:46:40 +01001335 else
Ben Hutchings0e33d872012-05-17 17:46:55 +01001336 dma_unmap_page(&efx->pci_dev->dev, state.unmap_addr,
1337 state.unmap_len, DMA_TO_DEVICE);
Ben Hutchingsecbd95c2008-09-01 12:46:40 +01001338 }
Ben Hutchings5988b632008-09-01 12:46:36 +01001339
Ben Hutchingsdfa50be2013-03-08 21:20:09 +00001340 /* Free the header DMA mapping, if using option descriptors */
1341 if (state.header_unmap_len)
1342 dma_unmap_single(&efx->pci_dev->dev, state.header_dma_addr,
1343 state.header_unmap_len, DMA_TO_DEVICE);
1344
Ben Hutchingsb9b39b62008-05-07 12:51:12 +01001345 efx_enqueue_unwind(tx_queue);
Ben Hutchings14bf7182012-05-22 01:27:58 +01001346 return NETDEV_TX_OK;
Ben Hutchingsb9b39b62008-05-07 12:51:12 +01001347}