/****************************************************************************
 * Driver for Solarflare network controllers and boards
 * Copyright 2005-2006 Fen Systems Ltd.
 * Copyright 2005-2013 Solarflare Communications Inc.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 as published
 * by the Free Software Foundation, incorporated herein by reference.
 */

#include <linux/pci.h>
#include <linux/tcp.h>
#include <linux/ip.h>
#include <linux/in.h>
#include <linux/ipv6.h>
#include <linux/slab.h>
#include <net/ipv6.h>
#include <linux/if_ether.h>
#include <linux/highmem.h>
#include <linux/cache.h>
#include "net_driver.h"
#include "efx.h"
#include "io.h"
#include "nic.h"
#include "workarounds.h"
#include "ef10_regs.h"

#ifdef EFX_USE_PIO

#define EFX_PIOBUF_SIZE_MAX ER_DZ_TX_PIOBUF_SIZE
#define EFX_PIOBUF_SIZE_DEF ALIGN(256, L1_CACHE_BYTES)
unsigned int efx_piobuf_size __read_mostly = EFX_PIOBUF_SIZE_DEF;

#endif /* EFX_USE_PIO */

static inline unsigned int
efx_tx_queue_get_insert_index(const struct efx_tx_queue *tx_queue)
{
	return tx_queue->insert_count & tx_queue->ptr_mask;
}

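/* Note on efx_tx_queue_get_insert_index() (illustrative numbers, not
 * taken from the driver): the ring always has a power-of-two size, so
 * ptr_mask == entries - 1 and the free-running insert_count reduces to
 * a ring index with a single AND.  With a 1024-entry ring, ptr_mask ==
 * 0x3ff, and insert_count == 1025 maps to slot 1025 & 0x3ff == 1.
 */
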
static inline struct efx_tx_buffer *
__efx_tx_queue_get_insert_buffer(const struct efx_tx_queue *tx_queue)
{
	return &tx_queue->buffer[efx_tx_queue_get_insert_index(tx_queue)];
}

static inline struct efx_tx_buffer *
efx_tx_queue_get_insert_buffer(const struct efx_tx_queue *tx_queue)
{
	struct efx_tx_buffer *buffer =
		__efx_tx_queue_get_insert_buffer(tx_queue);

	EFX_BUG_ON_PARANOID(buffer->len);
	EFX_BUG_ON_PARANOID(buffer->flags);
	EFX_BUG_ON_PARANOID(buffer->unmap_len);

	return buffer;
}

static void efx_dequeue_buffer(struct efx_tx_queue *tx_queue,
			       struct efx_tx_buffer *buffer,
			       unsigned int *pkts_compl,
			       unsigned int *bytes_compl)
{
	if (buffer->unmap_len) {
		struct device *dma_dev = &tx_queue->efx->pci_dev->dev;
		dma_addr_t unmap_addr = (buffer->dma_addr + buffer->len -
					 buffer->unmap_len);
		if (buffer->flags & EFX_TX_BUF_MAP_SINGLE)
			dma_unmap_single(dma_dev, unmap_addr, buffer->unmap_len,
					 DMA_TO_DEVICE);
		else
			dma_unmap_page(dma_dev, unmap_addr, buffer->unmap_len,
				       DMA_TO_DEVICE);
		buffer->unmap_len = 0;
	}

	if (buffer->flags & EFX_TX_BUF_SKB) {
		(*pkts_compl)++;
		(*bytes_compl) += buffer->skb->len;
		dev_kfree_skb_any((struct sk_buff *) buffer->skb);
		netif_vdbg(tx_queue->efx, tx_done, tx_queue->efx->net_dev,
			   "TX queue %d transmission id %x complete\n",
			   tx_queue->queue, tx_queue->read_count);
	} else if (buffer->flags & EFX_TX_BUF_HEAP) {
		kfree(buffer->heap_buf);
	}

	buffer->len = 0;
	buffer->flags = 0;
}

static int efx_enqueue_skb_tso(struct efx_tx_queue *tx_queue,
			       struct sk_buff *skb);

static inline unsigned
efx_max_tx_len(struct efx_nic *efx, dma_addr_t dma_addr)
{
	/* Depending on the NIC revision, we can use descriptor
	 * lengths up to 8K or 8K-1.  However, since PCI Express
	 * devices must split read requests at 4K boundaries, there is
	 * little benefit from using descriptors that cross those
	 * boundaries and we keep things simple by not doing so.
	 */
	unsigned len = (~dma_addr & (EFX_PAGE_SIZE - 1)) + 1;

	/* Work around hardware bug for unaligned buffers. */
	if (EFX_WORKAROUND_5391(efx) && (dma_addr & 0xf))
		len = min_t(unsigned, len, 512 - (dma_addr & 0xf));

	return len;
}

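/* Worked example for efx_max_tx_len() above (illustrative numbers,
 * assuming EFX_PAGE_SIZE is 4096): for a dma_addr ending in 0x1f00,
 * ~dma_addr & 0xfff == 0x0ff, so len == 0x100, exactly the bytes left
 * before the next 4K boundary.  If workaround 5391 applies and the
 * address ends in 0x1f04 instead, len == min(0xfc, 512 - 4) == 0xfc.
 */
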
unsigned int efx_tx_max_skb_descs(struct efx_nic *efx)
{
	/* Header and payload descriptor for each output segment, plus
	 * one for every input fragment boundary within a segment
	 */
	unsigned int max_descs = EFX_TSO_MAX_SEGS * 2 + MAX_SKB_FRAGS;

	/* Possibly one more per segment for the alignment workaround,
	 * or for option descriptors
	 */
	if (EFX_WORKAROUND_5391(efx) || efx_nic_rev(efx) >= EFX_REV_HUNT_A0)
		max_descs += EFX_TSO_MAX_SEGS;

	/* Possibly more for PCIe page boundaries within input fragments */
	if (PAGE_SIZE > EFX_PAGE_SIZE)
		max_descs += max_t(unsigned int, MAX_SKB_FRAGS,
				   DIV_ROUND_UP(GSO_MAX_SIZE, EFX_PAGE_SIZE));

	return max_descs;
}

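/* Worked example for efx_tx_max_skb_descs() above (illustrative,
 * assuming EFX_TSO_MAX_SEGS == 100 and MAX_SKB_FRAGS == 17 on a
 * 4K-page system): the baseline is 100 * 2 + 17 == 217 descriptors,
 * rising to 217 + 100 == 317 on EF10 or when workaround 5391 applies.
 */
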
/* Get partner of a TX queue, seen as part of the same net core queue */
static struct efx_tx_queue *efx_tx_queue_partner(struct efx_tx_queue *tx_queue)
{
	if (tx_queue->queue & EFX_TXQ_TYPE_OFFLOAD)
		return tx_queue - EFX_TXQ_TYPE_OFFLOAD;
	else
		return tx_queue + EFX_TXQ_TYPE_OFFLOAD;
}

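/* Worked example for efx_tx_queue_partner() above (assuming
 * EFX_TXQ_TYPE_OFFLOAD == 1): hardware queues come in adjacent pairs
 * backing one net core queue, so queue 5 (offload bit set) has partner
 * 5 - 1 == 4, and queue 4 has partner 4 + 1 == 5.
 */
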
static void efx_tx_maybe_stop_queue(struct efx_tx_queue *txq1)
{
	/* We need to consider both queues that the net core sees as one */
	struct efx_tx_queue *txq2 = efx_tx_queue_partner(txq1);
	struct efx_nic *efx = txq1->efx;
	unsigned int fill_level;

	fill_level = max(txq1->insert_count - txq1->old_read_count,
			 txq2->insert_count - txq2->old_read_count);
	if (likely(fill_level < efx->txq_stop_thresh))
		return;

	/* We used the stale old_read_count above, which gives us a
	 * pessimistic estimate of the fill level (which may even
	 * validly be >= efx->txq_entries).  Now try again using
	 * read_count (more likely to be a cache miss).
	 *
	 * If we read read_count and then conditionally stop the
	 * queue, it is possible for the completion path to race with
	 * us and complete all outstanding descriptors in the middle,
	 * after which there will be no more completions to wake it.
	 * Therefore we stop the queue first, then read read_count
	 * (with a memory barrier to ensure the ordering), then
	 * restart the queue if the fill level turns out to be low
	 * enough.
	 */
	netif_tx_stop_queue(txq1->core_txq);
	smp_mb();
	txq1->old_read_count = ACCESS_ONCE(txq1->read_count);
	txq2->old_read_count = ACCESS_ONCE(txq2->read_count);

	fill_level = max(txq1->insert_count - txq1->old_read_count,
			 txq2->insert_count - txq2->old_read_count);
	EFX_BUG_ON_PARANOID(fill_level >= efx->txq_entries);
	if (likely(fill_level < efx->txq_stop_thresh)) {
		smp_mb();
		if (likely(!efx->loopback_selftest))
			netif_tx_start_queue(txq1->core_txq);
	}
}

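/* Note on the fill-level arithmetic above: insert_count and
 * old_read_count are free-running unsigned counters, so the
 * subtraction is correct even across wrap-around.  For example,
 * insert_count == 5 with old_read_count == 0xfffffffe gives a fill
 * level of 7.
 */
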
/*
 * Add a socket buffer to a TX queue
 *
 * This maps all fragments of a socket buffer for DMA and adds them to
 * the TX queue.  The queue's insert pointer will be incremented by
 * the number of fragments in the socket buffer.
 *
 * If any DMA mapping fails, any mapped fragments will be unmapped,
 * and the queue's insert pointer will be restored to its original value.
 *
 * This function is split out from efx_hard_start_xmit to allow the
 * loopback test to direct packets via specific TX queues.
 *
 * Returns NETDEV_TX_OK.
 * You must hold netif_tx_lock() to call this function.
 */
netdev_tx_t efx_enqueue_skb(struct efx_tx_queue *tx_queue, struct sk_buff *skb)
{
	struct efx_nic *efx = tx_queue->efx;
	struct device *dma_dev = &efx->pci_dev->dev;
	struct efx_tx_buffer *buffer;
	skb_frag_t *fragment;
	unsigned int len, unmap_len = 0;
	dma_addr_t dma_addr, unmap_addr = 0;
	unsigned int dma_len;
	unsigned short dma_flags;
	int i = 0;

	EFX_BUG_ON_PARANOID(tx_queue->write_count != tx_queue->insert_count);

	if (skb_shinfo(skb)->gso_size)
		return efx_enqueue_skb_tso(tx_queue, skb);

	/* Get size of the initial fragment */
	len = skb_headlen(skb);

	/* Pad if necessary */
	if (EFX_WORKAROUND_15592(efx) && skb->len <= 32) {
		EFX_BUG_ON_PARANOID(skb->data_len);
		len = 32 + 1;
		if (skb_pad(skb, len - skb->len))
			return NETDEV_TX_OK;
	}

	/* Map for DMA.  Use dma_map_single rather than dma_map_page
	 * since this is more efficient on machines with sparse
	 * memory.
	 */
	dma_flags = EFX_TX_BUF_MAP_SINGLE;
	dma_addr = dma_map_single(dma_dev, skb->data, len, DMA_TO_DEVICE);

	/* Process all fragments */
	while (1) {
		if (unlikely(dma_mapping_error(dma_dev, dma_addr)))
			goto dma_err;

		/* Store fields for marking in the per-fragment final
		 * descriptor
		 */
		unmap_len = len;
		unmap_addr = dma_addr;

		/* Add to TX queue, splitting across DMA boundaries */
		do {
			buffer = efx_tx_queue_get_insert_buffer(tx_queue);

			dma_len = efx_max_tx_len(efx, dma_addr);
			if (likely(dma_len >= len))
				dma_len = len;

			/* Fill out per descriptor fields */
			buffer->len = dma_len;
			buffer->dma_addr = dma_addr;
			buffer->flags = EFX_TX_BUF_CONT;
			len -= dma_len;
			dma_addr += dma_len;
			++tx_queue->insert_count;
		} while (len);

		/* Transfer ownership of the unmapping to the final buffer */
		buffer->flags = EFX_TX_BUF_CONT | dma_flags;
		buffer->unmap_len = unmap_len;
		unmap_len = 0;

		/* Get address and size of next fragment */
		if (i >= skb_shinfo(skb)->nr_frags)
			break;
		fragment = &skb_shinfo(skb)->frags[i];
		len = skb_frag_size(fragment);
		i++;
		/* Map for DMA */
		dma_flags = 0;
		dma_addr = skb_frag_dma_map(dma_dev, fragment, 0, len,
					    DMA_TO_DEVICE);
	}

	/* Transfer ownership of the skb to the final buffer */
	buffer->skb = skb;
	buffer->flags = EFX_TX_BUF_SKB | dma_flags;

	netdev_tx_sent_queue(tx_queue->core_txq, skb->len);

	/* Pass off to hardware */
	efx_nic_push_buffers(tx_queue);

	efx_tx_maybe_stop_queue(tx_queue);

	return NETDEV_TX_OK;

 dma_err:
	netif_err(efx, tx_err, efx->net_dev,
		  "TX queue %d could not map skb with %d bytes %d "
		  "fragments for DMA\n", tx_queue->queue, skb->len,
		  skb_shinfo(skb)->nr_frags + 1);

	/* Mark the packet as transmitted, and free the SKB ourselves */
	dev_kfree_skb_any(skb);

	/* Work backwards until we hit the original insert pointer value */
	while (tx_queue->insert_count != tx_queue->write_count) {
		unsigned int pkts_compl = 0, bytes_compl = 0;
		--tx_queue->insert_count;
		buffer = __efx_tx_queue_get_insert_buffer(tx_queue);
		efx_dequeue_buffer(tx_queue, buffer, &pkts_compl, &bytes_compl);
	}

	/* Free the fragment we were mid-way through pushing */
	if (unmap_len) {
		if (dma_flags & EFX_TX_BUF_MAP_SINGLE)
			dma_unmap_single(dma_dev, unmap_addr, unmap_len,
					 DMA_TO_DEVICE);
		else
			dma_unmap_page(dma_dev, unmap_addr, unmap_len,
				       DMA_TO_DEVICE);
	}

	return NETDEV_TX_OK;
}

/* Remove packets from the TX queue
 *
 * This removes packets from the TX queue, up to and including the
 * specified index.
 */
static void efx_dequeue_buffers(struct efx_tx_queue *tx_queue,
				unsigned int index,
				unsigned int *pkts_compl,
				unsigned int *bytes_compl)
{
	struct efx_nic *efx = tx_queue->efx;
	unsigned int stop_index, read_ptr;

	stop_index = (index + 1) & tx_queue->ptr_mask;
	read_ptr = tx_queue->read_count & tx_queue->ptr_mask;

	while (read_ptr != stop_index) {
		struct efx_tx_buffer *buffer = &tx_queue->buffer[read_ptr];

		if (!(buffer->flags & EFX_TX_BUF_OPTION) &&
		    unlikely(buffer->len == 0)) {
			netif_err(efx, tx_err, efx->net_dev,
				  "TX queue %d spurious TX completion id %x\n",
				  tx_queue->queue, read_ptr);
			efx_schedule_reset(efx, RESET_TYPE_TX_SKIP);
			return;
		}

		efx_dequeue_buffer(tx_queue, buffer, pkts_compl, bytes_compl);

		++tx_queue->read_count;
		read_ptr = tx_queue->read_count & tx_queue->ptr_mask;
	}
}

/* Initiate a packet transmission.  We use one channel per CPU
 * (sharing when we have more CPUs than channels).  On Falcon, the TX
 * completion events will be directed back to the CPU that transmitted
 * the packet, which should be cache-efficient.
 *
 * Context: non-blocking.
 * Note that returning anything other than NETDEV_TX_OK will cause the
 * OS to free the skb.
 */
netdev_tx_t efx_hard_start_xmit(struct sk_buff *skb,
				struct net_device *net_dev)
{
	struct efx_nic *efx = netdev_priv(net_dev);
	struct efx_tx_queue *tx_queue;
	unsigned index, type;

	EFX_WARN_ON_PARANOID(!netif_device_present(net_dev));

	/* PTP "event" packet */
	if (unlikely(efx_xmit_with_hwtstamp(skb)) &&
	    unlikely(efx_ptp_is_ptp_tx(efx, skb))) {
		return efx_ptp_tx(efx, skb);
	}

	index = skb_get_queue_mapping(skb);
	type = skb->ip_summed == CHECKSUM_PARTIAL ? EFX_TXQ_TYPE_OFFLOAD : 0;
	if (index >= efx->n_tx_channels) {
		index -= efx->n_tx_channels;
		type |= EFX_TXQ_TYPE_HIGHPRI;
	}
	tx_queue = efx_get_tx_queue(efx, index, type);

	return efx_enqueue_skb(tx_queue, skb);
}

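/* Worked example for the queue lookup above (illustrative, with
 * n_tx_channels == 4): a CHECKSUM_PARTIAL skb on core queue 6 gives
 * index 6 - 4 == 2 with type EFX_TXQ_TYPE_OFFLOAD | EFX_TXQ_TYPE_HIGHPRI,
 * i.e. the high-priority checksum-offload queue of channel 2.
 */
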
void efx_init_tx_queue_core_txq(struct efx_tx_queue *tx_queue)
{
	struct efx_nic *efx = tx_queue->efx;

	/* Must be inverse of queue lookup in efx_hard_start_xmit() */
	tx_queue->core_txq =
		netdev_get_tx_queue(efx->net_dev,
				    tx_queue->queue / EFX_TXQ_TYPES +
				    ((tx_queue->queue & EFX_TXQ_TYPE_HIGHPRI) ?
				     efx->n_tx_channels : 0));
}

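/* Worked example for the inverse mapping above (illustrative, assuming
 * EFX_TXQ_TYPES == 4 and n_tx_channels == 4): hardware queue 11 belongs
 * to channel 11 / 4 == 2 and has the HIGHPRI bit set, so its core txq
 * is 2 + 4 == 6, matching the forward lookup in efx_hard_start_xmit().
 */
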
int efx_setup_tc(struct net_device *net_dev, u8 num_tc)
{
	struct efx_nic *efx = netdev_priv(net_dev);
	struct efx_channel *channel;
	struct efx_tx_queue *tx_queue;
	unsigned tc;
	int rc;

	if (efx_nic_rev(efx) < EFX_REV_FALCON_B0 || num_tc > EFX_MAX_TX_TC)
		return -EINVAL;

	if (num_tc == net_dev->num_tc)
		return 0;

	for (tc = 0; tc < num_tc; tc++) {
		net_dev->tc_to_txq[tc].offset = tc * efx->n_tx_channels;
		net_dev->tc_to_txq[tc].count = efx->n_tx_channels;
	}

	if (num_tc > net_dev->num_tc) {
		/* Initialise high-priority queues as necessary */
		efx_for_each_channel(channel, efx) {
			efx_for_each_possible_channel_tx_queue(tx_queue,
							       channel) {
				if (!(tx_queue->queue & EFX_TXQ_TYPE_HIGHPRI))
					continue;
				if (!tx_queue->buffer) {
					rc = efx_probe_tx_queue(tx_queue);
					if (rc)
						return rc;
				}
				if (!tx_queue->initialised)
					efx_init_tx_queue(tx_queue);
				efx_init_tx_queue_core_txq(tx_queue);
			}
		}
	} else {
		/* Reduce number of classes before number of queues */
		net_dev->num_tc = num_tc;
	}

	rc = netif_set_real_num_tx_queues(net_dev,
					  max_t(int, num_tc, 1) *
					  efx->n_tx_channels);
	if (rc)
		return rc;

	/* Do not destroy high-priority queues when they become
	 * unused.  We would have to flush them first, and it is
	 * fairly difficult to flush a subset of TX queues.  Leave
	 * it to efx_fini_channels().
	 */

	net_dev->num_tc = num_tc;
	return 0;
}

void efx_xmit_done(struct efx_tx_queue *tx_queue, unsigned int index)
{
	unsigned fill_level;
	struct efx_nic *efx = tx_queue->efx;
	struct efx_tx_queue *txq2;
	unsigned int pkts_compl = 0, bytes_compl = 0;

	EFX_BUG_ON_PARANOID(index > tx_queue->ptr_mask);

	efx_dequeue_buffers(tx_queue, index, &pkts_compl, &bytes_compl);
	netdev_tx_completed_queue(tx_queue->core_txq, pkts_compl, bytes_compl);

	if (pkts_compl > 1)
		++tx_queue->merge_events;

	/* See if we need to restart the netif queue.  This memory
	 * barrier ensures that we write read_count (inside
	 * efx_dequeue_buffers()) before reading the queue status.
	 */
	smp_mb();
	if (unlikely(netif_tx_queue_stopped(tx_queue->core_txq)) &&
	    likely(efx->port_enabled) &&
	    likely(netif_device_present(efx->net_dev))) {
		txq2 = efx_tx_queue_partner(tx_queue);
		fill_level = max(tx_queue->insert_count - tx_queue->read_count,
				 txq2->insert_count - txq2->read_count);
		if (fill_level <= efx->txq_wake_thresh)
			netif_tx_wake_queue(tx_queue->core_txq);
	}

	/* Check whether the hardware queue is now empty */
	if ((int)(tx_queue->read_count - tx_queue->old_write_count) >= 0) {
		tx_queue->old_write_count = ACCESS_ONCE(tx_queue->write_count);
		if (tx_queue->read_count == tx_queue->old_write_count) {
			smp_mb();
			tx_queue->empty_read_count =
				tx_queue->read_count | EFX_EMPTY_COUNT_VALID;
		}
	}
}

/* Size of page-based TSO header buffers.  Larger blocks must be
 * allocated from the heap.
 */
#define TSOH_STD_SIZE 128
#define TSOH_PER_PAGE (PAGE_SIZE / TSOH_STD_SIZE)

/* At most half the descriptors in the queue at any time will refer to
 * a TSO header buffer, since they must always be followed by a
 * payload descriptor referring to an skb.
 */
static unsigned int efx_tsoh_page_count(struct efx_tx_queue *tx_queue)
{
	return DIV_ROUND_UP(tx_queue->ptr_mask + 1, 2 * TSOH_PER_PAGE);
}

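/* Worked example (assuming 4K pages): TSOH_PER_PAGE == 4096 / 128 == 32
 * header buffers per page, so a 1024-entry ring needs at most
 * DIV_ROUND_UP(1024, 2 * 32) == 16 header pages.
 */
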
int efx_probe_tx_queue(struct efx_tx_queue *tx_queue)
{
	struct efx_nic *efx = tx_queue->efx;
	unsigned int entries;
	int rc;

	/* Create the smallest power-of-two aligned ring */
	entries = max(roundup_pow_of_two(efx->txq_entries), EFX_MIN_DMAQ_SIZE);
	EFX_BUG_ON_PARANOID(entries > EFX_MAX_DMAQ_SIZE);
	tx_queue->ptr_mask = entries - 1;

	netif_dbg(efx, probe, efx->net_dev,
		  "creating TX queue %d size %#x mask %#x\n",
		  tx_queue->queue, efx->txq_entries, tx_queue->ptr_mask);

	/* Allocate software ring */
	tx_queue->buffer = kcalloc(entries, sizeof(*tx_queue->buffer),
				   GFP_KERNEL);
	if (!tx_queue->buffer)
		return -ENOMEM;

	if (tx_queue->queue & EFX_TXQ_TYPE_OFFLOAD) {
		tx_queue->tsoh_page =
			kcalloc(efx_tsoh_page_count(tx_queue),
				sizeof(tx_queue->tsoh_page[0]), GFP_KERNEL);
		if (!tx_queue->tsoh_page) {
			rc = -ENOMEM;
			goto fail1;
		}
	}

	/* Allocate hardware ring */
	rc = efx_nic_probe_tx(tx_queue);
	if (rc)
		goto fail2;

	return 0;

fail2:
	kfree(tx_queue->tsoh_page);
	tx_queue->tsoh_page = NULL;
fail1:
	kfree(tx_queue->buffer);
	tx_queue->buffer = NULL;
	return rc;
}

void efx_init_tx_queue(struct efx_tx_queue *tx_queue)
{
	netif_dbg(tx_queue->efx, drv, tx_queue->efx->net_dev,
		  "initialising TX queue %d\n", tx_queue->queue);

	tx_queue->insert_count = 0;
	tx_queue->write_count = 0;
	tx_queue->old_write_count = 0;
	tx_queue->read_count = 0;
	tx_queue->old_read_count = 0;
	tx_queue->empty_read_count = 0 | EFX_EMPTY_COUNT_VALID;

	/* Set up TX descriptor ring */
	efx_nic_init_tx(tx_queue);

	tx_queue->initialised = true;
}

void efx_fini_tx_queue(struct efx_tx_queue *tx_queue)
{
	struct efx_tx_buffer *buffer;

	netif_dbg(tx_queue->efx, drv, tx_queue->efx->net_dev,
		  "shutting down TX queue %d\n", tx_queue->queue);

	if (!tx_queue->buffer)
		return;

	/* Free any buffers left in the ring */
	while (tx_queue->read_count != tx_queue->write_count) {
		unsigned int pkts_compl = 0, bytes_compl = 0;
		buffer = &tx_queue->buffer[tx_queue->read_count & tx_queue->ptr_mask];
		efx_dequeue_buffer(tx_queue, buffer, &pkts_compl, &bytes_compl);

		++tx_queue->read_count;
	}
	netdev_tx_reset_queue(tx_queue->core_txq);
}

void efx_remove_tx_queue(struct efx_tx_queue *tx_queue)
{
	int i;

	if (!tx_queue->buffer)
		return;

	netif_dbg(tx_queue->efx, drv, tx_queue->efx->net_dev,
		  "destroying TX queue %d\n", tx_queue->queue);
	efx_nic_remove_tx(tx_queue);

	if (tx_queue->tsoh_page) {
		for (i = 0; i < efx_tsoh_page_count(tx_queue); i++)
			efx_nic_free_buffer(tx_queue->efx,
					    &tx_queue->tsoh_page[i]);
		kfree(tx_queue->tsoh_page);
		tx_queue->tsoh_page = NULL;
	}

	kfree(tx_queue->buffer);
	tx_queue->buffer = NULL;
}


/* Efx TCP segmentation acceleration.
 *
 * Why?  Because by doing it here in the driver we can go significantly
 * faster than the GSO.
 *
 * Requires TX checksum offload support.
 */

/* Number of bytes inserted at the start of a TSO header buffer,
 * similar to NET_IP_ALIGN.
 */
#ifdef CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS
#define TSOH_OFFSET 0
#else
#define TSOH_OFFSET NET_IP_ALIGN
#endif

#define PTR_DIFF(p1, p2)  ((u8 *)(p1) - (u8 *)(p2))

/**
 * struct tso_state - TSO state for an SKB
 * @out_len: Remaining length in current segment
 * @seqnum: Current sequence number
 * @ipv4_id: Current IPv4 ID, host endian
 * @packet_space: Remaining space in current packet
 * @dma_addr: DMA address of current position
 * @in_len: Remaining length in current SKB fragment
 * @unmap_len: Length of SKB fragment
 * @unmap_addr: DMA address of SKB fragment
 * @dma_flags: TX buffer flags for DMA mapping - %EFX_TX_BUF_MAP_SINGLE or 0
 * @protocol: Network protocol (after any VLAN header)
 * @ip_off: Offset of IP header
 * @tcp_off: Offset of TCP header
 * @header_len: Number of bytes of header
 * @ip_base_len: IPv4 tot_len or IPv6 payload_len, before TCP payload
 * @header_dma_addr: Header DMA address, when using option descriptors
 * @header_unmap_len: Header DMA mapped length, or 0 if not using option
 *	descriptors
 *
 * The state used during segmentation.  It is put into this data structure
 * just to make it easy to pass into inline functions.
 */
struct tso_state {
	/* Output position */
	unsigned out_len;
	unsigned seqnum;
	u16 ipv4_id;
	unsigned packet_space;

	/* Input position */
	dma_addr_t dma_addr;
	unsigned in_len;
	unsigned unmap_len;
	dma_addr_t unmap_addr;
	unsigned short dma_flags;

	__be16 protocol;
	unsigned int ip_off;
	unsigned int tcp_off;
	unsigned header_len;
	unsigned int ip_base_len;
	dma_addr_t header_dma_addr;
	unsigned int header_unmap_len;
};


/*
 * Verify that our various assumptions about sk_buffs and the conditions
 * under which TSO will be attempted hold true.  Return the protocol number.
 */
static __be16 efx_tso_check_protocol(struct sk_buff *skb)
{
	__be16 protocol = skb->protocol;

	EFX_BUG_ON_PARANOID(((struct ethhdr *)skb->data)->h_proto !=
			    protocol);
	if (protocol == htons(ETH_P_8021Q)) {
		struct vlan_ethhdr *veh = (struct vlan_ethhdr *)skb->data;
		protocol = veh->h_vlan_encapsulated_proto;
	}

	if (protocol == htons(ETH_P_IP)) {
		EFX_BUG_ON_PARANOID(ip_hdr(skb)->protocol != IPPROTO_TCP);
	} else {
		EFX_BUG_ON_PARANOID(protocol != htons(ETH_P_IPV6));
		EFX_BUG_ON_PARANOID(ipv6_hdr(skb)->nexthdr != NEXTHDR_TCP);
	}
	EFX_BUG_ON_PARANOID((PTR_DIFF(tcp_hdr(skb), skb->data)
			     + (tcp_hdr(skb)->doff << 2u)) >
			    skb_headlen(skb));

	return protocol;
}

static u8 *efx_tsoh_get_buffer(struct efx_tx_queue *tx_queue,
			       struct efx_tx_buffer *buffer, unsigned int len)
{
	u8 *result;

	EFX_BUG_ON_PARANOID(buffer->len);
	EFX_BUG_ON_PARANOID(buffer->flags);
	EFX_BUG_ON_PARANOID(buffer->unmap_len);

	if (likely(len <= TSOH_STD_SIZE - TSOH_OFFSET)) {
		unsigned index =
			(tx_queue->insert_count & tx_queue->ptr_mask) / 2;
		struct efx_buffer *page_buf =
			&tx_queue->tsoh_page[index / TSOH_PER_PAGE];
		unsigned offset =
			TSOH_STD_SIZE * (index % TSOH_PER_PAGE) + TSOH_OFFSET;

		if (unlikely(!page_buf->addr) &&
		    efx_nic_alloc_buffer(tx_queue->efx, page_buf, PAGE_SIZE,
					 GFP_ATOMIC))
			return NULL;

		result = (u8 *)page_buf->addr + offset;
		buffer->dma_addr = page_buf->dma_addr + offset;
		buffer->flags = EFX_TX_BUF_CONT;
	} else {
		tx_queue->tso_long_headers++;

		buffer->heap_buf = kmalloc(TSOH_OFFSET + len, GFP_ATOMIC);
		if (unlikely(!buffer->heap_buf))
			return NULL;
		result = (u8 *)buffer->heap_buf + TSOH_OFFSET;
		buffer->flags = EFX_TX_BUF_CONT | EFX_TX_BUF_HEAP;
	}

	buffer->len = len;

	return result;
}

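/* Worked example for efx_tsoh_get_buffer() above (illustrative,
 * assuming 4K pages and TSOH_OFFSET == 0): a masked insert_count of 70
 * gives header slot 70 / 2 == 35, which lives in page 35 / 32 == 1 at
 * offset 128 * (35 % 32) == 384.
 */
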
/**
 * efx_tx_queue_insert - push descriptors onto the TX queue
 * @tx_queue: Efx TX queue
 * @dma_addr: DMA address of fragment
 * @len: Length of fragment
 * @final_buffer: The final buffer inserted into the queue
 *
 * Push descriptors onto the TX queue.
 */
static void efx_tx_queue_insert(struct efx_tx_queue *tx_queue,
				dma_addr_t dma_addr, unsigned len,
				struct efx_tx_buffer **final_buffer)
{
	struct efx_tx_buffer *buffer;
	struct efx_nic *efx = tx_queue->efx;
	unsigned dma_len;

	EFX_BUG_ON_PARANOID(len <= 0);

	while (1) {
		buffer = efx_tx_queue_get_insert_buffer(tx_queue);
		++tx_queue->insert_count;

		EFX_BUG_ON_PARANOID(tx_queue->insert_count -
				    tx_queue->read_count >=
				    efx->txq_entries);

		buffer->dma_addr = dma_addr;

		dma_len = efx_max_tx_len(efx, dma_addr);

		/* If there is enough space to send then do so */
		if (dma_len >= len)
			break;

		buffer->len = dma_len;
		buffer->flags = EFX_TX_BUF_CONT;
		dma_addr += dma_len;
		len -= dma_len;
	}

	EFX_BUG_ON_PARANOID(!len);
	buffer->len = len;
	*final_buffer = buffer;
}


/*
 * Put a TSO header into the TX queue.
 *
 * This is special-cased because we know that it is small enough to fit in
 * a single fragment, and we know it doesn't cross a page boundary.  It
 * also allows us to not worry about end-of-packet etc.
 */
static int efx_tso_put_header(struct efx_tx_queue *tx_queue,
			      struct efx_tx_buffer *buffer, u8 *header)
{
	if (unlikely(buffer->flags & EFX_TX_BUF_HEAP)) {
		buffer->dma_addr = dma_map_single(&tx_queue->efx->pci_dev->dev,
						  header, buffer->len,
						  DMA_TO_DEVICE);
		if (unlikely(dma_mapping_error(&tx_queue->efx->pci_dev->dev,
					       buffer->dma_addr))) {
			kfree(buffer->heap_buf);
			buffer->len = 0;
			buffer->flags = 0;
			return -ENOMEM;
		}
		buffer->unmap_len = buffer->len;
		buffer->flags |= EFX_TX_BUF_MAP_SINGLE;
	}

	++tx_queue->insert_count;
	return 0;
}


/* Remove buffers put into a tx_queue.  None of the buffers must have
 * an skb attached.
 */
static void efx_enqueue_unwind(struct efx_tx_queue *tx_queue)
{
	struct efx_tx_buffer *buffer;

	/* Work backwards until we hit the original insert pointer value */
	while (tx_queue->insert_count != tx_queue->write_count) {
		--tx_queue->insert_count;
		buffer = __efx_tx_queue_get_insert_buffer(tx_queue);
		efx_dequeue_buffer(tx_queue, buffer, NULL, NULL);
	}
}


/* Parse the SKB header and initialise state. */
static int tso_start(struct tso_state *st, struct efx_nic *efx,
		     const struct sk_buff *skb)
{
	bool use_options = efx_nic_rev(efx) >= EFX_REV_HUNT_A0;
	struct device *dma_dev = &efx->pci_dev->dev;
	unsigned int header_len, in_len;
	dma_addr_t dma_addr;

	st->ip_off = skb_network_header(skb) - skb->data;
	st->tcp_off = skb_transport_header(skb) - skb->data;
	header_len = st->tcp_off + (tcp_hdr(skb)->doff << 2u);
	in_len = skb_headlen(skb) - header_len;
	st->header_len = header_len;
	st->in_len = in_len;
	if (st->protocol == htons(ETH_P_IP)) {
		st->ip_base_len = st->header_len - st->ip_off;
		st->ipv4_id = ntohs(ip_hdr(skb)->id);
	} else {
		st->ip_base_len = st->header_len - st->tcp_off;
		st->ipv4_id = 0;
	}
	st->seqnum = ntohl(tcp_hdr(skb)->seq);

	EFX_BUG_ON_PARANOID(tcp_hdr(skb)->urg);
	EFX_BUG_ON_PARANOID(tcp_hdr(skb)->syn);
	EFX_BUG_ON_PARANOID(tcp_hdr(skb)->rst);

	st->out_len = skb->len - header_len;

	if (!use_options) {
		st->header_unmap_len = 0;

		if (likely(in_len == 0)) {
			st->dma_flags = 0;
			st->unmap_len = 0;
			return 0;
		}

		dma_addr = dma_map_single(dma_dev, skb->data + header_len,
					  in_len, DMA_TO_DEVICE);
		st->dma_flags = EFX_TX_BUF_MAP_SINGLE;
		st->dma_addr = dma_addr;
		st->unmap_addr = dma_addr;
		st->unmap_len = in_len;
	} else {
		dma_addr = dma_map_single(dma_dev, skb->data,
					  skb_headlen(skb), DMA_TO_DEVICE);
		st->header_dma_addr = dma_addr;
		st->header_unmap_len = skb_headlen(skb);
		st->dma_flags = 0;
		st->dma_addr = dma_addr + header_len;
		st->unmap_len = 0;
	}

	return unlikely(dma_mapping_error(dma_dev, dma_addr)) ? -ENOMEM : 0;
}

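/* Worked example for tso_start() above (a typical IPv4 TCP frame with
 * no VLAN tag): ip_off == 14 and tcp_off == 34; with a 20-byte TCP
 * header (doff == 5), header_len == 34 + 20 == 54 and ip_base_len ==
 * 54 - 14 == 40, i.e. the IP and TCP headers that tot_len must cover
 * ahead of the payload.
 */
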
static int tso_get_fragment(struct tso_state *st, struct efx_nic *efx,
			    skb_frag_t *frag)
{
	st->unmap_addr = skb_frag_dma_map(&efx->pci_dev->dev, frag, 0,
					  skb_frag_size(frag), DMA_TO_DEVICE);
	if (likely(!dma_mapping_error(&efx->pci_dev->dev, st->unmap_addr))) {
		st->dma_flags = 0;
		st->unmap_len = skb_frag_size(frag);
		st->in_len = skb_frag_size(frag);
		st->dma_addr = st->unmap_addr;
		return 0;
	}
	return -ENOMEM;
}


/**
 * tso_fill_packet_with_fragment - form descriptors for the current fragment
 * @tx_queue: Efx TX queue
 * @skb: Socket buffer
 * @st: TSO state
 *
 * Form descriptors for the current fragment, until we reach the end
 * of fragment or end-of-packet.
 */
static void tso_fill_packet_with_fragment(struct efx_tx_queue *tx_queue,
					  const struct sk_buff *skb,
					  struct tso_state *st)
{
	struct efx_tx_buffer *buffer;
	int n;

	if (st->in_len == 0)
		return;
	if (st->packet_space == 0)
		return;

	EFX_BUG_ON_PARANOID(st->in_len <= 0);
	EFX_BUG_ON_PARANOID(st->packet_space <= 0);

	n = min(st->in_len, st->packet_space);

	st->packet_space -= n;
	st->out_len -= n;
	st->in_len -= n;

	efx_tx_queue_insert(tx_queue, st->dma_addr, n, &buffer);

	if (st->out_len == 0) {
		/* Transfer ownership of the skb */
		buffer->skb = skb;
		buffer->flags = EFX_TX_BUF_SKB;
	} else if (st->packet_space != 0) {
		buffer->flags = EFX_TX_BUF_CONT;
	}

	if (st->in_len == 0) {
		/* Transfer ownership of the DMA mapping */
		buffer->unmap_len = st->unmap_len;
		buffer->flags |= st->dma_flags;
		st->unmap_len = 0;
	}

	st->dma_addr += n;
}


/**
 * tso_start_new_packet - generate a new header and prepare for the new packet
 * @tx_queue: Efx TX queue
 * @skb: Socket buffer
 * @st: TSO state
 *
 * Generate a new header and prepare for the new packet.  Return 0 on
 * success, or -%ENOMEM if we failed to allocate a header buffer.
 */
static int tso_start_new_packet(struct efx_tx_queue *tx_queue,
				const struct sk_buff *skb,
				struct tso_state *st)
{
	struct efx_tx_buffer *buffer =
		efx_tx_queue_get_insert_buffer(tx_queue);
	bool is_last = st->out_len <= skb_shinfo(skb)->gso_size;
	u8 tcp_flags_clear;

	if (!is_last) {
		st->packet_space = skb_shinfo(skb)->gso_size;
		tcp_flags_clear = 0x09; /* mask out FIN and PSH */
	} else {
		st->packet_space = st->out_len;
		tcp_flags_clear = 0x00;
	}

	if (!st->header_unmap_len) {
		/* Allocate and insert a DMA-mapped header buffer. */
		struct tcphdr *tsoh_th;
		unsigned ip_length;
		u8 *header;
		int rc;

		header = efx_tsoh_get_buffer(tx_queue, buffer, st->header_len);
		if (!header)
			return -ENOMEM;

		tsoh_th = (struct tcphdr *)(header + st->tcp_off);

		/* Copy and update the headers. */
		memcpy(header, skb->data, st->header_len);

		tsoh_th->seq = htonl(st->seqnum);
		((u8 *)tsoh_th)[13] &= ~tcp_flags_clear;

		ip_length = st->ip_base_len + st->packet_space;

		if (st->protocol == htons(ETH_P_IP)) {
			struct iphdr *tsoh_iph =
				(struct iphdr *)(header + st->ip_off);

			tsoh_iph->tot_len = htons(ip_length);
			tsoh_iph->id = htons(st->ipv4_id);
		} else {
			struct ipv6hdr *tsoh_iph =
				(struct ipv6hdr *)(header + st->ip_off);

			tsoh_iph->payload_len = htons(ip_length);
		}

		rc = efx_tso_put_header(tx_queue, buffer, header);
		if (unlikely(rc))
			return rc;
	} else {
		/* Send the original headers with a TSO option descriptor
		 * in front
		 */
		u8 tcp_flags = ((u8 *)tcp_hdr(skb))[13] & ~tcp_flags_clear;

		buffer->flags = EFX_TX_BUF_OPTION;
		buffer->len = 0;
		buffer->unmap_len = 0;
		EFX_POPULATE_QWORD_5(buffer->option,
				     ESF_DZ_TX_DESC_IS_OPT, 1,
				     ESF_DZ_TX_OPTION_TYPE,
				     ESE_DZ_TX_OPTION_DESC_TSO,
				     ESF_DZ_TX_TSO_TCP_FLAGS, tcp_flags,
				     ESF_DZ_TX_TSO_IP_ID, st->ipv4_id,
				     ESF_DZ_TX_TSO_TCP_SEQNO, st->seqnum);
		++tx_queue->insert_count;

		/* We mapped the headers in tso_start().  Unmap them
		 * when the last segment is completed.
		 */
		buffer = efx_tx_queue_get_insert_buffer(tx_queue);
		buffer->dma_addr = st->header_dma_addr;
		buffer->len = st->header_len;
		if (is_last) {
			buffer->flags = EFX_TX_BUF_CONT | EFX_TX_BUF_MAP_SINGLE;
			buffer->unmap_len = st->header_unmap_len;
			/* Ensure we only unmap them once in case of a
			 * later DMA mapping error and rollback
			 */
			st->header_unmap_len = 0;
		} else {
			buffer->flags = EFX_TX_BUF_CONT;
			buffer->unmap_len = 0;
		}
		++tx_queue->insert_count;
	}

	st->seqnum += skb_shinfo(skb)->gso_size;

	/* Linux leaves suitable gaps in the IP ID space for us to fill. */
	++st->ipv4_id;

	++tx_queue->tso_packets;

	return 0;
}

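/* Note on tcp_flags_clear in tso_start_new_packet() above: byte 13 of
 * the TCP header holds the flag bits, with FIN == 0x01 and PSH == 0x08.
 * Masking with ~0x09 clears both on every segment except the last,
 * which keeps the original flags.
 */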

/**
 * efx_enqueue_skb_tso - segment and transmit a TSO socket buffer
 * @tx_queue: Efx TX queue
 * @skb: Socket buffer
 *
 * Context: You must hold netif_tx_lock() to call this function.
 *
 * Add socket buffer @skb to @tx_queue, doing TSO.  In all cases @skb
 * is consumed.  Returns %NETDEV_TX_OK.
 */
static int efx_enqueue_skb_tso(struct efx_tx_queue *tx_queue,
			       struct sk_buff *skb)
{
	struct efx_nic *efx = tx_queue->efx;
	int frag_i, rc;
	struct tso_state state;

	/* Find the packet protocol and sanity-check it */
	state.protocol = efx_tso_check_protocol(skb);

	EFX_BUG_ON_PARANOID(tx_queue->write_count != tx_queue->insert_count);

	rc = tso_start(&state, efx, skb);
	if (rc)
		goto mem_err;

	if (likely(state.in_len == 0)) {
		/* Grab the first payload fragment. */
		EFX_BUG_ON_PARANOID(skb_shinfo(skb)->nr_frags < 1);
		frag_i = 0;
		rc = tso_get_fragment(&state, efx,
				      skb_shinfo(skb)->frags + frag_i);
		if (rc)
			goto mem_err;
	} else {
		/* Payload starts in the header area. */
		frag_i = -1;
	}

	if (tso_start_new_packet(tx_queue, skb, &state) < 0)
		goto mem_err;

	while (1) {
		tso_fill_packet_with_fragment(tx_queue, skb, &state);

		/* Move onto the next fragment? */
		if (state.in_len == 0) {
			if (++frag_i >= skb_shinfo(skb)->nr_frags)
				/* End of payload reached. */
				break;
			rc = tso_get_fragment(&state, efx,
					      skb_shinfo(skb)->frags + frag_i);
			if (rc)
				goto mem_err;
		}

		/* Start at new packet? */
		if (state.packet_space == 0 &&
		    tso_start_new_packet(tx_queue, skb, &state) < 0)
			goto mem_err;
	}

	netdev_tx_sent_queue(tx_queue->core_txq, skb->len);

	/* Pass off to hardware */
	efx_nic_push_buffers(tx_queue);

	efx_tx_maybe_stop_queue(tx_queue);

	tx_queue->tso_bursts++;
	return NETDEV_TX_OK;

 mem_err:
	netif_err(efx, tx_err, efx->net_dev,
		  "Out of memory for TSO headers, or DMA mapping error\n");
	dev_kfree_skb_any(skb);

	/* Free the DMA mapping we were in the process of writing out */
	if (state.unmap_len) {
		if (state.dma_flags & EFX_TX_BUF_MAP_SINGLE)
			dma_unmap_single(&efx->pci_dev->dev, state.unmap_addr,
					 state.unmap_len, DMA_TO_DEVICE);
		else
			dma_unmap_page(&efx->pci_dev->dev, state.unmap_addr,
				       state.unmap_len, DMA_TO_DEVICE);
	}

	/* Free the header DMA mapping, if using option descriptors */
	if (state.header_unmap_len)
		dma_unmap_single(&efx->pci_dev->dev, state.header_dma_addr,
				 state.header_unmap_len, DMA_TO_DEVICE);

	efx_enqueue_unwind(tx_queue);
	return NETDEV_TX_OK;
}
Ben Hutchingsb9b39b62008-05-07 12:51:12 +01001188}