/****************************************************************************
 * Driver for Solarflare network controllers and boards
 * Copyright 2005-2006 Fen Systems Ltd.
 * Copyright 2005-2013 Solarflare Communications Inc.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 as published
 * by the Free Software Foundation, incorporated herein by reference.
 */

#include <linux/pci.h>
#include <linux/tcp.h>
#include <linux/ip.h>
#include <linux/in.h>
#include <linux/ipv6.h>
#include <linux/slab.h>
#include <net/ipv6.h>
#include <linux/if_ether.h>
#include <linux/highmem.h>
#include <linux/cache.h>
#include "net_driver.h"
#include "efx.h"
#include "io.h"
#include "nic.h"
#include "workarounds.h"
#include "ef10_regs.h"

#ifdef EFX_USE_PIO

#define EFX_PIOBUF_SIZE_MAX ER_DZ_TX_PIOBUF_SIZE
#define EFX_PIOBUF_SIZE_DEF ALIGN(256, L1_CACHE_BYTES)
unsigned int efx_piobuf_size __read_mostly = EFX_PIOBUF_SIZE_DEF;

#endif /* EFX_USE_PIO */

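/* Release a TX buffer that has completed (or is being unwound): unmap its
 * DMA region according to the mapping flags, free any attached skb or
 * heap-allocated TSO header, and accumulate packet/byte completion counts.
 */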
static void efx_dequeue_buffer(struct efx_tx_queue *tx_queue,
                               struct efx_tx_buffer *buffer,
                               unsigned int *pkts_compl,
                               unsigned int *bytes_compl)
{
        if (buffer->unmap_len) {
                struct device *dma_dev = &tx_queue->efx->pci_dev->dev;
                dma_addr_t unmap_addr = (buffer->dma_addr + buffer->len -
                                         buffer->unmap_len);
                if (buffer->flags & EFX_TX_BUF_MAP_SINGLE)
                        dma_unmap_single(dma_dev, unmap_addr, buffer->unmap_len,
                                         DMA_TO_DEVICE);
                else
                        dma_unmap_page(dma_dev, unmap_addr, buffer->unmap_len,
                                       DMA_TO_DEVICE);
                buffer->unmap_len = 0;
        }

        if (buffer->flags & EFX_TX_BUF_SKB) {
                (*pkts_compl)++;
                (*bytes_compl) += buffer->skb->len;
                dev_kfree_skb_any((struct sk_buff *)buffer->skb);
                netif_vdbg(tx_queue->efx, tx_done, tx_queue->efx->net_dev,
                           "TX queue %d transmission id %x complete\n",
                           tx_queue->queue, tx_queue->read_count);
        } else if (buffer->flags & EFX_TX_BUF_HEAP) {
                kfree(buffer->heap_buf);
        }

        buffer->len = 0;
        buffer->flags = 0;
}

static int efx_enqueue_skb_tso(struct efx_tx_queue *tx_queue,
                               struct sk_buff *skb);

static inline unsigned
efx_max_tx_len(struct efx_nic *efx, dma_addr_t dma_addr)
{
        /* Depending on the NIC revision, we can use descriptor
         * lengths up to 8K or 8K-1.  However, since PCI Express
         * devices must split read requests at 4K boundaries, there is
         * little benefit from using descriptors that cross those
         * boundaries and we keep things simple by not doing so.
         */
        unsigned len = (~dma_addr & (EFX_PAGE_SIZE - 1)) + 1;

        /* Work around hardware bug for unaligned buffers. */
        if (EFX_WORKAROUND_5391(efx) && (dma_addr & 0xf))
                len = min_t(unsigned, len, 512 - (dma_addr & 0xf));

        return len;
}

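/* Worst-case number of descriptors that transmitting a single skb may
 * require, allowing for TSO segmentation, option descriptors and fragments
 * that cross PCIe page boundaries.
 */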
unsigned int efx_tx_max_skb_descs(struct efx_nic *efx)
{
        /* Header and payload descriptor for each output segment, plus
         * one for every input fragment boundary within a segment
         */
        unsigned int max_descs = EFX_TSO_MAX_SEGS * 2 + MAX_SKB_FRAGS;

        /* Possibly one more per segment for the alignment workaround,
         * or for option descriptors
         */
        if (EFX_WORKAROUND_5391(efx) || efx_nic_rev(efx) >= EFX_REV_HUNT_A0)
                max_descs += EFX_TSO_MAX_SEGS;

        /* Possibly more for PCIe page boundaries within input fragments */
        if (PAGE_SIZE > EFX_PAGE_SIZE)
                max_descs += max_t(unsigned int, MAX_SKB_FRAGS,
                                   DIV_ROUND_UP(GSO_MAX_SIZE, EFX_PAGE_SIZE));

        return max_descs;
}

/* Get partner of a TX queue, seen as part of the same net core queue */
static struct efx_tx_queue *efx_tx_queue_partner(struct efx_tx_queue *tx_queue)
{
        if (tx_queue->queue & EFX_TXQ_TYPE_OFFLOAD)
                return tx_queue - EFX_TXQ_TYPE_OFFLOAD;
        else
                return tx_queue + EFX_TXQ_TYPE_OFFLOAD;
}

static void efx_tx_maybe_stop_queue(struct efx_tx_queue *txq1)
{
        /* We need to consider both queues that the net core sees as one */
        struct efx_tx_queue *txq2 = efx_tx_queue_partner(txq1);
        struct efx_nic *efx = txq1->efx;
        unsigned int fill_level;

        fill_level = max(txq1->insert_count - txq1->old_read_count,
                         txq2->insert_count - txq2->old_read_count);
        if (likely(fill_level < efx->txq_stop_thresh))
                return;

        /* We used the stale old_read_count above, which gives us a
         * pessimistic estimate of the fill level (which may even
         * validly be >= efx->txq_entries).  Now try again using
         * read_count (more likely to be a cache miss).
         *
         * If we read read_count and then conditionally stop the
         * queue, it is possible for the completion path to race with
         * us and complete all outstanding descriptors in the middle,
         * after which there will be no more completions to wake it.
         * Therefore we stop the queue first, then read read_count
         * (with a memory barrier to ensure the ordering), then
         * restart the queue if the fill level turns out to be low
         * enough.
         */
        netif_tx_stop_queue(txq1->core_txq);
        smp_mb();
        txq1->old_read_count = ACCESS_ONCE(txq1->read_count);
        txq2->old_read_count = ACCESS_ONCE(txq2->read_count);

        fill_level = max(txq1->insert_count - txq1->old_read_count,
                         txq2->insert_count - txq2->old_read_count);
        EFX_BUG_ON_PARANOID(fill_level >= efx->txq_entries);
        if (likely(fill_level < efx->txq_stop_thresh)) {
                smp_mb();
                if (likely(!efx->loopback_selftest))
                        netif_tx_start_queue(txq1->core_txq);
        }
}

/*
 * Add a socket buffer to a TX queue
 *
 * This maps all fragments of a socket buffer for DMA and adds them to
 * the TX queue.  The queue's insert pointer will be incremented by
 * the number of fragments in the socket buffer.
 *
 * If any DMA mapping fails, any mapped fragments will be unmapped, and
 * the queue's insert pointer will be restored to its original value.
 *
 * This function is split out from efx_hard_start_xmit to allow the
 * loopback test to direct packets via specific TX queues.
 *
 * Returns NETDEV_TX_OK.
 * You must hold netif_tx_lock() to call this function.
 */
netdev_tx_t efx_enqueue_skb(struct efx_tx_queue *tx_queue, struct sk_buff *skb)
{
        struct efx_nic *efx = tx_queue->efx;
        struct device *dma_dev = &efx->pci_dev->dev;
        struct efx_tx_buffer *buffer;
        skb_frag_t *fragment;
        unsigned int len, unmap_len = 0, insert_ptr;
        dma_addr_t dma_addr, unmap_addr = 0;
        unsigned int dma_len;
        unsigned short dma_flags;
        int i = 0;

        EFX_BUG_ON_PARANOID(tx_queue->write_count != tx_queue->insert_count);

        if (skb_shinfo(skb)->gso_size)
                return efx_enqueue_skb_tso(tx_queue, skb);

        /* Get size of the initial fragment */
        len = skb_headlen(skb);

        /* Pad if necessary */
        if (EFX_WORKAROUND_15592(efx) && skb->len <= 32) {
                EFX_BUG_ON_PARANOID(skb->data_len);
                len = 32 + 1;
                if (skb_pad(skb, len - skb->len))
                        return NETDEV_TX_OK;
        }

        /* Map for DMA.  Use dma_map_single rather than dma_map_page
         * since this is more efficient on machines with sparse
         * memory.
         */
        dma_flags = EFX_TX_BUF_MAP_SINGLE;
        dma_addr = dma_map_single(dma_dev, skb->data, len, PCI_DMA_TODEVICE);

        /* Process all fragments */
        while (1) {
                if (unlikely(dma_mapping_error(dma_dev, dma_addr)))
                        goto dma_err;

                /* Store fields for marking in the per-fragment final
                 * descriptor */
                unmap_len = len;
                unmap_addr = dma_addr;

                /* Add to TX queue, splitting across DMA boundaries */
                do {
                        insert_ptr = tx_queue->insert_count & tx_queue->ptr_mask;
                        buffer = &tx_queue->buffer[insert_ptr];
                        EFX_BUG_ON_PARANOID(buffer->flags);
                        EFX_BUG_ON_PARANOID(buffer->len);
                        EFX_BUG_ON_PARANOID(buffer->unmap_len);

                        dma_len = efx_max_tx_len(efx, dma_addr);
                        if (likely(dma_len >= len))
                                dma_len = len;

                        /* Fill out per descriptor fields */
                        buffer->len = dma_len;
                        buffer->dma_addr = dma_addr;
                        buffer->flags = EFX_TX_BUF_CONT;
                        len -= dma_len;
                        dma_addr += dma_len;
                        ++tx_queue->insert_count;
                } while (len);

                /* Transfer ownership of the unmapping to the final buffer */
                buffer->flags = EFX_TX_BUF_CONT | dma_flags;
                buffer->unmap_len = unmap_len;
                unmap_len = 0;

                /* Get address and size of next fragment */
                if (i >= skb_shinfo(skb)->nr_frags)
                        break;
                fragment = &skb_shinfo(skb)->frags[i];
                len = skb_frag_size(fragment);
                i++;
                /* Map for DMA */
                dma_flags = 0;
                dma_addr = skb_frag_dma_map(dma_dev, fragment, 0, len,
                                            DMA_TO_DEVICE);
        }

        /* Transfer ownership of the skb to the final buffer */
        buffer->skb = skb;
        buffer->flags = EFX_TX_BUF_SKB | dma_flags;

        netdev_tx_sent_queue(tx_queue->core_txq, skb->len);

        /* Pass off to hardware */
        efx_nic_push_buffers(tx_queue);

        efx_tx_maybe_stop_queue(tx_queue);

        return NETDEV_TX_OK;

 dma_err:
        netif_err(efx, tx_err, efx->net_dev,
                  " TX queue %d could not map skb with %d bytes %d "
                  "fragments for DMA\n", tx_queue->queue, skb->len,
                  skb_shinfo(skb)->nr_frags + 1);

        /* Mark the packet as transmitted, and free the SKB ourselves */
        dev_kfree_skb_any(skb);

        /* Work backwards until we hit the original insert pointer value */
        while (tx_queue->insert_count != tx_queue->write_count) {
                unsigned int pkts_compl = 0, bytes_compl = 0;
                --tx_queue->insert_count;
                insert_ptr = tx_queue->insert_count & tx_queue->ptr_mask;
                buffer = &tx_queue->buffer[insert_ptr];
                efx_dequeue_buffer(tx_queue, buffer, &pkts_compl, &bytes_compl);
        }

        /* Free the fragment we were mid-way through pushing */
        if (unmap_len) {
                if (dma_flags & EFX_TX_BUF_MAP_SINGLE)
                        dma_unmap_single(dma_dev, unmap_addr, unmap_len,
                                         DMA_TO_DEVICE);
                else
                        dma_unmap_page(dma_dev, unmap_addr, unmap_len,
                                       DMA_TO_DEVICE);
        }

        return NETDEV_TX_OK;
}

/* Remove packets from the TX queue
 *
 * This removes packets from the TX queue, up to and including the
 * specified index.
 */
static void efx_dequeue_buffers(struct efx_tx_queue *tx_queue,
                                unsigned int index,
                                unsigned int *pkts_compl,
                                unsigned int *bytes_compl)
{
        struct efx_nic *efx = tx_queue->efx;
        unsigned int stop_index, read_ptr;

        stop_index = (index + 1) & tx_queue->ptr_mask;
        read_ptr = tx_queue->read_count & tx_queue->ptr_mask;

        while (read_ptr != stop_index) {
                struct efx_tx_buffer *buffer = &tx_queue->buffer[read_ptr];

                if (!(buffer->flags & EFX_TX_BUF_OPTION) &&
                    unlikely(buffer->len == 0)) {
                        netif_err(efx, tx_err, efx->net_dev,
                                  "TX queue %d spurious TX completion id %x\n",
                                  tx_queue->queue, read_ptr);
                        efx_schedule_reset(efx, RESET_TYPE_TX_SKIP);
                        return;
                }

                efx_dequeue_buffer(tx_queue, buffer, pkts_compl, bytes_compl);

                ++tx_queue->read_count;
                read_ptr = tx_queue->read_count & tx_queue->ptr_mask;
        }
}

/* Initiate a packet transmission.  We use one channel per CPU
 * (sharing when we have more CPUs than channels).  On Falcon, the TX
 * completion events will be directed back to the CPU that transmitted
 * the packet, which should be cache-efficient.
 *
 * Context: non-blocking.
 * Note that returning anything other than NETDEV_TX_OK will cause the
 * OS to free the skb.
 */
netdev_tx_t efx_hard_start_xmit(struct sk_buff *skb,
                                struct net_device *net_dev)
{
        struct efx_nic *efx = netdev_priv(net_dev);
        struct efx_tx_queue *tx_queue;
        unsigned index, type;

        EFX_WARN_ON_PARANOID(!netif_device_present(net_dev));

        /* PTP "event" packet */
        if (unlikely(efx_xmit_with_hwtstamp(skb)) &&
            unlikely(efx_ptp_is_ptp_tx(efx, skb))) {
                return efx_ptp_tx(efx, skb);
        }

        index = skb_get_queue_mapping(skb);
        type = skb->ip_summed == CHECKSUM_PARTIAL ? EFX_TXQ_TYPE_OFFLOAD : 0;
        if (index >= efx->n_tx_channels) {
                index -= efx->n_tx_channels;
                type |= EFX_TXQ_TYPE_HIGHPRI;
        }
        tx_queue = efx_get_tx_queue(efx, index, type);

        return efx_enqueue_skb(tx_queue, skb);
}

void efx_init_tx_queue_core_txq(struct efx_tx_queue *tx_queue)
{
        struct efx_nic *efx = tx_queue->efx;

        /* Must be inverse of queue lookup in efx_hard_start_xmit() */
        tx_queue->core_txq =
                netdev_get_tx_queue(efx->net_dev,
                                    tx_queue->queue / EFX_TXQ_TYPES +
                                    ((tx_queue->queue & EFX_TXQ_TYPE_HIGHPRI) ?
                                     efx->n_tx_channels : 0));
}

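/* Set the number of TX traffic classes.  Each class maps onto a block of
 * n_tx_channels queues; high-priority queues are probed and initialised
 * lazily when the number of classes grows.
 */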
int efx_setup_tc(struct net_device *net_dev, u8 num_tc)
{
        struct efx_nic *efx = netdev_priv(net_dev);
        struct efx_channel *channel;
        struct efx_tx_queue *tx_queue;
        unsigned tc;
        int rc;

        if (efx_nic_rev(efx) < EFX_REV_FALCON_B0 || num_tc > EFX_MAX_TX_TC)
                return -EINVAL;

        if (num_tc == net_dev->num_tc)
                return 0;

        for (tc = 0; tc < num_tc; tc++) {
                net_dev->tc_to_txq[tc].offset = tc * efx->n_tx_channels;
                net_dev->tc_to_txq[tc].count = efx->n_tx_channels;
        }

        if (num_tc > net_dev->num_tc) {
                /* Initialise high-priority queues as necessary */
                efx_for_each_channel(channel, efx) {
                        efx_for_each_possible_channel_tx_queue(tx_queue,
                                                               channel) {
                                if (!(tx_queue->queue & EFX_TXQ_TYPE_HIGHPRI))
                                        continue;
                                if (!tx_queue->buffer) {
                                        rc = efx_probe_tx_queue(tx_queue);
                                        if (rc)
                                                return rc;
                                }
                                if (!tx_queue->initialised)
                                        efx_init_tx_queue(tx_queue);
                                efx_init_tx_queue_core_txq(tx_queue);
                        }
                }
        } else {
                /* Reduce number of classes before number of queues */
                net_dev->num_tc = num_tc;
        }

        rc = netif_set_real_num_tx_queues(net_dev,
                                          max_t(int, num_tc, 1) *
                                          efx->n_tx_channels);
        if (rc)
                return rc;

        /* Do not destroy high-priority queues when they become
         * unused.  We would have to flush them first, and it is
         * fairly difficult to flush a subset of TX queues.  Leave
         * it to efx_fini_channels().
         */

        net_dev->num_tc = num_tc;
        return 0;
}

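/* Handle TX completions up to and including @index: release the completed
 * buffers, update BQL accounting, wake the core TX queue if it was stopped
 * and enough space has been freed, and note when the hardware queue has
 * drained completely.
 */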
void efx_xmit_done(struct efx_tx_queue *tx_queue, unsigned int index)
{
        unsigned fill_level;
        struct efx_nic *efx = tx_queue->efx;
        struct efx_tx_queue *txq2;
        unsigned int pkts_compl = 0, bytes_compl = 0;

        EFX_BUG_ON_PARANOID(index > tx_queue->ptr_mask);

        efx_dequeue_buffers(tx_queue, index, &pkts_compl, &bytes_compl);
        netdev_tx_completed_queue(tx_queue->core_txq, pkts_compl, bytes_compl);

        if (pkts_compl > 1)
                ++tx_queue->merge_events;

        /* See if we need to restart the netif queue.  This memory
         * barrier ensures that we write read_count (inside
         * efx_dequeue_buffers()) before reading the queue status.
         */
        smp_mb();
        if (unlikely(netif_tx_queue_stopped(tx_queue->core_txq)) &&
            likely(efx->port_enabled) &&
            likely(netif_device_present(efx->net_dev))) {
                txq2 = efx_tx_queue_partner(tx_queue);
                fill_level = max(tx_queue->insert_count - tx_queue->read_count,
                                 txq2->insert_count - txq2->read_count);
                if (fill_level <= efx->txq_wake_thresh)
                        netif_tx_wake_queue(tx_queue->core_txq);
        }

        /* Check whether the hardware queue is now empty */
        if ((int)(tx_queue->read_count - tx_queue->old_write_count) >= 0) {
                tx_queue->old_write_count = ACCESS_ONCE(tx_queue->write_count);
                if (tx_queue->read_count == tx_queue->old_write_count) {
                        smp_mb();
                        tx_queue->empty_read_count =
                                tx_queue->read_count | EFX_EMPTY_COUNT_VALID;
                }
        }
}

/* Size of page-based TSO header buffers.  Larger blocks must be
 * allocated from the heap.
 */
#define TSOH_STD_SIZE   128
#define TSOH_PER_PAGE   (PAGE_SIZE / TSOH_STD_SIZE)

/* At most half the descriptors in the queue at any time will refer to
 * a TSO header buffer, since they must always be followed by a
 * payload descriptor referring to an skb.
 */
static unsigned int efx_tsoh_page_count(struct efx_tx_queue *tx_queue)
{
        return DIV_ROUND_UP(tx_queue->ptr_mask + 1, 2 * TSOH_PER_PAGE);
}

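/* Allocate the software descriptor ring, the TSO header pages (for
 * checksum-offload queues) and the hardware ring for a TX queue.
 */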
int efx_probe_tx_queue(struct efx_tx_queue *tx_queue)
{
        struct efx_nic *efx = tx_queue->efx;
        unsigned int entries;
        int rc;

        /* Create the smallest power-of-two aligned ring */
        entries = max(roundup_pow_of_two(efx->txq_entries), EFX_MIN_DMAQ_SIZE);
        EFX_BUG_ON_PARANOID(entries > EFX_MAX_DMAQ_SIZE);
        tx_queue->ptr_mask = entries - 1;

        netif_dbg(efx, probe, efx->net_dev,
                  "creating TX queue %d size %#x mask %#x\n",
                  tx_queue->queue, efx->txq_entries, tx_queue->ptr_mask);

        /* Allocate software ring */
        tx_queue->buffer = kcalloc(entries, sizeof(*tx_queue->buffer),
                                   GFP_KERNEL);
        if (!tx_queue->buffer)
                return -ENOMEM;

        if (tx_queue->queue & EFX_TXQ_TYPE_OFFLOAD) {
                tx_queue->tsoh_page =
                        kcalloc(efx_tsoh_page_count(tx_queue),
                                sizeof(tx_queue->tsoh_page[0]), GFP_KERNEL);
                if (!tx_queue->tsoh_page) {
                        rc = -ENOMEM;
                        goto fail1;
                }
        }

        /* Allocate hardware ring */
        rc = efx_nic_probe_tx(tx_queue);
        if (rc)
                goto fail2;

        return 0;

fail2:
        kfree(tx_queue->tsoh_page);
        tx_queue->tsoh_page = NULL;
fail1:
        kfree(tx_queue->buffer);
        tx_queue->buffer = NULL;
        return rc;
}

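/* Reset the software state of a TX queue and (re)initialise its hardware
 * descriptor ring.
 */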
void efx_init_tx_queue(struct efx_tx_queue *tx_queue)
{
        netif_dbg(tx_queue->efx, drv, tx_queue->efx->net_dev,
                  "initialising TX queue %d\n", tx_queue->queue);

        tx_queue->insert_count = 0;
        tx_queue->write_count = 0;
        tx_queue->old_write_count = 0;
        tx_queue->read_count = 0;
        tx_queue->old_read_count = 0;
        tx_queue->empty_read_count = 0 | EFX_EMPTY_COUNT_VALID;

        /* Set up TX descriptor ring */
        efx_nic_init_tx(tx_queue);

        tx_queue->initialised = true;
}

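/* Release any buffers still queued when a TX queue is shut down, and reset
 * the BQL state of its core queue.
 */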
void efx_fini_tx_queue(struct efx_tx_queue *tx_queue)
{
        struct efx_tx_buffer *buffer;

        netif_dbg(tx_queue->efx, drv, tx_queue->efx->net_dev,
                  "shutting down TX queue %d\n", tx_queue->queue);

        if (!tx_queue->buffer)
                return;

        /* Free any buffers left in the ring */
        while (tx_queue->read_count != tx_queue->write_count) {
                unsigned int pkts_compl = 0, bytes_compl = 0;
                buffer = &tx_queue->buffer[tx_queue->read_count & tx_queue->ptr_mask];
                efx_dequeue_buffer(tx_queue, buffer, &pkts_compl, &bytes_compl);

                ++tx_queue->read_count;
        }
        netdev_tx_reset_queue(tx_queue->core_txq);
}

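/* Free the hardware ring, the TSO header pages and the software ring of a
 * TX queue.
 */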
void efx_remove_tx_queue(struct efx_tx_queue *tx_queue)
{
        int i;

        if (!tx_queue->buffer)
                return;

        netif_dbg(tx_queue->efx, drv, tx_queue->efx->net_dev,
                  "destroying TX queue %d\n", tx_queue->queue);
        efx_nic_remove_tx(tx_queue);

        if (tx_queue->tsoh_page) {
                for (i = 0; i < efx_tsoh_page_count(tx_queue); i++)
                        efx_nic_free_buffer(tx_queue->efx,
                                            &tx_queue->tsoh_page[i]);
                kfree(tx_queue->tsoh_page);
                tx_queue->tsoh_page = NULL;
        }

        kfree(tx_queue->buffer);
        tx_queue->buffer = NULL;
}


/* Efx TCP segmentation acceleration.
 *
 * Why?  Because by doing it here in the driver we can go significantly
 * faster than the GSO.
 *
 * Requires TX checksum offload support.
 */

/* Number of bytes inserted at the start of a TSO header buffer,
 * similar to NET_IP_ALIGN.
 */
#ifdef CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS
#define TSOH_OFFSET     0
#else
#define TSOH_OFFSET     NET_IP_ALIGN
#endif

#define PTR_DIFF(p1, p2)  ((u8 *)(p1) - (u8 *)(p2))

/**
 * struct tso_state - TSO state for an SKB
 * @out_len: Remaining length in current segment
 * @seqnum: Current sequence number
 * @ipv4_id: Current IPv4 ID, host endian
 * @packet_space: Remaining space in current packet
 * @dma_addr: DMA address of current position
 * @in_len: Remaining length in current SKB fragment
 * @unmap_len: Length of SKB fragment
 * @unmap_addr: DMA address of SKB fragment
 * @dma_flags: TX buffer flags for DMA mapping - %EFX_TX_BUF_MAP_SINGLE or 0
 * @protocol: Network protocol (after any VLAN header)
 * @ip_off: Offset of IP header
 * @tcp_off: Offset of TCP header
 * @header_len: Number of bytes of header
 * @ip_base_len: IPv4 tot_len or IPv6 payload_len, before TCP payload
 * @header_dma_addr: Header DMA address, when using option descriptors
 * @header_unmap_len: Header DMA mapped length, or 0 if not using option
 *      descriptors
 *
 * The state used during segmentation.  It is put into this data structure
 * just to make it easy to pass into inline functions.
 */
struct tso_state {
        /* Output position */
        unsigned out_len;
        unsigned seqnum;
        u16 ipv4_id;
        unsigned packet_space;

        /* Input position */
        dma_addr_t dma_addr;
        unsigned in_len;
        unsigned unmap_len;
        dma_addr_t unmap_addr;
        unsigned short dma_flags;

        __be16 protocol;
        unsigned int ip_off;
        unsigned int tcp_off;
        unsigned header_len;
        unsigned int ip_base_len;
        dma_addr_t header_dma_addr;
        unsigned int header_unmap_len;
};


/*
 * Verify that our various assumptions about sk_buffs and the conditions
 * under which TSO will be attempted hold true.  Return the protocol number.
 */
static __be16 efx_tso_check_protocol(struct sk_buff *skb)
{
        __be16 protocol = skb->protocol;

        EFX_BUG_ON_PARANOID(((struct ethhdr *)skb->data)->h_proto !=
                            protocol);
        if (protocol == htons(ETH_P_8021Q)) {
                struct vlan_ethhdr *veh = (struct vlan_ethhdr *)skb->data;
                protocol = veh->h_vlan_encapsulated_proto;
        }

        if (protocol == htons(ETH_P_IP)) {
                EFX_BUG_ON_PARANOID(ip_hdr(skb)->protocol != IPPROTO_TCP);
        } else {
                EFX_BUG_ON_PARANOID(protocol != htons(ETH_P_IPV6));
                EFX_BUG_ON_PARANOID(ipv6_hdr(skb)->nexthdr != NEXTHDR_TCP);
        }
        EFX_BUG_ON_PARANOID((PTR_DIFF(tcp_hdr(skb), skb->data)
                             + (tcp_hdr(skb)->doff << 2u)) >
                            skb_headlen(skb));

        return protocol;
}

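/* Return a buffer for a TSO header of @len bytes: carved out of the queue's
 * TSO header pages when it fits in TSOH_STD_SIZE, otherwise kmalloc'd and
 * flagged EFX_TX_BUF_HEAP so it is freed on completion.
 */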
static u8 *efx_tsoh_get_buffer(struct efx_tx_queue *tx_queue,
                               struct efx_tx_buffer *buffer, unsigned int len)
{
        u8 *result;

        EFX_BUG_ON_PARANOID(buffer->len);
        EFX_BUG_ON_PARANOID(buffer->flags);
        EFX_BUG_ON_PARANOID(buffer->unmap_len);

        if (likely(len <= TSOH_STD_SIZE - TSOH_OFFSET)) {
                unsigned index =
                        (tx_queue->insert_count & tx_queue->ptr_mask) / 2;
                struct efx_buffer *page_buf =
                        &tx_queue->tsoh_page[index / TSOH_PER_PAGE];
                unsigned offset =
                        TSOH_STD_SIZE * (index % TSOH_PER_PAGE) + TSOH_OFFSET;

                if (unlikely(!page_buf->addr) &&
                    efx_nic_alloc_buffer(tx_queue->efx, page_buf, PAGE_SIZE,
                                         GFP_ATOMIC))
                        return NULL;

                result = (u8 *)page_buf->addr + offset;
                buffer->dma_addr = page_buf->dma_addr + offset;
                buffer->flags = EFX_TX_BUF_CONT;
        } else {
                tx_queue->tso_long_headers++;

                buffer->heap_buf = kmalloc(TSOH_OFFSET + len, GFP_ATOMIC);
                if (unlikely(!buffer->heap_buf))
                        return NULL;
                result = (u8 *)buffer->heap_buf + TSOH_OFFSET;
                buffer->flags = EFX_TX_BUF_CONT | EFX_TX_BUF_HEAP;
        }

        buffer->len = len;

        return result;
}

/**
 * efx_tx_queue_insert - push descriptors onto the TX queue
 * @tx_queue: Efx TX queue
 * @dma_addr: DMA address of fragment
 * @len: Length of fragment
 * @final_buffer: The final buffer inserted into the queue
 *
 * Push descriptors onto the TX queue.
 */
static void efx_tx_queue_insert(struct efx_tx_queue *tx_queue,
                                dma_addr_t dma_addr, unsigned len,
                                struct efx_tx_buffer **final_buffer)
{
        struct efx_tx_buffer *buffer;
        struct efx_nic *efx = tx_queue->efx;
        unsigned dma_len, insert_ptr;

        EFX_BUG_ON_PARANOID(len <= 0);

        while (1) {
                insert_ptr = tx_queue->insert_count & tx_queue->ptr_mask;
                buffer = &tx_queue->buffer[insert_ptr];
                ++tx_queue->insert_count;

                EFX_BUG_ON_PARANOID(tx_queue->insert_count -
                                    tx_queue->read_count >=
                                    efx->txq_entries);

                EFX_BUG_ON_PARANOID(buffer->len);
                EFX_BUG_ON_PARANOID(buffer->unmap_len);
                EFX_BUG_ON_PARANOID(buffer->flags);

                buffer->dma_addr = dma_addr;

                dma_len = efx_max_tx_len(efx, dma_addr);

                /* If there is enough space to send then do so */
                if (dma_len >= len)
                        break;

                buffer->len = dma_len;
                buffer->flags = EFX_TX_BUF_CONT;
                dma_addr += dma_len;
                len -= dma_len;
        }

        EFX_BUG_ON_PARANOID(!len);
        buffer->len = len;
        *final_buffer = buffer;
}


/*
 * Put a TSO header into the TX queue.
 *
 * This is special-cased because we know that it is small enough to fit in
 * a single fragment, and we know it doesn't cross a page boundary.  It
 * also allows us to not worry about end-of-packet etc.
 */
static int efx_tso_put_header(struct efx_tx_queue *tx_queue,
                              struct efx_tx_buffer *buffer, u8 *header)
{
        if (unlikely(buffer->flags & EFX_TX_BUF_HEAP)) {
                buffer->dma_addr = dma_map_single(&tx_queue->efx->pci_dev->dev,
                                                  header, buffer->len,
                                                  DMA_TO_DEVICE);
                if (unlikely(dma_mapping_error(&tx_queue->efx->pci_dev->dev,
                                               buffer->dma_addr))) {
                        kfree(buffer->heap_buf);
                        buffer->len = 0;
                        buffer->flags = 0;
                        return -ENOMEM;
                }
                buffer->unmap_len = buffer->len;
                buffer->flags |= EFX_TX_BUF_MAP_SINGLE;
        }

        ++tx_queue->insert_count;
        return 0;
}


/* Remove buffers put into a tx_queue.  None of the buffers must have
 * an skb attached.
 */
static void efx_enqueue_unwind(struct efx_tx_queue *tx_queue)
{
        struct efx_tx_buffer *buffer;

        /* Work backwards until we hit the original insert pointer value */
        while (tx_queue->insert_count != tx_queue->write_count) {
                --tx_queue->insert_count;
                buffer = &tx_queue->buffer[tx_queue->insert_count &
                                           tx_queue->ptr_mask];
                efx_dequeue_buffer(tx_queue, buffer, NULL, NULL);
        }
}


/* Parse the SKB header and initialise state. */
static int tso_start(struct tso_state *st, struct efx_nic *efx,
                     const struct sk_buff *skb)
{
        bool use_options = efx_nic_rev(efx) >= EFX_REV_HUNT_A0;
        struct device *dma_dev = &efx->pci_dev->dev;
        unsigned int header_len, in_len;
        dma_addr_t dma_addr;

        st->ip_off = skb_network_header(skb) - skb->data;
        st->tcp_off = skb_transport_header(skb) - skb->data;
        header_len = st->tcp_off + (tcp_hdr(skb)->doff << 2u);
        in_len = skb_headlen(skb) - header_len;
        st->header_len = header_len;
        st->in_len = in_len;
        if (st->protocol == htons(ETH_P_IP)) {
                st->ip_base_len = st->header_len - st->ip_off;
                st->ipv4_id = ntohs(ip_hdr(skb)->id);
        } else {
                st->ip_base_len = st->header_len - st->tcp_off;
                st->ipv4_id = 0;
        }
        st->seqnum = ntohl(tcp_hdr(skb)->seq);

        EFX_BUG_ON_PARANOID(tcp_hdr(skb)->urg);
        EFX_BUG_ON_PARANOID(tcp_hdr(skb)->syn);
        EFX_BUG_ON_PARANOID(tcp_hdr(skb)->rst);

        st->out_len = skb->len - header_len;

        if (!use_options) {
                st->header_unmap_len = 0;

                if (likely(in_len == 0)) {
                        st->dma_flags = 0;
                        st->unmap_len = 0;
                        return 0;
                }

                dma_addr = dma_map_single(dma_dev, skb->data + header_len,
                                          in_len, DMA_TO_DEVICE);
                st->dma_flags = EFX_TX_BUF_MAP_SINGLE;
                st->dma_addr = dma_addr;
                st->unmap_addr = dma_addr;
                st->unmap_len = in_len;
        } else {
                dma_addr = dma_map_single(dma_dev, skb->data,
                                          skb_headlen(skb), DMA_TO_DEVICE);
                st->header_dma_addr = dma_addr;
                st->header_unmap_len = skb_headlen(skb);
                st->dma_flags = 0;
                st->dma_addr = dma_addr + header_len;
                st->unmap_len = 0;
        }

        return unlikely(dma_mapping_error(dma_dev, dma_addr)) ? -ENOMEM : 0;
}

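/* Map the next skb fragment for DMA and record it as the current TSO input
 * position.
 */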
static int tso_get_fragment(struct tso_state *st, struct efx_nic *efx,
                            skb_frag_t *frag)
{
        st->unmap_addr = skb_frag_dma_map(&efx->pci_dev->dev, frag, 0,
                                          skb_frag_size(frag), DMA_TO_DEVICE);
        if (likely(!dma_mapping_error(&efx->pci_dev->dev, st->unmap_addr))) {
                st->dma_flags = 0;
                st->unmap_len = skb_frag_size(frag);
                st->in_len = skb_frag_size(frag);
                st->dma_addr = st->unmap_addr;
                return 0;
        }
        return -ENOMEM;
}


/**
 * tso_fill_packet_with_fragment - form descriptors for the current fragment
 * @tx_queue: Efx TX queue
 * @skb: Socket buffer
 * @st: TSO state
 *
 * Form descriptors for the current fragment, until we reach the end
 * of fragment or end-of-packet.
 */
static void tso_fill_packet_with_fragment(struct efx_tx_queue *tx_queue,
                                          const struct sk_buff *skb,
                                          struct tso_state *st)
{
        struct efx_tx_buffer *buffer;
        int n;

        if (st->in_len == 0)
                return;
        if (st->packet_space == 0)
                return;

        EFX_BUG_ON_PARANOID(st->in_len <= 0);
        EFX_BUG_ON_PARANOID(st->packet_space <= 0);

        n = min(st->in_len, st->packet_space);

        st->packet_space -= n;
        st->out_len -= n;
        st->in_len -= n;

        efx_tx_queue_insert(tx_queue, st->dma_addr, n, &buffer);

        if (st->out_len == 0) {
                /* Transfer ownership of the skb */
                buffer->skb = skb;
                buffer->flags = EFX_TX_BUF_SKB;
        } else if (st->packet_space != 0) {
                buffer->flags = EFX_TX_BUF_CONT;
        }

        if (st->in_len == 0) {
                /* Transfer ownership of the DMA mapping */
                buffer->unmap_len = st->unmap_len;
                buffer->flags |= st->dma_flags;
                st->unmap_len = 0;
        }

        st->dma_addr += n;
}


/**
 * tso_start_new_packet - generate a new header and prepare for the new packet
 * @tx_queue: Efx TX queue
 * @skb: Socket buffer
 * @st: TSO state
 *
 * Generate a new header and prepare for the new packet.  Return 0 on
 * success, or -%ENOMEM if failed to alloc header.
 */
static int tso_start_new_packet(struct efx_tx_queue *tx_queue,
                                const struct sk_buff *skb,
                                struct tso_state *st)
{
        struct efx_tx_buffer *buffer =
                &tx_queue->buffer[tx_queue->insert_count & tx_queue->ptr_mask];
        bool is_last = st->out_len <= skb_shinfo(skb)->gso_size;
        u8 tcp_flags_clear;

        if (!is_last) {
                st->packet_space = skb_shinfo(skb)->gso_size;
                tcp_flags_clear = 0x09; /* mask out FIN and PSH */
        } else {
                st->packet_space = st->out_len;
                tcp_flags_clear = 0x00;
        }

        if (!st->header_unmap_len) {
                /* Allocate and insert a DMA-mapped header buffer. */
                struct tcphdr *tsoh_th;
                unsigned ip_length;
                u8 *header;
                int rc;

                header = efx_tsoh_get_buffer(tx_queue, buffer, st->header_len);
                if (!header)
                        return -ENOMEM;

                tsoh_th = (struct tcphdr *)(header + st->tcp_off);

                /* Copy and update the headers. */
                memcpy(header, skb->data, st->header_len);

                tsoh_th->seq = htonl(st->seqnum);
                ((u8 *)tsoh_th)[13] &= ~tcp_flags_clear;

                ip_length = st->ip_base_len + st->packet_space;

                if (st->protocol == htons(ETH_P_IP)) {
                        struct iphdr *tsoh_iph =
                                (struct iphdr *)(header + st->ip_off);

                        tsoh_iph->tot_len = htons(ip_length);
                        tsoh_iph->id = htons(st->ipv4_id);
                } else {
                        struct ipv6hdr *tsoh_iph =
                                (struct ipv6hdr *)(header + st->ip_off);

                        tsoh_iph->payload_len = htons(ip_length);
                }

                rc = efx_tso_put_header(tx_queue, buffer, header);
                if (unlikely(rc))
                        return rc;
        } else {
                /* Send the original headers with a TSO option descriptor
                 * in front
                 */
                u8 tcp_flags = ((u8 *)tcp_hdr(skb))[13] & ~tcp_flags_clear;

                buffer->flags = EFX_TX_BUF_OPTION;
                buffer->len = 0;
                buffer->unmap_len = 0;
                EFX_POPULATE_QWORD_5(buffer->option,
                                     ESF_DZ_TX_DESC_IS_OPT, 1,
                                     ESF_DZ_TX_OPTION_TYPE,
                                     ESE_DZ_TX_OPTION_DESC_TSO,
                                     ESF_DZ_TX_TSO_TCP_FLAGS, tcp_flags,
                                     ESF_DZ_TX_TSO_IP_ID, st->ipv4_id,
                                     ESF_DZ_TX_TSO_TCP_SEQNO, st->seqnum);
                ++tx_queue->insert_count;

                /* We mapped the headers in tso_start().  Unmap them
                 * when the last segment is completed.
                 */
                buffer = &tx_queue->buffer[tx_queue->insert_count &
                                           tx_queue->ptr_mask];
                buffer->dma_addr = st->header_dma_addr;
                buffer->len = st->header_len;
                if (is_last) {
                        buffer->flags = EFX_TX_BUF_CONT | EFX_TX_BUF_MAP_SINGLE;
                        buffer->unmap_len = st->header_unmap_len;
                        /* Ensure we only unmap them once in case of a
                         * later DMA mapping error and rollback
                         */
                        st->header_unmap_len = 0;
                } else {
                        buffer->flags = EFX_TX_BUF_CONT;
                        buffer->unmap_len = 0;
                }
                ++tx_queue->insert_count;
        }

        st->seqnum += skb_shinfo(skb)->gso_size;

        /* Linux leaves suitable gaps in the IP ID space for us to fill. */
        ++st->ipv4_id;

        ++tx_queue->tso_packets;

        return 0;
}


/**
 * efx_enqueue_skb_tso - segment and transmit a TSO socket buffer
 * @tx_queue: Efx TX queue
 * @skb: Socket buffer
 *
 * Context: You must hold netif_tx_lock() to call this function.
 *
 * Add socket buffer @skb to @tx_queue, doing TSO or return != 0 if
 * @skb was not enqueued.  In all cases @skb is consumed.  Return
 * %NETDEV_TX_OK.
 */
static int efx_enqueue_skb_tso(struct efx_tx_queue *tx_queue,
                               struct sk_buff *skb)
{
        struct efx_nic *efx = tx_queue->efx;
        int frag_i, rc;
        struct tso_state state;

        /* Find the packet protocol and sanity-check it */
        state.protocol = efx_tso_check_protocol(skb);

        EFX_BUG_ON_PARANOID(tx_queue->write_count != tx_queue->insert_count);

        rc = tso_start(&state, efx, skb);
        if (rc)
                goto mem_err;

        if (likely(state.in_len == 0)) {
                /* Grab the first payload fragment. */
                EFX_BUG_ON_PARANOID(skb_shinfo(skb)->nr_frags < 1);
                frag_i = 0;
                rc = tso_get_fragment(&state, efx,
                                      skb_shinfo(skb)->frags + frag_i);
                if (rc)
                        goto mem_err;
        } else {
                /* Payload starts in the header area. */
                frag_i = -1;
        }

        if (tso_start_new_packet(tx_queue, skb, &state) < 0)
                goto mem_err;

        while (1) {
                tso_fill_packet_with_fragment(tx_queue, skb, &state);

                /* Move onto the next fragment? */
                if (state.in_len == 0) {
                        if (++frag_i >= skb_shinfo(skb)->nr_frags)
                                /* End of payload reached. */
                                break;
                        rc = tso_get_fragment(&state, efx,
                                              skb_shinfo(skb)->frags + frag_i);
                        if (rc)
                                goto mem_err;
                }

                /* Start at new packet? */
                if (state.packet_space == 0 &&
                    tso_start_new_packet(tx_queue, skb, &state) < 0)
                        goto mem_err;
        }

        netdev_tx_sent_queue(tx_queue->core_txq, skb->len);

        /* Pass off to hardware */
        efx_nic_push_buffers(tx_queue);

        efx_tx_maybe_stop_queue(tx_queue);

        tx_queue->tso_bursts++;
        return NETDEV_TX_OK;

 mem_err:
        netif_err(efx, tx_err, efx->net_dev,
                  "Out of memory for TSO headers, or DMA mapping error\n");
        dev_kfree_skb_any(skb);

        /* Free the DMA mapping we were in the process of writing out */
        if (state.unmap_len) {
                if (state.dma_flags & EFX_TX_BUF_MAP_SINGLE)
                        dma_unmap_single(&efx->pci_dev->dev, state.unmap_addr,
                                         state.unmap_len, DMA_TO_DEVICE);
                else
                        dma_unmap_page(&efx->pci_dev->dev, state.unmap_addr,
                                       state.unmap_len, DMA_TO_DEVICE);
        }

        /* Free the header DMA mapping, if using option descriptors */
        if (state.header_unmap_len)
                dma_unmap_single(&efx->pci_dev->dev, state.header_dma_addr,
                                 state.header_unmap_len, DMA_TO_DEVICE);

        efx_enqueue_unwind(tx_queue);
        return NETDEV_TX_OK;
}