/****************************************************************************
 * Driver for Solarflare network controllers and boards
 * Copyright 2005-2006 Fen Systems Ltd.
 * Copyright 2005-2013 Solarflare Communications Inc.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 as published
 * by the Free Software Foundation, incorporated herein by reference.
 */

#include <linux/pci.h>
#include <linux/tcp.h>
#include <linux/ip.h>
#include <linux/in.h>
#include <linux/ipv6.h>
#include <linux/slab.h>
#include <net/ipv6.h>
#include <linux/if_ether.h>
#include <linux/highmem.h>
#include "net_driver.h"
#include "efx.h"
#include "nic.h"
#include "workarounds.h"
#include "ef10_regs.h"

static void efx_dequeue_buffer(struct efx_tx_queue *tx_queue,
			       struct efx_tx_buffer *buffer,
			       unsigned int *pkts_compl,
			       unsigned int *bytes_compl)
{
	if (buffer->unmap_len) {
		struct device *dma_dev = &tx_queue->efx->pci_dev->dev;
		dma_addr_t unmap_addr = (buffer->dma_addr + buffer->len -
					 buffer->unmap_len);
		if (buffer->flags & EFX_TX_BUF_MAP_SINGLE)
			dma_unmap_single(dma_dev, unmap_addr, buffer->unmap_len,
					 DMA_TO_DEVICE);
		else
			dma_unmap_page(dma_dev, unmap_addr, buffer->unmap_len,
				       DMA_TO_DEVICE);
		buffer->unmap_len = 0;
	}

	if (buffer->flags & EFX_TX_BUF_SKB) {
		(*pkts_compl)++;
		(*bytes_compl) += buffer->skb->len;
		dev_kfree_skb_any((struct sk_buff *) buffer->skb);
		netif_vdbg(tx_queue->efx, tx_done, tx_queue->efx->net_dev,
			   "TX queue %d transmission id %x complete\n",
			   tx_queue->queue, tx_queue->read_count);
	} else if (buffer->flags & EFX_TX_BUF_HEAP) {
		kfree(buffer->heap_buf);
	}

	buffer->len = 0;
	buffer->flags = 0;
}

static int efx_enqueue_skb_tso(struct efx_tx_queue *tx_queue,
			       struct sk_buff *skb);

static inline unsigned
efx_max_tx_len(struct efx_nic *efx, dma_addr_t dma_addr)
{
	/* Depending on the NIC revision, we can use descriptor
	 * lengths up to 8K or 8K-1.  However, since PCI Express
	 * devices must split read requests at 4K boundaries, there is
	 * little benefit from using descriptors that cross those
	 * boundaries and we keep things simple by not doing so.
	 */
	unsigned len = (~dma_addr & (EFX_PAGE_SIZE - 1)) + 1;

	/* Work around hardware bug for unaligned buffers. */
	if (EFX_WORKAROUND_5391(efx) && (dma_addr & 0xf))
		len = min_t(unsigned, len, 512 - (dma_addr & 0xf));

	return len;
}
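
/* Illustrative note (not in the original driver source): assuming
 * EFX_PAGE_SIZE is 4K, a buffer whose DMA address has low 12 bits of
 * 0xf00 gives (~dma_addr & 0xfff) + 1 = 0x100, so the first descriptor
 * carries at most 256 bytes and ends exactly on the 4K boundary; the
 * rest of the buffer is emitted in follow-on descriptors by the
 * callers' loops.
 */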

unsigned int efx_tx_max_skb_descs(struct efx_nic *efx)
{
	/* Header and payload descriptor for each output segment, plus
	 * one for every input fragment boundary within a segment
	 */
	unsigned int max_descs = EFX_TSO_MAX_SEGS * 2 + MAX_SKB_FRAGS;

	/* Possibly one more per segment for the alignment workaround,
	 * or for option descriptors
	 */
	if (EFX_WORKAROUND_5391(efx) || efx_nic_rev(efx) >= EFX_REV_HUNT_A0)
		max_descs += EFX_TSO_MAX_SEGS;

	/* Possibly more for PCIe page boundaries within input fragments */
	if (PAGE_SIZE > EFX_PAGE_SIZE)
		max_descs += max_t(unsigned int, MAX_SKB_FRAGS,
				   DIV_ROUND_UP(GSO_MAX_SIZE, EFX_PAGE_SIZE));

	return max_descs;
}
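
/* Worked example (illustrative; the exact constants are assumptions):
 * with EFX_TSO_MAX_SEGS = 100 and MAX_SKB_FRAGS = 17 the baseline is
 * 100 * 2 + 17 = 217 descriptors, plus another 100 when the alignment
 * workaround or TSO option descriptors apply.
 */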

/* Get partner of a TX queue, seen as part of the same net core queue */
static struct efx_tx_queue *efx_tx_queue_partner(struct efx_tx_queue *tx_queue)
{
	if (tx_queue->queue & EFX_TXQ_TYPE_OFFLOAD)
		return tx_queue - EFX_TXQ_TYPE_OFFLOAD;
	else
		return tx_queue + EFX_TXQ_TYPE_OFFLOAD;
}
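
/* Illustrative note (an assumption about the queue layout, implied by the
 * pointer arithmetic above): the checksum-offload and no-offload queues of
 * a channel sit in adjacent slots of the channel's tx_queue array, so
 * stepping by EFX_TXQ_TYPE_OFFLOAD yields the partner queue that the net
 * core sees as the same queue.
 */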

static void efx_tx_maybe_stop_queue(struct efx_tx_queue *txq1)
{
	/* We need to consider both queues that the net core sees as one */
	struct efx_tx_queue *txq2 = efx_tx_queue_partner(txq1);
	struct efx_nic *efx = txq1->efx;
	unsigned int fill_level;

	fill_level = max(txq1->insert_count - txq1->old_read_count,
			 txq2->insert_count - txq2->old_read_count);
	if (likely(fill_level < efx->txq_stop_thresh))
		return;

	/* We used the stale old_read_count above, which gives us a
	 * pessimistic estimate of the fill level (which may even
	 * validly be >= efx->txq_entries).  Now try again using
	 * read_count (more likely to be a cache miss).
	 *
	 * If we read read_count and then conditionally stop the
	 * queue, it is possible for the completion path to race with
	 * us and complete all outstanding descriptors in the middle,
	 * after which there will be no more completions to wake it.
	 * Therefore we stop the queue first, then read read_count
	 * (with a memory barrier to ensure the ordering), then
	 * restart the queue if the fill level turns out to be low
	 * enough.
	 */
	netif_tx_stop_queue(txq1->core_txq);
	smp_mb();
	txq1->old_read_count = ACCESS_ONCE(txq1->read_count);
	txq2->old_read_count = ACCESS_ONCE(txq2->read_count);

	fill_level = max(txq1->insert_count - txq1->old_read_count,
			 txq2->insert_count - txq2->old_read_count);
	EFX_BUG_ON_PARANOID(fill_level >= efx->txq_entries);
	if (likely(fill_level < efx->txq_stop_thresh)) {
		smp_mb();
		if (likely(!efx->loopback_selftest))
			netif_tx_start_queue(txq1->core_txq);
	}
}
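
/* Illustrative summary (not in the original source): the stop/recheck above
 * pairs with the smp_mb() in efx_xmit_done().  Stopping the queue before
 * re-reading read_count means that either this function sees completions
 * that already happened and restarts the queue itself, or the completion
 * path sees the stopped queue and wakes it, so a wake-up cannot be lost.
 */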

/*
 * Add a socket buffer to a TX queue
 *
 * This maps all fragments of a socket buffer for DMA and adds them to
 * the TX queue.  The queue's insert pointer will be incremented by
 * the number of fragments in the socket buffer.
 *
 * If any DMA mapping fails, any mapped fragments will be unmapped,
 * and the queue's insert pointer will be restored to its original value.
 *
 * This function is split out from efx_hard_start_xmit to allow the
 * loopback test to direct packets via specific TX queues.
 *
 * Returns NETDEV_TX_OK.
 * You must hold netif_tx_lock() to call this function.
 */
netdev_tx_t efx_enqueue_skb(struct efx_tx_queue *tx_queue, struct sk_buff *skb)
{
	struct efx_nic *efx = tx_queue->efx;
	struct device *dma_dev = &efx->pci_dev->dev;
	struct efx_tx_buffer *buffer;
	skb_frag_t *fragment;
	unsigned int len, unmap_len = 0, insert_ptr;
	dma_addr_t dma_addr, unmap_addr = 0;
	unsigned int dma_len;
	unsigned short dma_flags;
	int i = 0;

	EFX_BUG_ON_PARANOID(tx_queue->write_count != tx_queue->insert_count);

	if (skb_shinfo(skb)->gso_size)
		return efx_enqueue_skb_tso(tx_queue, skb);

	/* Get size of the initial fragment */
	len = skb_headlen(skb);

	/* Pad if necessary */
	if (EFX_WORKAROUND_15592(efx) && skb->len <= 32) {
		EFX_BUG_ON_PARANOID(skb->data_len);
		len = 32 + 1;
		if (skb_pad(skb, len - skb->len))
			return NETDEV_TX_OK;
	}

	/* Map for DMA.  Use dma_map_single rather than dma_map_page
	 * since this is more efficient on machines with sparse
	 * memory.
	 */
	dma_flags = EFX_TX_BUF_MAP_SINGLE;
	dma_addr = dma_map_single(dma_dev, skb->data, len, PCI_DMA_TODEVICE);

	/* Process all fragments */
	while (1) {
		if (unlikely(dma_mapping_error(dma_dev, dma_addr)))
			goto dma_err;

		/* Store fields for marking in the per-fragment final
		 * descriptor */
		unmap_len = len;
		unmap_addr = dma_addr;

		/* Add to TX queue, splitting across DMA boundaries */
		do {
			insert_ptr = tx_queue->insert_count & tx_queue->ptr_mask;
			buffer = &tx_queue->buffer[insert_ptr];
			EFX_BUG_ON_PARANOID(buffer->flags);
			EFX_BUG_ON_PARANOID(buffer->len);
			EFX_BUG_ON_PARANOID(buffer->unmap_len);

			dma_len = efx_max_tx_len(efx, dma_addr);
			if (likely(dma_len >= len))
				dma_len = len;

			/* Fill out per descriptor fields */
			buffer->len = dma_len;
			buffer->dma_addr = dma_addr;
			buffer->flags = EFX_TX_BUF_CONT;
			len -= dma_len;
			dma_addr += dma_len;
			++tx_queue->insert_count;
		} while (len);

		/* Transfer ownership of the unmapping to the final buffer */
		buffer->flags = EFX_TX_BUF_CONT | dma_flags;
		buffer->unmap_len = unmap_len;
		unmap_len = 0;

		/* Get address and size of next fragment */
		if (i >= skb_shinfo(skb)->nr_frags)
			break;
		fragment = &skb_shinfo(skb)->frags[i];
		len = skb_frag_size(fragment);
		i++;
		/* Map for DMA */
		dma_flags = 0;
		dma_addr = skb_frag_dma_map(dma_dev, fragment, 0, len,
					    DMA_TO_DEVICE);
	}

	/* Transfer ownership of the skb to the final buffer */
	buffer->skb = skb;
	buffer->flags = EFX_TX_BUF_SKB | dma_flags;

	netdev_tx_sent_queue(tx_queue->core_txq, skb->len);

	/* Pass off to hardware */
	efx_nic_push_buffers(tx_queue);

	efx_tx_maybe_stop_queue(tx_queue);

	return NETDEV_TX_OK;

 dma_err:
	netif_err(efx, tx_err, efx->net_dev,
		  " TX queue %d could not map skb with %d bytes %d "
		  "fragments for DMA\n", tx_queue->queue, skb->len,
		  skb_shinfo(skb)->nr_frags + 1);

	/* Mark the packet as transmitted, and free the SKB ourselves */
	dev_kfree_skb_any(skb);

	/* Work backwards until we hit the original insert pointer value */
	while (tx_queue->insert_count != tx_queue->write_count) {
		unsigned int pkts_compl = 0, bytes_compl = 0;
		--tx_queue->insert_count;
		insert_ptr = tx_queue->insert_count & tx_queue->ptr_mask;
		buffer = &tx_queue->buffer[insert_ptr];
		efx_dequeue_buffer(tx_queue, buffer, &pkts_compl, &bytes_compl);
	}

	/* Free the fragment we were mid-way through pushing */
	if (unmap_len) {
		if (dma_flags & EFX_TX_BUF_MAP_SINGLE)
			dma_unmap_single(dma_dev, unmap_addr, unmap_len,
					 DMA_TO_DEVICE);
		else
			dma_unmap_page(dma_dev, unmap_addr, unmap_len,
				       DMA_TO_DEVICE);
	}

	return NETDEV_TX_OK;
}
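
/* Illustrative example (not from the original source): an skb with a
 * 200-byte linear area and two page fragments, none of which cross a
 * descriptor boundary, is emitted as three descriptors; only the last one
 * carries EFX_TX_BUF_SKB and owns the skb, while each fragment's final
 * descriptor owns that fragment's DMA unmap.
 */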

/* Remove packets from the TX queue
 *
 * This removes packets from the TX queue, up to and including the
 * specified index.
 */
static void efx_dequeue_buffers(struct efx_tx_queue *tx_queue,
				unsigned int index,
				unsigned int *pkts_compl,
				unsigned int *bytes_compl)
{
	struct efx_nic *efx = tx_queue->efx;
	unsigned int stop_index, read_ptr;

	stop_index = (index + 1) & tx_queue->ptr_mask;
	read_ptr = tx_queue->read_count & tx_queue->ptr_mask;

	while (read_ptr != stop_index) {
		struct efx_tx_buffer *buffer = &tx_queue->buffer[read_ptr];

		if (!(buffer->flags & EFX_TX_BUF_OPTION) &&
		    unlikely(buffer->len == 0)) {
			netif_err(efx, tx_err, efx->net_dev,
				  "TX queue %d spurious TX completion id %x\n",
				  tx_queue->queue, read_ptr);
			efx_schedule_reset(efx, RESET_TYPE_TX_SKIP);
			return;
		}

		efx_dequeue_buffer(tx_queue, buffer, pkts_compl, bytes_compl);

		++tx_queue->read_count;
		read_ptr = tx_queue->read_count & tx_queue->ptr_mask;
	}
}

/* Initiate a packet transmission.  We use one channel per CPU
 * (sharing when we have more CPUs than channels).  On Falcon, the TX
 * completion events will be directed back to the CPU that transmitted
 * the packet, which should be cache-efficient.
 *
 * Context: non-blocking.
 * Note that returning anything other than NETDEV_TX_OK will cause the
 * OS to free the skb.
 */
netdev_tx_t efx_hard_start_xmit(struct sk_buff *skb,
				struct net_device *net_dev)
{
	struct efx_nic *efx = netdev_priv(net_dev);
	struct efx_tx_queue *tx_queue;
	unsigned index, type;

	EFX_WARN_ON_PARANOID(!netif_device_present(net_dev));

	/* PTP "event" packet */
	if (unlikely(efx_xmit_with_hwtstamp(skb)) &&
	    unlikely(efx_ptp_is_ptp_tx(efx, skb))) {
		return efx_ptp_tx(efx, skb);
	}

	index = skb_get_queue_mapping(skb);
	type = skb->ip_summed == CHECKSUM_PARTIAL ? EFX_TXQ_TYPE_OFFLOAD : 0;
	if (index >= efx->n_tx_channels) {
		index -= efx->n_tx_channels;
		type |= EFX_TXQ_TYPE_HIGHPRI;
	}
	tx_queue = efx_get_tx_queue(efx, index, type);

	return efx_enqueue_skb(tx_queue, skb);
}

void efx_init_tx_queue_core_txq(struct efx_tx_queue *tx_queue)
{
	struct efx_nic *efx = tx_queue->efx;

	/* Must be inverse of queue lookup in efx_hard_start_xmit() */
	tx_queue->core_txq =
		netdev_get_tx_queue(efx->net_dev,
				    tx_queue->queue / EFX_TXQ_TYPES +
				    ((tx_queue->queue & EFX_TXQ_TYPE_HIGHPRI) ?
				     efx->n_tx_channels : 0));
}
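
/* Worked example (illustrative; assumes EFX_TXQ_TYPES = 4 and
 * n_tx_channels = 4): hardware queue 9 belongs to channel 9 / 4 = 2 with no
 * HIGHPRI bit set, so it maps to core queue 2; queue 10 on the same channel
 * has the HIGHPRI bit and maps to core queue 2 + 4 = 6.  This is the inverse
 * of the index/type computation in efx_hard_start_xmit().
 */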

int efx_setup_tc(struct net_device *net_dev, u8 num_tc)
{
	struct efx_nic *efx = netdev_priv(net_dev);
	struct efx_channel *channel;
	struct efx_tx_queue *tx_queue;
	unsigned tc;
	int rc;

	if (efx_nic_rev(efx) < EFX_REV_FALCON_B0 || num_tc > EFX_MAX_TX_TC)
		return -EINVAL;

	if (num_tc == net_dev->num_tc)
		return 0;

	for (tc = 0; tc < num_tc; tc++) {
		net_dev->tc_to_txq[tc].offset = tc * efx->n_tx_channels;
		net_dev->tc_to_txq[tc].count = efx->n_tx_channels;
	}

	if (num_tc > net_dev->num_tc) {
		/* Initialise high-priority queues as necessary */
		efx_for_each_channel(channel, efx) {
			efx_for_each_possible_channel_tx_queue(tx_queue,
							       channel) {
				if (!(tx_queue->queue & EFX_TXQ_TYPE_HIGHPRI))
					continue;
				if (!tx_queue->buffer) {
					rc = efx_probe_tx_queue(tx_queue);
					if (rc)
						return rc;
				}
				if (!tx_queue->initialised)
					efx_init_tx_queue(tx_queue);
				efx_init_tx_queue_core_txq(tx_queue);
			}
		}
	} else {
		/* Reduce number of classes before number of queues */
		net_dev->num_tc = num_tc;
	}

	rc = netif_set_real_num_tx_queues(net_dev,
					  max_t(int, num_tc, 1) *
					  efx->n_tx_channels);
	if (rc)
		return rc;

	/* Do not destroy high-priority queues when they become
	 * unused.  We would have to flush them first, and it is
	 * fairly difficult to flush a subset of TX queues.  Leave
	 * it to efx_fini_channels().
	 */

	net_dev->num_tc = num_tc;
	return 0;
}
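
/* Illustrative example (the values are assumptions): with n_tx_channels = 4
 * and no traffic classes configured yet, efx_setup_tc(net_dev, 2) points
 * tc 0 at core queues 0-3 and tc 1 at core queues 4-7, probes and
 * initialises the high-priority hardware queues behind queues 4-7, and then
 * grows the real number of TX queues from 4 to 8.
 */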

void efx_xmit_done(struct efx_tx_queue *tx_queue, unsigned int index)
{
	unsigned fill_level;
	struct efx_nic *efx = tx_queue->efx;
	struct efx_tx_queue *txq2;
	unsigned int pkts_compl = 0, bytes_compl = 0;

	EFX_BUG_ON_PARANOID(index > tx_queue->ptr_mask);

	efx_dequeue_buffers(tx_queue, index, &pkts_compl, &bytes_compl);
	netdev_tx_completed_queue(tx_queue->core_txq, pkts_compl, bytes_compl);

	if (pkts_compl > 1)
		++tx_queue->merge_events;

	/* See if we need to restart the netif queue.  This memory
	 * barrier ensures that we write read_count (inside
	 * efx_dequeue_buffers()) before reading the queue status.
	 */
	smp_mb();
	if (unlikely(netif_tx_queue_stopped(tx_queue->core_txq)) &&
	    likely(efx->port_enabled) &&
	    likely(netif_device_present(efx->net_dev))) {
		txq2 = efx_tx_queue_partner(tx_queue);
		fill_level = max(tx_queue->insert_count - tx_queue->read_count,
				 txq2->insert_count - txq2->read_count);
		if (fill_level <= efx->txq_wake_thresh)
			netif_tx_wake_queue(tx_queue->core_txq);
	}

	/* Check whether the hardware queue is now empty */
	if ((int)(tx_queue->read_count - tx_queue->old_write_count) >= 0) {
		tx_queue->old_write_count = ACCESS_ONCE(tx_queue->write_count);
		if (tx_queue->read_count == tx_queue->old_write_count) {
			smp_mb();
			tx_queue->empty_read_count =
				tx_queue->read_count | EFX_EMPTY_COUNT_VALID;
		}
	}
}
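
/* Illustrative note (an interpretation, not from the original source): the
 * final block records the read_count at which the queue drained, tagged
 * with EFX_EMPTY_COUNT_VALID, so that the descriptor-push path can later
 * tell whether the hardware queue was empty when new descriptors are
 * written and choose the faster "TX push" doorbell where supported.
 */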

/* Size of page-based TSO header buffers.  Larger blocks must be
 * allocated from the heap.
 */
#define TSOH_STD_SIZE	128
#define TSOH_PER_PAGE	(PAGE_SIZE / TSOH_STD_SIZE)

/* At most half the descriptors in the queue at any time will refer to
 * a TSO header buffer, since they must always be followed by a
 * payload descriptor referring to an skb.
 */
static unsigned int efx_tsoh_page_count(struct efx_tx_queue *tx_queue)
{
	return DIV_ROUND_UP(tx_queue->ptr_mask + 1, 2 * TSOH_PER_PAGE);
}
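
/* Worked example (illustrative; assumes 4K pages): a 1024-entry ring can
 * hold at most 512 header descriptors, TSOH_PER_PAGE is 4096 / 128 = 32,
 * so DIV_ROUND_UP(1024, 64) = 16 pages of header buffers are allocated.
 */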

int efx_probe_tx_queue(struct efx_tx_queue *tx_queue)
{
	struct efx_nic *efx = tx_queue->efx;
	unsigned int entries;
	int rc;

	/* Create the smallest power-of-two aligned ring */
	entries = max(roundup_pow_of_two(efx->txq_entries), EFX_MIN_DMAQ_SIZE);
	EFX_BUG_ON_PARANOID(entries > EFX_MAX_DMAQ_SIZE);
	tx_queue->ptr_mask = entries - 1;

	netif_dbg(efx, probe, efx->net_dev,
		  "creating TX queue %d size %#x mask %#x\n",
		  tx_queue->queue, efx->txq_entries, tx_queue->ptr_mask);

	/* Allocate software ring */
	tx_queue->buffer = kcalloc(entries, sizeof(*tx_queue->buffer),
				   GFP_KERNEL);
	if (!tx_queue->buffer)
		return -ENOMEM;

	if (tx_queue->queue & EFX_TXQ_TYPE_OFFLOAD) {
		tx_queue->tsoh_page =
			kcalloc(efx_tsoh_page_count(tx_queue),
				sizeof(tx_queue->tsoh_page[0]), GFP_KERNEL);
		if (!tx_queue->tsoh_page) {
			rc = -ENOMEM;
			goto fail1;
		}
	}

	/* Allocate hardware ring */
	rc = efx_nic_probe_tx(tx_queue);
	if (rc)
		goto fail2;

	return 0;

fail2:
	kfree(tx_queue->tsoh_page);
	tx_queue->tsoh_page = NULL;
fail1:
	kfree(tx_queue->buffer);
	tx_queue->buffer = NULL;
	return rc;
}

void efx_init_tx_queue(struct efx_tx_queue *tx_queue)
{
	netif_dbg(tx_queue->efx, drv, tx_queue->efx->net_dev,
		  "initialising TX queue %d\n", tx_queue->queue);

	tx_queue->insert_count = 0;
	tx_queue->write_count = 0;
	tx_queue->old_write_count = 0;
	tx_queue->read_count = 0;
	tx_queue->old_read_count = 0;
	tx_queue->empty_read_count = 0 | EFX_EMPTY_COUNT_VALID;

	/* Set up TX descriptor ring */
	efx_nic_init_tx(tx_queue);

	tx_queue->initialised = true;
}

void efx_fini_tx_queue(struct efx_tx_queue *tx_queue)
{
	struct efx_tx_buffer *buffer;

	netif_dbg(tx_queue->efx, drv, tx_queue->efx->net_dev,
		  "shutting down TX queue %d\n", tx_queue->queue);

	if (!tx_queue->buffer)
		return;

	/* Free any buffers left in the ring */
	while (tx_queue->read_count != tx_queue->write_count) {
		unsigned int pkts_compl = 0, bytes_compl = 0;
		buffer = &tx_queue->buffer[tx_queue->read_count & tx_queue->ptr_mask];
		efx_dequeue_buffer(tx_queue, buffer, &pkts_compl, &bytes_compl);

		++tx_queue->read_count;
	}
	netdev_tx_reset_queue(tx_queue->core_txq);
}

void efx_remove_tx_queue(struct efx_tx_queue *tx_queue)
{
	int i;

	if (!tx_queue->buffer)
		return;

	netif_dbg(tx_queue->efx, drv, tx_queue->efx->net_dev,
		  "destroying TX queue %d\n", tx_queue->queue);
	efx_nic_remove_tx(tx_queue);

	if (tx_queue->tsoh_page) {
		for (i = 0; i < efx_tsoh_page_count(tx_queue); i++)
			efx_nic_free_buffer(tx_queue->efx,
					    &tx_queue->tsoh_page[i]);
		kfree(tx_queue->tsoh_page);
		tx_queue->tsoh_page = NULL;
	}

	kfree(tx_queue->buffer);
	tx_queue->buffer = NULL;
}

/* Efx TCP segmentation acceleration.
 *
 * Why?  Because by doing it here in the driver we can go significantly
 * faster than GSO.
 *
 * Requires TX checksum offload support.
 */

/* Number of bytes inserted at the start of a TSO header buffer,
 * similar to NET_IP_ALIGN.
 */
#ifdef CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS
#define TSOH_OFFSET	0
#else
#define TSOH_OFFSET	NET_IP_ALIGN
#endif

#define PTR_DIFF(p1, p2)  ((u8 *)(p1) - (u8 *)(p2))

/**
 * struct tso_state - TSO state for an SKB
 * @out_len: Remaining length in current segment
 * @seqnum: Current sequence number
 * @ipv4_id: Current IPv4 ID, host endian
 * @packet_space: Remaining space in current packet
 * @dma_addr: DMA address of current position
 * @in_len: Remaining length in current SKB fragment
 * @unmap_len: Length of SKB fragment
 * @unmap_addr: DMA address of SKB fragment
 * @dma_flags: TX buffer flags for DMA mapping - %EFX_TX_BUF_MAP_SINGLE or 0
 * @protocol: Network protocol (after any VLAN header)
 * @ip_off: Offset of IP header
 * @tcp_off: Offset of TCP header
 * @header_len: Number of bytes of header
 * @ip_base_len: IPv4 tot_len or IPv6 payload_len, before TCP payload
 * @header_dma_addr: Header DMA address, when using option descriptors
 * @header_unmap_len: Header DMA mapped length, or 0 if not using option
 *	descriptors
 *
 * The state used during segmentation.  It is put into this data structure
 * just to make it easy to pass into inline functions.
 */
struct tso_state {
	/* Output position */
	unsigned out_len;
	unsigned seqnum;
	u16 ipv4_id;
	unsigned packet_space;

	/* Input position */
	dma_addr_t dma_addr;
	unsigned in_len;
	unsigned unmap_len;
	dma_addr_t unmap_addr;
	unsigned short dma_flags;

	__be16 protocol;
	unsigned int ip_off;
	unsigned int tcp_off;
	unsigned header_len;
	unsigned int ip_base_len;
	dma_addr_t header_dma_addr;
	unsigned int header_unmap_len;
};


/*
 * Verify that our various assumptions about sk_buffs and the conditions
 * under which TSO will be attempted hold true.  Return the protocol number.
 */
static __be16 efx_tso_check_protocol(struct sk_buff *skb)
{
	__be16 protocol = skb->protocol;

	EFX_BUG_ON_PARANOID(((struct ethhdr *)skb->data)->h_proto !=
			    protocol);
	if (protocol == htons(ETH_P_8021Q)) {
		struct vlan_ethhdr *veh = (struct vlan_ethhdr *)skb->data;
		protocol = veh->h_vlan_encapsulated_proto;
	}

	if (protocol == htons(ETH_P_IP)) {
		EFX_BUG_ON_PARANOID(ip_hdr(skb)->protocol != IPPROTO_TCP);
	} else {
		EFX_BUG_ON_PARANOID(protocol != htons(ETH_P_IPV6));
		EFX_BUG_ON_PARANOID(ipv6_hdr(skb)->nexthdr != NEXTHDR_TCP);
	}
	EFX_BUG_ON_PARANOID((PTR_DIFF(tcp_hdr(skb), skb->data)
			     + (tcp_hdr(skb)->doff << 2u)) >
			    skb_headlen(skb));

	return protocol;
}

static u8 *efx_tsoh_get_buffer(struct efx_tx_queue *tx_queue,
			       struct efx_tx_buffer *buffer, unsigned int len)
{
	u8 *result;

	EFX_BUG_ON_PARANOID(buffer->len);
	EFX_BUG_ON_PARANOID(buffer->flags);
	EFX_BUG_ON_PARANOID(buffer->unmap_len);

	if (likely(len <= TSOH_STD_SIZE - TSOH_OFFSET)) {
		unsigned index =
			(tx_queue->insert_count & tx_queue->ptr_mask) / 2;
		struct efx_buffer *page_buf =
			&tx_queue->tsoh_page[index / TSOH_PER_PAGE];
		unsigned offset =
			TSOH_STD_SIZE * (index % TSOH_PER_PAGE) + TSOH_OFFSET;

		if (unlikely(!page_buf->addr) &&
		    efx_nic_alloc_buffer(tx_queue->efx, page_buf, PAGE_SIZE,
					 GFP_ATOMIC))
			return NULL;

		result = (u8 *)page_buf->addr + offset;
		buffer->dma_addr = page_buf->dma_addr + offset;
		buffer->flags = EFX_TX_BUF_CONT;
	} else {
		tx_queue->tso_long_headers++;

		buffer->heap_buf = kmalloc(TSOH_OFFSET + len, GFP_ATOMIC);
		if (unlikely(!buffer->heap_buf))
			return NULL;
		result = (u8 *)buffer->heap_buf + TSOH_OFFSET;
		buffer->flags = EFX_TX_BUF_CONT | EFX_TX_BUF_HEAP;
	}

	buffer->len = len;

	return result;
}
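
/* Illustrative note (not in the original source): because header
 * descriptors always alternate with payload descriptors,
 * (insert_count & ptr_mask) / 2 gives each in-flight header a distinct
 * slot.  With TSOH_STD_SIZE = 128 and 4K pages (assumed above), slot 70
 * lands in page 70 / 32 = 2 at offset 128 * (70 % 32) = 768 bytes, plus
 * TSOH_OFFSET.
 */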

/**
 * efx_tx_queue_insert - push descriptors onto the TX queue
 * @tx_queue: Efx TX queue
 * @dma_addr: DMA address of fragment
 * @len: Length of fragment
 * @final_buffer: The final buffer inserted into the queue
 *
 * Push descriptors onto the TX queue.
 */
static void efx_tx_queue_insert(struct efx_tx_queue *tx_queue,
				dma_addr_t dma_addr, unsigned len,
				struct efx_tx_buffer **final_buffer)
{
	struct efx_tx_buffer *buffer;
	struct efx_nic *efx = tx_queue->efx;
	unsigned dma_len, insert_ptr;

	EFX_BUG_ON_PARANOID(len <= 0);

	while (1) {
		insert_ptr = tx_queue->insert_count & tx_queue->ptr_mask;
		buffer = &tx_queue->buffer[insert_ptr];
		++tx_queue->insert_count;

		EFX_BUG_ON_PARANOID(tx_queue->insert_count -
				    tx_queue->read_count >=
				    efx->txq_entries);

		EFX_BUG_ON_PARANOID(buffer->len);
		EFX_BUG_ON_PARANOID(buffer->unmap_len);
		EFX_BUG_ON_PARANOID(buffer->flags);

		buffer->dma_addr = dma_addr;

		dma_len = efx_max_tx_len(efx, dma_addr);

		/* If there is enough space to send then do so */
		if (dma_len >= len)
			break;

		buffer->len = dma_len;
		buffer->flags = EFX_TX_BUF_CONT;
		dma_addr += dma_len;
		len -= dma_len;
	}

	EFX_BUG_ON_PARANOID(!len);
	buffer->len = len;
	*final_buffer = buffer;
}


/*
 * Put a TSO header into the TX queue.
 *
 * This is special-cased because we know that it is small enough to fit in
 * a single fragment, and we know it doesn't cross a page boundary.  It
 * also allows us to not worry about end-of-packet etc.
 */
static int efx_tso_put_header(struct efx_tx_queue *tx_queue,
			      struct efx_tx_buffer *buffer, u8 *header)
{
	if (unlikely(buffer->flags & EFX_TX_BUF_HEAP)) {
		buffer->dma_addr = dma_map_single(&tx_queue->efx->pci_dev->dev,
						  header, buffer->len,
						  DMA_TO_DEVICE);
		if (unlikely(dma_mapping_error(&tx_queue->efx->pci_dev->dev,
					       buffer->dma_addr))) {
			kfree(buffer->heap_buf);
			buffer->len = 0;
			buffer->flags = 0;
			return -ENOMEM;
		}
		buffer->unmap_len = buffer->len;
		buffer->flags |= EFX_TX_BUF_MAP_SINGLE;
	}

	++tx_queue->insert_count;
	return 0;
}


/* Remove buffers put into a tx_queue.  None of the buffers must have
 * an skb attached.
 */
static void efx_enqueue_unwind(struct efx_tx_queue *tx_queue)
{
	struct efx_tx_buffer *buffer;

	/* Work backwards until we hit the original insert pointer value */
	while (tx_queue->insert_count != tx_queue->write_count) {
		--tx_queue->insert_count;
		buffer = &tx_queue->buffer[tx_queue->insert_count &
					   tx_queue->ptr_mask];
		efx_dequeue_buffer(tx_queue, buffer, NULL, NULL);
	}
}


/* Parse the SKB header and initialise state. */
static int tso_start(struct tso_state *st, struct efx_nic *efx,
		     const struct sk_buff *skb)
{
	bool use_options = efx_nic_rev(efx) >= EFX_REV_HUNT_A0;
	struct device *dma_dev = &efx->pci_dev->dev;
	unsigned int header_len, in_len;
	dma_addr_t dma_addr;

	st->ip_off = skb_network_header(skb) - skb->data;
	st->tcp_off = skb_transport_header(skb) - skb->data;
	header_len = st->tcp_off + (tcp_hdr(skb)->doff << 2u);
	in_len = skb_headlen(skb) - header_len;
	st->header_len = header_len;
	st->in_len = in_len;
	if (st->protocol == htons(ETH_P_IP)) {
		st->ip_base_len = st->header_len - st->ip_off;
		st->ipv4_id = ntohs(ip_hdr(skb)->id);
	} else {
		st->ip_base_len = st->header_len - st->tcp_off;
		st->ipv4_id = 0;
	}
	st->seqnum = ntohl(tcp_hdr(skb)->seq);

	EFX_BUG_ON_PARANOID(tcp_hdr(skb)->urg);
	EFX_BUG_ON_PARANOID(tcp_hdr(skb)->syn);
	EFX_BUG_ON_PARANOID(tcp_hdr(skb)->rst);

	st->out_len = skb->len - header_len;

	if (!use_options) {
		st->header_unmap_len = 0;

		if (likely(in_len == 0)) {
			st->dma_flags = 0;
			st->unmap_len = 0;
			return 0;
		}

		dma_addr = dma_map_single(dma_dev, skb->data + header_len,
					  in_len, DMA_TO_DEVICE);
		st->dma_flags = EFX_TX_BUF_MAP_SINGLE;
		st->dma_addr = dma_addr;
		st->unmap_addr = dma_addr;
		st->unmap_len = in_len;
	} else {
		dma_addr = dma_map_single(dma_dev, skb->data,
					  skb_headlen(skb), DMA_TO_DEVICE);
		st->header_dma_addr = dma_addr;
		st->header_unmap_len = skb_headlen(skb);
		st->dma_flags = 0;
		st->dma_addr = dma_addr + header_len;
		st->unmap_len = 0;
	}

	return unlikely(dma_mapping_error(dma_dev, dma_addr)) ? -ENOMEM : 0;
}
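
/* Summary note (an interpretation of the code above, not from the original
 * source): on pre-EF10 NICs only the payload part of the linear area is
 * mapped and a fresh header is rebuilt for every segment, whereas on EF10
 * (use_options) the whole linear area including the header is mapped once
 * and each segment reuses it behind a TSO option descriptor.
 */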

static int tso_get_fragment(struct tso_state *st, struct efx_nic *efx,
			    skb_frag_t *frag)
{
	st->unmap_addr = skb_frag_dma_map(&efx->pci_dev->dev, frag, 0,
					  skb_frag_size(frag), DMA_TO_DEVICE);
	if (likely(!dma_mapping_error(&efx->pci_dev->dev, st->unmap_addr))) {
		st->dma_flags = 0;
		st->unmap_len = skb_frag_size(frag);
		st->in_len = skb_frag_size(frag);
		st->dma_addr = st->unmap_addr;
		return 0;
	}
	return -ENOMEM;
}


/**
 * tso_fill_packet_with_fragment - form descriptors for the current fragment
 * @tx_queue: Efx TX queue
 * @skb: Socket buffer
 * @st: TSO state
 *
 * Form descriptors for the current fragment, until we reach the end
 * of fragment or end-of-packet.
 */
static void tso_fill_packet_with_fragment(struct efx_tx_queue *tx_queue,
					  const struct sk_buff *skb,
					  struct tso_state *st)
{
	struct efx_tx_buffer *buffer;
	int n;

	if (st->in_len == 0)
		return;
	if (st->packet_space == 0)
		return;

	EFX_BUG_ON_PARANOID(st->in_len <= 0);
	EFX_BUG_ON_PARANOID(st->packet_space <= 0);

	n = min(st->in_len, st->packet_space);

	st->packet_space -= n;
	st->out_len -= n;
	st->in_len -= n;

	efx_tx_queue_insert(tx_queue, st->dma_addr, n, &buffer);

	if (st->out_len == 0) {
		/* Transfer ownership of the skb */
		buffer->skb = skb;
		buffer->flags = EFX_TX_BUF_SKB;
	} else if (st->packet_space != 0) {
		buffer->flags = EFX_TX_BUF_CONT;
	}

	if (st->in_len == 0) {
		/* Transfer ownership of the DMA mapping */
		buffer->unmap_len = st->unmap_len;
		buffer->flags |= st->dma_flags;
		st->unmap_len = 0;
	}

	st->dma_addr += n;
}


/**
 * tso_start_new_packet - generate a new header and prepare for the new packet
 * @tx_queue: Efx TX queue
 * @skb: Socket buffer
 * @st: TSO state
 *
 * Generate a new header and prepare for the new packet.  Return 0 on
 * success, or -%ENOMEM if failed to alloc header.
 */
static int tso_start_new_packet(struct efx_tx_queue *tx_queue,
				const struct sk_buff *skb,
				struct tso_state *st)
{
	struct efx_tx_buffer *buffer =
		&tx_queue->buffer[tx_queue->insert_count & tx_queue->ptr_mask];
	bool is_last = st->out_len <= skb_shinfo(skb)->gso_size;
	u8 tcp_flags_clear;

	if (!is_last) {
		st->packet_space = skb_shinfo(skb)->gso_size;
		tcp_flags_clear = 0x09; /* mask out FIN and PSH */
	} else {
		st->packet_space = st->out_len;
		tcp_flags_clear = 0x00;
	}

	if (!st->header_unmap_len) {
		/* Allocate and insert a DMA-mapped header buffer. */
		struct tcphdr *tsoh_th;
		unsigned ip_length;
		u8 *header;
		int rc;

		header = efx_tsoh_get_buffer(tx_queue, buffer, st->header_len);
		if (!header)
			return -ENOMEM;

		tsoh_th = (struct tcphdr *)(header + st->tcp_off);

		/* Copy and update the headers. */
		memcpy(header, skb->data, st->header_len);

		tsoh_th->seq = htonl(st->seqnum);
		((u8 *)tsoh_th)[13] &= ~tcp_flags_clear;

		ip_length = st->ip_base_len + st->packet_space;

		if (st->protocol == htons(ETH_P_IP)) {
			struct iphdr *tsoh_iph =
				(struct iphdr *)(header + st->ip_off);

			tsoh_iph->tot_len = htons(ip_length);
			tsoh_iph->id = htons(st->ipv4_id);
		} else {
			struct ipv6hdr *tsoh_iph =
				(struct ipv6hdr *)(header + st->ip_off);

			tsoh_iph->payload_len = htons(ip_length);
		}

		rc = efx_tso_put_header(tx_queue, buffer, header);
		if (unlikely(rc))
			return rc;
	} else {
		/* Send the original headers with a TSO option descriptor
		 * in front
		 */
		u8 tcp_flags = ((u8 *)tcp_hdr(skb))[13] & ~tcp_flags_clear;

		buffer->flags = EFX_TX_BUF_OPTION;
		buffer->len = 0;
		buffer->unmap_len = 0;
		EFX_POPULATE_QWORD_5(buffer->option,
				     ESF_DZ_TX_DESC_IS_OPT, 1,
				     ESF_DZ_TX_OPTION_TYPE,
				     ESE_DZ_TX_OPTION_DESC_TSO,
				     ESF_DZ_TX_TSO_TCP_FLAGS, tcp_flags,
				     ESF_DZ_TX_TSO_IP_ID, st->ipv4_id,
				     ESF_DZ_TX_TSO_TCP_SEQNO, st->seqnum);
		++tx_queue->insert_count;

		/* We mapped the headers in tso_start().  Unmap them
		 * when the last segment is completed.
		 */
		buffer = &tx_queue->buffer[tx_queue->insert_count &
					   tx_queue->ptr_mask];
		buffer->dma_addr = st->header_dma_addr;
		buffer->len = st->header_len;
		if (is_last) {
			buffer->flags = EFX_TX_BUF_CONT | EFX_TX_BUF_MAP_SINGLE;
			buffer->unmap_len = st->header_unmap_len;
			/* Ensure we only unmap them once in case of a
			 * later DMA mapping error and rollback
			 */
			st->header_unmap_len = 0;
		} else {
			buffer->flags = EFX_TX_BUF_CONT;
			buffer->unmap_len = 0;
		}
		++tx_queue->insert_count;
	}

	st->seqnum += skb_shinfo(skb)->gso_size;

	/* Linux leaves suitable gaps in the IP ID space for us to fill. */
	++st->ipv4_id;

	++tx_queue->tso_packets;

	return 0;
}
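
/* Illustrative note (not in the original source): tcp_flags_clear = 0x09
 * masks byte 13 of the TCP header, clearing FIN (0x01) and PSH (0x08) on
 * every segment except the last, so those flags survive only on the final
 * segment of the original skb.
 */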


/**
 * efx_enqueue_skb_tso - segment and transmit a TSO socket buffer
 * @tx_queue: Efx TX queue
 * @skb: Socket buffer
 *
 * Context: You must hold netif_tx_lock() to call this function.
 *
 * Add socket buffer @skb to @tx_queue, doing TSO.  In all cases @skb is
 * consumed.  Return %NETDEV_TX_OK.
 */
static int efx_enqueue_skb_tso(struct efx_tx_queue *tx_queue,
			       struct sk_buff *skb)
{
	struct efx_nic *efx = tx_queue->efx;
	int frag_i, rc;
	struct tso_state state;

	/* Find the packet protocol and sanity-check it */
	state.protocol = efx_tso_check_protocol(skb);

	EFX_BUG_ON_PARANOID(tx_queue->write_count != tx_queue->insert_count);

	rc = tso_start(&state, efx, skb);
	if (rc)
		goto mem_err;

	if (likely(state.in_len == 0)) {
		/* Grab the first payload fragment. */
		EFX_BUG_ON_PARANOID(skb_shinfo(skb)->nr_frags < 1);
		frag_i = 0;
		rc = tso_get_fragment(&state, efx,
				      skb_shinfo(skb)->frags + frag_i);
		if (rc)
			goto mem_err;
	} else {
		/* Payload starts in the header area. */
		frag_i = -1;
	}

	if (tso_start_new_packet(tx_queue, skb, &state) < 0)
		goto mem_err;

	while (1) {
		tso_fill_packet_with_fragment(tx_queue, skb, &state);

		/* Move onto the next fragment? */
		if (state.in_len == 0) {
			if (++frag_i >= skb_shinfo(skb)->nr_frags)
				/* End of payload reached. */
				break;
			rc = tso_get_fragment(&state, efx,
					      skb_shinfo(skb)->frags + frag_i);
			if (rc)
				goto mem_err;
		}

		/* Start at new packet? */
		if (state.packet_space == 0 &&
		    tso_start_new_packet(tx_queue, skb, &state) < 0)
			goto mem_err;
	}

	netdev_tx_sent_queue(tx_queue->core_txq, skb->len);

	/* Pass off to hardware */
	efx_nic_push_buffers(tx_queue);

	efx_tx_maybe_stop_queue(tx_queue);

	tx_queue->tso_bursts++;
	return NETDEV_TX_OK;

 mem_err:
	netif_err(efx, tx_err, efx->net_dev,
		  "Out of memory for TSO headers, or DMA mapping error\n");
	dev_kfree_skb_any(skb);

	/* Free the DMA mapping we were in the process of writing out */
	if (state.unmap_len) {
		if (state.dma_flags & EFX_TX_BUF_MAP_SINGLE)
			dma_unmap_single(&efx->pci_dev->dev, state.unmap_addr,
					 state.unmap_len, DMA_TO_DEVICE);
		else
			dma_unmap_page(&efx->pci_dev->dev, state.unmap_addr,
				       state.unmap_len, DMA_TO_DEVICE);
	}

	/* Free the header DMA mapping, if using option descriptors */
	if (state.header_unmap_len)
		dma_unmap_single(&efx->pci_dev->dev, state.header_dma_addr,
				 state.header_unmap_len, DMA_TO_DEVICE);

	efx_enqueue_unwind(tx_queue);
	return NETDEV_TX_OK;
}