/****************************************************************************
 * Driver for Solarflare Solarstorm network controllers and boards
 * Copyright 2005-2006 Fen Systems Ltd.
 * Copyright 2005-2010 Solarflare Communications Inc.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 as published
 * by the Free Software Foundation, incorporated herein by reference.
 */

#include <linux/pci.h>
#include <linux/tcp.h>
#include <linux/ip.h>
#include <linux/in.h>
#include <linux/ipv6.h>
#include <linux/slab.h>
#include <net/ipv6.h>
#include <linux/if_ether.h>
#include <linux/highmem.h>
#include "net_driver.h"
#include "efx.h"
#include "nic.h"
#include "workarounds.h"

static void efx_dequeue_buffer(struct efx_tx_queue *tx_queue,
                               struct efx_tx_buffer *buffer,
                               unsigned int *pkts_compl,
                               unsigned int *bytes_compl)
{
        if (buffer->unmap_len) {
                struct device *dma_dev = &tx_queue->efx->pci_dev->dev;
                dma_addr_t unmap_addr = (buffer->dma_addr + buffer->len -
                                         buffer->unmap_len);
                if (buffer->flags & EFX_TX_BUF_MAP_SINGLE)
                        dma_unmap_single(dma_dev, unmap_addr, buffer->unmap_len,
                                         DMA_TO_DEVICE);
                else
                        dma_unmap_page(dma_dev, unmap_addr, buffer->unmap_len,
                                       DMA_TO_DEVICE);
                buffer->unmap_len = 0;
        }

        if (buffer->flags & EFX_TX_BUF_SKB) {
                (*pkts_compl)++;
                (*bytes_compl) += buffer->skb->len;
                dev_kfree_skb_any((struct sk_buff *) buffer->skb);
                netif_vdbg(tx_queue->efx, tx_done, tx_queue->efx->net_dev,
                           "TX queue %d transmission id %x complete\n",
                           tx_queue->queue, tx_queue->read_count);
        }

        buffer->flags &= EFX_TX_BUF_TSOH;
}

/**
 * struct efx_tso_header - a DMA mapped buffer for packet headers
 * @next: Linked list of free ones.
 *	The list is protected by the TX queue lock.
 * @unmap_len: Length to unmap for an oversize buffer, or 0.
 * @dma_addr: The DMA address of the header below.
 *
 * This controls the memory used for a TSO header.  Use TSOH_BUFFER()
 * to find the packet header data.  Use TSOH_SIZE() to calculate the
 * total size required for a given packet header length.  TSO headers
 * in the free list are exactly %TSOH_STD_SIZE bytes in size.
 */
struct efx_tso_header {
        union {
                struct efx_tso_header *next;
                size_t unmap_len;
        };
        dma_addr_t dma_addr;
};

static int efx_enqueue_skb_tso(struct efx_tx_queue *tx_queue,
                               struct sk_buff *skb);
static void efx_fini_tso(struct efx_tx_queue *tx_queue);
static void efx_tsoh_heap_free(struct efx_tx_queue *tx_queue,
                               struct efx_tso_header *tsoh);

static void efx_tsoh_free(struct efx_tx_queue *tx_queue,
                          struct efx_tx_buffer *buffer)
{
        if (buffer->flags & EFX_TX_BUF_TSOH) {
                if (likely(!buffer->tsoh->unmap_len)) {
                        buffer->tsoh->next = tx_queue->tso_headers_free;
                        tx_queue->tso_headers_free = buffer->tsoh;
                } else {
                        efx_tsoh_heap_free(tx_queue, buffer->tsoh);
                }
                buffer->flags &= ~EFX_TX_BUF_TSOH;
        }
}

static inline unsigned
efx_max_tx_len(struct efx_nic *efx, dma_addr_t dma_addr)
{
        /* Depending on the NIC revision, we can use descriptor
         * lengths up to 8K or 8K-1.  However, since PCI Express
         * devices must split read requests at 4K boundaries, there is
         * little benefit from using descriptors that cross those
         * boundaries and we keep things simple by not doing so.
         */
        unsigned len = (~dma_addr & (EFX_PAGE_SIZE - 1)) + 1;

        /* Work around hardware bug for unaligned buffers. */
        if (EFX_WORKAROUND_5391(efx) && (dma_addr & 0xf))
                len = min_t(unsigned, len, 512 - (dma_addr & 0xf));

        return len;
}
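
/* Worked example of the boundary arithmetic above (illustrative values,
 * not from the hardware documentation): with EFX_PAGE_SIZE of 4096, a
 * buffer at dma_addr 0x...0f00 gives len = (~0xf00 & 0xfff) + 1 = 0x100,
 * i.e. 256 bytes up to the next 4K boundary, while a buffer starting
 * exactly on a boundary gets the full 4096.  The 5391 workaround then
 * further caps unaligned buffers at 512 bytes minus the misalignment.
 */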

unsigned int efx_tx_max_skb_descs(struct efx_nic *efx)
{
        /* Header and payload descriptor for each output segment, plus
         * one for every input fragment boundary within a segment
         */
        unsigned int max_descs = EFX_TSO_MAX_SEGS * 2 + MAX_SKB_FRAGS;

        /* Possibly one more per segment for the alignment workaround */
        if (EFX_WORKAROUND_5391(efx))
                max_descs += EFX_TSO_MAX_SEGS;

        /* Possibly more for PCIe page boundaries within input fragments */
        if (PAGE_SIZE > EFX_PAGE_SIZE)
                max_descs += max_t(unsigned int, MAX_SKB_FRAGS,
                                   DIV_ROUND_UP(GSO_MAX_SIZE, EFX_PAGE_SIZE));

        return max_descs;
}
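
/* Illustrative budget, assuming the typical values EFX_TSO_MAX_SEGS = 100
 * and MAX_SKB_FRAGS = 17 (both defined elsewhere; MAX_SKB_FRAGS depends on
 * the page size): the baseline is 100 * 2 + 17 = 217 descriptors, plus
 * 100 more when workaround 5391 applies, plus the PCIe-boundary term on
 * architectures where PAGE_SIZE exceeds EFX_PAGE_SIZE.
 */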

/* Get partner of a TX queue, seen as part of the same net core queue */
static struct efx_tx_queue *efx_tx_queue_partner(struct efx_tx_queue *tx_queue)
{
        if (tx_queue->queue & EFX_TXQ_TYPE_OFFLOAD)
                return tx_queue - EFX_TXQ_TYPE_OFFLOAD;
        else
                return tx_queue + EFX_TXQ_TYPE_OFFLOAD;
}
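
/* For example: with EFX_TXQ_TYPE_OFFLOAD being bit 0 of the queue number
 * (as in this driver), queues 0 and 1 on a channel are partners, as are
 * 2 and 3; the checksum-offload queue of each pair is the odd-numbered one.
 */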

static void efx_tx_maybe_stop_queue(struct efx_tx_queue *txq1)
{
        /* We need to consider both queues that the net core sees as one */
        struct efx_tx_queue *txq2 = efx_tx_queue_partner(txq1);
        struct efx_nic *efx = txq1->efx;
        unsigned int fill_level;

        fill_level = max(txq1->insert_count - txq1->old_read_count,
                         txq2->insert_count - txq2->old_read_count);
        if (likely(fill_level < efx->txq_stop_thresh))
                return;

        /* We used the stale old_read_count above, which gives us a
         * pessimistic estimate of the fill level (which may even
         * validly be >= efx->txq_entries).  Now try again using
         * read_count (more likely to be a cache miss).
         *
         * If we read read_count and then conditionally stop the
         * queue, it is possible for the completion path to race with
         * us and complete all outstanding descriptors in the middle,
         * after which there will be no more completions to wake it.
         * Therefore we stop the queue first, then read read_count
         * (with a memory barrier to ensure the ordering), then
         * restart the queue if the fill level turns out to be low
         * enough.
         */
        netif_tx_stop_queue(txq1->core_txq);
        smp_mb();
        txq1->old_read_count = ACCESS_ONCE(txq1->read_count);
        txq2->old_read_count = ACCESS_ONCE(txq2->read_count);

        fill_level = max(txq1->insert_count - txq1->old_read_count,
                         txq2->insert_count - txq2->old_read_count);
        EFX_BUG_ON_PARANOID(fill_level >= efx->txq_entries);
        if (likely(fill_level < efx->txq_stop_thresh)) {
                smp_mb();
                if (likely(!efx->loopback_selftest))
                        netif_tx_start_queue(txq1->core_txq);
        }
}
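
/* Concrete interleaving that motivates the ordering above (illustrative):
 *
 *   sender                            completion path
 *   ------                            ---------------
 *   read read_count (looks full)
 *                                     complete all descriptors
 *                                     queue not stopped -> no wake
 *   stop queue                        (never runs again)
 *
 * Stopping before re-reading read_count closes this window: any
 * completion that runs after the stop sees the stopped queue and wakes
 * it once the fill level has dropped.
 */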

/*
 * Add a socket buffer to a TX queue
 *
 * This maps all fragments of a socket buffer for DMA and adds them to
 * the TX queue.  The queue's insert pointer will be incremented by
 * the number of fragments in the socket buffer.
 *
 * If any DMA mapping fails, any mapped fragments will be unmapped and
 * the queue's insert pointer will be restored to its original value.
 *
 * This function is split out from efx_hard_start_xmit to allow the
 * loopback test to direct packets via specific TX queues.
 *
 * Returns NETDEV_TX_OK.
 * You must hold netif_tx_lock() to call this function.
 */
netdev_tx_t efx_enqueue_skb(struct efx_tx_queue *tx_queue, struct sk_buff *skb)
{
        struct efx_nic *efx = tx_queue->efx;
        struct device *dma_dev = &efx->pci_dev->dev;
        struct efx_tx_buffer *buffer;
        skb_frag_t *fragment;
        unsigned int len, unmap_len = 0, insert_ptr;
        dma_addr_t dma_addr, unmap_addr = 0;
        unsigned int dma_len;
        unsigned short dma_flags;
        int i = 0;

        EFX_BUG_ON_PARANOID(tx_queue->write_count != tx_queue->insert_count);

        if (skb_shinfo(skb)->gso_size)
                return efx_enqueue_skb_tso(tx_queue, skb);

        /* Get size of the initial fragment */
        len = skb_headlen(skb);

        /* Pad if necessary */
        if (EFX_WORKAROUND_15592(efx) && skb->len <= 32) {
                EFX_BUG_ON_PARANOID(skb->data_len);
                len = 32 + 1;
                if (skb_pad(skb, len - skb->len))
                        return NETDEV_TX_OK;
        }

        /* Map for DMA.  Use dma_map_single rather than dma_map_page
         * since this is more efficient on machines with sparse
         * memory.
         */
        dma_flags = EFX_TX_BUF_MAP_SINGLE;
        dma_addr = dma_map_single(dma_dev, skb->data, len, DMA_TO_DEVICE);

        /* Process all fragments */
        while (1) {
                if (unlikely(dma_mapping_error(dma_dev, dma_addr)))
                        goto dma_err;

                /* Store fields for marking in the per-fragment final
                 * descriptor */
                unmap_len = len;
                unmap_addr = dma_addr;

                /* Add to TX queue, splitting across DMA boundaries */
                do {
                        insert_ptr = tx_queue->insert_count & tx_queue->ptr_mask;
                        buffer = &tx_queue->buffer[insert_ptr];
                        efx_tsoh_free(tx_queue, buffer);
                        EFX_BUG_ON_PARANOID(buffer->flags);
                        EFX_BUG_ON_PARANOID(buffer->len);
                        EFX_BUG_ON_PARANOID(buffer->unmap_len);

                        dma_len = efx_max_tx_len(efx, dma_addr);
                        if (likely(dma_len >= len))
                                dma_len = len;

                        /* Fill out per descriptor fields */
                        buffer->len = dma_len;
                        buffer->dma_addr = dma_addr;
                        buffer->flags = EFX_TX_BUF_CONT;
                        len -= dma_len;
                        dma_addr += dma_len;
                        ++tx_queue->insert_count;
                } while (len);

                /* Transfer ownership of the unmapping to the final buffer */
                buffer->flags = EFX_TX_BUF_CONT | dma_flags;
                buffer->unmap_len = unmap_len;
                unmap_len = 0;

                /* Get address and size of next fragment */
                if (i >= skb_shinfo(skb)->nr_frags)
                        break;
                fragment = &skb_shinfo(skb)->frags[i];
                len = skb_frag_size(fragment);
                i++;
                /* Map for DMA */
                dma_flags = 0;
                dma_addr = skb_frag_dma_map(dma_dev, fragment, 0, len,
                                            DMA_TO_DEVICE);
        }

        /* Transfer ownership of the skb to the final buffer */
        buffer->skb = skb;
        buffer->flags = EFX_TX_BUF_SKB | dma_flags;

        netdev_tx_sent_queue(tx_queue->core_txq, skb->len);

        /* Pass off to hardware */
        efx_nic_push_buffers(tx_queue);

        efx_tx_maybe_stop_queue(tx_queue);

        return NETDEV_TX_OK;

 dma_err:
        netif_err(efx, tx_err, efx->net_dev,
298 " TX queue %d could not map skb with %d bytes %d "
299 "fragments for DMA\n", tx_queue->queue, skb->len,
300 skb_shinfo(skb)->nr_frags + 1);
Ben Hutchings8ceee662008-04-27 12:55:59 +0100301
302 /* Mark the packet as transmitted, and free the SKB ourselves */
Ben Hutchings9bc183d2009-11-23 16:06:47 +0000303 dev_kfree_skb_any(skb);
Ben Hutchings8ceee662008-04-27 12:55:59 +0100304
Ben Hutchings8ceee662008-04-27 12:55:59 +0100305 /* Work backwards until we hit the original insert pointer value */
306 while (tx_queue->insert_count != tx_queue->write_count) {
Tom Herbertc3940992011-11-28 16:33:43 +0000307 unsigned int pkts_compl = 0, bytes_compl = 0;
Ben Hutchings8ceee662008-04-27 12:55:59 +0100308 --tx_queue->insert_count;
Steve Hodgsonecc910f2010-09-10 06:42:22 +0000309 insert_ptr = tx_queue->insert_count & tx_queue->ptr_mask;
Ben Hutchings8ceee662008-04-27 12:55:59 +0100310 buffer = &tx_queue->buffer[insert_ptr];
Tom Herbertc3940992011-11-28 16:33:43 +0000311 efx_dequeue_buffer(tx_queue, buffer, &pkts_compl, &bytes_compl);
Ben Hutchings8ceee662008-04-27 12:55:59 +0100312 buffer->len = 0;
313 }
314
315 /* Free the fragment we were mid-way through pushing */
Ben Hutchingsecbd95c2008-09-01 12:46:40 +0100316 if (unmap_len) {
Ben Hutchings7668ff92012-05-17 20:52:20 +0100317 if (dma_flags & EFX_TX_BUF_MAP_SINGLE)
Ben Hutchings0e33d872012-05-17 17:46:55 +0100318 dma_unmap_single(dma_dev, unmap_addr, unmap_len,
319 DMA_TO_DEVICE);
Ben Hutchingsecbd95c2008-09-01 12:46:40 +0100320 else
Ben Hutchings0e33d872012-05-17 17:46:55 +0100321 dma_unmap_page(dma_dev, unmap_addr, unmap_len,
322 DMA_TO_DEVICE);
Ben Hutchingsecbd95c2008-09-01 12:46:40 +0100323 }
Ben Hutchings8ceee662008-04-27 12:55:59 +0100324
Ben Hutchings14bf7182012-05-22 01:27:58 +0100325 return NETDEV_TX_OK;
Ben Hutchings8ceee662008-04-27 12:55:59 +0100326}

/* Remove packets from the TX queue
 *
 * This removes packets from the TX queue, up to and including the
 * specified index.
 */
static void efx_dequeue_buffers(struct efx_tx_queue *tx_queue,
                                unsigned int index,
                                unsigned int *pkts_compl,
                                unsigned int *bytes_compl)
{
        struct efx_nic *efx = tx_queue->efx;
        unsigned int stop_index, read_ptr;

        stop_index = (index + 1) & tx_queue->ptr_mask;
        read_ptr = tx_queue->read_count & tx_queue->ptr_mask;

        while (read_ptr != stop_index) {
                struct efx_tx_buffer *buffer = &tx_queue->buffer[read_ptr];
                if (unlikely(buffer->len == 0)) {
                        netif_err(efx, tx_err, efx->net_dev,
                                  "TX queue %d spurious TX completion id %x\n",
                                  tx_queue->queue, read_ptr);
                        efx_schedule_reset(efx, RESET_TYPE_TX_SKIP);
                        return;
                }

                efx_dequeue_buffer(tx_queue, buffer, pkts_compl, bytes_compl);
                buffer->len = 0;

                ++tx_queue->read_count;
                read_ptr = tx_queue->read_count & tx_queue->ptr_mask;
        }
}

/* Initiate a packet transmission.  We use one channel per CPU
 * (sharing when we have more CPUs than channels).  On Falcon, the TX
 * completion events will be directed back to the CPU that transmitted
 * the packet, which should be cache-efficient.
 *
 * Context: non-blocking.
 * Note that returning anything other than NETDEV_TX_OK will cause the
 * OS to free the skb.
 */
netdev_tx_t efx_hard_start_xmit(struct sk_buff *skb,
                                struct net_device *net_dev)
{
        struct efx_nic *efx = netdev_priv(net_dev);
        struct efx_tx_queue *tx_queue;
        unsigned index, type;

        EFX_WARN_ON_PARANOID(!netif_device_present(net_dev));

        index = skb_get_queue_mapping(skb);
        type = skb->ip_summed == CHECKSUM_PARTIAL ? EFX_TXQ_TYPE_OFFLOAD : 0;
        if (index >= efx->n_tx_channels) {
                index -= efx->n_tx_channels;
                type |= EFX_TXQ_TYPE_HIGHPRI;
        }
        tx_queue = efx_get_tx_queue(efx, index, type);

        return efx_enqueue_skb(tx_queue, skb);
}

void efx_init_tx_queue_core_txq(struct efx_tx_queue *tx_queue)
{
        struct efx_nic *efx = tx_queue->efx;

        /* Must be inverse of queue lookup in efx_hard_start_xmit() */
        tx_queue->core_txq =
                netdev_get_tx_queue(efx->net_dev,
                                    tx_queue->queue / EFX_TXQ_TYPES +
                                    ((tx_queue->queue & EFX_TXQ_TYPE_HIGHPRI) ?
                                     efx->n_tx_channels : 0));
}

int efx_setup_tc(struct net_device *net_dev, u8 num_tc)
{
        struct efx_nic *efx = netdev_priv(net_dev);
        struct efx_channel *channel;
        struct efx_tx_queue *tx_queue;
        unsigned tc;
        int rc;

        if (efx_nic_rev(efx) < EFX_REV_FALCON_B0 || num_tc > EFX_MAX_TX_TC)
                return -EINVAL;

        if (num_tc == net_dev->num_tc)
                return 0;

        for (tc = 0; tc < num_tc; tc++) {
                net_dev->tc_to_txq[tc].offset = tc * efx->n_tx_channels;
                net_dev->tc_to_txq[tc].count = efx->n_tx_channels;
        }

        if (num_tc > net_dev->num_tc) {
                /* Initialise high-priority queues as necessary */
                efx_for_each_channel(channel, efx) {
                        efx_for_each_possible_channel_tx_queue(tx_queue,
                                                               channel) {
                                if (!(tx_queue->queue & EFX_TXQ_TYPE_HIGHPRI))
                                        continue;
                                if (!tx_queue->buffer) {
                                        rc = efx_probe_tx_queue(tx_queue);
                                        if (rc)
                                                return rc;
                                }
                                if (!tx_queue->initialised)
                                        efx_init_tx_queue(tx_queue);
                                efx_init_tx_queue_core_txq(tx_queue);
                        }
                }
        } else {
                /* Reduce number of classes before number of queues */
                net_dev->num_tc = num_tc;
        }

        rc = netif_set_real_num_tx_queues(net_dev,
                                          max_t(int, num_tc, 1) *
                                          efx->n_tx_channels);
        if (rc)
                return rc;

        /* Do not destroy high-priority queues when they become
         * unused.  We would have to flush them first, and it is
         * fairly difficult to flush a subset of TX queues.  Leave
         * it to efx_fini_channels().
         */

        net_dev->num_tc = num_tc;
        return 0;
}

void efx_xmit_done(struct efx_tx_queue *tx_queue, unsigned int index)
{
        unsigned fill_level;
        struct efx_nic *efx = tx_queue->efx;
        struct efx_tx_queue *txq2;
        unsigned int pkts_compl = 0, bytes_compl = 0;

        EFX_BUG_ON_PARANOID(index > tx_queue->ptr_mask);

        efx_dequeue_buffers(tx_queue, index, &pkts_compl, &bytes_compl);
        netdev_tx_completed_queue(tx_queue->core_txq, pkts_compl, bytes_compl);

        /* See if we need to restart the netif queue.  This memory
         * barrier ensures that we write read_count (inside
         * efx_dequeue_buffers()) before reading the queue status.
         */
        smp_mb();
        if (unlikely(netif_tx_queue_stopped(tx_queue->core_txq)) &&
            likely(efx->port_enabled) &&
            likely(netif_device_present(efx->net_dev))) {
                txq2 = efx_tx_queue_partner(tx_queue);
                fill_level = max(tx_queue->insert_count - tx_queue->read_count,
                                 txq2->insert_count - txq2->read_count);
                if (fill_level <= efx->txq_wake_thresh)
                        netif_tx_wake_queue(tx_queue->core_txq);
        }

        /* Check whether the hardware queue is now empty */
        if ((int)(tx_queue->read_count - tx_queue->old_write_count) >= 0) {
                tx_queue->old_write_count = ACCESS_ONCE(tx_queue->write_count);
                if (tx_queue->read_count == tx_queue->old_write_count) {
                        smp_mb();
                        tx_queue->empty_read_count =
                                tx_queue->read_count | EFX_EMPTY_COUNT_VALID;
                }
        }
}

int efx_probe_tx_queue(struct efx_tx_queue *tx_queue)
{
        struct efx_nic *efx = tx_queue->efx;
        unsigned int entries;
        int rc;

        /* Create the smallest power-of-two aligned ring */
        entries = max(roundup_pow_of_two(efx->txq_entries), EFX_MIN_DMAQ_SIZE);
        EFX_BUG_ON_PARANOID(entries > EFX_MAX_DMAQ_SIZE);
        tx_queue->ptr_mask = entries - 1;
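
        /* For example (illustrative): txq_entries = 1000 rounds up to
         * entries = 1024, giving ptr_mask = 0x3ff; masking insert/read
         * counts with ptr_mask is what implements the ring wrap-around
         * throughout this file.
         */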

        netif_dbg(efx, probe, efx->net_dev,
                  "creating TX queue %d size %#x mask %#x\n",
                  tx_queue->queue, efx->txq_entries, tx_queue->ptr_mask);

        /* Allocate software ring */
        tx_queue->buffer = kcalloc(entries, sizeof(*tx_queue->buffer),
                                   GFP_KERNEL);
        if (!tx_queue->buffer)
                return -ENOMEM;

        /* Allocate hardware ring */
        rc = efx_nic_probe_tx(tx_queue);
        if (rc)
                goto fail;

        return 0;

 fail:
        kfree(tx_queue->buffer);
        tx_queue->buffer = NULL;
        return rc;
}

void efx_init_tx_queue(struct efx_tx_queue *tx_queue)
{
        netif_dbg(tx_queue->efx, drv, tx_queue->efx->net_dev,
                  "initialising TX queue %d\n", tx_queue->queue);

        tx_queue->insert_count = 0;
        tx_queue->write_count = 0;
        tx_queue->old_write_count = 0;
        tx_queue->read_count = 0;
        tx_queue->old_read_count = 0;
        tx_queue->empty_read_count = 0 | EFX_EMPTY_COUNT_VALID;

        /* Set up TX descriptor ring */
        efx_nic_init_tx(tx_queue);

        tx_queue->initialised = true;
}

void efx_release_tx_buffers(struct efx_tx_queue *tx_queue)
{
        struct efx_tx_buffer *buffer;

        if (!tx_queue->buffer)
                return;

        /* Free any buffers left in the ring */
        while (tx_queue->read_count != tx_queue->write_count) {
                unsigned int pkts_compl = 0, bytes_compl = 0;
                buffer = &tx_queue->buffer[tx_queue->read_count & tx_queue->ptr_mask];
                efx_dequeue_buffer(tx_queue, buffer, &pkts_compl, &bytes_compl);
                buffer->len = 0;

                ++tx_queue->read_count;
        }
        netdev_tx_reset_queue(tx_queue->core_txq);
}

void efx_fini_tx_queue(struct efx_tx_queue *tx_queue)
{
        if (!tx_queue->initialised)
                return;

        netif_dbg(tx_queue->efx, drv, tx_queue->efx->net_dev,
                  "shutting down TX queue %d\n", tx_queue->queue);

        tx_queue->initialised = false;

        /* Flush TX queue, remove descriptor ring */
        efx_nic_fini_tx(tx_queue);

        efx_release_tx_buffers(tx_queue);

        /* Free up TSO header cache */
        efx_fini_tso(tx_queue);
}

void efx_remove_tx_queue(struct efx_tx_queue *tx_queue)
{
        if (!tx_queue->buffer)
                return;

        netif_dbg(tx_queue->efx, drv, tx_queue->efx->net_dev,
                  "destroying TX queue %d\n", tx_queue->queue);
        efx_nic_remove_tx(tx_queue);

        kfree(tx_queue->buffer);
        tx_queue->buffer = NULL;
}


/* Efx TCP segmentation acceleration.
 *
 * Why?  Because by doing it here in the driver we can go significantly
 * faster than GSO.
 *
 * Requires TX checksum offload support.
 */
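
/* Overview of the TSO path below (a reader's sketch of the code that
 * follows, not new behaviour): tso_start() parses the headers into a
 * struct tso_state; tso_get_head_fragment()/tso_get_fragment() DMA-map
 * successive pieces of payload; tso_start_new_packet() allocates and
 * fixes up a per-segment copy of the headers; and
 * tso_fill_packet_with_fragment() emits payload descriptors until the
 * segment or the fragment is exhausted.  efx_enqueue_skb_tso() drives
 * the whole loop.
 */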

/* Number of bytes inserted at the start of a TSO header buffer,
 * similar to NET_IP_ALIGN.
 */
#ifdef CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS
#define TSOH_OFFSET	0
#else
#define TSOH_OFFSET	NET_IP_ALIGN
#endif

#define TSOH_BUFFER(tsoh)	((u8 *)(tsoh + 1) + TSOH_OFFSET)

/* Total size of struct efx_tso_header, buffer and padding */
#define TSOH_SIZE(hdr_len)					\
	(sizeof(struct efx_tso_header) + TSOH_OFFSET + hdr_len)

/* Size of blocks on free list.  Larger blocks must be allocated from
 * the heap.
 */
#define TSOH_STD_SIZE		128

#define PTR_DIFF(p1, p2)  ((u8 *)(p1) - (u8 *)(p2))
#define ETH_HDR_LEN(skb)  (skb_network_header(skb) - (skb)->data)
#define SKB_TCP_OFF(skb)  PTR_DIFF(tcp_hdr(skb), (skb)->data)
#define SKB_IPV4_OFF(skb) PTR_DIFF(ip_hdr(skb), (skb)->data)
#define SKB_IPV6_OFF(skb) PTR_DIFF(ipv6_hdr(skb), (skb)->data)
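
/* Layout sketch for a standard free-list block (illustrative, assuming a
 * typical 64-bit build where struct efx_tso_header is 16 bytes and
 * TSOH_OFFSET is 2): a 66-byte Ethernet/IPv4/TCP header needs
 * TSOH_SIZE(66) = 16 + 2 + 66 = 84 bytes, comfortably within the
 * 128-byte TSOH_STD_SIZE; longer headers fall back to
 * efx_tsoh_heap_alloc().
 */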

/**
 * struct tso_state - TSO state for an SKB
 * @out_len: Remaining length in current segment
 * @seqnum: Current sequence number
 * @ipv4_id: Current IPv4 ID, host endian
 * @packet_space: Remaining space in current packet
 * @dma_addr: DMA address of current position
 * @in_len: Remaining length in current SKB fragment
 * @unmap_len: Length of SKB fragment
 * @unmap_addr: DMA address of SKB fragment
 * @dma_flags: TX buffer flags for DMA mapping - %EFX_TX_BUF_MAP_SINGLE or 0
 * @protocol: Network protocol (after any VLAN header)
 * @header_len: Number of bytes of header
 * @full_packet_size: Number of bytes to put in each outgoing segment
 *
 * The state used during segmentation.  It is put into this data structure
 * just to make it easy to pass into inline functions.
 */
struct tso_state {
        /* Output position */
        unsigned out_len;
        unsigned seqnum;
        unsigned ipv4_id;
        unsigned packet_space;

        /* Input position */
        dma_addr_t dma_addr;
        unsigned in_len;
        unsigned unmap_len;
        dma_addr_t unmap_addr;
        unsigned short dma_flags;

        __be16 protocol;
        unsigned header_len;
        int full_packet_size;
};


/*
 * Verify that our various assumptions about sk_buffs and the conditions
 * under which TSO will be attempted hold true.  Return the protocol number.
 */
static __be16 efx_tso_check_protocol(struct sk_buff *skb)
{
        __be16 protocol = skb->protocol;

        EFX_BUG_ON_PARANOID(((struct ethhdr *)skb->data)->h_proto !=
                            protocol);
        if (protocol == htons(ETH_P_8021Q)) {
                struct vlan_ethhdr *veh = (struct vlan_ethhdr *)skb->data;
                protocol = veh->h_vlan_encapsulated_proto;
        }

        if (protocol == htons(ETH_P_IP)) {
                EFX_BUG_ON_PARANOID(ip_hdr(skb)->protocol != IPPROTO_TCP);
        } else {
                EFX_BUG_ON_PARANOID(protocol != htons(ETH_P_IPV6));
                EFX_BUG_ON_PARANOID(ipv6_hdr(skb)->nexthdr != NEXTHDR_TCP);
        }
        EFX_BUG_ON_PARANOID((PTR_DIFF(tcp_hdr(skb), skb->data)
                             + (tcp_hdr(skb)->doff << 2u)) >
                            skb_headlen(skb));

        return protocol;
}


/*
 * Allocate a page worth of efx_tso_header structures, and string them
 * into the tx_queue->tso_headers_free linked list.  Return 0 or -ENOMEM.
 */
static int efx_tsoh_block_alloc(struct efx_tx_queue *tx_queue)
{
        struct device *dma_dev = &tx_queue->efx->pci_dev->dev;
        struct efx_tso_header *tsoh;
        dma_addr_t dma_addr;
        u8 *base_kva, *kva;

        base_kva = dma_alloc_coherent(dma_dev, PAGE_SIZE, &dma_addr, GFP_ATOMIC);
        if (base_kva == NULL) {
                netif_err(tx_queue->efx, tx_err, tx_queue->efx->net_dev,
                          "Unable to allocate page for TSO headers\n");
                return -ENOMEM;
        }

        /* dma_alloc_coherent() allocates pages. */
        EFX_BUG_ON_PARANOID(dma_addr & (PAGE_SIZE - 1u));

        for (kva = base_kva; kva < base_kva + PAGE_SIZE; kva += TSOH_STD_SIZE) {
                tsoh = (struct efx_tso_header *)kva;
                tsoh->dma_addr = dma_addr + (TSOH_BUFFER(tsoh) - base_kva);
                tsoh->next = tx_queue->tso_headers_free;
                tx_queue->tso_headers_free = tsoh;
        }

        return 0;
}


/* Free up a TSO header, and all others in the same page. */
static void efx_tsoh_block_free(struct efx_tx_queue *tx_queue,
                                struct efx_tso_header *tsoh,
                                struct device *dma_dev)
{
        struct efx_tso_header **p;
        unsigned long base_kva;
        dma_addr_t base_dma;

        base_kva = (unsigned long)tsoh & PAGE_MASK;
        base_dma = tsoh->dma_addr & PAGE_MASK;

        p = &tx_queue->tso_headers_free;
        while (*p != NULL) {
                if (((unsigned long)*p & PAGE_MASK) == base_kva)
                        *p = (*p)->next;
                else
                        p = &(*p)->next;
        }

        dma_free_coherent(dma_dev, PAGE_SIZE, (void *)base_kva, base_dma);
}

static struct efx_tso_header *
efx_tsoh_heap_alloc(struct efx_tx_queue *tx_queue, size_t header_len)
{
        struct efx_tso_header *tsoh;

        tsoh = kmalloc(TSOH_SIZE(header_len), GFP_ATOMIC | GFP_DMA);
        if (unlikely(!tsoh))
                return NULL;

        tsoh->dma_addr = dma_map_single(&tx_queue->efx->pci_dev->dev,
                                        TSOH_BUFFER(tsoh), header_len,
                                        DMA_TO_DEVICE);
        if (unlikely(dma_mapping_error(&tx_queue->efx->pci_dev->dev,
                                       tsoh->dma_addr))) {
                kfree(tsoh);
                return NULL;
        }

        tsoh->unmap_len = header_len;
        return tsoh;
}

static void
efx_tsoh_heap_free(struct efx_tx_queue *tx_queue, struct efx_tso_header *tsoh)
{
        dma_unmap_single(&tx_queue->efx->pci_dev->dev,
                         tsoh->dma_addr, tsoh->unmap_len,
                         DMA_TO_DEVICE);
        kfree(tsoh);
}

/**
 * efx_tx_queue_insert - push descriptors onto the TX queue
 * @tx_queue: Efx TX queue
 * @dma_addr: DMA address of fragment
 * @len: Length of fragment
 * @final_buffer: The final buffer inserted into the queue
 *
 * Push descriptors onto the TX queue.
 */
static void efx_tx_queue_insert(struct efx_tx_queue *tx_queue,
                                dma_addr_t dma_addr, unsigned len,
                                struct efx_tx_buffer **final_buffer)
{
        struct efx_tx_buffer *buffer;
        struct efx_nic *efx = tx_queue->efx;
        unsigned dma_len, insert_ptr;

        EFX_BUG_ON_PARANOID(len <= 0);

        while (1) {
                insert_ptr = tx_queue->insert_count & tx_queue->ptr_mask;
                buffer = &tx_queue->buffer[insert_ptr];
                ++tx_queue->insert_count;

                EFX_BUG_ON_PARANOID(tx_queue->insert_count -
                                    tx_queue->read_count >=
                                    efx->txq_entries);

                efx_tsoh_free(tx_queue, buffer);
                EFX_BUG_ON_PARANOID(buffer->len);
                EFX_BUG_ON_PARANOID(buffer->unmap_len);
                EFX_BUG_ON_PARANOID(buffer->flags);

                buffer->dma_addr = dma_addr;

                dma_len = efx_max_tx_len(efx, dma_addr);

                /* If there is enough space to send then do so */
                if (dma_len >= len)
                        break;

                buffer->len = dma_len;
                buffer->flags = EFX_TX_BUF_CONT;
                dma_addr += dma_len;
                len -= dma_len;
        }

        EFX_BUG_ON_PARANOID(!len);
        buffer->len = len;
        *final_buffer = buffer;
}


/*
 * Put a TSO header into the TX queue.
 *
 * This is special-cased because we know that it is small enough to fit in
 * a single fragment, and we know it doesn't cross a page boundary.  It
 * also allows us to not worry about end-of-packet etc.
 */
static void efx_tso_put_header(struct efx_tx_queue *tx_queue,
                               struct efx_tso_header *tsoh, unsigned len)
{
        struct efx_tx_buffer *buffer;

        buffer = &tx_queue->buffer[tx_queue->insert_count & tx_queue->ptr_mask];
        efx_tsoh_free(tx_queue, buffer);
        EFX_BUG_ON_PARANOID(buffer->len);
        EFX_BUG_ON_PARANOID(buffer->unmap_len);
        EFX_BUG_ON_PARANOID(buffer->flags);
        buffer->len = len;
        buffer->dma_addr = tsoh->dma_addr;
        buffer->tsoh = tsoh;
        buffer->flags = EFX_TX_BUF_TSOH | EFX_TX_BUF_CONT;

        ++tx_queue->insert_count;
}


/* Remove descriptors put into a tx_queue. */
static void efx_enqueue_unwind(struct efx_tx_queue *tx_queue)
{
        struct efx_tx_buffer *buffer;
        dma_addr_t unmap_addr;

        /* Work backwards until we hit the original insert pointer value */
        while (tx_queue->insert_count != tx_queue->write_count) {
                --tx_queue->insert_count;
                buffer = &tx_queue->buffer[tx_queue->insert_count &
                                           tx_queue->ptr_mask];
                efx_tsoh_free(tx_queue, buffer);
                EFX_BUG_ON_PARANOID(buffer->flags & EFX_TX_BUF_SKB);
                if (buffer->unmap_len) {
                        unmap_addr = (buffer->dma_addr + buffer->len -
                                      buffer->unmap_len);
                        if (buffer->flags & EFX_TX_BUF_MAP_SINGLE)
                                dma_unmap_single(&tx_queue->efx->pci_dev->dev,
                                                 unmap_addr, buffer->unmap_len,
                                                 DMA_TO_DEVICE);
                        else
                                dma_unmap_page(&tx_queue->efx->pci_dev->dev,
                                               unmap_addr, buffer->unmap_len,
                                               DMA_TO_DEVICE);
                        buffer->unmap_len = 0;
                }
                buffer->len = 0;
                buffer->flags = 0;
        }
}


/* Parse the SKB header and initialise state. */
static void tso_start(struct tso_state *st, const struct sk_buff *skb)
{
        /* All ethernet/IP/TCP headers combined size is TCP header size
         * plus offset of TCP header relative to start of packet.
         */
        st->header_len = ((tcp_hdr(skb)->doff << 2u)
                          + PTR_DIFF(tcp_hdr(skb), skb->data));
        st->full_packet_size = st->header_len + skb_shinfo(skb)->gso_size;

        if (st->protocol == htons(ETH_P_IP))
                st->ipv4_id = ntohs(ip_hdr(skb)->id);
        else
                st->ipv4_id = 0;
        st->seqnum = ntohl(tcp_hdr(skb)->seq);

        EFX_BUG_ON_PARANOID(tcp_hdr(skb)->urg);
        EFX_BUG_ON_PARANOID(tcp_hdr(skb)->syn);
        EFX_BUG_ON_PARANOID(tcp_hdr(skb)->rst);

        st->out_len = skb->len - st->header_len;
        st->unmap_len = 0;
        st->dma_flags = 0;
}

static int tso_get_fragment(struct tso_state *st, struct efx_nic *efx,
                            skb_frag_t *frag)
{
        st->unmap_addr = skb_frag_dma_map(&efx->pci_dev->dev, frag, 0,
                                          skb_frag_size(frag), DMA_TO_DEVICE);
        if (likely(!dma_mapping_error(&efx->pci_dev->dev, st->unmap_addr))) {
                st->dma_flags = 0;
                st->unmap_len = skb_frag_size(frag);
                st->in_len = skb_frag_size(frag);
                st->dma_addr = st->unmap_addr;
                return 0;
        }
        return -ENOMEM;
}

static int tso_get_head_fragment(struct tso_state *st, struct efx_nic *efx,
                                 const struct sk_buff *skb)
{
        int hl = st->header_len;
        int len = skb_headlen(skb) - hl;

        st->unmap_addr = dma_map_single(&efx->pci_dev->dev, skb->data + hl,
                                        len, DMA_TO_DEVICE);
        if (likely(!dma_mapping_error(&efx->pci_dev->dev, st->unmap_addr))) {
                st->dma_flags = EFX_TX_BUF_MAP_SINGLE;
                st->unmap_len = len;
                st->in_len = len;
                st->dma_addr = st->unmap_addr;
                return 0;
        }
        return -ENOMEM;
}


/**
 * tso_fill_packet_with_fragment - form descriptors for the current fragment
 * @tx_queue: Efx TX queue
 * @skb: Socket buffer
 * @st: TSO state
 *
 * Form descriptors for the current fragment, until we reach the end
 * of fragment or end-of-packet.
 */
static void tso_fill_packet_with_fragment(struct efx_tx_queue *tx_queue,
                                          const struct sk_buff *skb,
                                          struct tso_state *st)
{
        struct efx_tx_buffer *buffer;
        int n;

        if (st->in_len == 0)
                return;
        if (st->packet_space == 0)
                return;

        EFX_BUG_ON_PARANOID(st->in_len <= 0);
        EFX_BUG_ON_PARANOID(st->packet_space <= 0);

        n = min(st->in_len, st->packet_space);

        st->packet_space -= n;
        st->out_len -= n;
        st->in_len -= n;

        efx_tx_queue_insert(tx_queue, st->dma_addr, n, &buffer);

        if (st->out_len == 0) {
                /* Transfer ownership of the skb */
                buffer->skb = skb;
                buffer->flags = EFX_TX_BUF_SKB;
        } else if (st->packet_space != 0) {
                buffer->flags = EFX_TX_BUF_CONT;
        }

        if (st->in_len == 0) {
                /* Transfer ownership of the DMA mapping */
                buffer->unmap_len = st->unmap_len;
                buffer->flags |= st->dma_flags;
                st->unmap_len = 0;
        }

        st->dma_addr += n;
}


/**
 * tso_start_new_packet - generate a new header and prepare for the new packet
 * @tx_queue: Efx TX queue
 * @skb: Socket buffer
 * @st: TSO state
 *
 * Generate a new header and prepare for the new packet.  Return 0 on
 * success, or -1 if failed to alloc header.
 */
static int tso_start_new_packet(struct efx_tx_queue *tx_queue,
                                const struct sk_buff *skb,
                                struct tso_state *st)
{
        struct efx_tso_header *tsoh;
        struct tcphdr *tsoh_th;
        unsigned ip_length;
        u8 *header;

        /* Allocate a DMA-mapped header buffer. */
        if (likely(TSOH_SIZE(st->header_len) <= TSOH_STD_SIZE)) {
                if (tx_queue->tso_headers_free == NULL) {
                        if (efx_tsoh_block_alloc(tx_queue))
                                return -1;
                }
                EFX_BUG_ON_PARANOID(!tx_queue->tso_headers_free);
                tsoh = tx_queue->tso_headers_free;
                tx_queue->tso_headers_free = tsoh->next;
                tsoh->unmap_len = 0;
        } else {
                tx_queue->tso_long_headers++;
                tsoh = efx_tsoh_heap_alloc(tx_queue, st->header_len);
                if (unlikely(!tsoh))
                        return -1;
        }

        header = TSOH_BUFFER(tsoh);
        tsoh_th = (struct tcphdr *)(header + SKB_TCP_OFF(skb));

        /* Copy and update the headers. */
        memcpy(header, skb->data, st->header_len);

        tsoh_th->seq = htonl(st->seqnum);
        st->seqnum += skb_shinfo(skb)->gso_size;
        if (st->out_len > skb_shinfo(skb)->gso_size) {
                /* This packet will not finish the TSO burst. */
                ip_length = st->full_packet_size - ETH_HDR_LEN(skb);
                tsoh_th->fin = 0;
                tsoh_th->psh = 0;
        } else {
                /* This packet will be the last in the TSO burst. */
                ip_length = st->header_len - ETH_HDR_LEN(skb) + st->out_len;
                tsoh_th->fin = tcp_hdr(skb)->fin;
                tsoh_th->psh = tcp_hdr(skb)->psh;
        }

        if (st->protocol == htons(ETH_P_IP)) {
                struct iphdr *tsoh_iph =
                        (struct iphdr *)(header + SKB_IPV4_OFF(skb));

                tsoh_iph->tot_len = htons(ip_length);

                /* Linux leaves suitable gaps in the IP ID space for us to fill. */
                tsoh_iph->id = htons(st->ipv4_id);
                st->ipv4_id++;
        } else {
                struct ipv6hdr *tsoh_iph =
                        (struct ipv6hdr *)(header + SKB_IPV6_OFF(skb));

                tsoh_iph->payload_len = htons(ip_length - sizeof(*tsoh_iph));
        }

        st->packet_space = skb_shinfo(skb)->gso_size;
        ++tx_queue->tso_packets;

        /* Form a descriptor for this header. */
        efx_tso_put_header(tx_queue, tsoh, st->header_len);

        return 0;
}
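
/* Per-segment header fix-up, by example (values illustrative): with a
 * gso_size of 1448, each successive header copy advances tcp->seq by
 * 1448 and (for IPv4) ip->id by 1.  tot_len covers a full segment on
 * all but the last packet, where it shrinks to cover the remaining
 * out_len, and FIN/PSH are only propagated on that final segment.
 */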


/**
 * efx_enqueue_skb_tso - segment and transmit a TSO socket buffer
 * @tx_queue: Efx TX queue
 * @skb: Socket buffer
 *
 * Context: You must hold netif_tx_lock() to call this function.
 *
 * Add socket buffer @skb to @tx_queue, doing TSO.  On failure the @skb
 * is simply dropped.  In all cases @skb is consumed and %NETDEV_TX_OK
 * is returned.
 */
static int efx_enqueue_skb_tso(struct efx_tx_queue *tx_queue,
                               struct sk_buff *skb)
{
        struct efx_nic *efx = tx_queue->efx;
        int frag_i, rc;
        struct tso_state state;

        /* Find the packet protocol and sanity-check it */
        state.protocol = efx_tso_check_protocol(skb);

        EFX_BUG_ON_PARANOID(tx_queue->write_count != tx_queue->insert_count);

        tso_start(&state, skb);

        /* Assume that skb header area contains exactly the headers, and
         * all payload is in the frag list.
         */
        if (skb_headlen(skb) == state.header_len) {
                /* Grab the first payload fragment. */
                EFX_BUG_ON_PARANOID(skb_shinfo(skb)->nr_frags < 1);
                frag_i = 0;
                rc = tso_get_fragment(&state, efx,
                                      skb_shinfo(skb)->frags + frag_i);
                if (rc)
                        goto mem_err;
        } else {
                rc = tso_get_head_fragment(&state, efx, skb);
                if (rc)
                        goto mem_err;
                frag_i = -1;
        }

        if (tso_start_new_packet(tx_queue, skb, &state) < 0)
                goto mem_err;

        while (1) {
                tso_fill_packet_with_fragment(tx_queue, skb, &state);

                /* Move onto the next fragment? */
                if (state.in_len == 0) {
                        if (++frag_i >= skb_shinfo(skb)->nr_frags)
                                /* End of payload reached. */
                                break;
                        rc = tso_get_fragment(&state, efx,
                                              skb_shinfo(skb)->frags + frag_i);
                        if (rc)
                                goto mem_err;
                }

                /* Start at new packet? */
                if (state.packet_space == 0 &&
                    tso_start_new_packet(tx_queue, skb, &state) < 0)
                        goto mem_err;
        }

        netdev_tx_sent_queue(tx_queue->core_txq, skb->len);

        /* Pass off to hardware */
        efx_nic_push_buffers(tx_queue);

        efx_tx_maybe_stop_queue(tx_queue);

        tx_queue->tso_bursts++;
        return NETDEV_TX_OK;

 mem_err:
        netif_err(efx, tx_err, efx->net_dev,
                  "Out of memory for TSO headers, or DMA mapping error\n");
        dev_kfree_skb_any(skb);

        /* Free the DMA mapping we were in the process of writing out */
        if (state.unmap_len) {
                if (state.dma_flags & EFX_TX_BUF_MAP_SINGLE)
                        dma_unmap_single(&efx->pci_dev->dev, state.unmap_addr,
                                         state.unmap_len, DMA_TO_DEVICE);
                else
                        dma_unmap_page(&efx->pci_dev->dev, state.unmap_addr,
                                       state.unmap_len, DMA_TO_DEVICE);
        }

        efx_enqueue_unwind(tx_queue);
        return NETDEV_TX_OK;
}


/*
 * Free up all TSO datastructures associated with tx_queue.  This
 * routine should be called only once the tx_queue is both empty and
 * will no longer be used.
 */
static void efx_fini_tso(struct efx_tx_queue *tx_queue)
{
        unsigned i;

        if (tx_queue->buffer) {
                for (i = 0; i <= tx_queue->ptr_mask; ++i)
                        efx_tsoh_free(tx_queue, &tx_queue->buffer[i]);
        }

        while (tx_queue->tso_headers_free != NULL)
                efx_tsoh_block_free(tx_queue, tx_queue->tso_headers_free,
                                    &tx_queue->efx->pci_dev->dev);
}