/****************************************************************************
 * Driver for Solarflare Solarstorm network controllers and boards
 * Copyright 2005-2006 Fen Systems Ltd.
 * Copyright 2005-2010 Solarflare Communications Inc.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 as published
 * by the Free Software Foundation, incorporated herein by reference.
 */

#include <linux/pci.h>
#include <linux/tcp.h>
#include <linux/ip.h>
#include <linux/in.h>
#include <linux/ipv6.h>
#include <linux/slab.h>
#include <net/ipv6.h>
#include <linux/if_ether.h>
#include <linux/highmem.h>
#include "net_driver.h"
#include "efx.h"
#include "nic.h"
#include "workarounds.h"

static void efx_dequeue_buffer(struct efx_tx_queue *tx_queue,
			       struct efx_tx_buffer *buffer,
			       unsigned int *pkts_compl,
			       unsigned int *bytes_compl)
{
	if (buffer->unmap_len) {
		struct device *dma_dev = &tx_queue->efx->pci_dev->dev;
		dma_addr_t unmap_addr = (buffer->dma_addr + buffer->len -
					 buffer->unmap_len);
		if (buffer->flags & EFX_TX_BUF_MAP_SINGLE)
			dma_unmap_single(dma_dev, unmap_addr, buffer->unmap_len,
					 DMA_TO_DEVICE);
		else
			dma_unmap_page(dma_dev, unmap_addr, buffer->unmap_len,
				       DMA_TO_DEVICE);
		buffer->unmap_len = 0;
	}

	if (buffer->flags & EFX_TX_BUF_SKB) {
		(*pkts_compl)++;
		(*bytes_compl) += buffer->skb->len;
		dev_kfree_skb_any((struct sk_buff *) buffer->skb);
		netif_vdbg(tx_queue->efx, tx_done, tx_queue->efx->net_dev,
			   "TX queue %d transmission id %x complete\n",
			   tx_queue->queue, tx_queue->read_count);
	} else if (buffer->flags & EFX_TX_BUF_HEAP) {
		kfree(buffer->heap_buf);
	}

	buffer->len = 0;
	buffer->flags = 0;
}

static int efx_enqueue_skb_tso(struct efx_tx_queue *tx_queue,
			       struct sk_buff *skb);

static inline unsigned
efx_max_tx_len(struct efx_nic *efx, dma_addr_t dma_addr)
{
	/* Depending on the NIC revision, we can use descriptor
	 * lengths up to 8K or 8K-1.  However, since PCI Express
	 * devices must split read requests at 4K boundaries, there is
	 * little benefit from using descriptors that cross those
	 * boundaries and we keep things simple by not doing so.
	 */
	unsigned len = (~dma_addr & (EFX_PAGE_SIZE - 1)) + 1;

	/* Work around hardware bug for unaligned buffers. */
	if (EFX_WORKAROUND_5391(efx) && (dma_addr & 0xf))
		len = min_t(unsigned, len, 512 - (dma_addr & 0xf));

	return len;
}

unsigned int efx_tx_max_skb_descs(struct efx_nic *efx)
{
	/* Header and payload descriptor for each output segment, plus
	 * one for every input fragment boundary within a segment
	 */
	unsigned int max_descs = EFX_TSO_MAX_SEGS * 2 + MAX_SKB_FRAGS;

	/* Possibly one more per segment for the alignment workaround */
	if (EFX_WORKAROUND_5391(efx))
		max_descs += EFX_TSO_MAX_SEGS;

	/* Possibly more for PCIe page boundaries within input fragments */
	if (PAGE_SIZE > EFX_PAGE_SIZE)
		max_descs += max_t(unsigned int, MAX_SKB_FRAGS,
				   DIV_ROUND_UP(GSO_MAX_SIZE, EFX_PAGE_SIZE));

	return max_descs;
}

/* Get partner of a TX queue, seen as part of the same net core queue */
static struct efx_tx_queue *efx_tx_queue_partner(struct efx_tx_queue *tx_queue)
{
	if (tx_queue->queue & EFX_TXQ_TYPE_OFFLOAD)
		return tx_queue - EFX_TXQ_TYPE_OFFLOAD;
	else
		return tx_queue + EFX_TXQ_TYPE_OFFLOAD;
}

static void efx_tx_maybe_stop_queue(struct efx_tx_queue *txq1)
{
	/* We need to consider both queues that the net core sees as one */
	struct efx_tx_queue *txq2 = efx_tx_queue_partner(txq1);
	struct efx_nic *efx = txq1->efx;
	unsigned int fill_level;

	fill_level = max(txq1->insert_count - txq1->old_read_count,
			 txq2->insert_count - txq2->old_read_count);
	if (likely(fill_level < efx->txq_stop_thresh))
		return;

	/* We used the stale old_read_count above, which gives us a
	 * pessimistic estimate of the fill level (which may even
	 * validly be >= efx->txq_entries).  Now try again using
	 * read_count (more likely to be a cache miss).
	 *
	 * If we read read_count and then conditionally stop the
	 * queue, it is possible for the completion path to race with
	 * us and complete all outstanding descriptors in the middle,
	 * after which there will be no more completions to wake it.
	 * Therefore we stop the queue first, then read read_count
	 * (with a memory barrier to ensure the ordering), then
	 * restart the queue if the fill level turns out to be low
	 * enough.
	 */
	netif_tx_stop_queue(txq1->core_txq);
	smp_mb();
	txq1->old_read_count = ACCESS_ONCE(txq1->read_count);
	txq2->old_read_count = ACCESS_ONCE(txq2->read_count);

	fill_level = max(txq1->insert_count - txq1->old_read_count,
			 txq2->insert_count - txq2->old_read_count);
	EFX_BUG_ON_PARANOID(fill_level >= efx->txq_entries);
	if (likely(fill_level < efx->txq_stop_thresh)) {
		smp_mb();
		if (likely(!efx->loopback_selftest))
			netif_tx_start_queue(txq1->core_txq);
	}
}

/*
 * Add a socket buffer to a TX queue
 *
 * This maps all fragments of a socket buffer for DMA and adds them to
 * the TX queue.  The queue's insert pointer will be incremented by
 * the number of fragments in the socket buffer.
 *
 * If any DMA mapping fails, any mapped fragments will be unmapped and
 * the queue's insert pointer will be restored to its original value.
 *
 * This function is split out from efx_hard_start_xmit to allow the
 * loopback test to direct packets via specific TX queues.
 *
 * Returns NETDEV_TX_OK.
 * You must hold netif_tx_lock() to call this function.
 */
netdev_tx_t efx_enqueue_skb(struct efx_tx_queue *tx_queue, struct sk_buff *skb)
{
	struct efx_nic *efx = tx_queue->efx;
	struct device *dma_dev = &efx->pci_dev->dev;
	struct efx_tx_buffer *buffer;
	skb_frag_t *fragment;
	unsigned int len, unmap_len = 0, insert_ptr;
	dma_addr_t dma_addr, unmap_addr = 0;
	unsigned int dma_len;
	unsigned short dma_flags;
	int i = 0;

	EFX_BUG_ON_PARANOID(tx_queue->write_count != tx_queue->insert_count);

	if (skb_shinfo(skb)->gso_size)
		return efx_enqueue_skb_tso(tx_queue, skb);

	/* Get size of the initial fragment */
	len = skb_headlen(skb);

	/* Pad if necessary */
	if (EFX_WORKAROUND_15592(efx) && skb->len <= 32) {
		EFX_BUG_ON_PARANOID(skb->data_len);
		len = 32 + 1;
		if (skb_pad(skb, len - skb->len))
			return NETDEV_TX_OK;
	}

	/* Map for DMA.  Use dma_map_single rather than dma_map_page
	 * since this is more efficient on machines with sparse
	 * memory.
	 */
	dma_flags = EFX_TX_BUF_MAP_SINGLE;
	dma_addr = dma_map_single(dma_dev, skb->data, len, DMA_TO_DEVICE);

	/* Process all fragments */
	while (1) {
		if (unlikely(dma_mapping_error(dma_dev, dma_addr)))
			goto dma_err;

		/* Store fields for marking in the per-fragment final
		 * descriptor */
		unmap_len = len;
		unmap_addr = dma_addr;

		/* Add to TX queue, splitting across DMA boundaries */
		do {
			insert_ptr = tx_queue->insert_count & tx_queue->ptr_mask;
			buffer = &tx_queue->buffer[insert_ptr];
			EFX_BUG_ON_PARANOID(buffer->flags);
			EFX_BUG_ON_PARANOID(buffer->len);
			EFX_BUG_ON_PARANOID(buffer->unmap_len);

			dma_len = efx_max_tx_len(efx, dma_addr);
			if (likely(dma_len >= len))
				dma_len = len;

			/* Fill out per descriptor fields */
			buffer->len = dma_len;
			buffer->dma_addr = dma_addr;
			buffer->flags = EFX_TX_BUF_CONT;
			len -= dma_len;
			dma_addr += dma_len;
			++tx_queue->insert_count;
		} while (len);

		/* Transfer ownership of the unmapping to the final buffer */
		buffer->flags = EFX_TX_BUF_CONT | dma_flags;
		buffer->unmap_len = unmap_len;
		unmap_len = 0;

		/* Get address and size of next fragment */
		if (i >= skb_shinfo(skb)->nr_frags)
			break;
		fragment = &skb_shinfo(skb)->frags[i];
		len = skb_frag_size(fragment);
		i++;
		/* Map for DMA */
		dma_flags = 0;
		dma_addr = skb_frag_dma_map(dma_dev, fragment, 0, len,
					    DMA_TO_DEVICE);
	}

	/* Transfer ownership of the skb to the final buffer */
	buffer->skb = skb;
	buffer->flags = EFX_TX_BUF_SKB | dma_flags;

	netdev_tx_sent_queue(tx_queue->core_txq, skb->len);

	/* Pass off to hardware */
	efx_nic_push_buffers(tx_queue);

	efx_tx_maybe_stop_queue(tx_queue);

	return NETDEV_TX_OK;

 dma_err:
	netif_err(efx, tx_err, efx->net_dev,
		  " TX queue %d could not map skb with %d bytes %d "
		  "fragments for DMA\n", tx_queue->queue, skb->len,
		  skb_shinfo(skb)->nr_frags + 1);

	/* Mark the packet as transmitted, and free the SKB ourselves */
	dev_kfree_skb_any(skb);

	/* Work backwards until we hit the original insert pointer value */
	while (tx_queue->insert_count != tx_queue->write_count) {
		unsigned int pkts_compl = 0, bytes_compl = 0;
		--tx_queue->insert_count;
		insert_ptr = tx_queue->insert_count & tx_queue->ptr_mask;
		buffer = &tx_queue->buffer[insert_ptr];
		efx_dequeue_buffer(tx_queue, buffer, &pkts_compl, &bytes_compl);
	}

	/* Free the fragment we were mid-way through pushing */
	if (unmap_len) {
		if (dma_flags & EFX_TX_BUF_MAP_SINGLE)
			dma_unmap_single(dma_dev, unmap_addr, unmap_len,
					 DMA_TO_DEVICE);
		else
			dma_unmap_page(dma_dev, unmap_addr, unmap_len,
				       DMA_TO_DEVICE);
	}

	return NETDEV_TX_OK;
}

/* Remove packets from the TX queue
 *
 * This removes packets from the TX queue, up to and including the
 * specified index.
 */
static void efx_dequeue_buffers(struct efx_tx_queue *tx_queue,
				unsigned int index,
				unsigned int *pkts_compl,
				unsigned int *bytes_compl)
{
	struct efx_nic *efx = tx_queue->efx;
	unsigned int stop_index, read_ptr;

	stop_index = (index + 1) & tx_queue->ptr_mask;
	read_ptr = tx_queue->read_count & tx_queue->ptr_mask;

	while (read_ptr != stop_index) {
		struct efx_tx_buffer *buffer = &tx_queue->buffer[read_ptr];
		if (unlikely(buffer->len == 0)) {
			netif_err(efx, tx_err, efx->net_dev,
				  "TX queue %d spurious TX completion id %x\n",
				  tx_queue->queue, read_ptr);
			efx_schedule_reset(efx, RESET_TYPE_TX_SKIP);
			return;
		}

		efx_dequeue_buffer(tx_queue, buffer, pkts_compl, bytes_compl);

		++tx_queue->read_count;
		read_ptr = tx_queue->read_count & tx_queue->ptr_mask;
	}
}

/* Initiate a packet transmission.  We use one channel per CPU
 * (sharing when we have more CPUs than channels).  On Falcon, the TX
 * completion events will be directed back to the CPU that transmitted
 * the packet, which should be cache-efficient.
 *
 * Context: non-blocking.
 * Note that returning anything other than NETDEV_TX_OK will cause the
 * OS to free the skb.
 */
netdev_tx_t efx_hard_start_xmit(struct sk_buff *skb,
				struct net_device *net_dev)
{
	struct efx_nic *efx = netdev_priv(net_dev);
	struct efx_tx_queue *tx_queue;
	unsigned index, type;

	EFX_WARN_ON_PARANOID(!netif_device_present(net_dev));

	index = skb_get_queue_mapping(skb);
	type = skb->ip_summed == CHECKSUM_PARTIAL ? EFX_TXQ_TYPE_OFFLOAD : 0;
	if (index >= efx->n_tx_channels) {
		index -= efx->n_tx_channels;
		type |= EFX_TXQ_TYPE_HIGHPRI;
	}
	tx_queue = efx_get_tx_queue(efx, index, type);

	return efx_enqueue_skb(tx_queue, skb);
}

void efx_init_tx_queue_core_txq(struct efx_tx_queue *tx_queue)
{
	struct efx_nic *efx = tx_queue->efx;

	/* Must be inverse of queue lookup in efx_hard_start_xmit() */
	tx_queue->core_txq =
		netdev_get_tx_queue(efx->net_dev,
				    tx_queue->queue / EFX_TXQ_TYPES +
				    ((tx_queue->queue & EFX_TXQ_TYPE_HIGHPRI) ?
				     efx->n_tx_channels : 0));
}

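/* Set the number of traffic classes on the net device, mapping each class
 * to a block of TX channels and initialising the high-priority queues when
 * the class count is increasing.
 */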
int efx_setup_tc(struct net_device *net_dev, u8 num_tc)
{
	struct efx_nic *efx = netdev_priv(net_dev);
	struct efx_channel *channel;
	struct efx_tx_queue *tx_queue;
	unsigned tc;
	int rc;

	if (efx_nic_rev(efx) < EFX_REV_FALCON_B0 || num_tc > EFX_MAX_TX_TC)
		return -EINVAL;

	if (num_tc == net_dev->num_tc)
		return 0;

	for (tc = 0; tc < num_tc; tc++) {
		net_dev->tc_to_txq[tc].offset = tc * efx->n_tx_channels;
		net_dev->tc_to_txq[tc].count = efx->n_tx_channels;
	}

	if (num_tc > net_dev->num_tc) {
		/* Initialise high-priority queues as necessary */
		efx_for_each_channel(channel, efx) {
			efx_for_each_possible_channel_tx_queue(tx_queue,
							       channel) {
				if (!(tx_queue->queue & EFX_TXQ_TYPE_HIGHPRI))
					continue;
				if (!tx_queue->buffer) {
					rc = efx_probe_tx_queue(tx_queue);
					if (rc)
						return rc;
				}
				if (!tx_queue->initialised)
					efx_init_tx_queue(tx_queue);
				efx_init_tx_queue_core_txq(tx_queue);
			}
		}
	} else {
		/* Reduce number of classes before number of queues */
		net_dev->num_tc = num_tc;
	}

	rc = netif_set_real_num_tx_queues(net_dev,
					  max_t(int, num_tc, 1) *
					  efx->n_tx_channels);
	if (rc)
		return rc;

	/* Do not destroy high-priority queues when they become
	 * unused.  We would have to flush them first, and it is
	 * fairly difficult to flush a subset of TX queues.  Leave
	 * it to efx_fini_channels().
	 */

	net_dev->num_tc = num_tc;
	return 0;
}

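/* Process TX completions up to and including @index: free the completed
 * buffers, update BQL accounting, restart the core queue if it has drained
 * far enough, and record when the hardware queue becomes empty.
 */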
void efx_xmit_done(struct efx_tx_queue *tx_queue, unsigned int index)
{
	unsigned fill_level;
	struct efx_nic *efx = tx_queue->efx;
	struct efx_tx_queue *txq2;
	unsigned int pkts_compl = 0, bytes_compl = 0;

	EFX_BUG_ON_PARANOID(index > tx_queue->ptr_mask);

	efx_dequeue_buffers(tx_queue, index, &pkts_compl, &bytes_compl);
	netdev_tx_completed_queue(tx_queue->core_txq, pkts_compl, bytes_compl);

	/* See if we need to restart the netif queue.  This memory
	 * barrier ensures that we write read_count (inside
	 * efx_dequeue_buffers()) before reading the queue status.
	 */
	smp_mb();
	if (unlikely(netif_tx_queue_stopped(tx_queue->core_txq)) &&
	    likely(efx->port_enabled) &&
	    likely(netif_device_present(efx->net_dev))) {
		txq2 = efx_tx_queue_partner(tx_queue);
		fill_level = max(tx_queue->insert_count - tx_queue->read_count,
				 txq2->insert_count - txq2->read_count);
		if (fill_level <= efx->txq_wake_thresh)
			netif_tx_wake_queue(tx_queue->core_txq);
	}

	/* Check whether the hardware queue is now empty */
	if ((int)(tx_queue->read_count - tx_queue->old_write_count) >= 0) {
		tx_queue->old_write_count = ACCESS_ONCE(tx_queue->write_count);
		if (tx_queue->read_count == tx_queue->old_write_count) {
			smp_mb();
			tx_queue->empty_read_count =
				tx_queue->read_count | EFX_EMPTY_COUNT_VALID;
		}
	}
}

/* Size of page-based TSO header buffers.  Larger blocks must be
 * allocated from the heap.
 */
#define TSOH_STD_SIZE	128
#define TSOH_PER_PAGE	(PAGE_SIZE / TSOH_STD_SIZE)

/* At most half the descriptors in the queue at any time will refer to
 * a TSO header buffer, since they must always be followed by a
 * payload descriptor referring to an skb.
 */
static unsigned int efx_tsoh_page_count(struct efx_tx_queue *tx_queue)
{
	return DIV_ROUND_UP(tx_queue->ptr_mask + 1, 2 * TSOH_PER_PAGE);
}

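/* Allocate the software descriptor ring, the TSO header pages (for
 * checksum-offload queues) and the hardware descriptor ring for a TX queue.
 */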
int efx_probe_tx_queue(struct efx_tx_queue *tx_queue)
{
	struct efx_nic *efx = tx_queue->efx;
	unsigned int entries;
	int rc;

	/* Create the smallest power-of-two aligned ring */
	entries = max(roundup_pow_of_two(efx->txq_entries), EFX_MIN_DMAQ_SIZE);
	EFX_BUG_ON_PARANOID(entries > EFX_MAX_DMAQ_SIZE);
	tx_queue->ptr_mask = entries - 1;

	netif_dbg(efx, probe, efx->net_dev,
		  "creating TX queue %d size %#x mask %#x\n",
		  tx_queue->queue, efx->txq_entries, tx_queue->ptr_mask);

	/* Allocate software ring */
	tx_queue->buffer = kcalloc(entries, sizeof(*tx_queue->buffer),
				   GFP_KERNEL);
	if (!tx_queue->buffer)
		return -ENOMEM;

	if (tx_queue->queue & EFX_TXQ_TYPE_OFFLOAD) {
		tx_queue->tsoh_page =
			kcalloc(efx_tsoh_page_count(tx_queue),
				sizeof(tx_queue->tsoh_page[0]), GFP_KERNEL);
		if (!tx_queue->tsoh_page) {
			rc = -ENOMEM;
			goto fail1;
		}
	}

	/* Allocate hardware ring */
	rc = efx_nic_probe_tx(tx_queue);
	if (rc)
		goto fail2;

	return 0;

fail2:
	kfree(tx_queue->tsoh_page);
	tx_queue->tsoh_page = NULL;
fail1:
	kfree(tx_queue->buffer);
	tx_queue->buffer = NULL;
	return rc;
}

void efx_init_tx_queue(struct efx_tx_queue *tx_queue)
{
	netif_dbg(tx_queue->efx, drv, tx_queue->efx->net_dev,
		  "initialising TX queue %d\n", tx_queue->queue);

	tx_queue->insert_count = 0;
	tx_queue->write_count = 0;
	tx_queue->old_write_count = 0;
	tx_queue->read_count = 0;
	tx_queue->old_read_count = 0;
	tx_queue->empty_read_count = 0 | EFX_EMPTY_COUNT_VALID;

	/* Set up TX descriptor ring */
	efx_nic_init_tx(tx_queue);

	tx_queue->initialised = true;
}

void efx_release_tx_buffers(struct efx_tx_queue *tx_queue)
{
	struct efx_tx_buffer *buffer;

	if (!tx_queue->buffer)
		return;

	/* Free any buffers left in the ring */
	while (tx_queue->read_count != tx_queue->write_count) {
		unsigned int pkts_compl = 0, bytes_compl = 0;
		buffer = &tx_queue->buffer[tx_queue->read_count & tx_queue->ptr_mask];
		efx_dequeue_buffer(tx_queue, buffer, &pkts_compl, &bytes_compl);

		++tx_queue->read_count;
	}
	netdev_tx_reset_queue(tx_queue->core_txq);
}

void efx_fini_tx_queue(struct efx_tx_queue *tx_queue)
{
	if (!tx_queue->initialised)
		return;

	netif_dbg(tx_queue->efx, drv, tx_queue->efx->net_dev,
		  "shutting down TX queue %d\n", tx_queue->queue);

	tx_queue->initialised = false;

	/* Flush TX queue, remove descriptor ring */
	efx_nic_fini_tx(tx_queue);

	efx_release_tx_buffers(tx_queue);
}

void efx_remove_tx_queue(struct efx_tx_queue *tx_queue)
{
	int i;

	if (!tx_queue->buffer)
		return;

	netif_dbg(tx_queue->efx, drv, tx_queue->efx->net_dev,
		  "destroying TX queue %d\n", tx_queue->queue);
	efx_nic_remove_tx(tx_queue);

	if (tx_queue->tsoh_page) {
		for (i = 0; i < efx_tsoh_page_count(tx_queue); i++)
			efx_nic_free_buffer(tx_queue->efx,
					    &tx_queue->tsoh_page[i]);
		kfree(tx_queue->tsoh_page);
		tx_queue->tsoh_page = NULL;
	}

	kfree(tx_queue->buffer);
	tx_queue->buffer = NULL;
}


/* Efx TCP segmentation acceleration.
 *
 * Why?  Because by doing it here in the driver we can go significantly
 * faster than GSO.
 *
 * Requires TX checksum offload support.
 */

/* Number of bytes inserted at the start of a TSO header buffer,
 * similar to NET_IP_ALIGN.
 */
#ifdef CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS
#define TSOH_OFFSET	0
#else
#define TSOH_OFFSET	NET_IP_ALIGN
#endif

#define PTR_DIFF(p1, p2)  ((u8 *)(p1) - (u8 *)(p2))

/**
 * struct tso_state - TSO state for an SKB
 * @out_len: Remaining length in current segment
 * @seqnum: Current sequence number
 * @ipv4_id: Current IPv4 ID, host endian
 * @packet_space: Remaining space in current packet
 * @dma_addr: DMA address of current position
 * @in_len: Remaining length in current SKB fragment
 * @unmap_len: Length of SKB fragment
 * @unmap_addr: DMA address of SKB fragment
 * @dma_flags: TX buffer flags for DMA mapping - %EFX_TX_BUF_MAP_SINGLE or 0
 * @protocol: Network protocol (after any VLAN header)
 * @ip_off: Offset of IP header
 * @tcp_off: Offset of TCP header
 * @header_len: Number of bytes of header
 * @ip_base_len: IPv4 tot_len or IPv6 payload_len, before TCP payload
 *
 * The state used during segmentation.  It is put into this data structure
 * just to make it easy to pass into inline functions.
 */
struct tso_state {
	/* Output position */
	unsigned out_len;
	unsigned seqnum;
	unsigned ipv4_id;
	unsigned packet_space;

	/* Input position */
	dma_addr_t dma_addr;
	unsigned in_len;
	unsigned unmap_len;
	dma_addr_t unmap_addr;
	unsigned short dma_flags;

	__be16 protocol;
	unsigned int ip_off;
	unsigned int tcp_off;
	unsigned header_len;
	unsigned int ip_base_len;
};


/*
 * Verify that our various assumptions about sk_buffs and the conditions
 * under which TSO will be attempted hold true.  Return the protocol number.
 */
static __be16 efx_tso_check_protocol(struct sk_buff *skb)
{
	__be16 protocol = skb->protocol;

	EFX_BUG_ON_PARANOID(((struct ethhdr *)skb->data)->h_proto !=
			    protocol);
	if (protocol == htons(ETH_P_8021Q)) {
		struct vlan_ethhdr *veh = (struct vlan_ethhdr *)skb->data;
		protocol = veh->h_vlan_encapsulated_proto;
	}

	if (protocol == htons(ETH_P_IP)) {
		EFX_BUG_ON_PARANOID(ip_hdr(skb)->protocol != IPPROTO_TCP);
	} else {
		EFX_BUG_ON_PARANOID(protocol != htons(ETH_P_IPV6));
		EFX_BUG_ON_PARANOID(ipv6_hdr(skb)->nexthdr != NEXTHDR_TCP);
	}
	EFX_BUG_ON_PARANOID((PTR_DIFF(tcp_hdr(skb), skb->data)
			     + (tcp_hdr(skb)->doff << 2u)) >
			    skb_headlen(skb));

	return protocol;
}

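/* Return a buffer of @len bytes to hold a TSO header, taken from the
 * per-queue header pages when it fits in a standard block and from the
 * heap otherwise, and initialise @buffer to describe it.
 */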
static u8 *efx_tsoh_get_buffer(struct efx_tx_queue *tx_queue,
			       struct efx_tx_buffer *buffer, unsigned int len)
{
	u8 *result;

	EFX_BUG_ON_PARANOID(buffer->len);
	EFX_BUG_ON_PARANOID(buffer->flags);
	EFX_BUG_ON_PARANOID(buffer->unmap_len);

	if (likely(len <= TSOH_STD_SIZE - TSOH_OFFSET)) {
		unsigned index =
			(tx_queue->insert_count & tx_queue->ptr_mask) / 2;
		struct efx_buffer *page_buf =
			&tx_queue->tsoh_page[index / TSOH_PER_PAGE];
		unsigned offset =
			TSOH_STD_SIZE * (index % TSOH_PER_PAGE) + TSOH_OFFSET;

		if (unlikely(!page_buf->addr) &&
		    efx_nic_alloc_buffer(tx_queue->efx, page_buf, PAGE_SIZE))
			return NULL;

		result = (u8 *)page_buf->addr + offset;
		buffer->dma_addr = page_buf->dma_addr + offset;
		buffer->flags = EFX_TX_BUF_CONT;
	} else {
		tx_queue->tso_long_headers++;

		buffer->heap_buf = kmalloc(TSOH_OFFSET + len, GFP_ATOMIC);
		if (unlikely(!buffer->heap_buf))
			return NULL;
		result = (u8 *)buffer->heap_buf + TSOH_OFFSET;
		buffer->flags = EFX_TX_BUF_CONT | EFX_TX_BUF_HEAP;
	}

	buffer->len = len;

	return result;
}

/**
 * efx_tx_queue_insert - push descriptors onto the TX queue
 * @tx_queue: Efx TX queue
 * @dma_addr: DMA address of fragment
 * @len: Length of fragment
 * @final_buffer: The final buffer inserted into the queue
 *
 * Push descriptors onto the TX queue.
 */
static void efx_tx_queue_insert(struct efx_tx_queue *tx_queue,
				dma_addr_t dma_addr, unsigned len,
				struct efx_tx_buffer **final_buffer)
{
	struct efx_tx_buffer *buffer;
	struct efx_nic *efx = tx_queue->efx;
	unsigned dma_len, insert_ptr;

	EFX_BUG_ON_PARANOID(len <= 0);

	while (1) {
		insert_ptr = tx_queue->insert_count & tx_queue->ptr_mask;
		buffer = &tx_queue->buffer[insert_ptr];
		++tx_queue->insert_count;

		EFX_BUG_ON_PARANOID(tx_queue->insert_count -
				    tx_queue->read_count >=
				    efx->txq_entries);

		EFX_BUG_ON_PARANOID(buffer->len);
		EFX_BUG_ON_PARANOID(buffer->unmap_len);
		EFX_BUG_ON_PARANOID(buffer->flags);

		buffer->dma_addr = dma_addr;

		dma_len = efx_max_tx_len(efx, dma_addr);

		/* If there is enough space to send then do so */
		if (dma_len >= len)
			break;

		buffer->len = dma_len;
		buffer->flags = EFX_TX_BUF_CONT;
		dma_addr += dma_len;
		len -= dma_len;
	}

	EFX_BUG_ON_PARANOID(!len);
	buffer->len = len;
	*final_buffer = buffer;
}


/*
 * Put a TSO header into the TX queue.
 *
 * This is special-cased because we know that it is small enough to fit in
 * a single fragment, and we know it doesn't cross a page boundary.  It
 * also allows us to not worry about end-of-packet etc.
 */
static int efx_tso_put_header(struct efx_tx_queue *tx_queue,
			      struct efx_tx_buffer *buffer, u8 *header)
{
	if (unlikely(buffer->flags & EFX_TX_BUF_HEAP)) {
		buffer->dma_addr = dma_map_single(&tx_queue->efx->pci_dev->dev,
						  header, buffer->len,
						  DMA_TO_DEVICE);
		if (unlikely(dma_mapping_error(&tx_queue->efx->pci_dev->dev,
					       buffer->dma_addr))) {
			kfree(buffer->heap_buf);
			buffer->len = 0;
			buffer->flags = 0;
			return -ENOMEM;
		}
		buffer->unmap_len = buffer->len;
		buffer->flags |= EFX_TX_BUF_MAP_SINGLE;
	}

	++tx_queue->insert_count;
	return 0;
}


/* Remove buffers put into a tx_queue.  None of the buffers must have
 * an skb attached.
 */
static void efx_enqueue_unwind(struct efx_tx_queue *tx_queue)
{
	struct efx_tx_buffer *buffer;

	/* Work backwards until we hit the original insert pointer value */
	while (tx_queue->insert_count != tx_queue->write_count) {
		--tx_queue->insert_count;
		buffer = &tx_queue->buffer[tx_queue->insert_count &
					   tx_queue->ptr_mask];
		efx_dequeue_buffer(tx_queue, buffer, NULL, NULL);
	}
}


/* Parse the SKB header and initialise state. */
static void tso_start(struct tso_state *st, const struct sk_buff *skb)
{
	st->ip_off = skb_network_header(skb) - skb->data;
	st->tcp_off = skb_transport_header(skb) - skb->data;
	st->header_len = st->tcp_off + (tcp_hdr(skb)->doff << 2u);
	if (st->protocol == htons(ETH_P_IP)) {
		st->ip_base_len = st->header_len - st->ip_off;
		st->ipv4_id = ntohs(ip_hdr(skb)->id);
	} else {
		st->ip_base_len = st->header_len - st->tcp_off;
		st->ipv4_id = 0;
	}
	st->seqnum = ntohl(tcp_hdr(skb)->seq);

	EFX_BUG_ON_PARANOID(tcp_hdr(skb)->urg);
	EFX_BUG_ON_PARANOID(tcp_hdr(skb)->syn);
	EFX_BUG_ON_PARANOID(tcp_hdr(skb)->rst);

	st->out_len = skb->len - st->header_len;
	st->unmap_len = 0;
	st->dma_flags = 0;
}

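/* Map the current payload fragment for DMA and record it in the TSO state. */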
static int tso_get_fragment(struct tso_state *st, struct efx_nic *efx,
			    skb_frag_t *frag)
{
	st->unmap_addr = skb_frag_dma_map(&efx->pci_dev->dev, frag, 0,
					  skb_frag_size(frag), DMA_TO_DEVICE);
	if (likely(!dma_mapping_error(&efx->pci_dev->dev, st->unmap_addr))) {
		st->dma_flags = 0;
		st->unmap_len = skb_frag_size(frag);
		st->in_len = skb_frag_size(frag);
		st->dma_addr = st->unmap_addr;
		return 0;
	}
	return -ENOMEM;
}

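/* Map the payload in the linear skb area (after the headers) for DMA. */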
static int tso_get_head_fragment(struct tso_state *st, struct efx_nic *efx,
				 const struct sk_buff *skb)
{
	int hl = st->header_len;
	int len = skb_headlen(skb) - hl;

	st->unmap_addr = dma_map_single(&efx->pci_dev->dev, skb->data + hl,
					len, DMA_TO_DEVICE);
	if (likely(!dma_mapping_error(&efx->pci_dev->dev, st->unmap_addr))) {
		st->dma_flags = EFX_TX_BUF_MAP_SINGLE;
		st->unmap_len = len;
		st->in_len = len;
		st->dma_addr = st->unmap_addr;
		return 0;
	}
	return -ENOMEM;
}


/**
 * tso_fill_packet_with_fragment - form descriptors for the current fragment
 * @tx_queue: Efx TX queue
 * @skb: Socket buffer
 * @st: TSO state
 *
 * Form descriptors for the current fragment, until we reach the end
 * of fragment or end-of-packet.
 */
static void tso_fill_packet_with_fragment(struct efx_tx_queue *tx_queue,
					  const struct sk_buff *skb,
					  struct tso_state *st)
{
	struct efx_tx_buffer *buffer;
	int n;

	if (st->in_len == 0)
		return;
	if (st->packet_space == 0)
		return;

	EFX_BUG_ON_PARANOID(st->in_len <= 0);
	EFX_BUG_ON_PARANOID(st->packet_space <= 0);

	n = min(st->in_len, st->packet_space);

	st->packet_space -= n;
	st->out_len -= n;
	st->in_len -= n;

	efx_tx_queue_insert(tx_queue, st->dma_addr, n, &buffer);

	if (st->out_len == 0) {
		/* Transfer ownership of the skb */
		buffer->skb = skb;
		buffer->flags = EFX_TX_BUF_SKB;
	} else if (st->packet_space != 0) {
		buffer->flags = EFX_TX_BUF_CONT;
	}

	if (st->in_len == 0) {
		/* Transfer ownership of the DMA mapping */
		buffer->unmap_len = st->unmap_len;
		buffer->flags |= st->dma_flags;
		st->unmap_len = 0;
	}

	st->dma_addr += n;
}


/**
 * tso_start_new_packet - generate a new header and prepare for the new packet
 * @tx_queue: Efx TX queue
 * @skb: Socket buffer
 * @st: TSO state
 *
 * Generate a new header and prepare for the new packet.  Return 0 on
 * success, or -%ENOMEM if the header buffer could not be allocated.
 */
static int tso_start_new_packet(struct efx_tx_queue *tx_queue,
				const struct sk_buff *skb,
				struct tso_state *st)
{
	struct efx_tx_buffer *buffer =
		&tx_queue->buffer[tx_queue->insert_count & tx_queue->ptr_mask];
	struct tcphdr *tsoh_th;
	unsigned ip_length;
	u8 *header;
	int rc;

	/* Allocate and insert a DMA-mapped header buffer. */
	header = efx_tsoh_get_buffer(tx_queue, buffer, st->header_len);
	if (!header)
		return -ENOMEM;

	tsoh_th = (struct tcphdr *)(header + st->tcp_off);

	/* Copy and update the headers. */
	memcpy(header, skb->data, st->header_len);

	tsoh_th->seq = htonl(st->seqnum);
	st->seqnum += skb_shinfo(skb)->gso_size;
	if (st->out_len > skb_shinfo(skb)->gso_size) {
		/* This packet will not finish the TSO burst. */
		st->packet_space = skb_shinfo(skb)->gso_size;
		tsoh_th->fin = 0;
		tsoh_th->psh = 0;
	} else {
		/* This packet will be the last in the TSO burst. */
		st->packet_space = st->out_len;
		tsoh_th->fin = tcp_hdr(skb)->fin;
		tsoh_th->psh = tcp_hdr(skb)->psh;
	}
	ip_length = st->ip_base_len + st->packet_space;

	if (st->protocol == htons(ETH_P_IP)) {
		struct iphdr *tsoh_iph = (struct iphdr *)(header + st->ip_off);

		tsoh_iph->tot_len = htons(ip_length);

		/* Linux leaves suitable gaps in the IP ID space for us to fill. */
		tsoh_iph->id = htons(st->ipv4_id);
		st->ipv4_id++;
	} else {
		struct ipv6hdr *tsoh_iph =
			(struct ipv6hdr *)(header + st->ip_off);

		tsoh_iph->payload_len = htons(ip_length);
	}

	rc = efx_tso_put_header(tx_queue, buffer, header);
	if (unlikely(rc))
		return rc;

	++tx_queue->tso_packets;

	return 0;
}


/**
 * efx_enqueue_skb_tso - segment and transmit a TSO socket buffer
 * @tx_queue: Efx TX queue
 * @skb: Socket buffer
 *
 * Context: You must hold netif_tx_lock() to call this function.
 *
 * Add socket buffer @skb to @tx_queue, doing TSO.  In all cases @skb is
 * consumed.  Return %NETDEV_TX_OK.
 */
static int efx_enqueue_skb_tso(struct efx_tx_queue *tx_queue,
			       struct sk_buff *skb)
{
	struct efx_nic *efx = tx_queue->efx;
	int frag_i, rc;
	struct tso_state state;

	/* Find the packet protocol and sanity-check it */
	state.protocol = efx_tso_check_protocol(skb);

	EFX_BUG_ON_PARANOID(tx_queue->write_count != tx_queue->insert_count);

	tso_start(&state, skb);

	/* Assume that skb header area contains exactly the headers, and
	 * all payload is in the frag list.
	 */
	if (skb_headlen(skb) == state.header_len) {
		/* Grab the first payload fragment. */
		EFX_BUG_ON_PARANOID(skb_shinfo(skb)->nr_frags < 1);
		frag_i = 0;
		rc = tso_get_fragment(&state, efx,
				      skb_shinfo(skb)->frags + frag_i);
		if (rc)
			goto mem_err;
	} else {
		rc = tso_get_head_fragment(&state, efx, skb);
		if (rc)
			goto mem_err;
		frag_i = -1;
	}

	if (tso_start_new_packet(tx_queue, skb, &state) < 0)
		goto mem_err;

	while (1) {
		tso_fill_packet_with_fragment(tx_queue, skb, &state);

		/* Move onto the next fragment? */
		if (state.in_len == 0) {
			if (++frag_i >= skb_shinfo(skb)->nr_frags)
				/* End of payload reached. */
				break;
			rc = tso_get_fragment(&state, efx,
					      skb_shinfo(skb)->frags + frag_i);
			if (rc)
				goto mem_err;
		}

		/* Start at new packet? */
		if (state.packet_space == 0 &&
		    tso_start_new_packet(tx_queue, skb, &state) < 0)
			goto mem_err;
	}

	netdev_tx_sent_queue(tx_queue->core_txq, skb->len);

	/* Pass off to hardware */
	efx_nic_push_buffers(tx_queue);

	efx_tx_maybe_stop_queue(tx_queue);

	tx_queue->tso_bursts++;
	return NETDEV_TX_OK;

 mem_err:
	netif_err(efx, tx_err, efx->net_dev,
		  "Out of memory for TSO headers, or DMA mapping error\n");
	dev_kfree_skb_any(skb);

	/* Free the DMA mapping we were in the process of writing out */
	if (state.unmap_len) {
		if (state.dma_flags & EFX_TX_BUF_MAP_SINGLE)
			dma_unmap_single(&efx->pci_dev->dev, state.unmap_addr,
					 state.unmap_len, DMA_TO_DEVICE);
		else
			dma_unmap_page(&efx->pci_dev->dev, state.unmap_addr,
				       state.unmap_len, DMA_TO_DEVICE);
	}

	efx_enqueue_unwind(tx_queue);
	return NETDEV_TX_OK;
}
Ben Hutchingsb9b39b62008-05-07 12:51:12 +01001097}