/****************************************************************************
 * Driver for Solarflare Solarstorm network controllers and boards
 * Copyright 2005-2006 Fen Systems Ltd.
 * Copyright 2005-2009 Solarflare Communications Inc.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 as published
 * by the Free Software Foundation, incorporated herein by reference.
 */

#include <linux/pci.h>
#include <linux/tcp.h>
#include <linux/ip.h>
#include <linux/in.h>
#include <linux/ipv6.h>
#include <linux/slab.h>
#include <net/ipv6.h>
#include <linux/if_ether.h>
#include <linux/highmem.h>
#include "net_driver.h"
#include "efx.h"
#include "nic.h"
#include "workarounds.h"

/*
 * TX descriptor ring full threshold
 *
 * The tx_queue descriptor ring fill-level must fall below this value
 * before we restart the netif queue
 */
#define EFX_TXQ_THRESHOLD(_efx) ((_efx)->txq_entries / 2u)

/* We need to be able to nest calls to netif_tx_stop_queue(), partly
 * because of the 2 hardware queues associated with each core queue,
 * but also so that we can inhibit TX for reasons other than a full
 * hardware queue. */
void efx_stop_queue(struct efx_channel *channel)
{
	struct efx_nic *efx = channel->efx;
	struct efx_tx_queue *tx_queue = efx_channel_get_tx_queue(channel, 0);

	if (!tx_queue)
		return;

	spin_lock_bh(&channel->tx_stop_lock);
	netif_vdbg(efx, tx_queued, efx->net_dev, "stop TX queue\n");

	atomic_inc(&channel->tx_stop_count);
	netif_tx_stop_queue(
		netdev_get_tx_queue(efx->net_dev,
				    tx_queue->queue / EFX_TXQ_TYPES));

	spin_unlock_bh(&channel->tx_stop_lock);
}

/* Decrement core TX queue stop count and wake it if the count is 0 */
void efx_wake_queue(struct efx_channel *channel)
{
	struct efx_nic *efx = channel->efx;
	struct efx_tx_queue *tx_queue = efx_channel_get_tx_queue(channel, 0);

	if (!tx_queue)
		return;

	local_bh_disable();
	if (atomic_dec_and_lock(&channel->tx_stop_count,
				&channel->tx_stop_lock)) {
		netif_vdbg(efx, tx_queued, efx->net_dev, "waking TX queue\n");
		netif_tx_wake_queue(
			netdev_get_tx_queue(efx->net_dev,
					    tx_queue->queue / EFX_TXQ_TYPES));
		spin_unlock(&channel->tx_stop_lock);
	}
	local_bh_enable();
}

static void efx_dequeue_buffer(struct efx_tx_queue *tx_queue,
			       struct efx_tx_buffer *buffer)
{
	if (buffer->unmap_len) {
		struct pci_dev *pci_dev = tx_queue->efx->pci_dev;
		dma_addr_t unmap_addr = (buffer->dma_addr + buffer->len -
					 buffer->unmap_len);
		if (buffer->unmap_single)
			pci_unmap_single(pci_dev, unmap_addr, buffer->unmap_len,
					 PCI_DMA_TODEVICE);
		else
			pci_unmap_page(pci_dev, unmap_addr, buffer->unmap_len,
				       PCI_DMA_TODEVICE);
		buffer->unmap_len = 0;
		buffer->unmap_single = false;
	}

	if (buffer->skb) {
		dev_kfree_skb_any((struct sk_buff *) buffer->skb);
		buffer->skb = NULL;
		netif_vdbg(tx_queue->efx, tx_done, tx_queue->efx->net_dev,
			   "TX queue %d transmission id %x complete\n",
			   tx_queue->queue, tx_queue->read_count);
	}
}

/**
 * struct efx_tso_header - a DMA mapped buffer for packet headers
 * @next: Linked list of free ones.
 *	The list is protected by the TX queue lock.
 * @unmap_len: Length to unmap for an oversize buffer, or 0.
 * @dma_addr: The DMA address of the header below.
 *
 * This controls the memory used for a TSO header. Use TSOH_BUFFER()
 * to find the packet header data. Use TSOH_SIZE() to calculate the
 * total size required for a given packet header length. TSO headers
 * in the free list are exactly %TSOH_STD_SIZE bytes in size.
 */
struct efx_tso_header {
	union {
		struct efx_tso_header *next;
		size_t unmap_len;
	};
	dma_addr_t dma_addr;
};

static int efx_enqueue_skb_tso(struct efx_tx_queue *tx_queue,
			       struct sk_buff *skb);
static void efx_fini_tso(struct efx_tx_queue *tx_queue);
static void efx_tsoh_heap_free(struct efx_tx_queue *tx_queue,
			       struct efx_tso_header *tsoh);

static void efx_tsoh_free(struct efx_tx_queue *tx_queue,
			  struct efx_tx_buffer *buffer)
{
	if (buffer->tsoh) {
		if (likely(!buffer->tsoh->unmap_len)) {
			buffer->tsoh->next = tx_queue->tso_headers_free;
			tx_queue->tso_headers_free = buffer->tsoh;
		} else {
			efx_tsoh_heap_free(tx_queue, buffer->tsoh);
		}
		buffer->tsoh = NULL;
	}
}


static inline unsigned
efx_max_tx_len(struct efx_nic *efx, dma_addr_t dma_addr)
{
	/* Depending on the NIC revision, we can use descriptor
	 * lengths up to 8K or 8K-1. However, since PCI Express
	 * devices must split read requests at 4K boundaries, there is
	 * little benefit from using descriptors that cross those
	 * boundaries and we keep things simple by not doing so.
	 */
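	/* Number of bytes from dma_addr up to the next 4K boundary */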
	unsigned len = (~dma_addr & 0xfff) + 1;

	/* Work around hardware bug for unaligned buffers. */
	if (EFX_WORKAROUND_5391(efx) && (dma_addr & 0xf))
		len = min_t(unsigned, len, 512 - (dma_addr & 0xf));

	return len;
}

/*
 * Add a socket buffer to a TX queue
 *
 * This maps all fragments of a socket buffer for DMA and adds them to
 * the TX queue. The queue's insert pointer will be incremented by
 * the number of fragments in the socket buffer.
 *
 * If any DMA mapping fails, any mapped fragments will be unmapped and
 * the queue's insert pointer will be restored to its original value.
 *
 * This function is split out from efx_hard_start_xmit to allow the
 * loopback test to direct packets via specific TX queues.
 *
 * Returns NETDEV_TX_OK or NETDEV_TX_BUSY
 * You must hold netif_tx_lock() to call this function.
 */
netdev_tx_t efx_enqueue_skb(struct efx_tx_queue *tx_queue, struct sk_buff *skb)
{
	struct efx_nic *efx = tx_queue->efx;
	struct pci_dev *pci_dev = efx->pci_dev;
	struct efx_tx_buffer *buffer;
	skb_frag_t *fragment;
	struct page *page;
	int page_offset;
	unsigned int len, unmap_len = 0, fill_level, insert_ptr;
	dma_addr_t dma_addr, unmap_addr = 0;
	unsigned int dma_len;
	bool unmap_single;
	int q_space, i = 0;
	netdev_tx_t rc = NETDEV_TX_OK;

	EFX_BUG_ON_PARANOID(tx_queue->write_count != tx_queue->insert_count);

	if (skb_shinfo(skb)->gso_size)
		return efx_enqueue_skb_tso(tx_queue, skb);

	/* Get size of the initial fragment */
	len = skb_headlen(skb);

	/* Pad if necessary */
	if (EFX_WORKAROUND_15592(efx) && skb->len <= 32) {
		EFX_BUG_ON_PARANOID(skb->data_len);
		len = 32 + 1;
		if (skb_pad(skb, len - skb->len))
			return NETDEV_TX_OK;
	}

	fill_level = tx_queue->insert_count - tx_queue->old_read_count;
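	/* -1 as there is no way to represent all descriptors used */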
	q_space = efx->txq_entries - 1 - fill_level;

	/* Map for DMA. Use pci_map_single rather than pci_map_page
	 * since this is more efficient on machines with sparse
	 * memory.
	 */
	unmap_single = true;
	dma_addr = pci_map_single(pci_dev, skb->data, len, PCI_DMA_TODEVICE);

	/* Process all fragments */
	while (1) {
		if (unlikely(pci_dma_mapping_error(pci_dev, dma_addr)))
			goto pci_err;

		/* Store fields for marking in the per-fragment final
		 * descriptor */
		unmap_len = len;
		unmap_addr = dma_addr;

		/* Add to TX queue, splitting across DMA boundaries */
		do {
			if (unlikely(q_space-- <= 0)) {
				/* It might be that completions have
				 * happened since the xmit path last
				 * checked. Update the xmit path's
				 * copy of read_count.
				 */
				++tx_queue->stopped;
				/* This memory barrier protects the
				 * change of stopped from the access
				 * of read_count. */
				smp_mb();
				tx_queue->old_read_count =
					ACCESS_ONCE(tx_queue->read_count);
				fill_level = (tx_queue->insert_count
					      - tx_queue->old_read_count);
				q_space = efx->txq_entries - 1 - fill_level;
				if (unlikely(q_space-- <= 0))
					goto stop;
				smp_mb();
				--tx_queue->stopped;
			}

			insert_ptr = tx_queue->insert_count & tx_queue->ptr_mask;
			buffer = &tx_queue->buffer[insert_ptr];
			efx_tsoh_free(tx_queue, buffer);
			EFX_BUG_ON_PARANOID(buffer->tsoh);
			EFX_BUG_ON_PARANOID(buffer->skb);
			EFX_BUG_ON_PARANOID(buffer->len);
			EFX_BUG_ON_PARANOID(!buffer->continuation);
			EFX_BUG_ON_PARANOID(buffer->unmap_len);

			dma_len = efx_max_tx_len(efx, dma_addr);
			if (likely(dma_len >= len))
				dma_len = len;

			/* Fill out per descriptor fields */
			buffer->len = dma_len;
			buffer->dma_addr = dma_addr;
			len -= dma_len;
			dma_addr += dma_len;
			++tx_queue->insert_count;
		} while (len);

		/* Transfer ownership of the unmapping to the final buffer */
		buffer->unmap_single = unmap_single;
		buffer->unmap_len = unmap_len;
		unmap_len = 0;

		/* Get address and size of next fragment */
		if (i >= skb_shinfo(skb)->nr_frags)
			break;
		fragment = &skb_shinfo(skb)->frags[i];
		len = fragment->size;
		page = fragment->page;
		page_offset = fragment->page_offset;
		i++;
		/* Map for DMA */
		unmap_single = false;
		dma_addr = pci_map_page(pci_dev, page, page_offset, len,
					PCI_DMA_TODEVICE);
	}

	/* Transfer ownership of the skb to the final buffer */
	buffer->skb = skb;
	buffer->continuation = false;

	/* Pass off to hardware */
	efx_nic_push_buffers(tx_queue);

	return NETDEV_TX_OK;

 pci_err:
	netif_err(efx, tx_err, efx->net_dev,
		  " TX queue %d could not map skb with %d bytes %d "
		  "fragments for DMA\n", tx_queue->queue, skb->len,
		  skb_shinfo(skb)->nr_frags + 1);

	/* Mark the packet as transmitted, and free the SKB ourselves */
	dev_kfree_skb_any(skb);
	goto unwind;

 stop:
	rc = NETDEV_TX_BUSY;

	if (tx_queue->stopped == 1)
		efx_stop_queue(tx_queue->channel);

 unwind:
	/* Work backwards until we hit the original insert pointer value */
	while (tx_queue->insert_count != tx_queue->write_count) {
		--tx_queue->insert_count;
		insert_ptr = tx_queue->insert_count & tx_queue->ptr_mask;
		buffer = &tx_queue->buffer[insert_ptr];
		efx_dequeue_buffer(tx_queue, buffer);
		buffer->len = 0;
	}

	/* Free the fragment we were mid-way through pushing */
	if (unmap_len) {
		if (unmap_single)
			pci_unmap_single(pci_dev, unmap_addr, unmap_len,
					 PCI_DMA_TODEVICE);
		else
			pci_unmap_page(pci_dev, unmap_addr, unmap_len,
				       PCI_DMA_TODEVICE);
	}

	return rc;
}

/* Remove packets from the TX queue
 *
 * This removes packets from the TX queue, up to and including the
 * specified index.
 */
static void efx_dequeue_buffers(struct efx_tx_queue *tx_queue,
				unsigned int index)
{
	struct efx_nic *efx = tx_queue->efx;
	unsigned int stop_index, read_ptr;

	stop_index = (index + 1) & tx_queue->ptr_mask;
	read_ptr = tx_queue->read_count & tx_queue->ptr_mask;

	while (read_ptr != stop_index) {
		struct efx_tx_buffer *buffer = &tx_queue->buffer[read_ptr];
		if (unlikely(buffer->len == 0)) {
			netif_err(efx, tx_err, efx->net_dev,
				  "TX queue %d spurious TX completion id %x\n",
				  tx_queue->queue, read_ptr);
			efx_schedule_reset(efx, RESET_TYPE_TX_SKIP);
			return;
		}

		efx_dequeue_buffer(tx_queue, buffer);
		buffer->continuation = true;
		buffer->len = 0;

		++tx_queue->read_count;
		read_ptr = tx_queue->read_count & tx_queue->ptr_mask;
	}
}

/* Initiate a packet transmission. We use one channel per CPU
 * (sharing when we have more CPUs than channels). On Falcon, the TX
 * completion events will be directed back to the CPU that transmitted
 * the packet, which should be cache-efficient.
 *
 * Context: non-blocking.
 * Note that returning anything other than NETDEV_TX_OK will cause the
 * OS to free the skb.
 */
netdev_tx_t efx_hard_start_xmit(struct sk_buff *skb,
				struct net_device *net_dev)
{
	struct efx_nic *efx = netdev_priv(net_dev);
	struct efx_tx_queue *tx_queue;

	if (unlikely(efx->port_inhibited))
		return NETDEV_TX_BUSY;

	tx_queue = efx_get_tx_queue(efx, skb_get_queue_mapping(skb),
				    skb->ip_summed == CHECKSUM_PARTIAL ?
				    EFX_TXQ_TYPE_OFFLOAD : 0);

	return efx_enqueue_skb(tx_queue, skb);
}

void efx_xmit_done(struct efx_tx_queue *tx_queue, unsigned int index)
{
	unsigned fill_level;
	struct efx_nic *efx = tx_queue->efx;
	struct netdev_queue *queue;

	EFX_BUG_ON_PARANOID(index > tx_queue->ptr_mask);

	efx_dequeue_buffers(tx_queue, index);

	/* See if we need to restart the netif queue. This barrier
	 * separates the update of read_count from the test of
	 * stopped. */
	smp_mb();
	if (unlikely(tx_queue->stopped) && likely(efx->port_enabled)) {
		fill_level = tx_queue->insert_count - tx_queue->read_count;
		if (fill_level < EFX_TXQ_THRESHOLD(efx)) {
			EFX_BUG_ON_PARANOID(!efx_dev_registered(efx));

			/* Do this under netif_tx_lock(), to avoid racing
			 * with efx_xmit(). */
			queue = netdev_get_tx_queue(
				efx->net_dev,
				tx_queue->queue / EFX_TXQ_TYPES);
			__netif_tx_lock(queue, smp_processor_id());
			if (tx_queue->stopped) {
				tx_queue->stopped = 0;
				efx_wake_queue(tx_queue->channel);
			}
			__netif_tx_unlock(queue);
		}
	}
}

int efx_probe_tx_queue(struct efx_tx_queue *tx_queue)
{
	struct efx_nic *efx = tx_queue->efx;
	unsigned int entries;
	int i, rc;

	/* Create the smallest power-of-two aligned ring */
	entries = max(roundup_pow_of_two(efx->txq_entries), EFX_MIN_DMAQ_SIZE);
	EFX_BUG_ON_PARANOID(entries > EFX_MAX_DMAQ_SIZE);
	tx_queue->ptr_mask = entries - 1;

	netif_dbg(efx, probe, efx->net_dev,
		  "creating TX queue %d size %#x mask %#x\n",
		  tx_queue->queue, efx->txq_entries, tx_queue->ptr_mask);

	/* Allocate software ring */
	tx_queue->buffer = kzalloc(entries * sizeof(*tx_queue->buffer),
				   GFP_KERNEL);
	if (!tx_queue->buffer)
		return -ENOMEM;
	for (i = 0; i <= tx_queue->ptr_mask; ++i)
		tx_queue->buffer[i].continuation = true;

	/* Allocate hardware ring */
	rc = efx_nic_probe_tx(tx_queue);
	if (rc)
		goto fail;

	return 0;

 fail:
	kfree(tx_queue->buffer);
	tx_queue->buffer = NULL;
	return rc;
}

void efx_init_tx_queue(struct efx_tx_queue *tx_queue)
{
	netif_dbg(tx_queue->efx, drv, tx_queue->efx->net_dev,
		  "initialising TX queue %d\n", tx_queue->queue);

	tx_queue->insert_count = 0;
	tx_queue->write_count = 0;
	tx_queue->read_count = 0;
	tx_queue->old_read_count = 0;
	BUG_ON(tx_queue->stopped);

	/* Set up TX descriptor ring */
	efx_nic_init_tx(tx_queue);
}

void efx_release_tx_buffers(struct efx_tx_queue *tx_queue)
{
	struct efx_tx_buffer *buffer;

	if (!tx_queue->buffer)
		return;

	/* Free any buffers left in the ring */
	while (tx_queue->read_count != tx_queue->write_count) {
		buffer = &tx_queue->buffer[tx_queue->read_count & tx_queue->ptr_mask];
		efx_dequeue_buffer(tx_queue, buffer);
		buffer->continuation = true;
		buffer->len = 0;

		++tx_queue->read_count;
	}
}

void efx_fini_tx_queue(struct efx_tx_queue *tx_queue)
{
	netif_dbg(tx_queue->efx, drv, tx_queue->efx->net_dev,
		  "shutting down TX queue %d\n", tx_queue->queue);

	/* Flush TX queue, remove descriptor ring */
	efx_nic_fini_tx(tx_queue);

	efx_release_tx_buffers(tx_queue);

	/* Free up TSO header cache */
	efx_fini_tso(tx_queue);

	/* Release queue's stop on port, if any */
	if (tx_queue->stopped) {
		tx_queue->stopped = 0;
		efx_wake_queue(tx_queue->channel);
	}
}

void efx_remove_tx_queue(struct efx_tx_queue *tx_queue)
{
	netif_dbg(tx_queue->efx, drv, tx_queue->efx->net_dev,
		  "destroying TX queue %d\n", tx_queue->queue);
	efx_nic_remove_tx(tx_queue);

	kfree(tx_queue->buffer);
	tx_queue->buffer = NULL;
}


/* Efx TCP segmentation acceleration.
 *
 * Why? Because by doing it here in the driver we can go significantly
 * faster than the GSO.
 *
 * Requires TX checksum offload support.
 */

/* Number of bytes inserted at the start of a TSO header buffer,
 * similar to NET_IP_ALIGN.
 */
#ifdef CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS
#define TSOH_OFFSET 0
#else
#define TSOH_OFFSET NET_IP_ALIGN
#endif

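/* Find the header data stored immediately after a struct efx_tso_header,
 * offset by TSOH_OFFSET bytes.
 */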
#define TSOH_BUFFER(tsoh) ((u8 *)(tsoh + 1) + TSOH_OFFSET)

/* Total size of struct efx_tso_header, buffer and padding */
#define TSOH_SIZE(hdr_len) \
	(sizeof(struct efx_tso_header) + TSOH_OFFSET + hdr_len)

/* Size of blocks on free list. Larger blocks must be allocated from
 * the heap.
 */
#define TSOH_STD_SIZE 128

#define PTR_DIFF(p1, p2) ((u8 *)(p1) - (u8 *)(p2))
#define ETH_HDR_LEN(skb) (skb_network_header(skb) - (skb)->data)
#define SKB_TCP_OFF(skb) PTR_DIFF(tcp_hdr(skb), (skb)->data)
#define SKB_IPV4_OFF(skb) PTR_DIFF(ip_hdr(skb), (skb)->data)
#define SKB_IPV6_OFF(skb) PTR_DIFF(ipv6_hdr(skb), (skb)->data)

/**
 * struct tso_state - TSO state for an SKB
 * @out_len: Remaining length in current segment
 * @seqnum: Current sequence number
 * @ipv4_id: Current IPv4 ID, host endian
 * @packet_space: Remaining space in current packet
 * @dma_addr: DMA address of current position
 * @in_len: Remaining length in current SKB fragment
 * @unmap_len: Length of SKB fragment
 * @unmap_addr: DMA address of SKB fragment
 * @unmap_single: DMA single vs page mapping flag
 * @protocol: Network protocol (after any VLAN header)
 * @header_len: Number of bytes of header
 * @full_packet_size: Number of bytes to put in each outgoing segment
 *
 * The state used during segmentation. It is put into this data structure
 * just to make it easy to pass into inline functions.
 */
struct tso_state {
	/* Output position */
	unsigned out_len;
	unsigned seqnum;
	unsigned ipv4_id;
	unsigned packet_space;

	/* Input position */
	dma_addr_t dma_addr;
	unsigned in_len;
	unsigned unmap_len;
	dma_addr_t unmap_addr;
	bool unmap_single;

	__be16 protocol;
	unsigned header_len;
	int full_packet_size;
};


/*
 * Verify that our various assumptions about sk_buffs and the conditions
 * under which TSO will be attempted hold true. Return the protocol number.
 */
static __be16 efx_tso_check_protocol(struct sk_buff *skb)
{
	__be16 protocol = skb->protocol;

	EFX_BUG_ON_PARANOID(((struct ethhdr *)skb->data)->h_proto !=
			    protocol);
	if (protocol == htons(ETH_P_8021Q)) {
		/* Find the encapsulated protocol; reset network header
		 * and transport header based on that. */
		struct vlan_ethhdr *veh = (struct vlan_ethhdr *)skb->data;
		protocol = veh->h_vlan_encapsulated_proto;
		skb_set_network_header(skb, sizeof(*veh));
		if (protocol == htons(ETH_P_IP))
			skb_set_transport_header(skb, sizeof(*veh) +
						 4 * ip_hdr(skb)->ihl);
		else if (protocol == htons(ETH_P_IPV6))
			skb_set_transport_header(skb, sizeof(*veh) +
						 sizeof(struct ipv6hdr));
	}

	if (protocol == htons(ETH_P_IP)) {
		EFX_BUG_ON_PARANOID(ip_hdr(skb)->protocol != IPPROTO_TCP);
	} else {
		EFX_BUG_ON_PARANOID(protocol != htons(ETH_P_IPV6));
		EFX_BUG_ON_PARANOID(ipv6_hdr(skb)->nexthdr != NEXTHDR_TCP);
	}
	EFX_BUG_ON_PARANOID((PTR_DIFF(tcp_hdr(skb), skb->data)
			     + (tcp_hdr(skb)->doff << 2u)) >
			    skb_headlen(skb));

	return protocol;
}


/*
 * Allocate a page worth of efx_tso_header structures, and string them
 * into the tx_queue->tso_headers_free linked list. Return 0 or -ENOMEM.
 */
static int efx_tsoh_block_alloc(struct efx_tx_queue *tx_queue)
{

	struct pci_dev *pci_dev = tx_queue->efx->pci_dev;
	struct efx_tso_header *tsoh;
	dma_addr_t dma_addr;
	u8 *base_kva, *kva;

	base_kva = pci_alloc_consistent(pci_dev, PAGE_SIZE, &dma_addr);
	if (base_kva == NULL) {
		netif_err(tx_queue->efx, tx_err, tx_queue->efx->net_dev,
			  "Unable to allocate page for TSO headers\n");
		return -ENOMEM;
	}

	/* pci_alloc_consistent() allocates pages. */
	EFX_BUG_ON_PARANOID(dma_addr & (PAGE_SIZE - 1u));

	for (kva = base_kva; kva < base_kva + PAGE_SIZE; kva += TSOH_STD_SIZE) {
		tsoh = (struct efx_tso_header *)kva;
		tsoh->dma_addr = dma_addr + (TSOH_BUFFER(tsoh) - base_kva);
		tsoh->next = tx_queue->tso_headers_free;
		tx_queue->tso_headers_free = tsoh;
	}

	return 0;
}


/* Free up a TSO header, and all others in the same page. */
static void efx_tsoh_block_free(struct efx_tx_queue *tx_queue,
				struct efx_tso_header *tsoh,
				struct pci_dev *pci_dev)
{
	struct efx_tso_header **p;
	unsigned long base_kva;
	dma_addr_t base_dma;

	base_kva = (unsigned long)tsoh & PAGE_MASK;
	base_dma = tsoh->dma_addr & PAGE_MASK;

	p = &tx_queue->tso_headers_free;
	while (*p != NULL) {
		if (((unsigned long)*p & PAGE_MASK) == base_kva)
			*p = (*p)->next;
		else
			p = &(*p)->next;
	}

	pci_free_consistent(pci_dev, PAGE_SIZE, (void *)base_kva, base_dma);
}

static struct efx_tso_header *
efx_tsoh_heap_alloc(struct efx_tx_queue *tx_queue, size_t header_len)
{
	struct efx_tso_header *tsoh;

	tsoh = kmalloc(TSOH_SIZE(header_len), GFP_ATOMIC | GFP_DMA);
	if (unlikely(!tsoh))
		return NULL;

	tsoh->dma_addr = pci_map_single(tx_queue->efx->pci_dev,
					TSOH_BUFFER(tsoh), header_len,
					PCI_DMA_TODEVICE);
	if (unlikely(pci_dma_mapping_error(tx_queue->efx->pci_dev,
					   tsoh->dma_addr))) {
		kfree(tsoh);
		return NULL;
	}

	tsoh->unmap_len = header_len;
	return tsoh;
}

static void
efx_tsoh_heap_free(struct efx_tx_queue *tx_queue, struct efx_tso_header *tsoh)
{
	pci_unmap_single(tx_queue->efx->pci_dev,
			 tsoh->dma_addr, tsoh->unmap_len,
			 PCI_DMA_TODEVICE);
	kfree(tsoh);
}

/**
 * efx_tx_queue_insert - push descriptors onto the TX queue
 * @tx_queue: Efx TX queue
 * @dma_addr: DMA address of fragment
 * @len: Length of fragment
 * @final_buffer: The final buffer inserted into the queue
 *
 * Push descriptors onto the TX queue. Return 0 on success or 1 if
 * @tx_queue full.
 */
static int efx_tx_queue_insert(struct efx_tx_queue *tx_queue,
			       dma_addr_t dma_addr, unsigned len,
			       struct efx_tx_buffer **final_buffer)
{
	struct efx_tx_buffer *buffer;
	struct efx_nic *efx = tx_queue->efx;
	unsigned dma_len, fill_level, insert_ptr;
	int q_space;

	EFX_BUG_ON_PARANOID(len <= 0);

	fill_level = tx_queue->insert_count - tx_queue->old_read_count;
	/* -1 as there is no way to represent all descriptors used */
	q_space = efx->txq_entries - 1 - fill_level;

	while (1) {
		if (unlikely(q_space-- <= 0)) {
			/* It might be that completions have happened
			 * since the xmit path last checked. Update
			 * the xmit path's copy of read_count.
			 */
			++tx_queue->stopped;
			/* This memory barrier protects the change of
			 * stopped from the access of read_count. */
			smp_mb();
			tx_queue->old_read_count =
				ACCESS_ONCE(tx_queue->read_count);
			fill_level = (tx_queue->insert_count
				      - tx_queue->old_read_count);
			q_space = efx->txq_entries - 1 - fill_level;
			if (unlikely(q_space-- <= 0)) {
				*final_buffer = NULL;
				return 1;
			}
			smp_mb();
			--tx_queue->stopped;
		}

		insert_ptr = tx_queue->insert_count & tx_queue->ptr_mask;
		buffer = &tx_queue->buffer[insert_ptr];
		++tx_queue->insert_count;

		EFX_BUG_ON_PARANOID(tx_queue->insert_count -
				    tx_queue->read_count >=
				    efx->txq_entries);

		efx_tsoh_free(tx_queue, buffer);
		EFX_BUG_ON_PARANOID(buffer->len);
		EFX_BUG_ON_PARANOID(buffer->unmap_len);
		EFX_BUG_ON_PARANOID(buffer->skb);
		EFX_BUG_ON_PARANOID(!buffer->continuation);
		EFX_BUG_ON_PARANOID(buffer->tsoh);

		buffer->dma_addr = dma_addr;

		dma_len = efx_max_tx_len(efx, dma_addr);

		/* If there is enough space to send then do so */
		if (dma_len >= len)
			break;

		buffer->len = dma_len; /* Don't set the other members */
		dma_addr += dma_len;
		len -= dma_len;
	}

	EFX_BUG_ON_PARANOID(!len);
	buffer->len = len;
	*final_buffer = buffer;
	return 0;
}


/*
 * Put a TSO header into the TX queue.
 *
 * This is special-cased because we know that it is small enough to fit in
 * a single fragment, and we know it doesn't cross a page boundary. It
 * also allows us to not worry about end-of-packet etc.
 */
static void efx_tso_put_header(struct efx_tx_queue *tx_queue,
			       struct efx_tso_header *tsoh, unsigned len)
{
	struct efx_tx_buffer *buffer;

	buffer = &tx_queue->buffer[tx_queue->insert_count & tx_queue->ptr_mask];
	efx_tsoh_free(tx_queue, buffer);
	EFX_BUG_ON_PARANOID(buffer->len);
	EFX_BUG_ON_PARANOID(buffer->unmap_len);
	EFX_BUG_ON_PARANOID(buffer->skb);
	EFX_BUG_ON_PARANOID(!buffer->continuation);
	EFX_BUG_ON_PARANOID(buffer->tsoh);
	buffer->len = len;
	buffer->dma_addr = tsoh->dma_addr;
	buffer->tsoh = tsoh;

	++tx_queue->insert_count;
}


/* Remove descriptors put into a tx_queue. */
static void efx_enqueue_unwind(struct efx_tx_queue *tx_queue)
{
	struct efx_tx_buffer *buffer;
	dma_addr_t unmap_addr;

	/* Work backwards until we hit the original insert pointer value */
	while (tx_queue->insert_count != tx_queue->write_count) {
		--tx_queue->insert_count;
		buffer = &tx_queue->buffer[tx_queue->insert_count &
					   tx_queue->ptr_mask];
		efx_tsoh_free(tx_queue, buffer);
		EFX_BUG_ON_PARANOID(buffer->skb);
		if (buffer->unmap_len) {
			unmap_addr = (buffer->dma_addr + buffer->len -
				      buffer->unmap_len);
			if (buffer->unmap_single)
				pci_unmap_single(tx_queue->efx->pci_dev,
						 unmap_addr, buffer->unmap_len,
						 PCI_DMA_TODEVICE);
			else
				pci_unmap_page(tx_queue->efx->pci_dev,
					       unmap_addr, buffer->unmap_len,
					       PCI_DMA_TODEVICE);
			buffer->unmap_len = 0;
		}
		buffer->len = 0;
		buffer->continuation = true;
	}
}


/* Parse the SKB header and initialise state. */
static void tso_start(struct tso_state *st, const struct sk_buff *skb)
{
	/* All ethernet/IP/TCP headers combined size is TCP header size
	 * plus offset of TCP header relative to start of packet.
	 */
	st->header_len = ((tcp_hdr(skb)->doff << 2u)
			  + PTR_DIFF(tcp_hdr(skb), skb->data));
	st->full_packet_size = st->header_len + skb_shinfo(skb)->gso_size;

	if (st->protocol == htons(ETH_P_IP))
		st->ipv4_id = ntohs(ip_hdr(skb)->id);
	else
		st->ipv4_id = 0;
	st->seqnum = ntohl(tcp_hdr(skb)->seq);

	EFX_BUG_ON_PARANOID(tcp_hdr(skb)->urg);
	EFX_BUG_ON_PARANOID(tcp_hdr(skb)->syn);
	EFX_BUG_ON_PARANOID(tcp_hdr(skb)->rst);

	st->packet_space = st->full_packet_size;
	st->out_len = skb->len - st->header_len;
	st->unmap_len = 0;
	st->unmap_single = false;
}

static int tso_get_fragment(struct tso_state *st, struct efx_nic *efx,
			    skb_frag_t *frag)
{
	st->unmap_addr = pci_map_page(efx->pci_dev, frag->page,
				      frag->page_offset, frag->size,
				      PCI_DMA_TODEVICE);
	if (likely(!pci_dma_mapping_error(efx->pci_dev, st->unmap_addr))) {
		st->unmap_single = false;
		st->unmap_len = frag->size;
		st->in_len = frag->size;
		st->dma_addr = st->unmap_addr;
		return 0;
	}
	return -ENOMEM;
}

static int tso_get_head_fragment(struct tso_state *st, struct efx_nic *efx,
				 const struct sk_buff *skb)
{
	int hl = st->header_len;
	int len = skb_headlen(skb) - hl;

	st->unmap_addr = pci_map_single(efx->pci_dev, skb->data + hl,
					len, PCI_DMA_TODEVICE);
	if (likely(!pci_dma_mapping_error(efx->pci_dev, st->unmap_addr))) {
		st->unmap_single = true;
		st->unmap_len = len;
		st->in_len = len;
		st->dma_addr = st->unmap_addr;
		return 0;
	}
	return -ENOMEM;
}


/**
 * tso_fill_packet_with_fragment - form descriptors for the current fragment
 * @tx_queue: Efx TX queue
 * @skb: Socket buffer
 * @st: TSO state
 *
 * Form descriptors for the current fragment, until we reach the end
 * of fragment or end-of-packet. Return 0 on success, 1 if not enough
 * space in @tx_queue.
 */
static int tso_fill_packet_with_fragment(struct efx_tx_queue *tx_queue,
					 const struct sk_buff *skb,
					 struct tso_state *st)
{
	struct efx_tx_buffer *buffer;
	int n, end_of_packet, rc;

	if (st->in_len == 0)
		return 0;
	if (st->packet_space == 0)
		return 0;

	EFX_BUG_ON_PARANOID(st->in_len <= 0);
	EFX_BUG_ON_PARANOID(st->packet_space <= 0);

	n = min(st->in_len, st->packet_space);

	st->packet_space -= n;
	st->out_len -= n;
	st->in_len -= n;

	rc = efx_tx_queue_insert(tx_queue, st->dma_addr, n, &buffer);
	if (likely(rc == 0)) {
		if (st->out_len == 0)
			/* Transfer ownership of the skb */
			buffer->skb = skb;

		end_of_packet = st->out_len == 0 || st->packet_space == 0;
		buffer->continuation = !end_of_packet;

		if (st->in_len == 0) {
			/* Transfer ownership of the pci mapping */
			buffer->unmap_len = st->unmap_len;
			buffer->unmap_single = st->unmap_single;
			st->unmap_len = 0;
		}
	}

	st->dma_addr += n;
	return rc;
}


/**
 * tso_start_new_packet - generate a new header and prepare for the new packet
 * @tx_queue: Efx TX queue
 * @skb: Socket buffer
 * @st: TSO state
 *
 * Generate a new header and prepare for the new packet. Return 0 on
 * success, or -1 if failed to alloc header.
 */
static int tso_start_new_packet(struct efx_tx_queue *tx_queue,
				const struct sk_buff *skb,
				struct tso_state *st)
{
	struct efx_tso_header *tsoh;
	struct tcphdr *tsoh_th;
	unsigned ip_length;
	u8 *header;

	/* Allocate a DMA-mapped header buffer. */
	if (likely(TSOH_SIZE(st->header_len) <= TSOH_STD_SIZE)) {
		if (tx_queue->tso_headers_free == NULL) {
			if (efx_tsoh_block_alloc(tx_queue))
				return -1;
		}
		EFX_BUG_ON_PARANOID(!tx_queue->tso_headers_free);
		tsoh = tx_queue->tso_headers_free;
		tx_queue->tso_headers_free = tsoh->next;
		tsoh->unmap_len = 0;
	} else {
		tx_queue->tso_long_headers++;
		tsoh = efx_tsoh_heap_alloc(tx_queue, st->header_len);
		if (unlikely(!tsoh))
			return -1;
	}

	header = TSOH_BUFFER(tsoh);
	tsoh_th = (struct tcphdr *)(header + SKB_TCP_OFF(skb));

	/* Copy and update the headers. */
	memcpy(header, skb->data, st->header_len);

	tsoh_th->seq = htonl(st->seqnum);
	st->seqnum += skb_shinfo(skb)->gso_size;
	if (st->out_len > skb_shinfo(skb)->gso_size) {
		/* This packet will not finish the TSO burst. */
		ip_length = st->full_packet_size - ETH_HDR_LEN(skb);
		tsoh_th->fin = 0;
		tsoh_th->psh = 0;
	} else {
		/* This packet will be the last in the TSO burst. */
		ip_length = st->header_len - ETH_HDR_LEN(skb) + st->out_len;
		tsoh_th->fin = tcp_hdr(skb)->fin;
		tsoh_th->psh = tcp_hdr(skb)->psh;
	}

	if (st->protocol == htons(ETH_P_IP)) {
		struct iphdr *tsoh_iph =
			(struct iphdr *)(header + SKB_IPV4_OFF(skb));

		tsoh_iph->tot_len = htons(ip_length);

		/* Linux leaves suitable gaps in the IP ID space for us to fill. */
		tsoh_iph->id = htons(st->ipv4_id);
		st->ipv4_id++;
	} else {
		struct ipv6hdr *tsoh_iph =
			(struct ipv6hdr *)(header + SKB_IPV6_OFF(skb));

		tsoh_iph->payload_len = htons(ip_length - sizeof(*tsoh_iph));
	}

	st->packet_space = skb_shinfo(skb)->gso_size;
	++tx_queue->tso_packets;

	/* Form a descriptor for this header. */
	efx_tso_put_header(tx_queue, tsoh, st->header_len);

	return 0;
}


/**
 * efx_enqueue_skb_tso - segment and transmit a TSO socket buffer
 * @tx_queue: Efx TX queue
 * @skb: Socket buffer
 *
 * Context: You must hold netif_tx_lock() to call this function.
 *
 * Add socket buffer @skb to @tx_queue, doing TSO or return != 0 if
 * @skb was not enqueued. In all cases @skb is consumed. Return
 * %NETDEV_TX_OK or %NETDEV_TX_BUSY.
 */
static int efx_enqueue_skb_tso(struct efx_tx_queue *tx_queue,
			       struct sk_buff *skb)
{
	struct efx_nic *efx = tx_queue->efx;
	int frag_i, rc, rc2 = NETDEV_TX_OK;
	struct tso_state state;

	/* Find the packet protocol and sanity-check it */
	state.protocol = efx_tso_check_protocol(skb);

	EFX_BUG_ON_PARANOID(tx_queue->write_count != tx_queue->insert_count);

	tso_start(&state, skb);

	/* Assume that skb header area contains exactly the headers, and
	 * all payload is in the frag list.
	 */
	if (skb_headlen(skb) == state.header_len) {
		/* Grab the first payload fragment. */
		EFX_BUG_ON_PARANOID(skb_shinfo(skb)->nr_frags < 1);
		frag_i = 0;
		rc = tso_get_fragment(&state, efx,
				      skb_shinfo(skb)->frags + frag_i);
		if (rc)
			goto mem_err;
	} else {
		rc = tso_get_head_fragment(&state, efx, skb);
		if (rc)
			goto mem_err;
		frag_i = -1;
	}

	if (tso_start_new_packet(tx_queue, skb, &state) < 0)
		goto mem_err;

	while (1) {
		rc = tso_fill_packet_with_fragment(tx_queue, skb, &state);
		if (unlikely(rc))
			goto stop;

		/* Move onto the next fragment? */
		if (state.in_len == 0) {
			if (++frag_i >= skb_shinfo(skb)->nr_frags)
				/* End of payload reached. */
				break;
			rc = tso_get_fragment(&state, efx,
					      skb_shinfo(skb)->frags + frag_i);
			if (rc)
				goto mem_err;
		}

		/* Start at new packet? */
		if (state.packet_space == 0 &&
		    tso_start_new_packet(tx_queue, skb, &state) < 0)
			goto mem_err;
	}

	/* Pass off to hardware */
	efx_nic_push_buffers(tx_queue);

	tx_queue->tso_bursts++;
	return NETDEV_TX_OK;

 mem_err:
	netif_err(efx, tx_err, efx->net_dev,
		  "Out of memory for TSO headers, or PCI mapping error\n");
	dev_kfree_skb_any(skb);
	goto unwind;

 stop:
	rc2 = NETDEV_TX_BUSY;

	/* Stop the queue if it wasn't stopped before. */
	if (tx_queue->stopped == 1)
		efx_stop_queue(tx_queue->channel);

 unwind:
	/* Free the DMA mapping we were in the process of writing out */
	if (state.unmap_len) {
		if (state.unmap_single)
			pci_unmap_single(efx->pci_dev, state.unmap_addr,
					 state.unmap_len, PCI_DMA_TODEVICE);
		else
			pci_unmap_page(efx->pci_dev, state.unmap_addr,
				       state.unmap_len, PCI_DMA_TODEVICE);
	}

	efx_enqueue_unwind(tx_queue);
	return rc2;
}


/*
 * Free up all TSO datastructures associated with tx_queue. This
 * routine should be called only once the tx_queue is both empty and
 * will no longer be used.
 */
static void efx_fini_tso(struct efx_tx_queue *tx_queue)
{
	unsigned i;

	if (tx_queue->buffer) {
		for (i = 0; i <= tx_queue->ptr_mask; ++i)
			efx_tsoh_free(tx_queue, &tx_queue->buffer[i]);
	}

	while (tx_queue->tso_headers_free != NULL)
		efx_tsoh_block_free(tx_queue, tx_queue->tso_headers_free,
				    tx_queue->efx->pci_dev);
}