/****************************************************************************
 * Driver for Solarflare Solarstorm network controllers and boards
 * Copyright 2005-2006 Fen Systems Ltd.
 * Copyright 2005-2008 Solarflare Communications Inc.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 as published
 * by the Free Software Foundation, incorporated herein by reference.
 */

#include <linux/pci.h>
#include <linux/tcp.h>
#include <linux/ip.h>
#include <linux/in.h>
#include <linux/if_ether.h>
#include <linux/highmem.h>
#include "net_driver.h"
#include "tx.h"
#include "efx.h"
#include "falcon.h"
#include "workarounds.h"

/*
 * TX descriptor ring full threshold
 *
 * The tx_queue descriptor ring fill-level must fall below this value
 * before we restart the netif queue
 */
#define EFX_TXQ_THRESHOLD (EFX_TXQ_MASK / 2u)
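/* For example, with a 1024-entry ring (EFX_TXQ_MASK = 1023) the queue is
 * restarted once fewer than 511 descriptors are in use.
 */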

/* We want to be able to nest calls to netif_stop_queue(), since each
 * channel can have an individual stop on the queue.
 */
void efx_stop_queue(struct efx_nic *efx)
{
	spin_lock_bh(&efx->netif_stop_lock);
	EFX_TRACE(efx, "stop TX queue\n");

	atomic_inc(&efx->netif_stop_count);
	netif_stop_queue(efx->net_dev);

	spin_unlock_bh(&efx->netif_stop_lock);
}

/* Wake netif's TX queue
 * We want to be able to nest calls to netif_stop_queue(), since each
 * channel can have an individual stop on the queue.
 */
void efx_wake_queue(struct efx_nic *efx)
{
	local_bh_disable();
	if (atomic_dec_and_lock(&efx->netif_stop_count,
				&efx->netif_stop_lock)) {
		EFX_TRACE(efx, "waking TX queue\n");
		netif_wake_queue(efx->net_dev);
		spin_unlock(&efx->netif_stop_lock);
	}
	local_bh_enable();
}

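/* Unmap a TX buffer's DMA mapping (single or page, as recorded when it was
 * mapped) and free its skb, if any.
 */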
static void efx_dequeue_buffer(struct efx_tx_queue *tx_queue,
			       struct efx_tx_buffer *buffer)
{
	if (buffer->unmap_len) {
		struct pci_dev *pci_dev = tx_queue->efx->pci_dev;
		dma_addr_t unmap_addr = (buffer->dma_addr + buffer->len -
					 buffer->unmap_len);
		if (buffer->unmap_single)
			pci_unmap_single(pci_dev, unmap_addr, buffer->unmap_len,
					 PCI_DMA_TODEVICE);
		else
			pci_unmap_page(pci_dev, unmap_addr, buffer->unmap_len,
				       PCI_DMA_TODEVICE);
		buffer->unmap_len = 0;
		buffer->unmap_single = false;
	}

	if (buffer->skb) {
		dev_kfree_skb_any((struct sk_buff *) buffer->skb);
		buffer->skb = NULL;
		EFX_TRACE(tx_queue->efx, "TX queue %d transmission id %x "
			  "complete\n", tx_queue->queue, tx_queue->read_count);
	}
}

/**
 * struct efx_tso_header - a DMA mapped buffer for packet headers
 * @next: Linked list of free ones.
 *	The list is protected by the TX queue lock.
 * @unmap_len: Length to unmap for an oversize buffer, or 0.
 * @dma_addr: The DMA address of the header below.
 *
 * This controls the memory used for a TSO header.  Use TSOH_BUFFER()
 * to find the packet header data.  Use TSOH_SIZE() to calculate the
 * total size required for a given packet header length.  TSO headers
 * in the free list are exactly %TSOH_STD_SIZE bytes in size.
 */
struct efx_tso_header {
	union {
		struct efx_tso_header *next;
		size_t unmap_len;
	};
	dma_addr_t dma_addr;
};

static int efx_enqueue_skb_tso(struct efx_tx_queue *tx_queue,
			       struct sk_buff *skb);
static void efx_fini_tso(struct efx_tx_queue *tx_queue);
static void efx_tsoh_heap_free(struct efx_tx_queue *tx_queue,
			       struct efx_tso_header *tsoh);

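/* Recycle a buffer's TSO header: standard-sized headers go back on the
 * queue's free list; oversize (heap-allocated) ones are unmapped and freed.
 */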
static void efx_tsoh_free(struct efx_tx_queue *tx_queue,
			  struct efx_tx_buffer *buffer)
{
	if (buffer->tsoh) {
		if (likely(!buffer->tsoh->unmap_len)) {
			buffer->tsoh->next = tx_queue->tso_headers_free;
			tx_queue->tso_headers_free = buffer->tsoh;
		} else {
			efx_tsoh_heap_free(tx_queue, buffer->tsoh);
		}
		buffer->tsoh = NULL;
	}
}


static inline unsigned
efx_max_tx_len(struct efx_nic *efx, dma_addr_t dma_addr)
{
	/* Depending on the NIC revision, we can use descriptor
	 * lengths up to 8K or 8K-1.  However, since PCI Express
	 * devices must split read requests at 4K boundaries, there is
	 * little benefit from using descriptors that cross those
	 * boundaries and we keep things simple by not doing so.
	 */
	unsigned len = (~dma_addr & 0xfff) + 1;

	/* Work around hardware bug for unaligned buffers. */
	if (EFX_WORKAROUND_5391(efx) && (dma_addr & 0xf))
		len = min_t(unsigned, len, 512 - (dma_addr & 0xf));

	return len;
}
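/* For example, a dma_addr ending in 0xf80 gives len = 0x80 (128 bytes, up to
 * the next 4K boundary), while a 4K-aligned dma_addr gives the full 4096.
 */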

/*
 * Add a socket buffer to a TX queue
 *
 * This maps all fragments of a socket buffer for DMA and adds them to
 * the TX queue.  The queue's insert pointer will be incremented by
 * the number of fragments in the socket buffer.
 *
 * If any DMA mapping fails, any mapped fragments will be unmapped,
 * and the queue's insert pointer will be restored to its original value.
 *
 * This function is split out from efx_hard_start_xmit to allow the
 * loopback test to direct packets via specific TX queues.
 *
 * Returns NETDEV_TX_OK or NETDEV_TX_BUSY
 * You must hold netif_tx_lock() to call this function.
 */
netdev_tx_t efx_enqueue_skb(struct efx_tx_queue *tx_queue, struct sk_buff *skb)
{
	struct efx_nic *efx = tx_queue->efx;
	struct pci_dev *pci_dev = efx->pci_dev;
	struct efx_tx_buffer *buffer;
	skb_frag_t *fragment;
	struct page *page;
	int page_offset;
	unsigned int len, unmap_len = 0, fill_level, insert_ptr;
	dma_addr_t dma_addr, unmap_addr = 0;
	unsigned int dma_len;
	bool unmap_single;
	int q_space, i = 0;
	netdev_tx_t rc = NETDEV_TX_OK;

	EFX_BUG_ON_PARANOID(tx_queue->write_count != tx_queue->insert_count);

	if (skb_shinfo(skb)->gso_size)
		return efx_enqueue_skb_tso(tx_queue, skb);

	/* Get size of the initial fragment */
	len = skb_headlen(skb);

	/* Pad if necessary */
	if (EFX_WORKAROUND_15592(efx) && skb->len <= 32) {
		EFX_BUG_ON_PARANOID(skb->data_len);
		len = 32 + 1;
		if (skb_pad(skb, len - skb->len))
			return NETDEV_TX_OK;
	}

	fill_level = tx_queue->insert_count - tx_queue->old_read_count;
	q_space = EFX_TXQ_MASK - 1 - fill_level;

	/* Map for DMA.  Use pci_map_single rather than pci_map_page
	 * since this is more efficient on machines with sparse
	 * memory.
	 */
	unmap_single = true;
	dma_addr = pci_map_single(pci_dev, skb->data, len, PCI_DMA_TODEVICE);

	/* Process all fragments */
	while (1) {
		if (unlikely(pci_dma_mapping_error(pci_dev, dma_addr)))
			goto pci_err;

		/* Store fields for marking in the per-fragment final
		 * descriptor */
		unmap_len = len;
		unmap_addr = dma_addr;

		/* Add to TX queue, splitting across DMA boundaries */
		do {
			if (unlikely(q_space-- <= 0)) {
				/* It might be that completions have
				 * happened since the xmit path last
				 * checked.  Update the xmit path's
				 * copy of read_count.
				 */
				++tx_queue->stopped;
				/* This memory barrier protects the
				 * change of stopped from the access
				 * of read_count. */
				smp_mb();
				tx_queue->old_read_count =
					*(volatile unsigned *)
					&tx_queue->read_count;
				fill_level = (tx_queue->insert_count
					      - tx_queue->old_read_count);
				q_space = EFX_TXQ_MASK - 1 - fill_level;
				if (unlikely(q_space-- <= 0))
					goto stop;
				smp_mb();
				--tx_queue->stopped;
			}

			insert_ptr = tx_queue->insert_count & EFX_TXQ_MASK;
			buffer = &tx_queue->buffer[insert_ptr];
			efx_tsoh_free(tx_queue, buffer);
			EFX_BUG_ON_PARANOID(buffer->tsoh);
			EFX_BUG_ON_PARANOID(buffer->skb);
			EFX_BUG_ON_PARANOID(buffer->len);
			EFX_BUG_ON_PARANOID(!buffer->continuation);
			EFX_BUG_ON_PARANOID(buffer->unmap_len);

			dma_len = efx_max_tx_len(efx, dma_addr);
			if (likely(dma_len >= len))
				dma_len = len;

			/* Fill out per descriptor fields */
			buffer->len = dma_len;
			buffer->dma_addr = dma_addr;
			len -= dma_len;
			dma_addr += dma_len;
			++tx_queue->insert_count;
		} while (len);

		/* Transfer ownership of the unmapping to the final buffer */
		buffer->unmap_single = unmap_single;
		buffer->unmap_len = unmap_len;
		unmap_len = 0;

		/* Get address and size of next fragment */
		if (i >= skb_shinfo(skb)->nr_frags)
			break;
		fragment = &skb_shinfo(skb)->frags[i];
		len = fragment->size;
		page = fragment->page;
		page_offset = fragment->page_offset;
		i++;
		/* Map for DMA */
		unmap_single = false;
		dma_addr = pci_map_page(pci_dev, page, page_offset, len,
					PCI_DMA_TODEVICE);
	}

	/* Transfer ownership of the skb to the final buffer */
	buffer->skb = skb;
	buffer->continuation = false;

	/* Pass off to hardware */
	falcon_push_buffers(tx_queue);

	return NETDEV_TX_OK;

 pci_err:
	EFX_ERR_RL(efx, "TX queue %d could not map skb with %d bytes %d "
		   "fragments for DMA\n", tx_queue->queue, skb->len,
		   skb_shinfo(skb)->nr_frags + 1);

	/* Mark the packet as transmitted, and free the SKB ourselves */
	dev_kfree_skb_any(skb);
	goto unwind;

 stop:
	rc = NETDEV_TX_BUSY;

	if (tx_queue->stopped == 1)
		efx_stop_queue(efx);

 unwind:
	/* Work backwards until we hit the original insert pointer value */
	while (tx_queue->insert_count != tx_queue->write_count) {
		--tx_queue->insert_count;
		insert_ptr = tx_queue->insert_count & EFX_TXQ_MASK;
		buffer = &tx_queue->buffer[insert_ptr];
		efx_dequeue_buffer(tx_queue, buffer);
		buffer->len = 0;
	}

	/* Free the fragment we were mid-way through pushing */
	if (unmap_len) {
		if (unmap_single)
			pci_unmap_single(pci_dev, unmap_addr, unmap_len,
					 PCI_DMA_TODEVICE);
		else
			pci_unmap_page(pci_dev, unmap_addr, unmap_len,
				       PCI_DMA_TODEVICE);
	}

	return rc;
}

/* Remove packets from the TX queue
 *
 * This removes packets from the TX queue, up to and including the
 * specified index.
 */
static void efx_dequeue_buffers(struct efx_tx_queue *tx_queue,
				unsigned int index)
{
	struct efx_nic *efx = tx_queue->efx;
	unsigned int stop_index, read_ptr;

	stop_index = (index + 1) & EFX_TXQ_MASK;
	read_ptr = tx_queue->read_count & EFX_TXQ_MASK;

	while (read_ptr != stop_index) {
		struct efx_tx_buffer *buffer = &tx_queue->buffer[read_ptr];
		if (unlikely(buffer->len == 0)) {
			EFX_ERR(tx_queue->efx, "TX queue %d spurious TX "
				"completion id %x\n", tx_queue->queue,
				read_ptr);
			efx_schedule_reset(efx, RESET_TYPE_TX_SKIP);
			return;
		}

		efx_dequeue_buffer(tx_queue, buffer);
		buffer->continuation = true;
		buffer->len = 0;

		++tx_queue->read_count;
		read_ptr = tx_queue->read_count & EFX_TXQ_MASK;
	}
}

/* Initiate a packet transmission.  We use one channel per CPU
 * (sharing when we have more CPUs than channels).  On Falcon, the TX
 * completion events will be directed back to the CPU that transmitted
 * the packet, which should be cache-efficient.
 *
 * Context: non-blocking.
 * Note that returning anything other than NETDEV_TX_OK will cause the
 * OS to free the skb.
 */
netdev_tx_t efx_hard_start_xmit(struct sk_buff *skb,
				struct net_device *net_dev)
{
	struct efx_nic *efx = netdev_priv(net_dev);
	struct efx_tx_queue *tx_queue;

	if (unlikely(efx->port_inhibited))
		return NETDEV_TX_BUSY;

	if (likely(skb->ip_summed == CHECKSUM_PARTIAL))
		tx_queue = &efx->tx_queue[EFX_TX_QUEUE_OFFLOAD_CSUM];
	else
		tx_queue = &efx->tx_queue[EFX_TX_QUEUE_NO_CSUM];

	return efx_enqueue_skb(tx_queue, skb);
}

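/* Handle a TX completion: release all buffers up to and including @index,
 * then restart the netif queue if it was stopped and the ring has drained
 * below EFX_TXQ_THRESHOLD.
 */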
void efx_xmit_done(struct efx_tx_queue *tx_queue, unsigned int index)
{
	unsigned fill_level;
	struct efx_nic *efx = tx_queue->efx;

	EFX_BUG_ON_PARANOID(index > EFX_TXQ_MASK);

	efx_dequeue_buffers(tx_queue, index);

	/* See if we need to restart the netif queue.  This barrier
	 * separates the update of read_count from the test of
	 * stopped. */
	smp_mb();
	if (unlikely(tx_queue->stopped) && likely(efx->port_enabled)) {
		fill_level = tx_queue->insert_count - tx_queue->read_count;
		if (fill_level < EFX_TXQ_THRESHOLD) {
			EFX_BUG_ON_PARANOID(!efx_dev_registered(efx));

			/* Do this under netif_tx_lock(), to avoid racing
			 * with efx_xmit(). */
			netif_tx_lock(efx->net_dev);
			if (tx_queue->stopped) {
				tx_queue->stopped = 0;
				efx_wake_queue(efx);
			}
			netif_tx_unlock(efx->net_dev);
		}
	}
}

int efx_probe_tx_queue(struct efx_tx_queue *tx_queue)
{
	struct efx_nic *efx = tx_queue->efx;
	unsigned int txq_size;
	int i, rc;

	EFX_LOG(efx, "creating TX queue %d\n", tx_queue->queue);

	/* Allocate software ring */
	txq_size = EFX_TXQ_SIZE * sizeof(*tx_queue->buffer);
	tx_queue->buffer = kzalloc(txq_size, GFP_KERNEL);
	if (!tx_queue->buffer)
		return -ENOMEM;
	for (i = 0; i <= EFX_TXQ_MASK; ++i)
		tx_queue->buffer[i].continuation = true;

	/* Allocate hardware ring */
	rc = falcon_probe_tx(tx_queue);
	if (rc)
		goto fail;

	return 0;

 fail:
	kfree(tx_queue->buffer);
	tx_queue->buffer = NULL;
	return rc;
}

void efx_init_tx_queue(struct efx_tx_queue *tx_queue)
{
	EFX_LOG(tx_queue->efx, "initialising TX queue %d\n", tx_queue->queue);

	tx_queue->insert_count = 0;
	tx_queue->write_count = 0;
	tx_queue->read_count = 0;
	tx_queue->old_read_count = 0;
	BUG_ON(tx_queue->stopped);

	/* Set up TX descriptor ring */
	falcon_init_tx(tx_queue);
}

void efx_release_tx_buffers(struct efx_tx_queue *tx_queue)
{
	struct efx_tx_buffer *buffer;

	if (!tx_queue->buffer)
		return;

	/* Free any buffers left in the ring */
	while (tx_queue->read_count != tx_queue->write_count) {
		buffer = &tx_queue->buffer[tx_queue->read_count & EFX_TXQ_MASK];
		efx_dequeue_buffer(tx_queue, buffer);
		buffer->continuation = true;
		buffer->len = 0;

		++tx_queue->read_count;
	}
}

void efx_fini_tx_queue(struct efx_tx_queue *tx_queue)
{
	EFX_LOG(tx_queue->efx, "shutting down TX queue %d\n", tx_queue->queue);

	/* Flush TX queue, remove descriptor ring */
	falcon_fini_tx(tx_queue);

	efx_release_tx_buffers(tx_queue);

	/* Free up TSO header cache */
	efx_fini_tso(tx_queue);

	/* Release queue's stop on port, if any */
	if (tx_queue->stopped) {
		tx_queue->stopped = 0;
		efx_wake_queue(tx_queue->efx);
	}
}

void efx_remove_tx_queue(struct efx_tx_queue *tx_queue)
{
	EFX_LOG(tx_queue->efx, "destroying TX queue %d\n", tx_queue->queue);
	falcon_remove_tx(tx_queue);

	kfree(tx_queue->buffer);
	tx_queue->buffer = NULL;
}


/* Efx TCP segmentation acceleration.
 *
 * Why?  Because by doing it here in the driver we can go significantly
 * faster than GSO.
 *
 * Requires TX checksum offload support.
 */

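/* Overview of the TSO path below: efx_enqueue_skb_tso() calls tso_start() to
 * parse the headers, then repeatedly pairs tso_start_new_packet() (which
 * builds a fresh DMA-mapped header for each output segment) with
 * tso_fill_packet_with_fragment() (which maps payload into descriptors),
 * pulling payload via tso_get_head_fragment() (linear area) and
 * tso_get_fragment() (paged fragments).
 *
 * For example, an skb with 54 bytes of headers, 4000 bytes of payload and
 * gso_size = 1460 is emitted as three segments carrying 1460, 1460 and 1080
 * payload bytes, each preceded by its own copied-and-updated header.
 */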
/* Number of bytes inserted at the start of a TSO header buffer,
 * similar to NET_IP_ALIGN.
 */
#ifdef CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS
#define TSOH_OFFSET 0
#else
#define TSOH_OFFSET NET_IP_ALIGN
#endif

#define TSOH_BUFFER(tsoh) ((u8 *)(tsoh + 1) + TSOH_OFFSET)

/* Total size of struct efx_tso_header, buffer and padding */
#define TSOH_SIZE(hdr_len) \
	(sizeof(struct efx_tso_header) + TSOH_OFFSET + hdr_len)

/* Size of blocks on free list.  Larger blocks must be allocated from
 * the heap.
 */
#define TSOH_STD_SIZE 128

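/* As a rough guide: on a 64-bit build with 4K pages, TSOH_SIZE(54) is about
 * 70 bytes, so typical headers fit a TSOH_STD_SIZE block, and each page
 * allocated by efx_tsoh_block_alloc() yields PAGE_SIZE / TSOH_STD_SIZE = 32
 * headers for the free list.
 */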
#define PTR_DIFF(p1, p2)  ((u8 *)(p1) - (u8 *)(p2))
#define ETH_HDR_LEN(skb)  (skb_network_header(skb) - (skb)->data)
#define SKB_TCP_OFF(skb)  PTR_DIFF(tcp_hdr(skb), (skb)->data)
#define SKB_IPV4_OFF(skb) PTR_DIFF(ip_hdr(skb), (skb)->data)

/**
 * struct tso_state - TSO state for an SKB
 * @out_len: Remaining length in current segment
 * @seqnum: Current sequence number
 * @ipv4_id: Current IPv4 ID, host endian
 * @packet_space: Remaining space in current packet
 * @dma_addr: DMA address of current position
 * @in_len: Remaining length in current SKB fragment
 * @unmap_len: Length of SKB fragment
 * @unmap_addr: DMA address of SKB fragment
 * @unmap_single: DMA single vs page mapping flag
 * @header_len: Number of bytes of header
 * @full_packet_size: Number of bytes to put in each outgoing segment
 *
 * The state used during segmentation.  It is put into this data structure
 * just to make it easy to pass into inline functions.
 */
struct tso_state {
	/* Output position */
	unsigned out_len;
	unsigned seqnum;
	unsigned ipv4_id;
	unsigned packet_space;

	/* Input position */
	dma_addr_t dma_addr;
	unsigned in_len;
	unsigned unmap_len;
	dma_addr_t unmap_addr;
	bool unmap_single;

	unsigned header_len;
	int full_packet_size;
};


/*
 * Verify that our various assumptions about sk_buffs and the conditions
 * under which TSO will be attempted hold true.
 */
static void efx_tso_check_safe(struct sk_buff *skb)
{
	__be16 protocol = skb->protocol;

	EFX_BUG_ON_PARANOID(((struct ethhdr *)skb->data)->h_proto !=
			    protocol);
	if (protocol == htons(ETH_P_8021Q)) {
		/* Find the encapsulated protocol; reset network header
		 * and transport header based on that. */
		struct vlan_ethhdr *veh = (struct vlan_ethhdr *)skb->data;
		protocol = veh->h_vlan_encapsulated_proto;
		skb_set_network_header(skb, sizeof(*veh));
		if (protocol == htons(ETH_P_IP))
			skb_set_transport_header(skb, sizeof(*veh) +
						 4 * ip_hdr(skb)->ihl);
	}

	EFX_BUG_ON_PARANOID(protocol != htons(ETH_P_IP));
	EFX_BUG_ON_PARANOID(ip_hdr(skb)->protocol != IPPROTO_TCP);
	EFX_BUG_ON_PARANOID((PTR_DIFF(tcp_hdr(skb), skb->data)
			     + (tcp_hdr(skb)->doff << 2u)) >
			    skb_headlen(skb));
}


/*
 * Allocate a page worth of efx_tso_header structures, and string them
 * into the tx_queue->tso_headers_free linked list.  Return 0 or -ENOMEM.
 */
static int efx_tsoh_block_alloc(struct efx_tx_queue *tx_queue)
{
	struct pci_dev *pci_dev = tx_queue->efx->pci_dev;
	struct efx_tso_header *tsoh;
	dma_addr_t dma_addr;
	u8 *base_kva, *kva;

	base_kva = pci_alloc_consistent(pci_dev, PAGE_SIZE, &dma_addr);
	if (base_kva == NULL) {
		EFX_ERR(tx_queue->efx, "Unable to allocate page for TSO"
			" headers\n");
		return -ENOMEM;
	}

	/* pci_alloc_consistent() allocates pages. */
	EFX_BUG_ON_PARANOID(dma_addr & (PAGE_SIZE - 1u));

	for (kva = base_kva; kva < base_kva + PAGE_SIZE; kva += TSOH_STD_SIZE) {
		tsoh = (struct efx_tso_header *)kva;
		tsoh->dma_addr = dma_addr + (TSOH_BUFFER(tsoh) - base_kva);
		tsoh->next = tx_queue->tso_headers_free;
		tx_queue->tso_headers_free = tsoh;
	}

	return 0;
}


/* Free up a TSO header, and all others in the same page. */
static void efx_tsoh_block_free(struct efx_tx_queue *tx_queue,
				struct efx_tso_header *tsoh,
				struct pci_dev *pci_dev)
{
	struct efx_tso_header **p;
	unsigned long base_kva;
	dma_addr_t base_dma;

	base_kva = (unsigned long)tsoh & PAGE_MASK;
	base_dma = tsoh->dma_addr & PAGE_MASK;

	p = &tx_queue->tso_headers_free;
	while (*p != NULL) {
		if (((unsigned long)*p & PAGE_MASK) == base_kva)
			*p = (*p)->next;
		else
			p = &(*p)->next;
	}

	pci_free_consistent(pci_dev, PAGE_SIZE, (void *)base_kva, base_dma);
}

static struct efx_tso_header *
efx_tsoh_heap_alloc(struct efx_tx_queue *tx_queue, size_t header_len)
{
	struct efx_tso_header *tsoh;

	tsoh = kmalloc(TSOH_SIZE(header_len), GFP_ATOMIC | GFP_DMA);
	if (unlikely(!tsoh))
		return NULL;

	tsoh->dma_addr = pci_map_single(tx_queue->efx->pci_dev,
					TSOH_BUFFER(tsoh), header_len,
					PCI_DMA_TODEVICE);
	if (unlikely(pci_dma_mapping_error(tx_queue->efx->pci_dev,
					   tsoh->dma_addr))) {
		kfree(tsoh);
		return NULL;
	}

	tsoh->unmap_len = header_len;
	return tsoh;
}

static void
efx_tsoh_heap_free(struct efx_tx_queue *tx_queue, struct efx_tso_header *tsoh)
{
	pci_unmap_single(tx_queue->efx->pci_dev,
			 tsoh->dma_addr, tsoh->unmap_len,
			 PCI_DMA_TODEVICE);
	kfree(tsoh);
}

688/**
689 * efx_tx_queue_insert - push descriptors onto the TX queue
690 * @tx_queue: Efx TX queue
691 * @dma_addr: DMA address of fragment
692 * @len: Length of fragment
Ben Hutchingsecbd95c2008-09-01 12:46:40 +0100693 * @final_buffer: The final buffer inserted into the queue
Ben Hutchingsb9b39b62008-05-07 12:51:12 +0100694 *
695 * Push descriptors onto the TX queue. Return 0 on success or 1 if
696 * @tx_queue full.
697 */
static int efx_tx_queue_insert(struct efx_tx_queue *tx_queue,
			       dma_addr_t dma_addr, unsigned len,
			       struct efx_tx_buffer **final_buffer)
{
	struct efx_tx_buffer *buffer;
	struct efx_nic *efx = tx_queue->efx;
	unsigned dma_len, fill_level, insert_ptr;
	int q_space;

	EFX_BUG_ON_PARANOID(len <= 0);

	fill_level = tx_queue->insert_count - tx_queue->old_read_count;
	/* -1 as there is no way to represent all descriptors used */
	q_space = EFX_TXQ_MASK - 1 - fill_level;

	while (1) {
		if (unlikely(q_space-- <= 0)) {
			/* It might be that completions have happened
			 * since the xmit path last checked.  Update
			 * the xmit path's copy of read_count.
			 */
			++tx_queue->stopped;
			/* This memory barrier protects the change of
			 * stopped from the access of read_count. */
			smp_mb();
			tx_queue->old_read_count =
				*(volatile unsigned *)&tx_queue->read_count;
			fill_level = (tx_queue->insert_count
				      - tx_queue->old_read_count);
			q_space = EFX_TXQ_MASK - 1 - fill_level;
			if (unlikely(q_space-- <= 0)) {
				*final_buffer = NULL;
				return 1;
			}
			smp_mb();
			--tx_queue->stopped;
		}

		insert_ptr = tx_queue->insert_count & EFX_TXQ_MASK;
		buffer = &tx_queue->buffer[insert_ptr];
		++tx_queue->insert_count;

		EFX_BUG_ON_PARANOID(tx_queue->insert_count -
				    tx_queue->read_count >
				    EFX_TXQ_MASK);

		efx_tsoh_free(tx_queue, buffer);
		EFX_BUG_ON_PARANOID(buffer->len);
		EFX_BUG_ON_PARANOID(buffer->unmap_len);
		EFX_BUG_ON_PARANOID(buffer->skb);
		EFX_BUG_ON_PARANOID(!buffer->continuation);
		EFX_BUG_ON_PARANOID(buffer->tsoh);

		buffer->dma_addr = dma_addr;

		dma_len = efx_max_tx_len(efx, dma_addr);

		/* If there is enough space to send then do so */
		if (dma_len >= len)
			break;

		buffer->len = dma_len; /* Don't set the other members */
		dma_addr += dma_len;
		len -= dma_len;
	}

	EFX_BUG_ON_PARANOID(!len);
	buffer->len = len;
	*final_buffer = buffer;
	return 0;
}


/*
 * Put a TSO header into the TX queue.
 *
 * This is special-cased because we know that it is small enough to fit in
 * a single fragment, and we know it doesn't cross a page boundary.  It
 * also allows us to not worry about end-of-packet etc.
 */
static void efx_tso_put_header(struct efx_tx_queue *tx_queue,
			       struct efx_tso_header *tsoh, unsigned len)
{
	struct efx_tx_buffer *buffer;

	buffer = &tx_queue->buffer[tx_queue->insert_count & EFX_TXQ_MASK];
	efx_tsoh_free(tx_queue, buffer);
	EFX_BUG_ON_PARANOID(buffer->len);
	EFX_BUG_ON_PARANOID(buffer->unmap_len);
	EFX_BUG_ON_PARANOID(buffer->skb);
	EFX_BUG_ON_PARANOID(!buffer->continuation);
	EFX_BUG_ON_PARANOID(buffer->tsoh);
	buffer->len = len;
	buffer->dma_addr = tsoh->dma_addr;
	buffer->tsoh = tsoh;

	++tx_queue->insert_count;
}


/* Remove descriptors put into a tx_queue. */
static void efx_enqueue_unwind(struct efx_tx_queue *tx_queue)
{
	struct efx_tx_buffer *buffer;
	dma_addr_t unmap_addr;

	/* Work backwards until we hit the original insert pointer value */
	while (tx_queue->insert_count != tx_queue->write_count) {
		--tx_queue->insert_count;
		buffer = &tx_queue->buffer[tx_queue->insert_count &
					   EFX_TXQ_MASK];
		efx_tsoh_free(tx_queue, buffer);
		EFX_BUG_ON_PARANOID(buffer->skb);
		buffer->len = 0;
		buffer->continuation = true;
		if (buffer->unmap_len) {
			unmap_addr = (buffer->dma_addr + buffer->len -
				      buffer->unmap_len);
			if (buffer->unmap_single)
				pci_unmap_single(tx_queue->efx->pci_dev,
						 unmap_addr, buffer->unmap_len,
						 PCI_DMA_TODEVICE);
			else
				pci_unmap_page(tx_queue->efx->pci_dev,
					       unmap_addr, buffer->unmap_len,
					       PCI_DMA_TODEVICE);
			buffer->unmap_len = 0;
		}
	}
}


/* Parse the SKB header and initialise state. */
static void tso_start(struct tso_state *st, const struct sk_buff *skb)
{
	/* The combined size of all Ethernet/IP/TCP headers is the TCP
	 * header size plus the offset of the TCP header relative to the
	 * start of the packet.
	 */
	st->header_len = ((tcp_hdr(skb)->doff << 2u)
			  + PTR_DIFF(tcp_hdr(skb), skb->data));
	st->full_packet_size = st->header_len + skb_shinfo(skb)->gso_size;

	st->ipv4_id = ntohs(ip_hdr(skb)->id);
	st->seqnum = ntohl(tcp_hdr(skb)->seq);

	EFX_BUG_ON_PARANOID(tcp_hdr(skb)->urg);
	EFX_BUG_ON_PARANOID(tcp_hdr(skb)->syn);
	EFX_BUG_ON_PARANOID(tcp_hdr(skb)->rst);

	st->packet_space = st->full_packet_size;
	st->out_len = skb->len - st->header_len;
	st->unmap_len = 0;
	st->unmap_single = false;
}

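/* Map the current paged payload fragment for DMA and record it as the TSO
 * input position.
 */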
static int tso_get_fragment(struct tso_state *st, struct efx_nic *efx,
			    skb_frag_t *frag)
{
	st->unmap_addr = pci_map_page(efx->pci_dev, frag->page,
				      frag->page_offset, frag->size,
				      PCI_DMA_TODEVICE);
	if (likely(!pci_dma_mapping_error(efx->pci_dev, st->unmap_addr))) {
		st->unmap_single = false;
		st->unmap_len = frag->size;
		st->in_len = frag->size;
		st->dma_addr = st->unmap_addr;
		return 0;
	}
	return -ENOMEM;
}

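/* Map the part of the linear skb area that follows the headers for DMA,
 * as the first chunk of TSO payload.
 */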
static int tso_get_head_fragment(struct tso_state *st, struct efx_nic *efx,
				 const struct sk_buff *skb)
{
	int hl = st->header_len;
	int len = skb_headlen(skb) - hl;

	st->unmap_addr = pci_map_single(efx->pci_dev, skb->data + hl,
					len, PCI_DMA_TODEVICE);
	if (likely(!pci_dma_mapping_error(efx->pci_dev, st->unmap_addr))) {
		st->unmap_single = true;
		st->unmap_len = len;
		st->in_len = len;
		st->dma_addr = st->unmap_addr;
		return 0;
	}
	return -ENOMEM;
}


/**
 * tso_fill_packet_with_fragment - form descriptors for the current fragment
 * @tx_queue: Efx TX queue
 * @skb: Socket buffer
 * @st: TSO state
 *
 * Form descriptors for the current fragment, until we reach the end of
 * the fragment or end-of-packet.  Return 0 on success, 1 if not enough
 * space in @tx_queue.
 */
static int tso_fill_packet_with_fragment(struct efx_tx_queue *tx_queue,
					 const struct sk_buff *skb,
					 struct tso_state *st)
{
	struct efx_tx_buffer *buffer;
	int n, end_of_packet, rc;

	if (st->in_len == 0)
		return 0;
	if (st->packet_space == 0)
		return 0;

	EFX_BUG_ON_PARANOID(st->in_len <= 0);
	EFX_BUG_ON_PARANOID(st->packet_space <= 0);

	n = min(st->in_len, st->packet_space);

	st->packet_space -= n;
	st->out_len -= n;
	st->in_len -= n;

	rc = efx_tx_queue_insert(tx_queue, st->dma_addr, n, &buffer);
	if (likely(rc == 0)) {
		if (st->out_len == 0)
			/* Transfer ownership of the skb */
			buffer->skb = skb;

		end_of_packet = st->out_len == 0 || st->packet_space == 0;
		buffer->continuation = !end_of_packet;

		if (st->in_len == 0) {
			/* Transfer ownership of the pci mapping */
			buffer->unmap_len = st->unmap_len;
			buffer->unmap_single = st->unmap_single;
			st->unmap_len = 0;
		}
	}

	st->dma_addr += n;
	return rc;
}


/**
 * tso_start_new_packet - generate a new header and prepare for the new packet
 * @tx_queue: Efx TX queue
 * @skb: Socket buffer
 * @st: TSO state
 *
 * Generate a new header and prepare for the new packet.  Return 0 on
 * success, or -1 if we failed to allocate a header.
 */
static int tso_start_new_packet(struct efx_tx_queue *tx_queue,
				const struct sk_buff *skb,
				struct tso_state *st)
{
	struct efx_tso_header *tsoh;
	struct iphdr *tsoh_iph;
	struct tcphdr *tsoh_th;
	unsigned ip_length;
	u8 *header;

	/* Allocate a DMA-mapped header buffer. */
	if (likely(TSOH_SIZE(st->header_len) <= TSOH_STD_SIZE)) {
		if (tx_queue->tso_headers_free == NULL) {
			if (efx_tsoh_block_alloc(tx_queue))
				return -1;
		}
		EFX_BUG_ON_PARANOID(!tx_queue->tso_headers_free);
		tsoh = tx_queue->tso_headers_free;
		tx_queue->tso_headers_free = tsoh->next;
		tsoh->unmap_len = 0;
	} else {
		tx_queue->tso_long_headers++;
		tsoh = efx_tsoh_heap_alloc(tx_queue, st->header_len);
		if (unlikely(!tsoh))
			return -1;
	}

	header = TSOH_BUFFER(tsoh);
	tsoh_th = (struct tcphdr *)(header + SKB_TCP_OFF(skb));
	tsoh_iph = (struct iphdr *)(header + SKB_IPV4_OFF(skb));

	/* Copy and update the headers. */
	memcpy(header, skb->data, st->header_len);

	tsoh_th->seq = htonl(st->seqnum);
	st->seqnum += skb_shinfo(skb)->gso_size;
	if (st->out_len > skb_shinfo(skb)->gso_size) {
		/* This packet will not finish the TSO burst. */
		ip_length = st->full_packet_size - ETH_HDR_LEN(skb);
		tsoh_th->fin = 0;
		tsoh_th->psh = 0;
	} else {
		/* This packet will be the last in the TSO burst. */
		ip_length = st->header_len - ETH_HDR_LEN(skb) + st->out_len;
		tsoh_th->fin = tcp_hdr(skb)->fin;
		tsoh_th->psh = tcp_hdr(skb)->psh;
	}
	tsoh_iph->tot_len = htons(ip_length);

	/* Linux leaves suitable gaps in the IP ID space for us to fill. */
	tsoh_iph->id = htons(st->ipv4_id);
	st->ipv4_id++;

	st->packet_space = skb_shinfo(skb)->gso_size;
	++tx_queue->tso_packets;

	/* Form a descriptor for this header. */
	efx_tso_put_header(tx_queue, tsoh, st->header_len);

	return 0;
}


/**
 * efx_enqueue_skb_tso - segment and transmit a TSO socket buffer
 * @tx_queue: Efx TX queue
 * @skb: Socket buffer
 *
 * Context: You must hold netif_tx_lock() to call this function.
 *
 * Add socket buffer @skb to @tx_queue, doing TSO.  @skb is consumed in
 * all cases.  Return %NETDEV_TX_OK on success or %NETDEV_TX_BUSY if
 * @skb could not be enqueued.
 */
static int efx_enqueue_skb_tso(struct efx_tx_queue *tx_queue,
			       struct sk_buff *skb)
{
	struct efx_nic *efx = tx_queue->efx;
	int frag_i, rc, rc2 = NETDEV_TX_OK;
	struct tso_state state;

	/* Verify TSO is safe - these checks should never fail. */
	efx_tso_check_safe(skb);

	EFX_BUG_ON_PARANOID(tx_queue->write_count != tx_queue->insert_count);

	tso_start(&state, skb);

	/* Assume that skb header area contains exactly the headers, and
	 * all payload is in the frag list.
	 */
	if (skb_headlen(skb) == state.header_len) {
		/* Grab the first payload fragment. */
		EFX_BUG_ON_PARANOID(skb_shinfo(skb)->nr_frags < 1);
		frag_i = 0;
		rc = tso_get_fragment(&state, efx,
				      skb_shinfo(skb)->frags + frag_i);
		if (rc)
			goto mem_err;
	} else {
		rc = tso_get_head_fragment(&state, efx, skb);
		if (rc)
			goto mem_err;
		frag_i = -1;
	}

	if (tso_start_new_packet(tx_queue, skb, &state) < 0)
		goto mem_err;

	while (1) {
		rc = tso_fill_packet_with_fragment(tx_queue, skb, &state);
		if (unlikely(rc))
			goto stop;

		/* Move onto the next fragment? */
		if (state.in_len == 0) {
			if (++frag_i >= skb_shinfo(skb)->nr_frags)
				/* End of payload reached. */
				break;
			rc = tso_get_fragment(&state, efx,
					      skb_shinfo(skb)->frags + frag_i);
			if (rc)
				goto mem_err;
		}

		/* Start at new packet? */
		if (state.packet_space == 0 &&
		    tso_start_new_packet(tx_queue, skb, &state) < 0)
			goto mem_err;
	}

	/* Pass off to hardware */
	falcon_push_buffers(tx_queue);

	tx_queue->tso_bursts++;
	return NETDEV_TX_OK;

 mem_err:
	EFX_ERR(efx, "Out of memory for TSO headers, or PCI mapping error\n");
	dev_kfree_skb_any(skb);
	goto unwind;

 stop:
	rc2 = NETDEV_TX_BUSY;

	/* Stop the queue if it wasn't stopped before. */
	if (tx_queue->stopped == 1)
		efx_stop_queue(efx);

 unwind:
	/* Free the DMA mapping we were in the process of writing out */
	if (state.unmap_len) {
		if (state.unmap_single)
			pci_unmap_single(efx->pci_dev, state.unmap_addr,
					 state.unmap_len, PCI_DMA_TODEVICE);
		else
			pci_unmap_page(efx->pci_dev, state.unmap_addr,
				       state.unmap_len, PCI_DMA_TODEVICE);
	}

	efx_enqueue_unwind(tx_queue);
	return rc2;
}


/*
 * Free up all TSO data structures associated with tx_queue.  This
 * routine should be called only when the tx_queue is empty and will
 * no longer be used.
 */
static void efx_fini_tso(struct efx_tx_queue *tx_queue)
{
	unsigned i;

	if (tx_queue->buffer) {
		for (i = 0; i <= EFX_TXQ_MASK; ++i)
			efx_tsoh_free(tx_queue, &tx_queue->buffer[i]);
	}

	while (tx_queue->tso_headers_free != NULL)
		efx_tsoh_block_free(tx_queue, tx_queue->tso_headers_free,
				    tx_queue->efx->pci_dev);
}