/****************************************************************************
 * Driver for Solarflare Solarstorm network controllers and boards
 * Copyright 2005-2006 Fen Systems Ltd.
 * Copyright 2005-2008 Solarflare Communications Inc.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 as published
 * by the Free Software Foundation, incorporated herein by reference.
 */

#include <linux/pci.h>
#include <linux/tcp.h>
#include <linux/ip.h>
#include <linux/in.h>
#include <linux/if_ether.h>
#include <linux/highmem.h>
#include "net_driver.h"
#include "tx.h"
#include "efx.h"
#include "falcon.h"
#include "workarounds.h"

/*
 * TX descriptor ring full threshold
 *
 * The tx_queue descriptor ring fill-level must fall below this value
 * before we restart the netif queue
 */
#define EFX_NETDEV_TX_THRESHOLD(_tx_queue)	\
	(_tx_queue->efx->type->txd_ring_mask / 2u)
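
/* Worked example (illustrative numbers, not a hardware constraint):
 * with a TX ring of 4096 descriptors, txd_ring_mask is 4095 and the
 * threshold is 2047.  A stopped queue is therefore only restarted
 * once completions have drained the ring to less than half full,
 * giving some hysteresis against rapid stop/wake cycling.
 */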

/* We want to be able to nest calls to netif_stop_queue(), since each
 * channel can have an individual stop on the queue.
 */
void efx_stop_queue(struct efx_nic *efx)
{
	spin_lock_bh(&efx->netif_stop_lock);
	EFX_TRACE(efx, "stop TX queue\n");

	atomic_inc(&efx->netif_stop_count);
	netif_stop_queue(efx->net_dev);

	spin_unlock_bh(&efx->netif_stop_lock);
}

/* Wake netif's TX queue
 * We want to be able to nest calls to netif_stop_queue(), since each
 * channel can have an individual stop on the queue.
 */
void efx_wake_queue(struct efx_nic *efx)
{
	local_bh_disable();
	if (atomic_dec_and_lock(&efx->netif_stop_count,
				&efx->netif_stop_lock)) {
		EFX_TRACE(efx, "waking TX queue\n");
		netif_wake_queue(efx->net_dev);
		spin_unlock(&efx->netif_stop_lock);
	}
	local_bh_enable();
}
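
/* Illustration of the nesting behaviour above: if two channels each
 * place a stop on the queue, netif_stop_count rises to 2.
 * atomic_dec_and_lock() takes netif_stop_lock and wakes the netif
 * queue only on the 1 -> 0 transition, so the queue stays stopped
 * until every outstanding stop has been released.
 */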

static void efx_dequeue_buffer(struct efx_tx_queue *tx_queue,
			       struct efx_tx_buffer *buffer)
{
	if (buffer->unmap_len) {
		struct pci_dev *pci_dev = tx_queue->efx->pci_dev;
		dma_addr_t unmap_addr = (buffer->dma_addr + buffer->len -
					 buffer->unmap_len);
		if (buffer->unmap_single)
			pci_unmap_single(pci_dev, unmap_addr, buffer->unmap_len,
					 PCI_DMA_TODEVICE);
		else
			pci_unmap_page(pci_dev, unmap_addr, buffer->unmap_len,
				       PCI_DMA_TODEVICE);
		buffer->unmap_len = 0;
		buffer->unmap_single = false;
	}

	if (buffer->skb) {
		dev_kfree_skb_any((struct sk_buff *) buffer->skb);
		buffer->skb = NULL;
		EFX_TRACE(tx_queue->efx, "TX queue %d transmission id %x "
			  "complete\n", tx_queue->queue, tx_queue->read_count);
	}
}

/**
 * struct efx_tso_header - a DMA mapped buffer for packet headers
 * @next: Linked list of free ones.
 *	The list is protected by the TX queue lock.
 * @unmap_len: Length to unmap for an oversize buffer, or 0.
 * @dma_addr: The DMA address of the header below.
 *
 * This controls the memory used for a TSO header.  Use TSOH_BUFFER()
 * to find the packet header data.  Use TSOH_SIZE() to calculate the
 * total size required for a given packet header length.  TSO headers
 * in the free list are exactly %TSOH_STD_SIZE bytes in size.
 */
struct efx_tso_header {
	union {
		struct efx_tso_header *next;
		size_t unmap_len;
	};
	dma_addr_t dma_addr;
};

static int efx_enqueue_skb_tso(struct efx_tx_queue *tx_queue,
			       struct sk_buff *skb);
static void efx_fini_tso(struct efx_tx_queue *tx_queue);
static void efx_tsoh_heap_free(struct efx_tx_queue *tx_queue,
			       struct efx_tso_header *tsoh);

static void efx_tsoh_free(struct efx_tx_queue *tx_queue,
			  struct efx_tx_buffer *buffer)
{
	if (buffer->tsoh) {
		if (likely(!buffer->tsoh->unmap_len)) {
			buffer->tsoh->next = tx_queue->tso_headers_free;
			tx_queue->tso_headers_free = buffer->tsoh;
		} else {
			efx_tsoh_heap_free(tx_queue, buffer->tsoh);
		}
		buffer->tsoh = NULL;
	}
}


/*
 * Add a socket buffer to a TX queue
 *
 * This maps all fragments of a socket buffer for DMA and adds them to
 * the TX queue.  The queue's insert pointer will be incremented by
 * the number of fragments in the socket buffer.
 *
 * If any DMA mapping fails, any mapped fragments will be unmapped and
 * the queue's insert pointer will be restored to its original value.
 *
 * Returns NETDEV_TX_OK or NETDEV_TX_BUSY
 * You must hold netif_tx_lock() to call this function.
 */
static int efx_enqueue_skb(struct efx_tx_queue *tx_queue,
			   struct sk_buff *skb)
{
	struct efx_nic *efx = tx_queue->efx;
	struct pci_dev *pci_dev = efx->pci_dev;
	struct efx_tx_buffer *buffer;
	skb_frag_t *fragment;
	struct page *page;
	int page_offset;
	unsigned int len, unmap_len = 0, fill_level, insert_ptr, misalign;
	dma_addr_t dma_addr, unmap_addr = 0;
	unsigned int dma_len;
	bool unmap_single;
	int q_space, i = 0;
	int rc = NETDEV_TX_OK;

	EFX_BUG_ON_PARANOID(tx_queue->write_count != tx_queue->insert_count);

	if (skb_shinfo(skb)->gso_size)
		return efx_enqueue_skb_tso(tx_queue, skb);

	/* Get size of the initial fragment */
	len = skb_headlen(skb);

	/* Pad if necessary */
	if (EFX_WORKAROUND_15592(efx) && skb->len <= 32) {
		EFX_BUG_ON_PARANOID(skb->data_len);
		len = 32 + 1;
		if (skb_pad(skb, len - skb->len))
			return NETDEV_TX_OK;
	}

	fill_level = tx_queue->insert_count - tx_queue->old_read_count;
	q_space = efx->type->txd_ring_mask - 1 - fill_level;

	/* Map for DMA.  Use pci_map_single rather than pci_map_page
	 * since this is more efficient on machines with sparse
	 * memory.
	 */
	unmap_single = true;
	dma_addr = pci_map_single(pci_dev, skb->data, len, PCI_DMA_TODEVICE);

	/* Process all fragments */
	while (1) {
		if (unlikely(pci_dma_mapping_error(pci_dev, dma_addr)))
			goto pci_err;

		/* Store fields for marking in the per-fragment final
		 * descriptor */
		unmap_len = len;
		unmap_addr = dma_addr;

		/* Add to TX queue, splitting across DMA boundaries */
		do {
			if (unlikely(q_space-- <= 0)) {
				/* It might be that completions have
				 * happened since the xmit path last
				 * checked.  Update the xmit path's
				 * copy of read_count.
				 */
				++tx_queue->stopped;
				/* This memory barrier protects the
				 * change of stopped from the access
				 * of read_count. */
				smp_mb();
				tx_queue->old_read_count =
					*(volatile unsigned *)
					&tx_queue->read_count;
				fill_level = (tx_queue->insert_count
					      - tx_queue->old_read_count);
				q_space = (efx->type->txd_ring_mask - 1 -
					   fill_level);
				if (unlikely(q_space-- <= 0))
					goto stop;
				smp_mb();
				--tx_queue->stopped;
			}

			insert_ptr = (tx_queue->insert_count &
				      efx->type->txd_ring_mask);
			buffer = &tx_queue->buffer[insert_ptr];
			efx_tsoh_free(tx_queue, buffer);
			EFX_BUG_ON_PARANOID(buffer->tsoh);
			EFX_BUG_ON_PARANOID(buffer->skb);
			EFX_BUG_ON_PARANOID(buffer->len);
			EFX_BUG_ON_PARANOID(!buffer->continuation);
			EFX_BUG_ON_PARANOID(buffer->unmap_len);

			dma_len = (((~dma_addr) & efx->type->tx_dma_mask) + 1);
			if (likely(dma_len > len))
				dma_len = len;

			misalign = (unsigned)dma_addr & efx->type->bug5391_mask;
			if (misalign && dma_len + misalign > 512)
				dma_len = 512 - misalign;

			/* Fill out per descriptor fields */
			buffer->len = dma_len;
			buffer->dma_addr = dma_addr;
			len -= dma_len;
			dma_addr += dma_len;
			++tx_queue->insert_count;
		} while (len);

		/* Transfer ownership of the unmapping to the final buffer */
		buffer->unmap_single = unmap_single;
		buffer->unmap_len = unmap_len;
		unmap_len = 0;

		/* Get address and size of next fragment */
		if (i >= skb_shinfo(skb)->nr_frags)
			break;
		fragment = &skb_shinfo(skb)->frags[i];
		len = fragment->size;
		page = fragment->page;
		page_offset = fragment->page_offset;
		i++;
		/* Map for DMA */
		unmap_single = false;
		dma_addr = pci_map_page(pci_dev, page, page_offset, len,
					PCI_DMA_TODEVICE);
	}

	/* Transfer ownership of the skb to the final buffer */
	buffer->skb = skb;
	buffer->continuation = false;

	/* Pass off to hardware */
	falcon_push_buffers(tx_queue);

	return NETDEV_TX_OK;

 pci_err:
	EFX_ERR_RL(efx, "TX queue %d could not map skb with %d bytes %d "
		   "fragments for DMA\n", tx_queue->queue, skb->len,
		   skb_shinfo(skb)->nr_frags + 1);

	/* Mark the packet as transmitted, and free the SKB ourselves */
	dev_kfree_skb_any(skb);
	goto unwind;

 stop:
	rc = NETDEV_TX_BUSY;

	if (tx_queue->stopped == 1)
		efx_stop_queue(efx);

 unwind:
	/* Work backwards until we hit the original insert pointer value */
	while (tx_queue->insert_count != tx_queue->write_count) {
		--tx_queue->insert_count;
		insert_ptr = tx_queue->insert_count & efx->type->txd_ring_mask;
		buffer = &tx_queue->buffer[insert_ptr];
		efx_dequeue_buffer(tx_queue, buffer);
		buffer->len = 0;
	}

	/* Free the fragment we were mid-way through pushing */
	if (unmap_len) {
		if (unmap_single)
			pci_unmap_single(pci_dev, unmap_addr, unmap_len,
					 PCI_DMA_TODEVICE);
		else
			pci_unmap_page(pci_dev, unmap_addr, unmap_len,
				       PCI_DMA_TODEVICE);
	}

	return rc;
}
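
/* Ring accounting sketch (illustrative numbers): with txd_ring_mask
 * 4095 the ring has 4096 entries, but q_space above is computed as
 * mask - 1 - fill_level, so one descriptor is always left unused.
 * This ensures a completely full ring can never be confused with a
 * completely empty one, since the insert and read pointers would
 * otherwise be equal in both cases.
 */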

/* Remove packets from the TX queue
 *
 * This removes packets from the TX queue, up to and including the
 * specified index.
 */
static void efx_dequeue_buffers(struct efx_tx_queue *tx_queue,
				unsigned int index)
{
	struct efx_nic *efx = tx_queue->efx;
	unsigned int stop_index, read_ptr;
	unsigned int mask = tx_queue->efx->type->txd_ring_mask;

	stop_index = (index + 1) & mask;
	read_ptr = tx_queue->read_count & mask;

	while (read_ptr != stop_index) {
		struct efx_tx_buffer *buffer = &tx_queue->buffer[read_ptr];
		if (unlikely(buffer->len == 0)) {
			EFX_ERR(tx_queue->efx, "TX queue %d spurious TX "
				"completion id %x\n", tx_queue->queue,
				read_ptr);
			efx_schedule_reset(efx, RESET_TYPE_TX_SKIP);
			return;
		}

		efx_dequeue_buffer(tx_queue, buffer);
		buffer->continuation = true;
		buffer->len = 0;

		++tx_queue->read_count;
		read_ptr = tx_queue->read_count & mask;
	}
}

/* Initiate a packet transmission on the specified TX queue.
 * Note that returning anything other than NETDEV_TX_OK will cause the
 * OS to free the skb.
 *
 * This function is split out from efx_hard_start_xmit to allow the
 * loopback test to direct packets via specific TX queues.  It is
 * therefore a non-static inline, so as not to penalise performance
 * for non-loopback transmissions.
 *
 * Context: netif_tx_lock held
 */
inline int efx_xmit(struct efx_nic *efx,
		    struct efx_tx_queue *tx_queue, struct sk_buff *skb)
{
	int rc;

	/* Map fragments for DMA and add to TX queue */
	rc = efx_enqueue_skb(tx_queue, skb);
	return rc;
}

/* Initiate a packet transmission.  We use one channel per CPU
 * (sharing when we have more CPUs than channels).  On Falcon, the TX
 * completion events will be directed back to the CPU that transmitted
 * the packet, which should be cache-efficient.
 *
 * Context: non-blocking.
 * Note that returning anything other than NETDEV_TX_OK will cause the
 * OS to free the skb.
 */
int efx_hard_start_xmit(struct sk_buff *skb, struct net_device *net_dev)
{
	struct efx_nic *efx = netdev_priv(net_dev);
	struct efx_tx_queue *tx_queue;

	if (unlikely(efx->port_inhibited))
		return NETDEV_TX_BUSY;

	if (likely(skb->ip_summed == CHECKSUM_PARTIAL))
		tx_queue = &efx->tx_queue[EFX_TX_QUEUE_OFFLOAD_CSUM];
	else
		tx_queue = &efx->tx_queue[EFX_TX_QUEUE_NO_CSUM];

	return efx_xmit(efx, tx_queue, skb);
}

void efx_xmit_done(struct efx_tx_queue *tx_queue, unsigned int index)
{
	unsigned fill_level;
	struct efx_nic *efx = tx_queue->efx;

	EFX_BUG_ON_PARANOID(index > efx->type->txd_ring_mask);

	efx_dequeue_buffers(tx_queue, index);

	/* See if we need to restart the netif queue.  This barrier
	 * separates the update of read_count from the test of
	 * stopped. */
	smp_mb();
	if (unlikely(tx_queue->stopped) && likely(efx->port_enabled)) {
		fill_level = tx_queue->insert_count - tx_queue->read_count;
		if (fill_level < EFX_NETDEV_TX_THRESHOLD(tx_queue)) {
			EFX_BUG_ON_PARANOID(!efx_dev_registered(efx));

			/* Do this under netif_tx_lock(), to avoid racing
			 * with efx_xmit(). */
			netif_tx_lock(efx->net_dev);
			if (tx_queue->stopped) {
				tx_queue->stopped = 0;
				efx_wake_queue(efx);
			}
			netif_tx_unlock(efx->net_dev);
		}
	}
}
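
/* A note on the barrier pairing, as read from the code above: the
 * smp_mb() in efx_xmit_done() pairs with those in efx_enqueue_skb()
 * and efx_tx_queue_insert().  The xmit side orders its increment of
 * stopped before its re-read of read_count; the completion side
 * orders its update of read_count before its test of stopped.
 * Whichever path runs second then sees the other's write, so a
 * stopped queue should not be missed by both.
 */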

int efx_probe_tx_queue(struct efx_tx_queue *tx_queue)
{
	struct efx_nic *efx = tx_queue->efx;
	unsigned int txq_size;
	int i, rc;

	EFX_LOG(efx, "creating TX queue %d\n", tx_queue->queue);

	/* Allocate software ring */
	txq_size = (efx->type->txd_ring_mask + 1) * sizeof(*tx_queue->buffer);
	tx_queue->buffer = kzalloc(txq_size, GFP_KERNEL);
	if (!tx_queue->buffer)
		return -ENOMEM;
	for (i = 0; i <= efx->type->txd_ring_mask; ++i)
		tx_queue->buffer[i].continuation = true;

	/* Allocate hardware ring */
	rc = falcon_probe_tx(tx_queue);
	if (rc)
		goto fail;

	return 0;

 fail:
	kfree(tx_queue->buffer);
	tx_queue->buffer = NULL;
	return rc;
}

void efx_init_tx_queue(struct efx_tx_queue *tx_queue)
{
	EFX_LOG(tx_queue->efx, "initialising TX queue %d\n", tx_queue->queue);

	tx_queue->insert_count = 0;
	tx_queue->write_count = 0;
	tx_queue->read_count = 0;
	tx_queue->old_read_count = 0;
	BUG_ON(tx_queue->stopped);

	/* Set up TX descriptor ring */
	falcon_init_tx(tx_queue);
}

void efx_release_tx_buffers(struct efx_tx_queue *tx_queue)
{
	struct efx_tx_buffer *buffer;

	if (!tx_queue->buffer)
		return;

	/* Free any buffers left in the ring */
	while (tx_queue->read_count != tx_queue->write_count) {
		buffer = &tx_queue->buffer[tx_queue->read_count &
					   tx_queue->efx->type->txd_ring_mask];
		efx_dequeue_buffer(tx_queue, buffer);
		buffer->continuation = true;
		buffer->len = 0;

		++tx_queue->read_count;
	}
}

void efx_fini_tx_queue(struct efx_tx_queue *tx_queue)
{
	EFX_LOG(tx_queue->efx, "shutting down TX queue %d\n", tx_queue->queue);

	/* Flush TX queue, remove descriptor ring */
	falcon_fini_tx(tx_queue);

	efx_release_tx_buffers(tx_queue);

	/* Free up TSO header cache */
	efx_fini_tso(tx_queue);

	/* Release queue's stop on port, if any */
	if (tx_queue->stopped) {
		tx_queue->stopped = 0;
		efx_wake_queue(tx_queue->efx);
	}
}

void efx_remove_tx_queue(struct efx_tx_queue *tx_queue)
{
	EFX_LOG(tx_queue->efx, "destroying TX queue %d\n", tx_queue->queue);
	falcon_remove_tx(tx_queue);

	kfree(tx_queue->buffer);
	tx_queue->buffer = NULL;
}


/* Efx TCP segmentation acceleration.
 *
 * Why?  Because by doing it here in the driver we can go significantly
 * faster than GSO.
 *
 * Requires TX checksum offload support.
 */

/* Number of bytes inserted at the start of a TSO header buffer,
 * similar to NET_IP_ALIGN.
 */
#ifdef CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS
#define TSOH_OFFSET	0
#else
#define TSOH_OFFSET	NET_IP_ALIGN
#endif

#define TSOH_BUFFER(tsoh)	((u8 *)(tsoh + 1) + TSOH_OFFSET)

/* Total size of struct efx_tso_header, buffer and padding */
#define TSOH_SIZE(hdr_len)					\
	(sizeof(struct efx_tso_header) + TSOH_OFFSET + hdr_len)

/* Size of blocks on free list.  Larger blocks must be allocated from
 * the heap.
 */
#define TSOH_STD_SIZE		128
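
/* Sizing example (assuming a 64-bit build where struct efx_tso_header
 * is 16 bytes): a standard Ethernet + IPv4 + TCP header with no
 * options is 14 + 20 + 20 = 54 bytes, so TSOH_SIZE(54) is at most
 * 16 + 2 + 54 = 72 bytes and fits in a TSOH_STD_SIZE free-list block.
 * Only unusually long headers, e.g. with many TCP options, exceed
 * 128 bytes and fall back to the heap allocation path below.
 */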

#define PTR_DIFF(p1, p2)  ((u8 *)(p1) - (u8 *)(p2))
#define ETH_HDR_LEN(skb)  (skb_network_header(skb) - (skb)->data)
#define SKB_TCP_OFF(skb)  PTR_DIFF(tcp_hdr(skb), (skb)->data)
#define SKB_IPV4_OFF(skb) PTR_DIFF(ip_hdr(skb), (skb)->data)

/**
 * struct tso_state - TSO state for an SKB
 * @out_len: Remaining length in current segment
 * @seqnum: Current sequence number
 * @ipv4_id: Current IPv4 ID, host endian
 * @packet_space: Remaining space in current packet
 * @dma_addr: DMA address of current position
 * @in_len: Remaining length in current SKB fragment
 * @unmap_len: Length of SKB fragment
 * @unmap_addr: DMA address of SKB fragment
 * @unmap_single: DMA single vs page mapping flag
 * @header_len: Number of bytes of header
 * @full_packet_size: Number of bytes to put in each outgoing segment
 *
 * The state used during segmentation.  It is put into this data structure
 * just to make it easy to pass into inline functions.
 */
struct tso_state {
	/* Output position */
	unsigned out_len;
	unsigned seqnum;
	unsigned ipv4_id;
	unsigned packet_space;

	/* Input position */
	dma_addr_t dma_addr;
	unsigned in_len;
	unsigned unmap_len;
	dma_addr_t unmap_addr;
	bool unmap_single;

	unsigned header_len;
	int full_packet_size;
};
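
/* Segmentation example (illustrative numbers): an skb of 4434 bytes
 * with a 54 byte Ethernet + IPv4 + TCP header and gso_size 1460
 * carries 4380 bytes of payload.  tso_start() sets header_len to 54
 * and full_packet_size to 1514; the loop in efx_enqueue_skb_tso()
 * then emits three segments of 1460 payload bytes each, out_len
 * counting down 4380 -> 2920 -> 1460 -> 0 as packets complete.
 */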


/*
 * Verify that our various assumptions about sk_buffs and the conditions
 * under which TSO will be attempted hold true.
 */
static void efx_tso_check_safe(struct sk_buff *skb)
{
	__be16 protocol = skb->protocol;

	EFX_BUG_ON_PARANOID(((struct ethhdr *)skb->data)->h_proto !=
			    protocol);
	if (protocol == htons(ETH_P_8021Q)) {
		/* Find the encapsulated protocol; reset network header
		 * and transport header based on that. */
		struct vlan_ethhdr *veh = (struct vlan_ethhdr *)skb->data;
		protocol = veh->h_vlan_encapsulated_proto;
		skb_set_network_header(skb, sizeof(*veh));
		if (protocol == htons(ETH_P_IP))
			skb_set_transport_header(skb, sizeof(*veh) +
						 4 * ip_hdr(skb)->ihl);
	}

	EFX_BUG_ON_PARANOID(protocol != htons(ETH_P_IP));
	EFX_BUG_ON_PARANOID(ip_hdr(skb)->protocol != IPPROTO_TCP);
	EFX_BUG_ON_PARANOID((PTR_DIFF(tcp_hdr(skb), skb->data)
			     + (tcp_hdr(skb)->doff << 2u)) >
			    skb_headlen(skb));
}
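
/* VLAN example for the check above: struct vlan_ethhdr is 18 bytes,
 * so for an 802.1Q tagged frame the network header is reset to
 * offset 18 and, for IPv4 with a 20 byte header (ihl == 5), the
 * transport header to offset 18 + 20 = 38.
 */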


/*
 * Allocate a page worth of efx_tso_header structures, and string them
 * into the tx_queue->tso_headers_free linked list.  Return 0 or -ENOMEM.
 */
static int efx_tsoh_block_alloc(struct efx_tx_queue *tx_queue)
{
	struct pci_dev *pci_dev = tx_queue->efx->pci_dev;
	struct efx_tso_header *tsoh;
	dma_addr_t dma_addr;
	u8 *base_kva, *kva;

	base_kva = pci_alloc_consistent(pci_dev, PAGE_SIZE, &dma_addr);
	if (base_kva == NULL) {
		EFX_ERR(tx_queue->efx, "Unable to allocate page for TSO"
			" headers\n");
		return -ENOMEM;
	}

	/* pci_alloc_consistent() allocates pages. */
	EFX_BUG_ON_PARANOID(dma_addr & (PAGE_SIZE - 1u));

	for (kva = base_kva; kva < base_kva + PAGE_SIZE; kva += TSOH_STD_SIZE) {
		tsoh = (struct efx_tso_header *)kva;
		tsoh->dma_addr = dma_addr + (TSOH_BUFFER(tsoh) - base_kva);
		tsoh->next = tx_queue->tso_headers_free;
		tx_queue->tso_headers_free = tsoh;
	}

	return 0;
}
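
/* With 4KB pages (an assumption; PAGE_SIZE is architecture-dependent)
 * each block provides PAGE_SIZE / TSOH_STD_SIZE = 4096 / 128 = 32
 * standard-size TSO headers per allocation.
 */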


/* Free up a TSO header, and all others in the same page. */
static void efx_tsoh_block_free(struct efx_tx_queue *tx_queue,
				struct efx_tso_header *tsoh,
				struct pci_dev *pci_dev)
{
	struct efx_tso_header **p;
	unsigned long base_kva;
	dma_addr_t base_dma;

	base_kva = (unsigned long)tsoh & PAGE_MASK;
	base_dma = tsoh->dma_addr & PAGE_MASK;

	p = &tx_queue->tso_headers_free;
	while (*p != NULL) {
		if (((unsigned long)*p & PAGE_MASK) == base_kva)
			*p = (*p)->next;
		else
			p = &(*p)->next;
	}

	pci_free_consistent(pci_dev, PAGE_SIZE, (void *)base_kva, base_dma);
}

static struct efx_tso_header *
efx_tsoh_heap_alloc(struct efx_tx_queue *tx_queue, size_t header_len)
{
	struct efx_tso_header *tsoh;

	tsoh = kmalloc(TSOH_SIZE(header_len), GFP_ATOMIC | GFP_DMA);
	if (unlikely(!tsoh))
		return NULL;

	tsoh->dma_addr = pci_map_single(tx_queue->efx->pci_dev,
					TSOH_BUFFER(tsoh), header_len,
					PCI_DMA_TODEVICE);
	if (unlikely(pci_dma_mapping_error(tx_queue->efx->pci_dev,
					   tsoh->dma_addr))) {
		kfree(tsoh);
		return NULL;
	}

	tsoh->unmap_len = header_len;
	return tsoh;
}

static void
efx_tsoh_heap_free(struct efx_tx_queue *tx_queue, struct efx_tso_header *tsoh)
{
	pci_unmap_single(tx_queue->efx->pci_dev,
			 tsoh->dma_addr, tsoh->unmap_len,
			 PCI_DMA_TODEVICE);
	kfree(tsoh);
}

/**
 * efx_tx_queue_insert - push descriptors onto the TX queue
 * @tx_queue: Efx TX queue
 * @dma_addr: DMA address of fragment
 * @len: Length of fragment
 * @final_buffer: The final buffer inserted into the queue
 *
 * Push descriptors onto the TX queue.  Return 0 on success or 1 if
 * @tx_queue full.
 */
static int efx_tx_queue_insert(struct efx_tx_queue *tx_queue,
			       dma_addr_t dma_addr, unsigned len,
			       struct efx_tx_buffer **final_buffer)
{
	struct efx_tx_buffer *buffer;
	struct efx_nic *efx = tx_queue->efx;
	unsigned dma_len, fill_level, insert_ptr, misalign;
	int q_space;

	EFX_BUG_ON_PARANOID(len <= 0);

	fill_level = tx_queue->insert_count - tx_queue->old_read_count;
	/* -1 as there is no way to represent all descriptors used */
	q_space = efx->type->txd_ring_mask - 1 - fill_level;

	while (1) {
		if (unlikely(q_space-- <= 0)) {
			/* It might be that completions have happened
			 * since the xmit path last checked.  Update
			 * the xmit path's copy of read_count.
			 */
			++tx_queue->stopped;
			/* This memory barrier protects the change of
			 * stopped from the access of read_count. */
			smp_mb();
			tx_queue->old_read_count =
				*(volatile unsigned *)&tx_queue->read_count;
			fill_level = (tx_queue->insert_count
				      - tx_queue->old_read_count);
			q_space = efx->type->txd_ring_mask - 1 - fill_level;
			if (unlikely(q_space-- <= 0)) {
				*final_buffer = NULL;
				return 1;
			}
			smp_mb();
			--tx_queue->stopped;
		}

		insert_ptr = tx_queue->insert_count & efx->type->txd_ring_mask;
		buffer = &tx_queue->buffer[insert_ptr];
		++tx_queue->insert_count;

		EFX_BUG_ON_PARANOID(tx_queue->insert_count -
				    tx_queue->read_count >
				    efx->type->txd_ring_mask);

		efx_tsoh_free(tx_queue, buffer);
		EFX_BUG_ON_PARANOID(buffer->len);
		EFX_BUG_ON_PARANOID(buffer->unmap_len);
		EFX_BUG_ON_PARANOID(buffer->skb);
		EFX_BUG_ON_PARANOID(!buffer->continuation);
		EFX_BUG_ON_PARANOID(buffer->tsoh);

		buffer->dma_addr = dma_addr;

		/* Ensure we do not cross a boundary unsupported by H/W */
		dma_len = (~dma_addr & efx->type->tx_dma_mask) + 1;

		misalign = (unsigned)dma_addr & efx->type->bug5391_mask;
		if (misalign && dma_len + misalign > 512)
			dma_len = 512 - misalign;

		/* If there is enough space to send then do so */
		if (dma_len >= len)
			break;

		buffer->len = dma_len; /* Don't set the other members */
		dma_addr += dma_len;
		len -= dma_len;
	}

	EFX_BUG_ON_PARANOID(!len);
	buffer->len = len;
	*final_buffer = buffer;
	return 0;
}
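
/* Boundary-splitting example (assuming a 4KB DMA boundary, i.e.
 * tx_dma_mask == 0xfff, purely for illustration): for dma_addr
 * 0x12345f80, (~dma_addr & 0xfff) + 1 = 0x80, so only 128 bytes
 * remain before the next boundary.  A 1000 byte fragment at that
 * address is emitted as a 128 byte descriptor followed by an 872
 * byte descriptor starting at 0x12346000.
 */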


/*
 * Put a TSO header into the TX queue.
 *
 * This is special-cased because we know that it is small enough to fit in
 * a single fragment, and we know it doesn't cross a page boundary.  It
 * also allows us to not worry about end-of-packet etc.
 */
static void efx_tso_put_header(struct efx_tx_queue *tx_queue,
			       struct efx_tso_header *tsoh, unsigned len)
{
	struct efx_tx_buffer *buffer;

	buffer = &tx_queue->buffer[tx_queue->insert_count &
				   tx_queue->efx->type->txd_ring_mask];
	efx_tsoh_free(tx_queue, buffer);
	EFX_BUG_ON_PARANOID(buffer->len);
	EFX_BUG_ON_PARANOID(buffer->unmap_len);
	EFX_BUG_ON_PARANOID(buffer->skb);
	EFX_BUG_ON_PARANOID(!buffer->continuation);
	EFX_BUG_ON_PARANOID(buffer->tsoh);
	buffer->len = len;
	buffer->dma_addr = tsoh->dma_addr;
	buffer->tsoh = tsoh;

	++tx_queue->insert_count;
}


/* Remove descriptors put into a tx_queue. */
static void efx_enqueue_unwind(struct efx_tx_queue *tx_queue)
{
	struct efx_tx_buffer *buffer;
	dma_addr_t unmap_addr;

	/* Work backwards until we hit the original insert pointer value */
	while (tx_queue->insert_count != tx_queue->write_count) {
		--tx_queue->insert_count;
		buffer = &tx_queue->buffer[tx_queue->insert_count &
					   tx_queue->efx->type->txd_ring_mask];
		efx_tsoh_free(tx_queue, buffer);
		EFX_BUG_ON_PARANOID(buffer->skb);
		if (buffer->unmap_len) {
			/* Compute unmap_addr before clearing buffer->len,
			 * since the mapping start is derived from it */
			unmap_addr = (buffer->dma_addr + buffer->len -
				      buffer->unmap_len);
			if (buffer->unmap_single)
				pci_unmap_single(tx_queue->efx->pci_dev,
						 unmap_addr, buffer->unmap_len,
						 PCI_DMA_TODEVICE);
			else
				pci_unmap_page(tx_queue->efx->pci_dev,
					       unmap_addr, buffer->unmap_len,
					       PCI_DMA_TODEVICE);
			buffer->unmap_len = 0;
		}
		buffer->len = 0;
		buffer->continuation = true;
	}
}


/* Parse the SKB header and initialise state. */
static void tso_start(struct tso_state *st, const struct sk_buff *skb)
{
	/* All ethernet/IP/TCP headers combined size is TCP header size
	 * plus offset of TCP header relative to start of packet.
	 */
	st->header_len = ((tcp_hdr(skb)->doff << 2u)
			  + PTR_DIFF(tcp_hdr(skb), skb->data));
	st->full_packet_size = st->header_len + skb_shinfo(skb)->gso_size;

	st->ipv4_id = ntohs(ip_hdr(skb)->id);
	st->seqnum = ntohl(tcp_hdr(skb)->seq);

	EFX_BUG_ON_PARANOID(tcp_hdr(skb)->urg);
	EFX_BUG_ON_PARANOID(tcp_hdr(skb)->syn);
	EFX_BUG_ON_PARANOID(tcp_hdr(skb)->rst);

	st->packet_space = st->full_packet_size;
	st->out_len = skb->len - st->header_len;
	st->unmap_len = 0;
	st->unmap_single = false;
}

static int tso_get_fragment(struct tso_state *st, struct efx_nic *efx,
			    skb_frag_t *frag)
{
	st->unmap_addr = pci_map_page(efx->pci_dev, frag->page,
				      frag->page_offset, frag->size,
				      PCI_DMA_TODEVICE);
	if (likely(!pci_dma_mapping_error(efx->pci_dev, st->unmap_addr))) {
		st->unmap_single = false;
		st->unmap_len = frag->size;
		st->in_len = frag->size;
		st->dma_addr = st->unmap_addr;
		return 0;
	}
	return -ENOMEM;
}

static int tso_get_head_fragment(struct tso_state *st, struct efx_nic *efx,
				 const struct sk_buff *skb)
{
	int hl = st->header_len;
	int len = skb_headlen(skb) - hl;

	st->unmap_addr = pci_map_single(efx->pci_dev, skb->data + hl,
					len, PCI_DMA_TODEVICE);
	if (likely(!pci_dma_mapping_error(efx->pci_dev, st->unmap_addr))) {
		st->unmap_single = true;
		st->unmap_len = len;
		st->in_len = len;
		st->dma_addr = st->unmap_addr;
		return 0;
	}
	return -ENOMEM;
}


/**
 * tso_fill_packet_with_fragment - form descriptors for the current fragment
 * @tx_queue: Efx TX queue
 * @skb: Socket buffer
 * @st: TSO state
 *
 * Form descriptors for the current fragment, until we reach the end
 * of fragment or end-of-packet.  Return 0 on success, 1 if not enough
 * space in @tx_queue.
 */
static int tso_fill_packet_with_fragment(struct efx_tx_queue *tx_queue,
					 const struct sk_buff *skb,
					 struct tso_state *st)
{
	struct efx_tx_buffer *buffer;
	int n, end_of_packet, rc;

	if (st->in_len == 0)
		return 0;
	if (st->packet_space == 0)
		return 0;

	EFX_BUG_ON_PARANOID(st->in_len <= 0);
	EFX_BUG_ON_PARANOID(st->packet_space <= 0);

	n = min(st->in_len, st->packet_space);

	st->packet_space -= n;
	st->out_len -= n;
	st->in_len -= n;

	rc = efx_tx_queue_insert(tx_queue, st->dma_addr, n, &buffer);
	if (likely(rc == 0)) {
		if (st->out_len == 0)
			/* Transfer ownership of the skb */
			buffer->skb = skb;

		end_of_packet = st->out_len == 0 || st->packet_space == 0;
		buffer->continuation = !end_of_packet;

		if (st->in_len == 0) {
			/* Transfer ownership of the pci mapping */
			buffer->unmap_len = st->unmap_len;
			buffer->unmap_single = st->unmap_single;
			st->unmap_len = 0;
		}
	}

	st->dma_addr += n;
	return rc;
}


/**
 * tso_start_new_packet - generate a new header and prepare for the new packet
 * @tx_queue: Efx TX queue
 * @skb: Socket buffer
 * @st: TSO state
 *
 * Generate a new header and prepare for the new packet.  Return 0 on
 * success, or -1 if we failed to allocate a header.
 */
static int tso_start_new_packet(struct efx_tx_queue *tx_queue,
				const struct sk_buff *skb,
				struct tso_state *st)
{
	struct efx_tso_header *tsoh;
	struct iphdr *tsoh_iph;
	struct tcphdr *tsoh_th;
	unsigned ip_length;
	u8 *header;

	/* Allocate a DMA-mapped header buffer. */
	if (likely(TSOH_SIZE(st->header_len) <= TSOH_STD_SIZE)) {
		if (tx_queue->tso_headers_free == NULL) {
			if (efx_tsoh_block_alloc(tx_queue))
				return -1;
		}
		EFX_BUG_ON_PARANOID(!tx_queue->tso_headers_free);
		tsoh = tx_queue->tso_headers_free;
		tx_queue->tso_headers_free = tsoh->next;
		tsoh->unmap_len = 0;
	} else {
		tx_queue->tso_long_headers++;
		tsoh = efx_tsoh_heap_alloc(tx_queue, st->header_len);
		if (unlikely(!tsoh))
			return -1;
	}

	header = TSOH_BUFFER(tsoh);
	tsoh_th = (struct tcphdr *)(header + SKB_TCP_OFF(skb));
	tsoh_iph = (struct iphdr *)(header + SKB_IPV4_OFF(skb));

	/* Copy and update the headers. */
	memcpy(header, skb->data, st->header_len);

	tsoh_th->seq = htonl(st->seqnum);
	st->seqnum += skb_shinfo(skb)->gso_size;
	if (st->out_len > skb_shinfo(skb)->gso_size) {
		/* This packet will not finish the TSO burst. */
		ip_length = st->full_packet_size - ETH_HDR_LEN(skb);
		tsoh_th->fin = 0;
		tsoh_th->psh = 0;
	} else {
		/* This packet will be the last in the TSO burst. */
		ip_length = st->header_len - ETH_HDR_LEN(skb) + st->out_len;
		tsoh_th->fin = tcp_hdr(skb)->fin;
		tsoh_th->psh = tcp_hdr(skb)->psh;
	}
	tsoh_iph->tot_len = htons(ip_length);

	/* Linux leaves suitable gaps in the IP ID space for us to fill. */
	tsoh_iph->id = htons(st->ipv4_id);
	st->ipv4_id++;

	st->packet_space = skb_shinfo(skb)->gso_size;
	++tx_queue->tso_packets;

	/* Form a descriptor for this header. */
	efx_tso_put_header(tx_queue, tsoh, st->header_len);

	return 0;
}
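
/* Header rewrite example (continuing the illustrative 54 byte header,
 * gso_size 1460 case): each non-final segment gets tot_len =
 * full_packet_size - ETH_HDR_LEN = 1514 - 14 = 1500 with FIN and PSH
 * cleared, while the final segment gets tot_len = 40 + out_len and
 * the original FIN/PSH bits.  The TCP sequence number advances by
 * 1460 and the IPv4 ID by 1 for each segment generated.
 */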


/**
 * efx_enqueue_skb_tso - segment and transmit a TSO socket buffer
 * @tx_queue: Efx TX queue
 * @skb: Socket buffer
 *
 * Context: You must hold netif_tx_lock() to call this function.
 *
 * Add socket buffer @skb to @tx_queue, doing TSO.  @skb is consumed in
 * all cases.  Return %NETDEV_TX_OK or %NETDEV_TX_BUSY.
 */
static int efx_enqueue_skb_tso(struct efx_tx_queue *tx_queue,
			       struct sk_buff *skb)
{
	struct efx_nic *efx = tx_queue->efx;
	int frag_i, rc, rc2 = NETDEV_TX_OK;
	struct tso_state state;

	/* Verify TSO is safe - these checks should never fail. */
	efx_tso_check_safe(skb);

	EFX_BUG_ON_PARANOID(tx_queue->write_count != tx_queue->insert_count);

	tso_start(&state, skb);

	/* Assume that skb header area contains exactly the headers, and
	 * all payload is in the frag list.
	 */
	if (skb_headlen(skb) == state.header_len) {
		/* Grab the first payload fragment. */
		EFX_BUG_ON_PARANOID(skb_shinfo(skb)->nr_frags < 1);
		frag_i = 0;
		rc = tso_get_fragment(&state, efx,
				      skb_shinfo(skb)->frags + frag_i);
		if (rc)
			goto mem_err;
	} else {
		rc = tso_get_head_fragment(&state, efx, skb);
		if (rc)
			goto mem_err;
		frag_i = -1;
	}

	if (tso_start_new_packet(tx_queue, skb, &state) < 0)
		goto mem_err;

	while (1) {
		rc = tso_fill_packet_with_fragment(tx_queue, skb, &state);
		if (unlikely(rc))
			goto stop;

		/* Move onto the next fragment? */
		if (state.in_len == 0) {
			if (++frag_i >= skb_shinfo(skb)->nr_frags)
				/* End of payload reached. */
				break;
			rc = tso_get_fragment(&state, efx,
					      skb_shinfo(skb)->frags + frag_i);
			if (rc)
				goto mem_err;
		}

		/* Start at new packet? */
		if (state.packet_space == 0 &&
		    tso_start_new_packet(tx_queue, skb, &state) < 0)
			goto mem_err;
	}

	/* Pass off to hardware */
	falcon_push_buffers(tx_queue);

	tx_queue->tso_bursts++;
	return NETDEV_TX_OK;

 mem_err:
	EFX_ERR(efx, "Out of memory for TSO headers, or PCI mapping error\n");
	dev_kfree_skb_any(skb);
	goto unwind;

 stop:
	rc2 = NETDEV_TX_BUSY;

	/* Stop the queue if it wasn't stopped before. */
	if (tx_queue->stopped == 1)
		efx_stop_queue(efx);

 unwind:
	/* Free the DMA mapping we were in the process of writing out */
	if (state.unmap_len) {
		if (state.unmap_single)
			pci_unmap_single(efx->pci_dev, state.unmap_addr,
					 state.unmap_len, PCI_DMA_TODEVICE);
		else
			pci_unmap_page(efx->pci_dev, state.unmap_addr,
				       state.unmap_len, PCI_DMA_TODEVICE);
	}

	efx_enqueue_unwind(tx_queue);
	return rc2;
}


/*
 * Free up all TSO data structures associated with tx_queue.  This
 * routine should be called only once the tx_queue is both empty and
 * will no longer be used.
 */
static void efx_fini_tso(struct efx_tx_queue *tx_queue)
{
	unsigned i;

	if (tx_queue->buffer) {
		for (i = 0; i <= tx_queue->efx->type->txd_ring_mask; ++i)
			efx_tsoh_free(tx_queue, &tx_queue->buffer[i]);
	}

	while (tx_queue->tso_headers_free != NULL)
		efx_tsoh_block_free(tx_queue, tx_queue->tso_headers_free,
				    tx_queue->efx->pci_dev);
}