/****************************************************************************
 * Driver for Solarflare Solarstorm network controllers and boards
 * Copyright 2005-2006 Fen Systems Ltd.
 * Copyright 2005-2010 Solarflare Communications Inc.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 as published
 * by the Free Software Foundation, incorporated herein by reference.
 */

#include <linux/pci.h>
#include <linux/tcp.h>
#include <linux/ip.h>
#include <linux/in.h>
#include <linux/ipv6.h>
#include <linux/slab.h>
#include <net/ipv6.h>
#include <linux/if_ether.h>
#include <linux/highmem.h>
#include "net_driver.h"
#include "efx.h"
#include "nic.h"
#include "workarounds.h"

/*
 * TX descriptor ring full threshold
 *
 * The tx_queue descriptor ring fill-level must fall below this value
 * before we restart the netif queue
 */
#define EFX_TXQ_THRESHOLD(_efx)	((_efx)->txq_entries / 2u)
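
/* Example (illustrative figures only; txq_entries is configurable): with
 * a 1024-entry ring, a queue stopped on overflow is not woken until the
 * fill level drops below 512 outstanding descriptors.  This hysteresis
 * avoids rapid stop/wake flapping near the full threshold.
 */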

static void efx_dequeue_buffer(struct efx_tx_queue *tx_queue,
                               struct efx_tx_buffer *buffer,
                               unsigned int *pkts_compl,
                               unsigned int *bytes_compl)
{
        if (buffer->unmap_len) {
                struct device *dma_dev = &tx_queue->efx->pci_dev->dev;
                dma_addr_t unmap_addr = (buffer->dma_addr + buffer->len -
                                         buffer->unmap_len);
                if (buffer->flags & EFX_TX_BUF_MAP_SINGLE)
                        dma_unmap_single(dma_dev, unmap_addr, buffer->unmap_len,
                                         DMA_TO_DEVICE);
                else
                        dma_unmap_page(dma_dev, unmap_addr, buffer->unmap_len,
                                       DMA_TO_DEVICE);
                buffer->unmap_len = 0;
        }

        if (buffer->flags & EFX_TX_BUF_SKB) {
                (*pkts_compl)++;
                (*bytes_compl) += buffer->skb->len;
                dev_kfree_skb_any((struct sk_buff *) buffer->skb);
                netif_vdbg(tx_queue->efx, tx_done, tx_queue->efx->net_dev,
                           "TX queue %d transmission id %x complete\n",
                           tx_queue->queue, tx_queue->read_count);
        }

        buffer->flags &= EFX_TX_BUF_TSOH;
}

/**
 * struct efx_tso_header - a DMA mapped buffer for packet headers
 * @next: Linked list of free ones.
 *	The list is protected by the TX queue lock.
 * @unmap_len: Length to unmap for an oversize buffer, or 0.
 * @dma_addr: The DMA address of the header below.
 *
 * This controls the memory used for a TSO header.  Use TSOH_BUFFER()
 * to find the packet header data.  Use TSOH_SIZE() to calculate the
 * total size required for a given packet header length.  TSO headers
 * in the free list are exactly %TSOH_STD_SIZE bytes in size.
 */
struct efx_tso_header {
        union {
                struct efx_tso_header *next;
                size_t unmap_len;
        };
        dma_addr_t dma_addr;
};

static int efx_enqueue_skb_tso(struct efx_tx_queue *tx_queue,
                               struct sk_buff *skb);
static void efx_fini_tso(struct efx_tx_queue *tx_queue);
static void efx_tsoh_heap_free(struct efx_tx_queue *tx_queue,
                               struct efx_tso_header *tsoh);

static void efx_tsoh_free(struct efx_tx_queue *tx_queue,
                          struct efx_tx_buffer *buffer)
{
        if (buffer->flags & EFX_TX_BUF_TSOH) {
                if (likely(!buffer->tsoh->unmap_len)) {
                        buffer->tsoh->next = tx_queue->tso_headers_free;
                        tx_queue->tso_headers_free = buffer->tsoh;
                } else {
                        efx_tsoh_heap_free(tx_queue, buffer->tsoh);
                }
                buffer->flags &= ~EFX_TX_BUF_TSOH;
        }
}


static inline unsigned
efx_max_tx_len(struct efx_nic *efx, dma_addr_t dma_addr)
{
        /* Depending on the NIC revision, we can use descriptor
         * lengths up to 8K or 8K-1.  However, since PCI Express
         * devices must split read requests at 4K boundaries, there is
         * little benefit from using descriptors that cross those
         * boundaries and we keep things simple by not doing so.
         */
        unsigned len = (~dma_addr & (EFX_PAGE_SIZE - 1)) + 1;

        /* Work around hardware bug for unaligned buffers. */
        if (EFX_WORKAROUND_5391(efx) && (dma_addr & 0xf))
                len = min_t(unsigned, len, 512 - (dma_addr & 0xf));

        return len;
}
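
/* Worked example (assuming EFX_PAGE_SIZE is 4096): for a fragment mapped
 * at a DMA address ending in 0xff0, ~dma_addr & 0xfff is 0xf, so at most
 * 16 bytes go in the first descriptor and no descriptor ever crosses a
 * 4K PCIe read-request boundary.
 */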

unsigned int efx_tx_max_skb_descs(struct efx_nic *efx)
{
        /* Header and payload descriptor for each output segment, plus
         * one for every input fragment boundary within a segment
         */
        unsigned int max_descs = EFX_TSO_MAX_SEGS * 2 + MAX_SKB_FRAGS;

        /* Possibly one more per segment for the alignment workaround */
        if (EFX_WORKAROUND_5391(efx))
                max_descs += EFX_TSO_MAX_SEGS;

        /* Possibly more for PCIe page boundaries within input fragments */
        if (PAGE_SIZE > EFX_PAGE_SIZE)
                max_descs += max_t(unsigned int, MAX_SKB_FRAGS,
                                   DIV_ROUND_UP(GSO_MAX_SIZE, EFX_PAGE_SIZE));

        return max_descs;
}
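
/* Arithmetic sketch (illustrative values, not taken from this file): if
 * EFX_TSO_MAX_SEGS were 100 and MAX_SKB_FRAGS 17, the baseline would be
 * 100 * 2 + 17 = 217 descriptors, plus up to 100 more when the 5391
 * alignment workaround applies.
 */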

/*
 * Add a socket buffer to a TX queue
 *
 * This maps all fragments of a socket buffer for DMA and adds them to
 * the TX queue.  The queue's insert pointer will be incremented by
 * the number of fragments in the socket buffer.
 *
 * If any DMA mapping fails, any mapped fragments will be unmapped and
 * the queue's insert pointer will be restored to its original value.
 *
 * This function is split out from efx_hard_start_xmit to allow the
 * loopback test to direct packets via specific TX queues.
 *
 * Returns NETDEV_TX_OK or NETDEV_TX_BUSY.
 * You must hold netif_tx_lock() to call this function.
 */
netdev_tx_t efx_enqueue_skb(struct efx_tx_queue *tx_queue, struct sk_buff *skb)
{
        struct efx_nic *efx = tx_queue->efx;
        struct device *dma_dev = &efx->pci_dev->dev;
        struct efx_tx_buffer *buffer;
        skb_frag_t *fragment;
        unsigned int len, unmap_len = 0, fill_level, insert_ptr;
        dma_addr_t dma_addr, unmap_addr = 0;
        unsigned int dma_len;
        unsigned short dma_flags;
        int q_space, i = 0;
        netdev_tx_t rc = NETDEV_TX_OK;

        EFX_BUG_ON_PARANOID(tx_queue->write_count != tx_queue->insert_count);

        if (skb_shinfo(skb)->gso_size)
                return efx_enqueue_skb_tso(tx_queue, skb);

        /* Get size of the initial fragment */
        len = skb_headlen(skb);

        /* Pad if necessary */
        if (EFX_WORKAROUND_15592(efx) && skb->len <= 32) {
                EFX_BUG_ON_PARANOID(skb->data_len);
                len = 32 + 1;
                if (skb_pad(skb, len - skb->len))
                        return NETDEV_TX_OK;
        }

        fill_level = tx_queue->insert_count - tx_queue->old_read_count;
        q_space = efx->txq_entries - 1 - fill_level;

        /* Map for DMA.  Use dma_map_single rather than dma_map_page
         * since this is more efficient on machines with sparse
         * memory.
         */
        dma_flags = EFX_TX_BUF_MAP_SINGLE;
        dma_addr = dma_map_single(dma_dev, skb->data, len, DMA_TO_DEVICE);

        /* Process all fragments */
        while (1) {
                if (unlikely(dma_mapping_error(dma_dev, dma_addr)))
                        goto dma_err;

                /* Store fields for marking in the per-fragment final
                 * descriptor */
                unmap_len = len;
                unmap_addr = dma_addr;

                /* Add to TX queue, splitting across DMA boundaries */
                do {
                        if (unlikely(q_space-- <= 0)) {
                                /* It might be that completions have
                                 * happened since the xmit path last
                                 * checked.  Update the xmit path's
                                 * copy of read_count.
                                 */
                                netif_tx_stop_queue(tx_queue->core_txq);
                                /* This memory barrier protects the
                                 * change of queue state from the access
                                 * of read_count. */
                                smp_mb();
                                tx_queue->old_read_count =
                                        ACCESS_ONCE(tx_queue->read_count);
                                fill_level = (tx_queue->insert_count
                                              - tx_queue->old_read_count);
                                q_space = efx->txq_entries - 1 - fill_level;
                                if (unlikely(q_space-- <= 0)) {
                                        rc = NETDEV_TX_BUSY;
                                        goto unwind;
                                }
                                smp_mb();
                                if (likely(!efx->loopback_selftest))
                                        netif_tx_start_queue(
                                                tx_queue->core_txq);
                        }
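                        /* The stop-then-recheck sequence above guards
                         * against a lost wakeup: stop the core queue,
                         * re-read read_count after the barrier, and
                         * restart if completions freed space meanwhile.
                         */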

                        insert_ptr = tx_queue->insert_count & tx_queue->ptr_mask;
                        buffer = &tx_queue->buffer[insert_ptr];
                        efx_tsoh_free(tx_queue, buffer);
                        EFX_BUG_ON_PARANOID(buffer->flags);
                        EFX_BUG_ON_PARANOID(buffer->len);
                        EFX_BUG_ON_PARANOID(buffer->unmap_len);

                        dma_len = efx_max_tx_len(efx, dma_addr);
                        if (likely(dma_len >= len))
                                dma_len = len;

                        /* Fill out per descriptor fields */
                        buffer->len = dma_len;
                        buffer->dma_addr = dma_addr;
                        buffer->flags = EFX_TX_BUF_CONT;
                        len -= dma_len;
                        dma_addr += dma_len;
                        ++tx_queue->insert_count;
                } while (len);

                /* Transfer ownership of the unmapping to the final buffer */
                buffer->flags = EFX_TX_BUF_CONT | dma_flags;
                buffer->unmap_len = unmap_len;
                unmap_len = 0;

                /* Get address and size of next fragment */
                if (i >= skb_shinfo(skb)->nr_frags)
                        break;
                fragment = &skb_shinfo(skb)->frags[i];
                len = skb_frag_size(fragment);
                i++;
                /* Map for DMA */
                dma_flags = 0;
                dma_addr = skb_frag_dma_map(dma_dev, fragment, 0, len,
                                            DMA_TO_DEVICE);
        }

        /* Transfer ownership of the skb to the final buffer */
        buffer->skb = skb;
        buffer->flags = EFX_TX_BUF_SKB | dma_flags;

        netdev_tx_sent_queue(tx_queue->core_txq, skb->len);

        /* Pass off to hardware */
        efx_nic_push_buffers(tx_queue);

        return NETDEV_TX_OK;

 dma_err:
        netif_err(efx, tx_err, efx->net_dev,
                  "TX queue %d could not map skb with %d bytes %d "
                  "fragments for DMA\n", tx_queue->queue, skb->len,
                  skb_shinfo(skb)->nr_frags + 1);

        /* Mark the packet as transmitted, and free the SKB ourselves */
        dev_kfree_skb_any(skb);

 unwind:
        /* Work backwards until we hit the original insert pointer value */
        while (tx_queue->insert_count != tx_queue->write_count) {
                unsigned int pkts_compl = 0, bytes_compl = 0;
                --tx_queue->insert_count;
                insert_ptr = tx_queue->insert_count & tx_queue->ptr_mask;
                buffer = &tx_queue->buffer[insert_ptr];
                efx_dequeue_buffer(tx_queue, buffer, &pkts_compl, &bytes_compl);
                buffer->len = 0;
        }

        /* Free the fragment we were mid-way through pushing */
        if (unmap_len) {
                if (dma_flags & EFX_TX_BUF_MAP_SINGLE)
                        dma_unmap_single(dma_dev, unmap_addr, unmap_len,
                                         DMA_TO_DEVICE);
                else
                        dma_unmap_page(dma_dev, unmap_addr, unmap_len,
                                       DMA_TO_DEVICE);
        }

        return rc;
}

/* Remove packets from the TX queue
 *
 * This removes packets from the TX queue, up to and including the
 * specified index.
 */
static void efx_dequeue_buffers(struct efx_tx_queue *tx_queue,
                                unsigned int index,
                                unsigned int *pkts_compl,
                                unsigned int *bytes_compl)
{
        struct efx_nic *efx = tx_queue->efx;
        unsigned int stop_index, read_ptr;

        stop_index = (index + 1) & tx_queue->ptr_mask;
        read_ptr = tx_queue->read_count & tx_queue->ptr_mask;

        while (read_ptr != stop_index) {
                struct efx_tx_buffer *buffer = &tx_queue->buffer[read_ptr];
                if (unlikely(buffer->len == 0)) {
                        netif_err(efx, tx_err, efx->net_dev,
                                  "TX queue %d spurious TX completion id %x\n",
                                  tx_queue->queue, read_ptr);
                        efx_schedule_reset(efx, RESET_TYPE_TX_SKIP);
                        return;
                }

                efx_dequeue_buffer(tx_queue, buffer, pkts_compl, bytes_compl);
                buffer->len = 0;

                ++tx_queue->read_count;
                read_ptr = tx_queue->read_count & tx_queue->ptr_mask;
        }
}

/* Initiate a packet transmission.  We use one channel per CPU
 * (sharing when we have more CPUs than channels).  On Falcon, the TX
 * completion events will be directed back to the CPU that transmitted
 * the packet, which should be cache-efficient.
 *
 * Context: non-blocking.
 * Note that returning anything other than NETDEV_TX_OK will cause the
 * OS to free the skb.
 */
netdev_tx_t efx_hard_start_xmit(struct sk_buff *skb,
                                struct net_device *net_dev)
{
        struct efx_nic *efx = netdev_priv(net_dev);
        struct efx_tx_queue *tx_queue;
        unsigned index, type;

        EFX_WARN_ON_PARANOID(!netif_device_present(net_dev));

        index = skb_get_queue_mapping(skb);
        type = skb->ip_summed == CHECKSUM_PARTIAL ? EFX_TXQ_TYPE_OFFLOAD : 0;
        if (index >= efx->n_tx_channels) {
                index -= efx->n_tx_channels;
                type |= EFX_TXQ_TYPE_HIGHPRI;
        }
        tx_queue = efx_get_tx_queue(efx, index, type);

        return efx_enqueue_skb(tx_queue, skb);
}

void efx_init_tx_queue_core_txq(struct efx_tx_queue *tx_queue)
{
        struct efx_nic *efx = tx_queue->efx;

        /* Must be inverse of queue lookup in efx_hard_start_xmit() */
        tx_queue->core_txq =
                netdev_get_tx_queue(efx->net_dev,
                                    tx_queue->queue / EFX_TXQ_TYPES +
                                    ((tx_queue->queue & EFX_TXQ_TYPE_HIGHPRI) ?
                                     efx->n_tx_channels : 0));
}
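
/* Illustrative mapping (assumption: 4 TX channels): core queue numbers
 * 0-3 reach the normal-priority queues of channels 0-3, while core
 * queues 4-7 reach the same channels with EFX_TXQ_TYPE_HIGHPRI set.
 * The checksum-offload type bit is chosen per-skb from ip_summed, not
 * from the core queue number.
 */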

int efx_setup_tc(struct net_device *net_dev, u8 num_tc)
{
        struct efx_nic *efx = netdev_priv(net_dev);
        struct efx_channel *channel;
        struct efx_tx_queue *tx_queue;
        unsigned tc;
        int rc;

        if (efx_nic_rev(efx) < EFX_REV_FALCON_B0 || num_tc > EFX_MAX_TX_TC)
                return -EINVAL;

        if (num_tc == net_dev->num_tc)
                return 0;

        for (tc = 0; tc < num_tc; tc++) {
                net_dev->tc_to_txq[tc].offset = tc * efx->n_tx_channels;
                net_dev->tc_to_txq[tc].count = efx->n_tx_channels;
        }

        if (num_tc > net_dev->num_tc) {
                /* Initialise high-priority queues as necessary */
                efx_for_each_channel(channel, efx) {
                        efx_for_each_possible_channel_tx_queue(tx_queue,
                                                               channel) {
                                if (!(tx_queue->queue & EFX_TXQ_TYPE_HIGHPRI))
                                        continue;
                                if (!tx_queue->buffer) {
                                        rc = efx_probe_tx_queue(tx_queue);
                                        if (rc)
                                                return rc;
                                }
                                if (!tx_queue->initialised)
                                        efx_init_tx_queue(tx_queue);
                                efx_init_tx_queue_core_txq(tx_queue);
                        }
                }
        } else {
                /* Reduce number of classes before number of queues */
                net_dev->num_tc = num_tc;
        }

        rc = netif_set_real_num_tx_queues(net_dev,
                                          max_t(int, num_tc, 1) *
                                          efx->n_tx_channels);
        if (rc)
                return rc;

        /* Do not destroy high-priority queues when they become
         * unused.  We would have to flush them first, and it is
         * fairly difficult to flush a subset of TX queues.  Leave
         * it to efx_fini_channels().
         */

        net_dev->num_tc = num_tc;
        return 0;
}
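
/* Example layout (assumption: n_tx_channels == 4): with num_tc == 2 the
 * tc_to_txq table maps TC 0 to core queues 0-3 and TC 1 to core queues
 * 4-7, so each traffic class owns one queue per channel.
 */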

void efx_xmit_done(struct efx_tx_queue *tx_queue, unsigned int index)
{
        unsigned fill_level;
        struct efx_nic *efx = tx_queue->efx;
        unsigned int pkts_compl = 0, bytes_compl = 0;

        EFX_BUG_ON_PARANOID(index > tx_queue->ptr_mask);

        efx_dequeue_buffers(tx_queue, index, &pkts_compl, &bytes_compl);
        netdev_tx_completed_queue(tx_queue->core_txq, pkts_compl, bytes_compl);

        /* See if we need to restart the netif queue.  This barrier
         * separates the update of read_count from the test of the
         * queue state. */
        smp_mb();
        if (unlikely(netif_tx_queue_stopped(tx_queue->core_txq)) &&
            likely(efx->port_enabled) &&
            likely(netif_device_present(efx->net_dev))) {
                fill_level = tx_queue->insert_count - tx_queue->read_count;
                if (fill_level < EFX_TXQ_THRESHOLD(efx))
                        netif_tx_wake_queue(tx_queue->core_txq);
        }

        /* Check whether the hardware queue is now empty */
        if ((int)(tx_queue->read_count - tx_queue->old_write_count) >= 0) {
                tx_queue->old_write_count = ACCESS_ONCE(tx_queue->write_count);
                if (tx_queue->read_count == tx_queue->old_write_count) {
                        smp_mb();
                        tx_queue->empty_read_count =
                                tx_queue->read_count | EFX_EMPTY_COUNT_VALID;
                }
        }
}

int efx_probe_tx_queue(struct efx_tx_queue *tx_queue)
{
        struct efx_nic *efx = tx_queue->efx;
        unsigned int entries;
        int rc;

        /* Create the smallest power-of-two aligned ring */
        entries = max(roundup_pow_of_two(efx->txq_entries), EFX_MIN_DMAQ_SIZE);
        EFX_BUG_ON_PARANOID(entries > EFX_MAX_DMAQ_SIZE);
        tx_queue->ptr_mask = entries - 1;
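        /* Sizing example (illustrative): txq_entries of 700 rounds up
         * to a 1024-entry ring, giving ptr_mask 0x3ff; ring indices are
         * free-running counters masked with ptr_mask on use.
         */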

        netif_dbg(efx, probe, efx->net_dev,
                  "creating TX queue %d size %#x mask %#x\n",
                  tx_queue->queue, efx->txq_entries, tx_queue->ptr_mask);

        /* Allocate software ring */
        tx_queue->buffer = kcalloc(entries, sizeof(*tx_queue->buffer),
                                   GFP_KERNEL);
        if (!tx_queue->buffer)
                return -ENOMEM;

        /* Allocate hardware ring */
        rc = efx_nic_probe_tx(tx_queue);
        if (rc)
                goto fail;

        return 0;

 fail:
        kfree(tx_queue->buffer);
        tx_queue->buffer = NULL;
        return rc;
}

void efx_init_tx_queue(struct efx_tx_queue *tx_queue)
{
        netif_dbg(tx_queue->efx, drv, tx_queue->efx->net_dev,
                  "initialising TX queue %d\n", tx_queue->queue);

        tx_queue->insert_count = 0;
        tx_queue->write_count = 0;
        tx_queue->old_write_count = 0;
        tx_queue->read_count = 0;
        tx_queue->old_read_count = 0;
        tx_queue->empty_read_count = 0 | EFX_EMPTY_COUNT_VALID;

        /* Set up TX descriptor ring */
        efx_nic_init_tx(tx_queue);

        tx_queue->initialised = true;
}

void efx_release_tx_buffers(struct efx_tx_queue *tx_queue)
{
        struct efx_tx_buffer *buffer;

        if (!tx_queue->buffer)
                return;

        /* Free any buffers left in the ring */
        while (tx_queue->read_count != tx_queue->write_count) {
                unsigned int pkts_compl = 0, bytes_compl = 0;
                buffer = &tx_queue->buffer[tx_queue->read_count & tx_queue->ptr_mask];
                efx_dequeue_buffer(tx_queue, buffer, &pkts_compl, &bytes_compl);
                buffer->len = 0;

                ++tx_queue->read_count;
        }
        netdev_tx_reset_queue(tx_queue->core_txq);
}

void efx_fini_tx_queue(struct efx_tx_queue *tx_queue)
{
        if (!tx_queue->initialised)
                return;

        netif_dbg(tx_queue->efx, drv, tx_queue->efx->net_dev,
                  "shutting down TX queue %d\n", tx_queue->queue);

        tx_queue->initialised = false;

        /* Flush TX queue, remove descriptor ring */
        efx_nic_fini_tx(tx_queue);

        efx_release_tx_buffers(tx_queue);

        /* Free up TSO header cache */
        efx_fini_tso(tx_queue);
}

void efx_remove_tx_queue(struct efx_tx_queue *tx_queue)
{
        if (!tx_queue->buffer)
                return;

        netif_dbg(tx_queue->efx, drv, tx_queue->efx->net_dev,
                  "destroying TX queue %d\n", tx_queue->queue);
        efx_nic_remove_tx(tx_queue);

        kfree(tx_queue->buffer);
        tx_queue->buffer = NULL;
}


/* Efx TCP segmentation acceleration.
 *
 * Why?  Because by doing it here in the driver we can go significantly
 * faster than GSO.
 *
 * Requires TX checksum offload support.
 */

/* Number of bytes inserted at the start of a TSO header buffer,
 * similar to NET_IP_ALIGN.
 */
#ifdef CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS
#define TSOH_OFFSET	0
#else
#define TSOH_OFFSET	NET_IP_ALIGN
#endif

#define TSOH_BUFFER(tsoh)	((u8 *)(tsoh + 1) + TSOH_OFFSET)

/* Total size of struct efx_tso_header, buffer and padding */
#define TSOH_SIZE(hdr_len) \
	(sizeof(struct efx_tso_header) + TSOH_OFFSET + hdr_len)

/* Size of blocks on free list.  Larger blocks must be allocated from
 * the heap.
 */
#define TSOH_STD_SIZE		128
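
/* Size check (hedged): a typical Ethernet + IPv4 + TCP header is 54
 * bytes, and TSOH_SIZE(54) fits comfortably in a TSOH_STD_SIZE block,
 * so only unusually long headers should need the heap path via
 * efx_tsoh_heap_alloc().
 */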

#define PTR_DIFF(p1, p2)  ((u8 *)(p1) - (u8 *)(p2))
#define ETH_HDR_LEN(skb)  (skb_network_header(skb) - (skb)->data)
#define SKB_TCP_OFF(skb)   PTR_DIFF(tcp_hdr(skb), (skb)->data)
#define SKB_IPV4_OFF(skb)  PTR_DIFF(ip_hdr(skb), (skb)->data)
#define SKB_IPV6_OFF(skb)  PTR_DIFF(ipv6_hdr(skb), (skb)->data)

/**
 * struct tso_state - TSO state for an SKB
 * @out_len: Remaining length in current segment
 * @seqnum: Current sequence number
 * @ipv4_id: Current IPv4 ID, host endian
 * @packet_space: Remaining space in current packet
 * @dma_addr: DMA address of current position
 * @in_len: Remaining length in current SKB fragment
 * @unmap_len: Length of SKB fragment
 * @unmap_addr: DMA address of SKB fragment
 * @dma_flags: TX buffer flags for DMA mapping - %EFX_TX_BUF_MAP_SINGLE or 0
 * @protocol: Network protocol (after any VLAN header)
 * @header_len: Number of bytes of header
 * @full_packet_size: Number of bytes to put in each outgoing segment
 *
 * The state used during segmentation.  It is put into this data structure
 * just to make it easy to pass into inline functions.
 */
struct tso_state {
        /* Output position */
        unsigned out_len;
        unsigned seqnum;
        unsigned ipv4_id;
        unsigned packet_space;

        /* Input position */
        dma_addr_t dma_addr;
        unsigned in_len;
        unsigned unmap_len;
        dma_addr_t unmap_addr;
        unsigned short dma_flags;

        __be16 protocol;
        unsigned header_len;
        int full_packet_size;
};


/*
 * Verify that our various assumptions about sk_buffs and the conditions
 * under which TSO will be attempted hold true.  Return the protocol number.
 */
static __be16 efx_tso_check_protocol(struct sk_buff *skb)
{
        __be16 protocol = skb->protocol;

        EFX_BUG_ON_PARANOID(((struct ethhdr *)skb->data)->h_proto !=
                            protocol);
        if (protocol == htons(ETH_P_8021Q)) {
                struct vlan_ethhdr *veh = (struct vlan_ethhdr *)skb->data;
                protocol = veh->h_vlan_encapsulated_proto;
        }

        if (protocol == htons(ETH_P_IP)) {
                EFX_BUG_ON_PARANOID(ip_hdr(skb)->protocol != IPPROTO_TCP);
        } else {
                EFX_BUG_ON_PARANOID(protocol != htons(ETH_P_IPV6));
                EFX_BUG_ON_PARANOID(ipv6_hdr(skb)->nexthdr != NEXTHDR_TCP);
        }
        EFX_BUG_ON_PARANOID((PTR_DIFF(tcp_hdr(skb), skb->data)
                             + (tcp_hdr(skb)->doff << 2u)) >
                            skb_headlen(skb));

        return protocol;
}


/*
 * Allocate a page worth of efx_tso_header structures, and string them
 * into the tx_queue->tso_headers_free linked list.  Return 0 or -ENOMEM.
 */
static int efx_tsoh_block_alloc(struct efx_tx_queue *tx_queue)
{
        struct device *dma_dev = &tx_queue->efx->pci_dev->dev;
        struct efx_tso_header *tsoh;
        dma_addr_t dma_addr;
        u8 *base_kva, *kva;

        base_kva = dma_alloc_coherent(dma_dev, PAGE_SIZE, &dma_addr, GFP_ATOMIC);
        if (base_kva == NULL) {
                netif_err(tx_queue->efx, tx_err, tx_queue->efx->net_dev,
                          "Unable to allocate page for TSO headers\n");
                return -ENOMEM;
        }

        /* dma_alloc_coherent() allocates pages. */
        EFX_BUG_ON_PARANOID(dma_addr & (PAGE_SIZE - 1u));

        for (kva = base_kva; kva < base_kva + PAGE_SIZE; kva += TSOH_STD_SIZE) {
                tsoh = (struct efx_tso_header *)kva;
                tsoh->dma_addr = dma_addr + (TSOH_BUFFER(tsoh) - base_kva);
                tsoh->next = tx_queue->tso_headers_free;
                tx_queue->tso_headers_free = tsoh;
        }

        return 0;
}
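
/* With 4 KiB pages (an assumption; PAGE_SIZE varies by architecture)
 * each call strings PAGE_SIZE / TSOH_STD_SIZE = 32 standard-size
 * headers onto the free list from one coherent allocation.
 */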


/* Free up a TSO header, and all others in the same page. */
static void efx_tsoh_block_free(struct efx_tx_queue *tx_queue,
                                struct efx_tso_header *tsoh,
                                struct device *dma_dev)
{
        struct efx_tso_header **p;
        unsigned long base_kva;
        dma_addr_t base_dma;

        base_kva = (unsigned long)tsoh & PAGE_MASK;
        base_dma = tsoh->dma_addr & PAGE_MASK;

        p = &tx_queue->tso_headers_free;
        while (*p != NULL) {
                if (((unsigned long)*p & PAGE_MASK) == base_kva)
                        *p = (*p)->next;
                else
                        p = &(*p)->next;
        }

        dma_free_coherent(dma_dev, PAGE_SIZE, (void *)base_kva, base_dma);
}

static struct efx_tso_header *
efx_tsoh_heap_alloc(struct efx_tx_queue *tx_queue, size_t header_len)
{
        struct efx_tso_header *tsoh;

        tsoh = kmalloc(TSOH_SIZE(header_len), GFP_ATOMIC | GFP_DMA);
        if (unlikely(!tsoh))
                return NULL;

        tsoh->dma_addr = dma_map_single(&tx_queue->efx->pci_dev->dev,
                                        TSOH_BUFFER(tsoh), header_len,
                                        DMA_TO_DEVICE);
        if (unlikely(dma_mapping_error(&tx_queue->efx->pci_dev->dev,
                                       tsoh->dma_addr))) {
                kfree(tsoh);
                return NULL;
        }

        tsoh->unmap_len = header_len;
        return tsoh;
}

static void
efx_tsoh_heap_free(struct efx_tx_queue *tx_queue, struct efx_tso_header *tsoh)
{
        dma_unmap_single(&tx_queue->efx->pci_dev->dev,
                         tsoh->dma_addr, tsoh->unmap_len,
                         DMA_TO_DEVICE);
        kfree(tsoh);
}

/**
 * efx_tx_queue_insert - push descriptors onto the TX queue
 * @tx_queue: Efx TX queue
 * @dma_addr: DMA address of fragment
 * @len: Length of fragment
 * @final_buffer: The final buffer inserted into the queue
 *
 * Push descriptors onto the TX queue.  Return 0 on success or 1 if
 * @tx_queue is full.
 */
static int efx_tx_queue_insert(struct efx_tx_queue *tx_queue,
                               dma_addr_t dma_addr, unsigned len,
                               struct efx_tx_buffer **final_buffer)
{
        struct efx_tx_buffer *buffer;
        struct efx_nic *efx = tx_queue->efx;
        unsigned dma_len, fill_level, insert_ptr;
        int q_space;

        EFX_BUG_ON_PARANOID(len <= 0);

        fill_level = tx_queue->insert_count - tx_queue->old_read_count;
        /* -1 as there is no way to represent all descriptors used */
        q_space = efx->txq_entries - 1 - fill_level;

        while (1) {
                if (unlikely(q_space-- <= 0)) {
                        /* It might be that completions have happened
                         * since the xmit path last checked.  Update
                         * the xmit path's copy of read_count.
                         */
                        netif_tx_stop_queue(tx_queue->core_txq);
                        /* This memory barrier protects the change of
                         * queue state from the access of read_count. */
                        smp_mb();
                        tx_queue->old_read_count =
                                ACCESS_ONCE(tx_queue->read_count);
                        fill_level = (tx_queue->insert_count
                                      - tx_queue->old_read_count);
                        q_space = efx->txq_entries - 1 - fill_level;
                        if (unlikely(q_space-- <= 0)) {
                                *final_buffer = NULL;
                                return 1;
                        }
                        smp_mb();
                        netif_tx_start_queue(tx_queue->core_txq);
                }

                insert_ptr = tx_queue->insert_count & tx_queue->ptr_mask;
                buffer = &tx_queue->buffer[insert_ptr];
                ++tx_queue->insert_count;

                EFX_BUG_ON_PARANOID(tx_queue->insert_count -
                                    tx_queue->read_count >=
                                    efx->txq_entries);

                efx_tsoh_free(tx_queue, buffer);
                EFX_BUG_ON_PARANOID(buffer->len);
                EFX_BUG_ON_PARANOID(buffer->unmap_len);
                EFX_BUG_ON_PARANOID(buffer->flags);

                buffer->dma_addr = dma_addr;

                dma_len = efx_max_tx_len(efx, dma_addr);

                /* If there is enough space to send then do so */
                if (dma_len >= len)
                        break;

                buffer->len = dma_len;
                buffer->flags = EFX_TX_BUF_CONT;
                dma_addr += dma_len;
                len -= dma_len;
        }

        EFX_BUG_ON_PARANOID(!len);
        buffer->len = len;
        *final_buffer = buffer;
        return 0;
}
| 852 | |
| 853 | |
| 854 | /* |
| 855 | * Put a TSO header into the TX queue. |
| 856 | * |
| 857 | * This is special-cased because we know that it is small enough to fit in |
| 858 | * a single fragment, and we know it doesn't cross a page boundary. It |
| 859 | * also allows us to not worry about end-of-packet etc. |
| 860 | */ |
Ben Hutchings | 4d56606 | 2008-09-01 12:47:12 +0100 | [diff] [blame] | 861 | static void efx_tso_put_header(struct efx_tx_queue *tx_queue, |
| 862 | struct efx_tso_header *tsoh, unsigned len) |
Ben Hutchings | b9b39b6 | 2008-05-07 12:51:12 +0100 | [diff] [blame] | 863 | { |
| 864 | struct efx_tx_buffer *buffer; |
| 865 | |
Steve Hodgson | ecc910f | 2010-09-10 06:42:22 +0000 | [diff] [blame] | 866 | buffer = &tx_queue->buffer[tx_queue->insert_count & tx_queue->ptr_mask]; |
Ben Hutchings | b9b39b6 | 2008-05-07 12:51:12 +0100 | [diff] [blame] | 867 | efx_tsoh_free(tx_queue, buffer); |
| 868 | EFX_BUG_ON_PARANOID(buffer->len); |
| 869 | EFX_BUG_ON_PARANOID(buffer->unmap_len); |
Ben Hutchings | 7668ff9 | 2012-05-17 20:52:20 +0100 | [diff] [blame^] | 870 | EFX_BUG_ON_PARANOID(buffer->flags); |
Ben Hutchings | b9b39b6 | 2008-05-07 12:51:12 +0100 | [diff] [blame] | 871 | buffer->len = len; |
| 872 | buffer->dma_addr = tsoh->dma_addr; |
| 873 | buffer->tsoh = tsoh; |
Ben Hutchings | 7668ff9 | 2012-05-17 20:52:20 +0100 | [diff] [blame^] | 874 | buffer->flags = EFX_TX_BUF_TSOH | EFX_TX_BUF_CONT; |
Ben Hutchings | b9b39b6 | 2008-05-07 12:51:12 +0100 | [diff] [blame] | 875 | |
| 876 | ++tx_queue->insert_count; |
| 877 | } |
| 878 | |
| 879 | |
| 880 | /* Remove descriptors put into a tx_queue. */ |
| 881 | static void efx_enqueue_unwind(struct efx_tx_queue *tx_queue) |
| 882 | { |
| 883 | struct efx_tx_buffer *buffer; |
Ben Hutchings | cc12dac | 2008-09-01 12:46:43 +0100 | [diff] [blame] | 884 | dma_addr_t unmap_addr; |
Ben Hutchings | b9b39b6 | 2008-05-07 12:51:12 +0100 | [diff] [blame] | 885 | |
| 886 | /* Work backwards until we hit the original insert pointer value */ |
| 887 | while (tx_queue->insert_count != tx_queue->write_count) { |
| 888 | --tx_queue->insert_count; |
| 889 | buffer = &tx_queue->buffer[tx_queue->insert_count & |
Steve Hodgson | ecc910f | 2010-09-10 06:42:22 +0000 | [diff] [blame] | 890 | tx_queue->ptr_mask]; |
Ben Hutchings | b9b39b6 | 2008-05-07 12:51:12 +0100 | [diff] [blame] | 891 | efx_tsoh_free(tx_queue, buffer); |
Ben Hutchings | 7668ff9 | 2012-05-17 20:52:20 +0100 | [diff] [blame^] | 892 | EFX_BUG_ON_PARANOID(buffer->flags & EFX_TX_BUF_SKB); |
Ben Hutchings | b9b39b6 | 2008-05-07 12:51:12 +0100 | [diff] [blame] | 893 | if (buffer->unmap_len) { |
Ben Hutchings | cc12dac | 2008-09-01 12:46:43 +0100 | [diff] [blame] | 894 | unmap_addr = (buffer->dma_addr + buffer->len - |
| 895 | buffer->unmap_len); |
Ben Hutchings | 7668ff9 | 2012-05-17 20:52:20 +0100 | [diff] [blame^] | 896 | if (buffer->flags & EFX_TX_BUF_MAP_SINGLE) |
Ben Hutchings | 0e33d87 | 2012-05-17 17:46:55 +0100 | [diff] [blame] | 897 | dma_unmap_single(&tx_queue->efx->pci_dev->dev, |
Ben Hutchings | cc12dac | 2008-09-01 12:46:43 +0100 | [diff] [blame] | 898 | unmap_addr, buffer->unmap_len, |
Ben Hutchings | 0e33d87 | 2012-05-17 17:46:55 +0100 | [diff] [blame] | 899 | DMA_TO_DEVICE); |
Ben Hutchings | ecbd95c | 2008-09-01 12:46:40 +0100 | [diff] [blame] | 900 | else |
Ben Hutchings | 0e33d87 | 2012-05-17 17:46:55 +0100 | [diff] [blame] | 901 | dma_unmap_page(&tx_queue->efx->pci_dev->dev, |
Ben Hutchings | cc12dac | 2008-09-01 12:46:43 +0100 | [diff] [blame] | 902 | unmap_addr, buffer->unmap_len, |
Ben Hutchings | 0e33d87 | 2012-05-17 17:46:55 +0100 | [diff] [blame] | 903 | DMA_TO_DEVICE); |
Ben Hutchings | b9b39b6 | 2008-05-07 12:51:12 +0100 | [diff] [blame] | 904 | buffer->unmap_len = 0; |
| 905 | } |
Neil Turton | a7ebd27 | 2009-12-23 13:47:13 +0000 | [diff] [blame] | 906 | buffer->len = 0; |
Ben Hutchings | 7668ff9 | 2012-05-17 20:52:20 +0100 | [diff] [blame^] | 907 | buffer->flags = 0; |
Ben Hutchings | b9b39b6 | 2008-05-07 12:51:12 +0100 | [diff] [blame] | 908 | } |
| 909 | } |
| 910 | |
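The unwind loop above walks insert_count back down to write_count, relying on the descriptor ring size being a power of two so that `count & ptr_mask` always selects a valid slot. A minimal userspace sketch of that masking idiom, assuming a hypothetical 8-entry ring (RING_SIZE, insert_count and write_count here are illustrative values, not driver state):

#include <stdio.h>

#define RING_SIZE 8                     /* must be a power of two */
#define PTR_MASK  (RING_SIZE - 1)       /* low bits select the slot */

int main(void)
{
	unsigned int insert_count = 10; /* free-running counter, never masked */
	unsigned int write_count = 6;

	/* Work backwards, as efx_enqueue_unwind() does */
	while (insert_count != write_count) {
		--insert_count;
		printf("undo slot %u\n", insert_count & PTR_MASK);
	}
	return 0;
}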
| 911 | |
| 912 | /* Parse the SKB header and initialise state. */ |
Ben Hutchings | 4d56606 | 2008-09-01 12:47:12 +0100 | [diff] [blame] | 913 | static void tso_start(struct tso_state *st, const struct sk_buff *skb) |
Ben Hutchings | b9b39b6 | 2008-05-07 12:51:12 +0100 | [diff] [blame] | 914 | { |
| 915 | /* The combined size of the Ethernet/IP/TCP headers equals the TCP |
| 916 | * header size plus the offset of the TCP header within the packet. |
| 917 | */ |
Ben Hutchings | 23d9e60 | 2008-09-01 12:47:02 +0100 | [diff] [blame] | 918 | st->header_len = ((tcp_hdr(skb)->doff << 2u) |
| 919 | + PTR_DIFF(tcp_hdr(skb), skb->data)); |
| 920 | st->full_packet_size = st->header_len + skb_shinfo(skb)->gso_size; |
Ben Hutchings | b9b39b6 | 2008-05-07 12:51:12 +0100 | [diff] [blame] | 921 | |
Ben Hutchings | 738a8f4 | 2009-11-29 15:16:05 +0000 | [diff] [blame] | 922 | if (st->protocol == htons(ETH_P_IP)) |
| 923 | st->ipv4_id = ntohs(ip_hdr(skb)->id); |
| 924 | else |
| 925 | st->ipv4_id = 0; |
Ben Hutchings | b9b39b6 | 2008-05-07 12:51:12 +0100 | [diff] [blame] | 926 | st->seqnum = ntohl(tcp_hdr(skb)->seq); |
| 927 | |
| 928 | EFX_BUG_ON_PARANOID(tcp_hdr(skb)->urg); |
| 929 | EFX_BUG_ON_PARANOID(tcp_hdr(skb)->syn); |
| 930 | EFX_BUG_ON_PARANOID(tcp_hdr(skb)->rst); |
| 931 | |
Ben Hutchings | 23d9e60 | 2008-09-01 12:47:02 +0100 | [diff] [blame] | 932 | st->out_len = skb->len - st->header_len; |
| 933 | st->unmap_len = 0; |
Ben Hutchings | 7668ff9 | 2012-05-17 20:52:20 +0100 | [diff] [blame^] | 934 | st->dma_flags = 0; |
Ben Hutchings | b9b39b6 | 2008-05-07 12:51:12 +0100 | [diff] [blame] | 935 | } |
| 936 | |
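As a worked example of the arithmetic in tso_start(): for an Ethernet + IPv4 + TCP frame with no options, the TCP header begins 14 + 20 = 34 bytes into the packet and doff is 5 32-bit words, so header_len is (5 << 2) + 34 = 54 and, with a typical gso_size of 1448, full_packet_size is 1502. A standalone sketch of the same sums (all constants are illustrative, not read from a real skb):

#include <stdio.h>

int main(void)
{
	unsigned int tcp_offset = 14 + 20; /* Ethernet + IPv4 header bytes */
	unsigned int doff = 5;             /* TCP header length in 32-bit words */
	unsigned int gso_size = 1448;      /* common MSS for a 1500-byte MTU */

	unsigned int header_len = (doff << 2) + tcp_offset;
	printf("header_len = %u\n", header_len);                  /* 54 */
	printf("full_packet_size = %u\n", header_len + gso_size); /* 1502 */
	return 0;
}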
Ben Hutchings | 4d56606 | 2008-09-01 12:47:12 +0100 | [diff] [blame] | 937 | static int tso_get_fragment(struct tso_state *st, struct efx_nic *efx, |
| 938 | skb_frag_t *frag) |
Ben Hutchings | b9b39b6 | 2008-05-07 12:51:12 +0100 | [diff] [blame] | 939 | { |
Ian Campbell | 4a22c4c | 2011-09-21 21:53:16 +0000 | [diff] [blame] | 940 | st->unmap_addr = skb_frag_dma_map(&efx->pci_dev->dev, frag, 0, |
Eric Dumazet | 9e903e0 | 2011-10-18 21:00:24 +0000 | [diff] [blame] | 941 | skb_frag_size(frag), DMA_TO_DEVICE); |
Ian Campbell | 5d6bcdf | 2011-10-06 11:10:48 +0100 | [diff] [blame] | 942 | if (likely(!dma_mapping_error(&efx->pci_dev->dev, st->unmap_addr))) { |
Ben Hutchings | 7668ff9 | 2012-05-17 20:52:20 +0100 | [diff] [blame^] | 943 | st->dma_flags = 0; |
Eric Dumazet | 9e903e0 | 2011-10-18 21:00:24 +0000 | [diff] [blame] | 944 | st->unmap_len = skb_frag_size(frag); |
| 945 | st->in_len = skb_frag_size(frag); |
Ben Hutchings | 23d9e60 | 2008-09-01 12:47:02 +0100 | [diff] [blame] | 946 | st->dma_addr = st->unmap_addr; |
Ben Hutchings | ecbd95c | 2008-09-01 12:46:40 +0100 | [diff] [blame] | 947 | return 0; |
| 948 | } |
| 949 | return -ENOMEM; |
| 950 | } |
| 951 | |
Ben Hutchings | 4d56606 | 2008-09-01 12:47:12 +0100 | [diff] [blame] | 952 | static int tso_get_head_fragment(struct tso_state *st, struct efx_nic *efx, |
| 953 | const struct sk_buff *skb) |
Ben Hutchings | ecbd95c | 2008-09-01 12:46:40 +0100 | [diff] [blame] | 954 | { |
Ben Hutchings | 23d9e60 | 2008-09-01 12:47:02 +0100 | [diff] [blame] | 955 | int hl = st->header_len; |
Ben Hutchings | ecbd95c | 2008-09-01 12:46:40 +0100 | [diff] [blame] | 956 | int len = skb_headlen(skb) - hl; |
| 957 | |
Ben Hutchings | 0e33d87 | 2012-05-17 17:46:55 +0100 | [diff] [blame] | 958 | st->unmap_addr = dma_map_single(&efx->pci_dev->dev, skb->data + hl, |
| 959 | len, DMA_TO_DEVICE); |
| 960 | if (likely(!dma_mapping_error(&efx->pci_dev->dev, st->unmap_addr))) { |
Ben Hutchings | 7668ff9 | 2012-05-17 20:52:20 +0100 | [diff] [blame^] | 961 | st->dma_flags = EFX_TX_BUF_MAP_SINGLE; |
Ben Hutchings | 23d9e60 | 2008-09-01 12:47:02 +0100 | [diff] [blame] | 962 | st->unmap_len = len; |
| 963 | st->in_len = len; |
| 964 | st->dma_addr = st->unmap_addr; |
Ben Hutchings | b9b39b6 | 2008-05-07 12:51:12 +0100 | [diff] [blame] | 965 | return 0; |
| 966 | } |
| 967 | return -ENOMEM; |
| 968 | } |
| 969 | |
| 970 | |
| 971 | /** |
| 972 | * tso_fill_packet_with_fragment - form descriptors for the current fragment |
| 973 | * @tx_queue: Efx TX queue |
| 974 | * @skb: Socket buffer |
| 975 | * @st: TSO state |
| 976 | * |
| 977 | * Form descriptors for the current fragment, until we reach the end |
| 978 | * of the fragment or the end of the packet. Return 0 on success, or |
| 979 | * 1 if there is not enough space in @tx_queue. |
| 980 | */ |
Ben Hutchings | 4d56606 | 2008-09-01 12:47:12 +0100 | [diff] [blame] | 981 | static int tso_fill_packet_with_fragment(struct efx_tx_queue *tx_queue, |
| 982 | const struct sk_buff *skb, |
| 983 | struct tso_state *st) |
Ben Hutchings | b9b39b6 | 2008-05-07 12:51:12 +0100 | [diff] [blame] | 984 | { |
Ben Hutchings | ecbd95c | 2008-09-01 12:46:40 +0100 | [diff] [blame] | 985 | struct efx_tx_buffer *buffer; |
Ben Hutchings | 7668ff9 | 2012-05-17 20:52:20 +0100 | [diff] [blame^] | 986 | int n, rc; |
Ben Hutchings | b9b39b6 | 2008-05-07 12:51:12 +0100 | [diff] [blame] | 987 | |
Ben Hutchings | 23d9e60 | 2008-09-01 12:47:02 +0100 | [diff] [blame] | 988 | if (st->in_len == 0) |
Ben Hutchings | b9b39b6 | 2008-05-07 12:51:12 +0100 | [diff] [blame] | 989 | return 0; |
| 990 | if (st->packet_space == 0) |
| 991 | return 0; |
| 992 | |
Ben Hutchings | 23d9e60 | 2008-09-01 12:47:02 +0100 | [diff] [blame] | 993 | EFX_BUG_ON_PARANOID(st->in_len <= 0); |
Ben Hutchings | b9b39b6 | 2008-05-07 12:51:12 +0100 | [diff] [blame] | 994 | EFX_BUG_ON_PARANOID(st->packet_space <= 0); |
| 995 | |
Ben Hutchings | 23d9e60 | 2008-09-01 12:47:02 +0100 | [diff] [blame] | 996 | n = min(st->in_len, st->packet_space); |
Ben Hutchings | b9b39b6 | 2008-05-07 12:51:12 +0100 | [diff] [blame] | 997 | |
| 998 | st->packet_space -= n; |
Ben Hutchings | 23d9e60 | 2008-09-01 12:47:02 +0100 | [diff] [blame] | 999 | st->out_len -= n; |
| 1000 | st->in_len -= n; |
Ben Hutchings | b9b39b6 | 2008-05-07 12:51:12 +0100 | [diff] [blame] | 1001 | |
Ben Hutchings | 23d9e60 | 2008-09-01 12:47:02 +0100 | [diff] [blame] | 1002 | rc = efx_tx_queue_insert(tx_queue, st->dma_addr, n, &buffer); |
Ben Hutchings | ecbd95c | 2008-09-01 12:46:40 +0100 | [diff] [blame] | 1003 | if (likely(rc == 0)) { |
Ben Hutchings | 7668ff9 | 2012-05-17 20:52:20 +0100 | [diff] [blame^] | 1004 | if (st->out_len == 0) { |
Ben Hutchings | ecbd95c | 2008-09-01 12:46:40 +0100 | [diff] [blame] | 1005 | /* Transfer ownership of the skb */ |
| 1006 | buffer->skb = skb; |
Ben Hutchings | 7668ff9 | 2012-05-17 20:52:20 +0100 | [diff] [blame^] | 1007 | buffer->flags = EFX_TX_BUF_SKB; |
| 1008 | } else if (st->packet_space != 0) { |
| 1009 | buffer->flags = EFX_TX_BUF_CONT; |
| 1010 | } |
Ben Hutchings | ecbd95c | 2008-09-01 12:46:40 +0100 | [diff] [blame] | 1011 | |
Ben Hutchings | 23d9e60 | 2008-09-01 12:47:02 +0100 | [diff] [blame] | 1012 | if (st->in_len == 0) { |
Ben Hutchings | 0e33d87 | 2012-05-17 17:46:55 +0100 | [diff] [blame] | 1013 | /* Transfer ownership of the DMA mapping */ |
Ben Hutchings | 23d9e60 | 2008-09-01 12:47:02 +0100 | [diff] [blame] | 1014 | buffer->unmap_len = st->unmap_len; |
Ben Hutchings | 7668ff9 | 2012-05-17 20:52:20 +0100 | [diff] [blame^] | 1015 | buffer->flags |= st->dma_flags; |
Ben Hutchings | 23d9e60 | 2008-09-01 12:47:02 +0100 | [diff] [blame] | 1016 | st->unmap_len = 0; |
Ben Hutchings | ecbd95c | 2008-09-01 12:46:40 +0100 | [diff] [blame] | 1017 | } |
| 1018 | } |
Ben Hutchings | b9b39b6 | 2008-05-07 12:51:12 +0100 | [diff] [blame] | 1019 | |
Ben Hutchings | 23d9e60 | 2008-09-01 12:47:02 +0100 | [diff] [blame] | 1020 | st->dma_addr += n; |
Ben Hutchings | b9b39b6 | 2008-05-07 12:51:12 +0100 | [diff] [blame] | 1021 | return rc; |
| 1022 | } |
| 1023 | |
| 1024 | |
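The heart of the loop above is n = min(st->in_len, st->packet_space): each call consumes whichever runs out first, the current fragment or the current output packet. A userspace sketch of that splitting, with hypothetical fragment lengths and MSS (frag_lens and mss are made up for illustration):

#include <stdio.h>

int main(void)
{
	unsigned int frag_lens[] = { 700, 3000, 500 }; /* hypothetical fragments */
	unsigned int mss = 1448;                       /* packet_space per segment */
	unsigned int packet_space = mss, pkt = 0;

	for (unsigned int i = 0; i < sizeof(frag_lens) / sizeof(frag_lens[0]); i++) {
		unsigned int in_len = frag_lens[i];
		while (in_len) {
			unsigned int n = in_len < packet_space ? in_len : packet_space;
			printf("pkt %u: %u bytes from frag %u\n", pkt, n, i);
			in_len -= n;
			packet_space -= n;
			if (packet_space == 0) { /* segment full: start the next */
				packet_space = mss;
				pkt++;
			}
		}
	}
	return 0;
}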
| 1025 | /** |
| 1026 | * tso_start_new_packet - generate a new header and prepare for the new packet |
| 1027 | * @tx_queue: Efx TX queue |
| 1028 | * @skb: Socket buffer |
| 1029 | * @st: TSO state |
| 1030 | * |
| 1031 | * Generate a new header and prepare for the new packet. Return 0 on |
| 1032 | * success, or -1 if we failed to allocate the header. |
| 1033 | */ |
Ben Hutchings | 4d56606 | 2008-09-01 12:47:12 +0100 | [diff] [blame] | 1034 | static int tso_start_new_packet(struct efx_tx_queue *tx_queue, |
| 1035 | const struct sk_buff *skb, |
| 1036 | struct tso_state *st) |
Ben Hutchings | b9b39b6 | 2008-05-07 12:51:12 +0100 | [diff] [blame] | 1037 | { |
| 1038 | struct efx_tso_header *tsoh; |
Ben Hutchings | b9b39b6 | 2008-05-07 12:51:12 +0100 | [diff] [blame] | 1039 | struct tcphdr *tsoh_th; |
| 1040 | unsigned ip_length; |
| 1041 | u8 *header; |
| 1042 | |
| 1043 | /* Allocate a DMA-mapped header buffer. */ |
Ben Hutchings | 23d9e60 | 2008-09-01 12:47:02 +0100 | [diff] [blame] | 1044 | if (likely(TSOH_SIZE(st->header_len) <= TSOH_STD_SIZE)) { |
Ben Hutchings | b347564 | 2008-05-16 21:15:49 +0100 | [diff] [blame] | 1045 | if (tx_queue->tso_headers_free == NULL) { |
Ben Hutchings | b9b39b6 | 2008-05-07 12:51:12 +0100 | [diff] [blame] | 1046 | if (efx_tsoh_block_alloc(tx_queue)) |
| 1047 | return -1; |
Ben Hutchings | b347564 | 2008-05-16 21:15:49 +0100 | [diff] [blame] | 1048 | } |
Ben Hutchings | b9b39b6 | 2008-05-07 12:51:12 +0100 | [diff] [blame] | 1049 | EFX_BUG_ON_PARANOID(!tx_queue->tso_headers_free); |
| 1050 | tsoh = tx_queue->tso_headers_free; |
| 1051 | tx_queue->tso_headers_free = tsoh->next; |
| 1052 | tsoh->unmap_len = 0; |
| 1053 | } else { |
| 1054 | tx_queue->tso_long_headers++; |
Ben Hutchings | 23d9e60 | 2008-09-01 12:47:02 +0100 | [diff] [blame] | 1055 | tsoh = efx_tsoh_heap_alloc(tx_queue, st->header_len); |
Ben Hutchings | b9b39b6 | 2008-05-07 12:51:12 +0100 | [diff] [blame] | 1056 | if (unlikely(!tsoh)) |
| 1057 | return -1; |
| 1058 | } |
| 1059 | |
| 1060 | header = TSOH_BUFFER(tsoh); |
| 1061 | tsoh_th = (struct tcphdr *)(header + SKB_TCP_OFF(skb)); |
Ben Hutchings | b9b39b6 | 2008-05-07 12:51:12 +0100 | [diff] [blame] | 1062 | |
| 1063 | /* Copy and update the headers. */ |
Ben Hutchings | 23d9e60 | 2008-09-01 12:47:02 +0100 | [diff] [blame] | 1064 | memcpy(header, skb->data, st->header_len); |
Ben Hutchings | b9b39b6 | 2008-05-07 12:51:12 +0100 | [diff] [blame] | 1065 | |
| 1066 | tsoh_th->seq = htonl(st->seqnum); |
| 1067 | st->seqnum += skb_shinfo(skb)->gso_size; |
Ben Hutchings | 23d9e60 | 2008-09-01 12:47:02 +0100 | [diff] [blame] | 1068 | if (st->out_len > skb_shinfo(skb)->gso_size) { |
Ben Hutchings | b9b39b6 | 2008-05-07 12:51:12 +0100 | [diff] [blame] | 1069 | /* This packet will not finish the TSO burst. */ |
Ben Hutchings | 23d9e60 | 2008-09-01 12:47:02 +0100 | [diff] [blame] | 1070 | ip_length = st->full_packet_size - ETH_HDR_LEN(skb); |
Ben Hutchings | b9b39b6 | 2008-05-07 12:51:12 +0100 | [diff] [blame] | 1071 | tsoh_th->fin = 0; |
| 1072 | tsoh_th->psh = 0; |
| 1073 | } else { |
| 1074 | /* This packet will be the last in the TSO burst. */ |
Ben Hutchings | 23d9e60 | 2008-09-01 12:47:02 +0100 | [diff] [blame] | 1075 | ip_length = st->header_len - ETH_HDR_LEN(skb) + st->out_len; |
Ben Hutchings | b9b39b6 | 2008-05-07 12:51:12 +0100 | [diff] [blame] | 1076 | tsoh_th->fin = tcp_hdr(skb)->fin; |
| 1077 | tsoh_th->psh = tcp_hdr(skb)->psh; |
| 1078 | } |
Ben Hutchings | b9b39b6 | 2008-05-07 12:51:12 +0100 | [diff] [blame] | 1079 | |
Ben Hutchings | 738a8f4 | 2009-11-29 15:16:05 +0000 | [diff] [blame] | 1080 | if (st->protocol == htons(ETH_P_IP)) { |
| 1081 | struct iphdr *tsoh_iph = |
| 1082 | (struct iphdr *)(header + SKB_IPV4_OFF(skb)); |
| 1083 | |
| 1084 | tsoh_iph->tot_len = htons(ip_length); |
| 1085 | |
| 1086 | /* Linux leaves suitable gaps in the IP ID space for us to fill. */ |
| 1087 | tsoh_iph->id = htons(st->ipv4_id); |
| 1088 | st->ipv4_id++; |
| 1089 | } else { |
| 1090 | struct ipv6hdr *tsoh_iph = |
| 1091 | (struct ipv6hdr *)(header + SKB_IPV6_OFF(skb)); |
| 1092 | |
| 1093 | tsoh_iph->payload_len = htons(ip_length - sizeof(*tsoh_iph)); |
| 1094 | } |
Ben Hutchings | b9b39b6 | 2008-05-07 12:51:12 +0100 | [diff] [blame] | 1095 | |
| 1096 | st->packet_space = skb_shinfo(skb)->gso_size; |
| 1097 | ++tx_queue->tso_packets; |
| 1098 | |
| 1099 | /* Form a descriptor for this header. */ |
Ben Hutchings | 23d9e60 | 2008-09-01 12:47:02 +0100 | [diff] [blame] | 1100 | efx_tso_put_header(tx_queue, tsoh, st->header_len); |
Ben Hutchings | b9b39b6 | 2008-05-07 12:51:12 +0100 | [diff] [blame] | 1101 | |
| 1102 | return 0; |
| 1103 | } |
| 1104 | |
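Each header generated above advances the TCP sequence number by gso_size and, for IPv4, the IP ID by one, so the receiver sees the burst as ordinary back-to-back packets. A compact sketch of that per-segment bookkeeping (the starting values are illustrative; the real ID field is 16 bits and wraps modulo 65536):

#include <stdio.h>

int main(void)
{
	unsigned int seqnum = 1000; /* from the original TCP header */
	unsigned int ipv4_id = 42;  /* from the original IPv4 header */
	unsigned int gso_size = 1448;

	for (int segment = 0; segment < 3; segment++) {
		printf("segment %d: seq=%u id=%u\n", segment, seqnum, ipv4_id);
		seqnum += gso_size; /* next segment continues the byte stream */
		ipv4_id++;          /* fills the gap Linux leaves in the ID space */
	}
	return 0;
}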
| 1105 | |
| 1106 | /** |
| 1107 | * efx_enqueue_skb_tso - segment and transmit a TSO socket buffer |
| 1108 | * @tx_queue: Efx TX queue |
| 1109 | * @skb: Socket buffer |
| 1110 | * |
| 1111 | * Context: You must hold netif_tx_lock() to call this function. |
| 1112 | * |
| 1113 | * Add socket buffer @skb to @tx_queue, performing TSO. In all |
| 1114 | * cases @skb is consumed. Return %NETDEV_TX_OK on success, or |
| 1115 | * %NETDEV_TX_BUSY if @skb could not be enqueued. |
| 1116 | */ |
| 1117 | static int efx_enqueue_skb_tso(struct efx_tx_queue *tx_queue, |
Ben Hutchings | 740847d | 2008-09-01 12:48:23 +0100 | [diff] [blame] | 1118 | struct sk_buff *skb) |
Ben Hutchings | b9b39b6 | 2008-05-07 12:51:12 +0100 | [diff] [blame] | 1119 | { |
Ben Hutchings | ecbd95c | 2008-09-01 12:46:40 +0100 | [diff] [blame] | 1120 | struct efx_nic *efx = tx_queue->efx; |
Ben Hutchings | b9b39b6 | 2008-05-07 12:51:12 +0100 | [diff] [blame] | 1121 | int frag_i, rc, rc2 = NETDEV_TX_OK; |
| 1122 | struct tso_state state; |
Ben Hutchings | b9b39b6 | 2008-05-07 12:51:12 +0100 | [diff] [blame] | 1123 | |
Ben Hutchings | 738a8f4 | 2009-11-29 15:16:05 +0000 | [diff] [blame] | 1124 | /* Find the packet protocol and sanity-check it */ |
| 1125 | state.protocol = efx_tso_check_protocol(skb); |
Ben Hutchings | b9b39b6 | 2008-05-07 12:51:12 +0100 | [diff] [blame] | 1126 | |
| 1127 | EFX_BUG_ON_PARANOID(tx_queue->write_count != tx_queue->insert_count); |
| 1128 | |
| 1129 | tso_start(&state, skb); |
| 1130 | |
| 1131 | /* Assume that skb header area contains exactly the headers, and |
| 1132 | * all payload is in the frag list. |
| 1133 | */ |
Ben Hutchings | 23d9e60 | 2008-09-01 12:47:02 +0100 | [diff] [blame] | 1134 | if (skb_headlen(skb) == state.header_len) { |
Ben Hutchings | b9b39b6 | 2008-05-07 12:51:12 +0100 | [diff] [blame] | 1135 | /* Grab the first payload fragment. */ |
| 1136 | EFX_BUG_ON_PARANOID(skb_shinfo(skb)->nr_frags < 1); |
| 1137 | frag_i = 0; |
Ben Hutchings | ecbd95c | 2008-09-01 12:46:40 +0100 | [diff] [blame] | 1138 | rc = tso_get_fragment(&state, efx, |
| 1139 | skb_shinfo(skb)->frags + frag_i); |
Ben Hutchings | b9b39b6 | 2008-05-07 12:51:12 +0100 | [diff] [blame] | 1140 | if (rc) |
| 1141 | goto mem_err; |
| 1142 | } else { |
Ben Hutchings | ecbd95c | 2008-09-01 12:46:40 +0100 | [diff] [blame] | 1143 | rc = tso_get_head_fragment(&state, efx, skb); |
Ben Hutchings | b9b39b6 | 2008-05-07 12:51:12 +0100 | [diff] [blame] | 1144 | if (rc) |
| 1145 | goto mem_err; |
| 1146 | frag_i = -1; |
| 1147 | } |
| 1148 | |
| 1149 | if (tso_start_new_packet(tx_queue, skb, &state) < 0) |
| 1150 | goto mem_err; |
| 1151 | |
| 1152 | while (1) { |
| 1153 | rc = tso_fill_packet_with_fragment(tx_queue, skb, &state); |
Ben Hutchings | c04bfc6 | 2010-12-10 01:24:16 +0000 | [diff] [blame] | 1154 | if (unlikely(rc)) { |
| 1155 | rc2 = NETDEV_TX_BUSY; |
| 1156 | goto unwind; |
| 1157 | } |
Ben Hutchings | b9b39b6 | 2008-05-07 12:51:12 +0100 | [diff] [blame] | 1158 | |
| 1159 | /* Move on to the next fragment? */ |
Ben Hutchings | 23d9e60 | 2008-09-01 12:47:02 +0100 | [diff] [blame] | 1160 | if (state.in_len == 0) { |
Ben Hutchings | b9b39b6 | 2008-05-07 12:51:12 +0100 | [diff] [blame] | 1161 | if (++frag_i >= skb_shinfo(skb)->nr_frags) |
| 1162 | /* End of payload reached. */ |
| 1163 | break; |
Ben Hutchings | ecbd95c | 2008-09-01 12:46:40 +0100 | [diff] [blame] | 1164 | rc = tso_get_fragment(&state, efx, |
| 1165 | skb_shinfo(skb)->frags + frag_i); |
Ben Hutchings | b9b39b6 | 2008-05-07 12:51:12 +0100 | [diff] [blame] | 1166 | if (rc) |
| 1167 | goto mem_err; |
| 1168 | } |
| 1169 | |
| 1170 | /* Start a new packet? */ |
| 1171 | if (state.packet_space == 0 && |
| 1172 | tso_start_new_packet(tx_queue, skb, &state) < 0) |
| 1173 | goto mem_err; |
| 1174 | } |
| 1175 | |
Eric Dumazet | 449fa02 | 2011-11-30 17:12:27 -0500 | [diff] [blame] | 1176 | netdev_tx_sent_queue(tx_queue->core_txq, skb->len); |
| 1177 | |
Ben Hutchings | b9b39b6 | 2008-05-07 12:51:12 +0100 | [diff] [blame] | 1178 | /* Pass off to hardware */ |
Ben Hutchings | 152b6a6 | 2009-11-29 03:43:56 +0000 | [diff] [blame] | 1179 | efx_nic_push_buffers(tx_queue); |
Ben Hutchings | b9b39b6 | 2008-05-07 12:51:12 +0100 | [diff] [blame] | 1180 | |
| 1181 | tx_queue->tso_bursts++; |
| 1182 | return NETDEV_TX_OK; |
| 1183 | |
| 1184 | mem_err: |
Ben Hutchings | 62776d0 | 2010-06-23 11:30:07 +0000 | [diff] [blame] | 1185 | netif_err(efx, tx_err, efx->net_dev, |
Ben Hutchings | 0e33d87 | 2012-05-17 17:46:55 +0100 | [diff] [blame] | 1186 | "Out of memory for TSO headers, or DMA mapping error\n"); |
Ben Hutchings | 9bc183d | 2009-11-23 16:06:47 +0000 | [diff] [blame] | 1187 | dev_kfree_skb_any(skb); |
Ben Hutchings | b9b39b6 | 2008-05-07 12:51:12 +0100 | [diff] [blame] | 1188 | |
| 1189 | unwind: |
Ben Hutchings | 5988b63 | 2008-09-01 12:46:36 +0100 | [diff] [blame] | 1190 | /* Free the DMA mapping we were in the process of writing out */ |
Ben Hutchings | 23d9e60 | 2008-09-01 12:47:02 +0100 | [diff] [blame] | 1191 | if (state.unmap_len) { |
Ben Hutchings | 7668ff9 | 2012-05-17 20:52:20 +0100 | [diff] [blame^] | 1192 | if (state.dma_flags & EFX_TX_BUF_MAP_SINGLE) |
Ben Hutchings | 0e33d87 | 2012-05-17 17:46:55 +0100 | [diff] [blame] | 1193 | dma_unmap_single(&efx->pci_dev->dev, state.unmap_addr, |
| 1194 | state.unmap_len, DMA_TO_DEVICE); |
Ben Hutchings | ecbd95c | 2008-09-01 12:46:40 +0100 | [diff] [blame] | 1195 | else |
Ben Hutchings | 0e33d87 | 2012-05-17 17:46:55 +0100 | [diff] [blame] | 1196 | dma_unmap_page(&efx->pci_dev->dev, state.unmap_addr, |
| 1197 | state.unmap_len, DMA_TO_DEVICE); |
Ben Hutchings | ecbd95c | 2008-09-01 12:46:40 +0100 | [diff] [blame] | 1198 | } |
Ben Hutchings | 5988b63 | 2008-09-01 12:46:36 +0100 | [diff] [blame] | 1199 | |
Ben Hutchings | b9b39b6 | 2008-05-07 12:51:12 +0100 | [diff] [blame] | 1200 | efx_enqueue_unwind(tx_queue); |
| 1201 | return rc2; |
| 1202 | } |
| 1203 | |
| 1204 | |
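The mem_err/unwind labels above follow the usual kernel idiom of branching to a single cleanup path that releases whatever was committed before the failure. A minimal userspace sketch of that goto-unwind pattern, with malloc standing in for the driver's DMA mappings (the allocations are purely illustrative):

#include <stdlib.h>

int main(void)
{
	char *a, *b;

	a = malloc(64);
	if (!a)
		return 1;    /* nothing committed yet */
	b = malloc(64);
	if (!b)
		goto unwind; /* undo the partial state */

	/* ... the success path would use a and b here ... */
	free(b);
	free(a);
	return 0;

unwind:
	free(a);             /* release what was already committed */
	return 1;
}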
| 1205 | /* |
| 1206 | * Free up all TSO data structures associated with tx_queue. This |
| 1207 | * routine should be called only when the tx_queue is empty and |
| 1208 | * will no longer be used. |
| 1209 | */ |
| 1210 | static void efx_fini_tso(struct efx_tx_queue *tx_queue) |
| 1211 | { |
| 1212 | unsigned i; |
| 1213 | |
Ben Hutchings | b347564 | 2008-05-16 21:15:49 +0100 | [diff] [blame] | 1214 | if (tx_queue->buffer) { |
Steve Hodgson | ecc910f | 2010-09-10 06:42:22 +0000 | [diff] [blame] | 1215 | for (i = 0; i <= tx_queue->ptr_mask; ++i) |
Ben Hutchings | b9b39b6 | 2008-05-07 12:51:12 +0100 | [diff] [blame] | 1216 | efx_tsoh_free(tx_queue, &tx_queue->buffer[i]); |
Ben Hutchings | b347564 | 2008-05-16 21:15:49 +0100 | [diff] [blame] | 1217 | } |
Ben Hutchings | b9b39b6 | 2008-05-07 12:51:12 +0100 | [diff] [blame] | 1218 | |
| 1219 | while (tx_queue->tso_headers_free != NULL) |
| 1220 | efx_tsoh_block_free(tx_queue, tx_queue->tso_headers_free, |
Ben Hutchings | 0e33d87 | 2012-05-17 17:46:55 +0100 | [diff] [blame] | 1221 | &tx_queue->efx->pci_dev->dev); |
Ben Hutchings | b9b39b6 | 2008-05-07 12:51:12 +0100 | [diff] [blame] | 1222 | } |
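efx_fini_tso() drains tso_headers_free by repeatedly unlinking the head of a singly linked free list. The same pop-until-empty pattern as a standalone sketch (struct node and its list are hypothetical, not driver types):

#include <stdlib.h>

struct node {
	struct node *next;
};

int main(void)
{
	struct node *head = NULL;

	/* Build a three-element free list by pushing at the head */
	for (int i = 0; i < 3; i++) {
		struct node *n = malloc(sizeof(*n));
		if (!n)
			break;
		n->next = head;
		head = n;
	}

	/* Pop until empty, mirroring the while loop in efx_fini_tso() */
	while (head != NULL) {
		struct node *n = head;
		head = n->next;
		free(n);
	}
	return 0;
}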