// SPDX-License-Identifier: GPL-2.0
/* Copyright (c) 2018, Intel Corporation. */

/* The driver transmit and receive code */

#include <linux/prefetch.h>
#include <linux/mm.h>
#include "ice.h"

/**
 * ice_unmap_and_free_tx_buf - Release a Tx buffer
 * @ring: the ring that owns the buffer
 * @tx_buf: the buffer to free
 */
static void
ice_unmap_and_free_tx_buf(struct ice_ring *ring, struct ice_tx_buf *tx_buf)
{
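	/* The buffer at the head of a packet owns the skb and is unmapped
	 * with dma_unmap_single(); buffers without an skb hold page
	 * fragments and are unmapped with dma_unmap_page().
	 */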
	if (tx_buf->skb) {
		dev_kfree_skb_any(tx_buf->skb);
		if (dma_unmap_len(tx_buf, len))
			dma_unmap_single(ring->dev,
					 dma_unmap_addr(tx_buf, dma),
					 dma_unmap_len(tx_buf, len),
					 DMA_TO_DEVICE);
	} else if (dma_unmap_len(tx_buf, len)) {
		dma_unmap_page(ring->dev,
			       dma_unmap_addr(tx_buf, dma),
			       dma_unmap_len(tx_buf, len),
			       DMA_TO_DEVICE);
	}

	tx_buf->next_to_watch = NULL;
	tx_buf->skb = NULL;
	dma_unmap_len_set(tx_buf, len, 0);
	/* tx_buf must be completely set up in the transmit path */
}

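/**
 * txring_txq - Convert an ice_ring to its matching netdev Tx queue
 * @ring: Tx ring whose netdev queue is wanted
 */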
static struct netdev_queue *txring_txq(const struct ice_ring *ring)
{
	return netdev_get_tx_queue(ring->netdev, ring->q_index);
}

/**
 * ice_clean_tx_ring - Free any empty Tx buffers
 * @tx_ring: ring to be cleaned
 */
void ice_clean_tx_ring(struct ice_ring *tx_ring)
{
	unsigned long size;
	u16 i;

	/* ring already cleared, nothing to do */
	if (!tx_ring->tx_buf)
		return;

	/* Free all the Tx ring sk_buffs */
	for (i = 0; i < tx_ring->count; i++)
		ice_unmap_and_free_tx_buf(tx_ring, &tx_ring->tx_buf[i]);

	size = sizeof(struct ice_tx_buf) * tx_ring->count;
	memset(tx_ring->tx_buf, 0, size);

	/* Zero out the descriptor ring */
	memset(tx_ring->desc, 0, tx_ring->size);

	tx_ring->next_to_use = 0;
	tx_ring->next_to_clean = 0;

	if (!tx_ring->netdev)
		return;

	/* cleanup Tx queue statistics */
	netdev_tx_reset_queue(txring_txq(tx_ring));
}

/**
 * ice_free_tx_ring - Free Tx resources per queue
 * @tx_ring: Tx descriptor ring for a specific queue
 *
 * Free all transmit software resources
 */
void ice_free_tx_ring(struct ice_ring *tx_ring)
{
	ice_clean_tx_ring(tx_ring);
	devm_kfree(tx_ring->dev, tx_ring->tx_buf);
	tx_ring->tx_buf = NULL;

	if (tx_ring->desc) {
		dmam_free_coherent(tx_ring->dev, tx_ring->size,
				   tx_ring->desc, tx_ring->dma);
		tx_ring->desc = NULL;
	}
}

/**
 * ice_setup_tx_ring - Allocate the Tx descriptors
 * @tx_ring: the Tx ring to set up
 *
 * Return 0 on success, negative on error
 */
int ice_setup_tx_ring(struct ice_ring *tx_ring)
{
	struct device *dev = tx_ring->dev;
	int bi_size;

	if (!dev)
		return -ENOMEM;

	/* warn if we are about to overwrite the pointer */
	WARN_ON(tx_ring->tx_buf);
	bi_size = sizeof(struct ice_tx_buf) * tx_ring->count;
	tx_ring->tx_buf = devm_kzalloc(dev, bi_size, GFP_KERNEL);
	if (!tx_ring->tx_buf)
		return -ENOMEM;

	/* round up to nearest 4K */
	tx_ring->size = tx_ring->count * sizeof(struct ice_tx_desc);
	tx_ring->size = ALIGN(tx_ring->size, 4096);
	tx_ring->desc = dmam_alloc_coherent(dev, tx_ring->size, &tx_ring->dma,
					    GFP_KERNEL);
	if (!tx_ring->desc) {
		dev_err(dev, "Unable to allocate memory for the Tx descriptor ring, size=%d\n",
			tx_ring->size);
		goto err;
	}

	tx_ring->next_to_use = 0;
	tx_ring->next_to_clean = 0;
	return 0;

err:
	devm_kfree(dev, tx_ring->tx_buf);
	tx_ring->tx_buf = NULL;
	return -ENOMEM;
}

/**
 * ice_clean_rx_ring - Free Rx buffers
 * @rx_ring: ring to be cleaned
 */
void ice_clean_rx_ring(struct ice_ring *rx_ring)
{
	struct device *dev = rx_ring->dev;
	unsigned long size;
	u16 i;

	/* ring already cleared, nothing to do */
	if (!rx_ring->rx_buf)
		return;

	/* Free all the Rx ring sk_buffs */
	for (i = 0; i < rx_ring->count; i++) {
		struct ice_rx_buf *rx_buf = &rx_ring->rx_buf[i];

		if (rx_buf->skb) {
			dev_kfree_skb(rx_buf->skb);
			rx_buf->skb = NULL;
		}
		if (!rx_buf->page)
			continue;

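		/* release the DMA mapping taken in ice_alloc_mapped_page()
		 * before returning the page to the allocator
		 */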
		dma_unmap_page(dev, rx_buf->dma, PAGE_SIZE, DMA_FROM_DEVICE);
		__free_pages(rx_buf->page, 0);

		rx_buf->page = NULL;
		rx_buf->page_offset = 0;
	}

	size = sizeof(struct ice_rx_buf) * rx_ring->count;
	memset(rx_ring->rx_buf, 0, size);

	/* Zero out the descriptor ring */
	memset(rx_ring->desc, 0, rx_ring->size);

	rx_ring->next_to_alloc = 0;
	rx_ring->next_to_clean = 0;
	rx_ring->next_to_use = 0;
}

/**
 * ice_free_rx_ring - Free Rx resources
 * @rx_ring: ring to clean the resources from
 *
 * Free all receive software resources
 */
void ice_free_rx_ring(struct ice_ring *rx_ring)
{
	ice_clean_rx_ring(rx_ring);
	devm_kfree(rx_ring->dev, rx_ring->rx_buf);
	rx_ring->rx_buf = NULL;

	if (rx_ring->desc) {
		dmam_free_coherent(rx_ring->dev, rx_ring->size,
				   rx_ring->desc, rx_ring->dma);
		rx_ring->desc = NULL;
	}
}

/**
 * ice_setup_rx_ring - Allocate the Rx descriptors
 * @rx_ring: the Rx ring to set up
 *
 * Return 0 on success, negative on error
 */
int ice_setup_rx_ring(struct ice_ring *rx_ring)
{
	struct device *dev = rx_ring->dev;
	int bi_size;

	if (!dev)
		return -ENOMEM;

	/* warn if we are about to overwrite the pointer */
	WARN_ON(rx_ring->rx_buf);
	bi_size = sizeof(struct ice_rx_buf) * rx_ring->count;
	rx_ring->rx_buf = devm_kzalloc(dev, bi_size, GFP_KERNEL);
	if (!rx_ring->rx_buf)
		return -ENOMEM;

	/* round up to nearest 4K */
	rx_ring->size = rx_ring->count * sizeof(union ice_32byte_rx_desc);
	rx_ring->size = ALIGN(rx_ring->size, 4096);
	rx_ring->desc = dmam_alloc_coherent(dev, rx_ring->size, &rx_ring->dma,
					    GFP_KERNEL);
	if (!rx_ring->desc) {
		dev_err(dev, "Unable to allocate memory for the Rx descriptor ring, size=%d\n",
			rx_ring->size);
		goto err;
	}

	rx_ring->next_to_use = 0;
	rx_ring->next_to_clean = 0;
	return 0;

err:
	devm_kfree(dev, rx_ring->rx_buf);
	rx_ring->rx_buf = NULL;
	return -ENOMEM;
}

/**
 * ice_release_rx_desc - Store the new tail and head values
 * @rx_ring: ring to bump
 * @val: new head index
 */
static void ice_release_rx_desc(struct ice_ring *rx_ring, u32 val)
{
	rx_ring->next_to_use = val;

	/* update next to alloc since we have filled the ring */
	rx_ring->next_to_alloc = val;

	/* Force memory writes to complete before letting h/w
	 * know there are new descriptors to fetch. (Only
	 * applicable for weak-ordered memory model archs,
	 * such as IA-64).
	 */
	wmb();
	writel(val, rx_ring->tail);
}

/**
 * ice_alloc_mapped_page - recycle or make a new page
 * @rx_ring: ring to use
 * @bi: rx_buf struct to modify
 *
 * Returns true if the page was successfully allocated or
 * reused.
 */
static bool ice_alloc_mapped_page(struct ice_ring *rx_ring,
				  struct ice_rx_buf *bi)
{
	struct page *page = bi->page;
	dma_addr_t dma;

	/* since we are recycling buffers we should seldom need to alloc */
	if (likely(page))
		return true;

	/* alloc new page for storage */
	page = alloc_page(GFP_ATOMIC | __GFP_NOWARN);
	if (unlikely(!page))
		return false;

	/* map page for use */
	dma = dma_map_page(rx_ring->dev, page, 0, PAGE_SIZE, DMA_FROM_DEVICE);

	/* if mapping failed free memory back to system since
	 * there isn't much point in holding memory we can't use
	 */
	if (dma_mapping_error(rx_ring->dev, dma)) {
		__free_pages(page, 0);
		return false;
	}

	bi->dma = dma;
	bi->page = page;
	bi->page_offset = 0;

	return true;
}

/**
 * ice_alloc_rx_bufs - Replace used receive buffers
 * @rx_ring: ring to place buffers on
 * @cleaned_count: number of buffers to replace
 *
 * Returns false if all allocations were successful, true if any fail
 */
bool ice_alloc_rx_bufs(struct ice_ring *rx_ring, u16 cleaned_count)
{
	union ice_32b_rx_flex_desc *rx_desc;
	u16 ntu = rx_ring->next_to_use;
	struct ice_rx_buf *bi;

	/* do nothing if no valid netdev defined */
	if (!rx_ring->netdev || !cleaned_count)
		return false;

	/* get the RX descriptor and buffer based on next_to_use */
	rx_desc = ICE_RX_DESC(rx_ring, ntu);
	bi = &rx_ring->rx_buf[ntu];

	do {
		if (!ice_alloc_mapped_page(rx_ring, bi))
			goto no_bufs;

		/* Refresh the desc even if buffer_addrs didn't change
		 * because each write-back erases this info.
		 */
		rx_desc->read.pkt_addr = cpu_to_le64(bi->dma + bi->page_offset);

		rx_desc++;
		bi++;
		ntu++;
		if (unlikely(ntu == rx_ring->count)) {
			rx_desc = ICE_RX_DESC(rx_ring, 0);
			bi = rx_ring->rx_buf;
			ntu = 0;
		}

		/* clear the status bits for the next_to_use descriptor */
		rx_desc->wb.status_error0 = 0;

		cleaned_count--;
	} while (cleaned_count);

	if (rx_ring->next_to_use != ntu)
		ice_release_rx_desc(rx_ring, ntu);

	return false;

no_bufs:
	if (rx_ring->next_to_use != ntu)
		ice_release_rx_desc(rx_ring, ntu);

	/* make sure to come back via polling to try again after
	 * allocation failure
	 */
	return true;
}