/*
 * Copyright (c) 2007, 2008, 2009 QLogic Corporation. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */
#include <linux/mm.h>
#include <linux/types.h>
#include <linux/device.h>
#include <linux/dmapool.h>
#include <linux/slab.h>
#include <linux/list.h>
#include <linux/highmem.h>
#include <linux/io.h>
#include <linux/uio.h>
#include <linux/rbtree.h>
#include <linux/spinlock.h>
#include <linux/delay.h>

#include "qib.h"
#include "qib_user_sdma.h"
/* minimum size of header */
#define QIB_USER_SDMA_MIN_HEADER_LENGTH 64
/* expected size of headers (for dma_pool) */
#define QIB_USER_SDMA_EXP_HEADER_LENGTH 64
/* attempt to drain the queue for 5 secs (500 iterations of a 10 ms sleep) */
#define QIB_USER_SDMA_DRAIN_TIMEOUT 500
struct qib_user_sdma_pkt {
        struct list_head list;  /* list element */

        u8  tiddma;             /* if this is NEW tid-sdma */
        u8  largepkt;           /* this is large pkt from kmalloc */
        u16 frag_size;          /* frag size used by PSM */
        u16 index;              /* last header index or push index */
        u16 naddr;              /* number of used entries in addr[] */
        u16 addrlimit;          /* addr array size */
        u16 tidsmidx;           /* current tidsm index */
        u16 tidsmcount;         /* tidsm array item count */
        u16 payload_size;       /* payload size so far for header */
        u32 bytes_togo;         /* bytes for processing */
        u32 counter;            /* sdma pkts queued counter for this entry */
        struct qib_tid_session_member *tidsm;   /* tid session member array */
        struct qib_user_sdma_queue *pq; /* which pq this pkt belongs to */
        u64 added;              /* global descq number of entries */

        struct {
                u16 offset;             /* offset for kvaddr, addr */
                u16 length;             /* length in page */
                u16 first_desc;         /* first desc */
                u16 last_desc;          /* last desc */
                u16 put_page;           /* should we put_page? */
                u16 dma_mapped;         /* is page dma_mapped? */
                u16 dma_length;         /* for dma_unmap_page() */
                u16 padding;
                struct page *page;      /* may be NULL (coherent mem) */
                void *kvaddr;           /* FIXME: only for pio hack */
                dma_addr_t addr;
        } addr[4];   /* max pages, any more and we coalesce */
};
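
/*
 * An illustrative (not normative) frag layout for a small packet:
 * addr[0] always carries the pbc + header (first_desc = 1, kvaddr from
 * the dma_pool or kmalloc), and payload frags follow it, the final one
 * with last_desc = 1.  A packet that would need more than
 * ARRAY_SIZE(addr) frags, and that does not take the large-packet
 * kmalloc path, is instead copied into one fresh page by
 * qib_user_sdma_coalesce() below.
 */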

struct qib_user_sdma_queue {
        /*
         * pkts sent to dma engine are queued on this
         * list head.  the type of the elements of this
         * list are struct qib_user_sdma_pkt...
         */
        struct list_head sent;

        /*
         * Because the above list will be accessed by both the process
         * context and the interrupt handler, we need a spinlock for it.
         */
        spinlock_t sent_lock ____cacheline_aligned_in_smp;

        /* headers with expected length are allocated from here... */
        char header_cache_name[64];
        struct dma_pool *header_cache;

        /* packets are allocated from the slab cache... */
        char pkt_slab_name[64];
        struct kmem_cache *pkt_slab;

        /* as packets go on the queued queue, they are counted... */
        u32 counter;
        u32 sent_counter;
        /* pending packets, not sending yet */
        u32 num_pending;
        /* sending packets, not complete yet */
        u32 num_sending;
        /* global descq number of entry of last sending packet */
        u64 added;

        /* dma page table */
        struct rb_root dma_pages_root;

        /* protect everything above... */
        struct mutex lock;
};
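
/*
 * Locking summary, as used in this file: pq->lock (a mutex) serializes
 * queue operations from process context; pq->sent_lock additionally
 * guards the sent list against qib_user_sdma_send_desc(), which runs
 * from interrupt context; and the hardware descriptor queue itself is
 * protected by ppd->sdma_lock.
 */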

struct qib_user_sdma_queue *
qib_user_sdma_queue_create(struct device *dev, int unit, int ctxt, int sctxt)
{
        struct qib_user_sdma_queue *pq =
                kmalloc(sizeof(struct qib_user_sdma_queue), GFP_KERNEL);

        if (!pq)
                goto done;

        pq->counter = 0;
        pq->sent_counter = 0;
        pq->num_pending = 0;
        pq->num_sending = 0;
        pq->added = 0;

        INIT_LIST_HEAD(&pq->sent);
        spin_lock_init(&pq->sent_lock);
        mutex_init(&pq->lock);

        snprintf(pq->pkt_slab_name, sizeof(pq->pkt_slab_name),
                 "qib-user-sdma-pkts-%u-%02u.%02u", unit, ctxt, sctxt);
        pq->pkt_slab = kmem_cache_create(pq->pkt_slab_name,
                                         sizeof(struct qib_user_sdma_pkt),
                                         0, 0, NULL);

        if (!pq->pkt_slab)
                goto err_kfree;

        snprintf(pq->header_cache_name, sizeof(pq->header_cache_name),
                 "qib-user-sdma-headers-%u-%02u.%02u", unit, ctxt, sctxt);
        pq->header_cache = dma_pool_create(pq->header_cache_name,
                                           dev,
                                           QIB_USER_SDMA_EXP_HEADER_LENGTH,
                                           4, 0);
        if (!pq->header_cache)
                goto err_slab;

        pq->dma_pages_root = RB_ROOT;

        goto done;

err_slab:
        kmem_cache_destroy(pq->pkt_slab);
err_kfree:
        kfree(pq);
        pq = NULL;

done:
        return pq;
}

static void qib_user_sdma_init_frag(struct qib_user_sdma_pkt *pkt,
                                    int i, u16 offset, u16 len,
                                    u16 first_desc, u16 last_desc,
                                    u16 put_page, u16 dma_mapped,
                                    struct page *page, void *kvaddr,
                                    dma_addr_t dma_addr, u16 dma_length)
{
        pkt->addr[i].offset = offset;
        pkt->addr[i].length = len;
        pkt->addr[i].first_desc = first_desc;
        pkt->addr[i].last_desc = last_desc;
        pkt->addr[i].put_page = put_page;
        pkt->addr[i].dma_mapped = dma_mapped;
        pkt->addr[i].page = page;
        pkt->addr[i].kvaddr = kvaddr;
        pkt->addr[i].addr = dma_addr;
        pkt->addr[i].dma_length = dma_length;
}

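/*
 * Allocate an sdma header buffer.  Headers of the expected 64-byte
 * length come pre-mapped from the dma_pool; larger headers fall back
 * to kmalloc(), with *dma_addr set to 0 as a "not yet dma mapped"
 * marker that callers test before the packet is pushed to hardware.
 */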
static void *qib_user_sdma_alloc_header(struct qib_user_sdma_queue *pq,
                                        size_t len, dma_addr_t *dma_addr)
{
        void *hdr;

        if (len == QIB_USER_SDMA_EXP_HEADER_LENGTH)
                hdr = dma_pool_alloc(pq->header_cache, GFP_KERNEL,
                                     dma_addr);
        else
                hdr = NULL;

        if (!hdr) {
                hdr = kmalloc(len, GFP_KERNEL);
                if (!hdr)
                        return NULL;

                *dma_addr = 0;
        }

        return hdr;
}

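/*
 * Add one user page to the packet as one or more frags.  The page is
 * dma mapped once, then carved into frags bounded by the remaining tid
 * page length (for tid-sdma), the frag size, and the bytes still to
 * go; whenever a frag completes a wire packet, a new sdma header is
 * cloned from the previous one and both are patched.  On failure the
 * page reference is dropped in here, so the caller must not release
 * the page again.
 */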
static int qib_user_sdma_page_to_frags(const struct qib_devdata *dd,
                                       struct qib_user_sdma_queue *pq,
                                       struct qib_user_sdma_pkt *pkt,
                                       struct page *page, u16 put,
                                       u16 offset, u16 len, void *kvaddr)
{
        __le16 *pbc16;
        void *pbcvaddr;
        struct qib_message_header *hdr;
        u16 newlen, pbclen, lastdesc, dma_mapped;
        u32 vcto;
        union qib_seqnum seqnum;
        dma_addr_t pbcdaddr;
        dma_addr_t dma_addr =
                dma_map_page(&dd->pcidev->dev,
                             page, offset, len, DMA_TO_DEVICE);
        int ret = 0;

        if (dma_mapping_error(&dd->pcidev->dev, dma_addr)) {
                /*
                 * dma mapping error: the packet has not taken
                 * ownership of this page yet, so release it here
                 * and let the caller forget about it.
                 */
                if (put) {
                        put_page(page);
                } else {
                        /* coalesce case */
                        kunmap(page);
                        __free_page(page);
                }
                ret = -ENOMEM;
                goto done;
        }
        offset = 0;
        dma_mapped = 1;

next_fragment:

        /*
         * In tid-sdma, the transfer length is restricted by the
         * receiver side's current tid page length.
         */
        if (pkt->tiddma && len > pkt->tidsm[pkt->tidsmidx].length)
                newlen = pkt->tidsm[pkt->tidsmidx].length;
        else
                newlen = len;

        /*
         * The transfer length is further restricted by the MTU.
         * The last descriptor flag is set when:
         * 1. the current packet has reached frag size length, or
         * 2. the current tid page is done, if tid-sdma, or
         * 3. there are no more bytes to go, if plain sdma.
         */
        lastdesc = 0;
        if ((pkt->payload_size + newlen) >= pkt->frag_size) {
                newlen = pkt->frag_size - pkt->payload_size;
                lastdesc = 1;
        } else if (pkt->tiddma) {
                if (newlen == pkt->tidsm[pkt->tidsmidx].length)
                        lastdesc = 1;
        } else {
                if (newlen == pkt->bytes_togo)
                        lastdesc = 1;
        }
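
        /*
         * Worked example with illustrative numbers: if frag_size is
         * 4096, payload_size is already 3072 and this page contributes
         * len = 2048, the clamps above yield newlen = 1024 and
         * lastdesc = 1; the remaining 1024 bytes of the page are
         * handled by looping back to next_fragment further down.
         */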

        /* fill the next fragment in this page */
        qib_user_sdma_init_frag(pkt, pkt->naddr, /* index */
                                offset, newlen,  /* offset, len */
                                0, lastdesc,     /* first last desc */
                                put, dma_mapped, /* put page, dma mapped */
                                page, kvaddr,    /* struct page, virt addr */
                                dma_addr, len);  /* dma addr, dma length */
        pkt->bytes_togo -= newlen;
        pkt->payload_size += newlen;
        pkt->naddr++;
        if (pkt->naddr == pkt->addrlimit) {
                ret = -EFAULT;
                goto done;
        }

        /* If there are no more bytes to go (lastdesc == 1). */
        if (pkt->bytes_togo == 0) {
                /*
                 * The packet is done; if the header is not dma mapped
                 * yet, it must have come from kmalloc.
                 */
                if (!pkt->addr[pkt->index].addr) {
                        pkt->addr[pkt->index].addr =
                                dma_map_single(&dd->pcidev->dev,
                                               pkt->addr[pkt->index].kvaddr,
                                               pkt->addr[pkt->index].dma_length,
                                               DMA_TO_DEVICE);
                        if (dma_mapping_error(&dd->pcidev->dev,
                                              pkt->addr[pkt->index].addr)) {
                                ret = -ENOMEM;
                                goto done;
                        }
                        pkt->addr[pkt->index].dma_mapped = 1;
                }

                goto done;
        }

        /* If tid-sdma, advance tid info. */
        if (pkt->tiddma) {
                pkt->tidsm[pkt->tidsmidx].length -= newlen;
                if (pkt->tidsm[pkt->tidsmidx].length) {
                        pkt->tidsm[pkt->tidsmidx].offset += newlen;
                } else {
                        pkt->tidsmidx++;
                        if (pkt->tidsmidx == pkt->tidsmcount) {
                                ret = -EFAULT;
                                goto done;
                        }
                }
        }

        /*
         * If this is NOT the last descriptor (newlen == len), the
         * current packet is not done yet, but the current send side
         * page is done.
         */
        if (lastdesc == 0)
                goto done;

        /*
         * When running this driver under PSM with a message size that
         * fits into one transfer unit, this line cannot be reached;
         * reaching it in that configuration would be a bug.
         */

        /*
         * Since the current packet is done and there are more bytes
         * to go, we need to create a new sdma header, copying it from
         * the previous sdma header and then modifying both.
         */
        pbclen = pkt->addr[pkt->index].length;
        pbcvaddr = qib_user_sdma_alloc_header(pq, pbclen, &pbcdaddr);
        if (!pbcvaddr) {
                ret = -ENOMEM;
                goto done;
        }
        /* Copy the previous sdma header to the new sdma header */
        pbc16 = (__le16 *)pkt->addr[pkt->index].kvaddr;
        memcpy(pbcvaddr, pbc16, pbclen);

        /* Modify the previous sdma header */
        hdr = (struct qib_message_header *)&pbc16[4];

        /* New pbc length */
        pbc16[0] = cpu_to_le16(le16_to_cpu(pbc16[0])-(pkt->bytes_togo>>2));

        /* New packet length */
        hdr->lrh[2] = cpu_to_be16(le16_to_cpu(pbc16[0]));

        if (pkt->tiddma) {
                /* turn on the header suppression */
                hdr->iph.pkt_flags =
                        cpu_to_le16(le16_to_cpu(hdr->iph.pkt_flags)|0x2);
                /* turn off ACK_REQ: 0x04 and EXPECTED_DONE: 0x20 */
                hdr->flags &= ~(0x04|0x20);
        } else {
                /* turn off extra bytes: 20-21 bits */
                hdr->bth[0] = cpu_to_be32(be32_to_cpu(hdr->bth[0])&0xFFCFFFFF);
                /* turn off ACK_REQ: 0x04 */
                hdr->flags &= ~(0x04);
        }

        /* New kdeth checksum */
        vcto = le32_to_cpu(hdr->iph.ver_ctxt_tid_offset);
        hdr->iph.chksum = cpu_to_le16(QIB_LRH_BTH +
                                      be16_to_cpu(hdr->lrh[2]) -
                                      ((vcto>>16)&0xFFFF) - (vcto&0xFFFF) -
                                      le16_to_cpu(hdr->iph.pkt_flags));
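
        /*
         * Restating the computation above in words: the new kdeth
         * checksum is QIB_LRH_BTH plus the new packet length from
         * lrh[2], minus both 16-bit halves of ver_ctxt_tid_offset and
         * minus pkt_flags.  The same expression is applied again to
         * the new header once it has been patched below.
         */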

        /*
         * The packet is done; if the header is not dma mapped yet,
         * it must have come from kmalloc.
         */
        if (!pkt->addr[pkt->index].addr) {
                pkt->addr[pkt->index].addr =
                        dma_map_single(&dd->pcidev->dev,
                                       pkt->addr[pkt->index].kvaddr,
                                       pkt->addr[pkt->index].dma_length,
                                       DMA_TO_DEVICE);
                if (dma_mapping_error(&dd->pcidev->dev,
                                      pkt->addr[pkt->index].addr)) {
                        ret = -ENOMEM;
                        goto done;
                }
                pkt->addr[pkt->index].dma_mapped = 1;
        }

        /* Modify the new sdma header */
        pbc16 = (__le16 *)pbcvaddr;
        hdr = (struct qib_message_header *)&pbc16[4];

        /* New pbc length */
        pbc16[0] = cpu_to_le16(le16_to_cpu(pbc16[0])-(pkt->payload_size>>2));

        /* New packet length */
        hdr->lrh[2] = cpu_to_be16(le16_to_cpu(pbc16[0]));

        if (pkt->tiddma) {
                /* Set new tid and offset for new sdma header */
                hdr->iph.ver_ctxt_tid_offset = cpu_to_le32(
                        (le32_to_cpu(hdr->iph.ver_ctxt_tid_offset)&0xFF000000) +
                        (pkt->tidsm[pkt->tidsmidx].tid<<QLOGIC_IB_I_TID_SHIFT) +
                        (pkt->tidsm[pkt->tidsmidx].offset>>2));
        } else {
                /* Middle protocol new packet offset */
                hdr->uwords[2] += pkt->payload_size;
        }

        /* New kdeth checksum */
        vcto = le32_to_cpu(hdr->iph.ver_ctxt_tid_offset);
        hdr->iph.chksum = cpu_to_le16(QIB_LRH_BTH +
                                      be16_to_cpu(hdr->lrh[2]) -
                                      ((vcto>>16)&0xFFFF) - (vcto&0xFFFF) -
                                      le16_to_cpu(hdr->iph.pkt_flags));

        /* Next sequence number in new sdma header */
        seqnum.val = be32_to_cpu(hdr->bth[2]);
        if (pkt->tiddma)
                seqnum.seq++;
        else
                seqnum.pkt++;
        hdr->bth[2] = cpu_to_be32(seqnum.val);

        /* Init new sdma header. */
        qib_user_sdma_init_frag(pkt, pkt->naddr, /* index */
                                0, pbclen,       /* offset, len */
                                1, 0,            /* first last desc */
                                0, 0,            /* put page, dma mapped */
                                NULL, pbcvaddr,  /* struct page, virt addr */
                                pbcdaddr, pbclen); /* dma addr, dma length */
        pkt->index = pkt->naddr;
        pkt->payload_size = 0;
        pkt->naddr++;
        if (pkt->naddr == pkt->addrlimit) {
                ret = -EFAULT;
                goto done;
        }

        /* Prepare for the next fragment in this page */
        if (newlen != len) {
                if (dma_mapped) {
                        put = 0;
                        dma_mapped = 0;
                        page = NULL;
                        kvaddr = NULL;
                }
                len -= newlen;
                offset += newlen;

                goto next_fragment;
        }

done:
        return ret;
}

/* we have too many pages in the iovec, coalesce to a single page */
static int qib_user_sdma_coalesce(const struct qib_devdata *dd,
                                  struct qib_user_sdma_queue *pq,
                                  struct qib_user_sdma_pkt *pkt,
                                  const struct iovec *iov,
                                  unsigned long niov)
{
        int ret = 0;
        struct page *page = alloc_page(GFP_KERNEL);
        void *mpage_save;
        char *mpage;
        int i;
        int len = 0;

        if (!page) {
                ret = -ENOMEM;
                goto done;
        }

        mpage = kmap(page);
        mpage_save = mpage;
        for (i = 0; i < niov; i++) {
                int cfur;

                cfur = copy_from_user(mpage,
                                      iov[i].iov_base, iov[i].iov_len);
                if (cfur) {
                        ret = -EFAULT;
                        goto free_unmap;
                }

                mpage += iov[i].iov_len;
                len += iov[i].iov_len;
        }

        ret = qib_user_sdma_page_to_frags(dd, pq, pkt,
                                          page, 0, 0, len, mpage_save);
        goto done;

free_unmap:
        kunmap(page);
        __free_page(page);
done:
        return ret;
}

/*
 * How many pages in this iovec element?
 */
static int qib_user_sdma_num_pages(const struct iovec *iov)
{
        const unsigned long addr = (unsigned long) iov->iov_base;
        const unsigned long len = iov->iov_len;
        const unsigned long spage = addr & PAGE_MASK;
        const unsigned long epage = (addr + len - 1) & PAGE_MASK;

        return 1 + ((epage - spage) >> PAGE_SHIFT);
}
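
/*
 * For example, assuming 4 KiB pages: an element with iov_base 0x1ffc
 * and iov_len 8 has spage 0x1000 and epage 0x2000, so it counts as
 * 1 + 1 = 2 pages even though it is only 8 bytes long.
 */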

static void qib_user_sdma_free_pkt_frag(struct device *dev,
                                        struct qib_user_sdma_queue *pq,
                                        struct qib_user_sdma_pkt *pkt,
                                        int frag)
{
        const int i = frag;

        if (pkt->addr[i].page) {
                /* only user data frags have a page */
                if (pkt->addr[i].dma_mapped)
                        dma_unmap_page(dev,
                                       pkt->addr[i].addr,
                                       pkt->addr[i].dma_length,
                                       DMA_TO_DEVICE);

                if (pkt->addr[i].kvaddr)
                        kunmap(pkt->addr[i].page);

                if (pkt->addr[i].put_page)
                        put_page(pkt->addr[i].page);
                else
                        __free_page(pkt->addr[i].page);
        } else if (pkt->addr[i].kvaddr) {
                /* for headers */
                if (pkt->addr[i].dma_mapped) {
                        /* from kmalloc & dma mapped */
                        dma_unmap_single(dev,
                                         pkt->addr[i].addr,
                                         pkt->addr[i].dma_length,
                                         DMA_TO_DEVICE);
                        kfree(pkt->addr[i].kvaddr);
                } else if (pkt->addr[i].addr) {
                        /* free coherent mem from cache... */
                        dma_pool_free(pq->header_cache,
                                      pkt->addr[i].kvaddr, pkt->addr[i].addr);
                } else {
                        /* from kmalloc but not dma mapped */
                        kfree(pkt->addr[i].kvaddr);
                }
        }
}

/* return number of pages pinned... */
static int qib_user_sdma_pin_pages(const struct qib_devdata *dd,
                                   struct qib_user_sdma_queue *pq,
                                   struct qib_user_sdma_pkt *pkt,
                                   unsigned long addr, int tlen, int npages)
{
        struct page *pages[8];
        int i, j;
        int ret = 0;

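        /*
         * Pin in batches of at most 8 pages, the size of the on-stack
         * pages[] array.  Each pinned page is handed straight to
         * qib_user_sdma_page_to_frags(), which takes ownership of it
         * even on failure, so the error path below only releases the
         * pages that were never handed over.
         */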
        while (npages) {
                if (npages > 8)
                        j = 8;
                else
                        j = npages;

                ret = get_user_pages_fast(addr, j, 0, pages);
                if (ret != j) {
                        i = 0;
                        j = ret;
                        ret = -ENOMEM;
                        goto free_pages;
                }

                for (i = 0; i < j; i++) {
                        /* map the pages... */
                        unsigned long fofs = addr & ~PAGE_MASK;
                        int flen = ((fofs + tlen) > PAGE_SIZE) ?
                                (PAGE_SIZE - fofs) : tlen;

                        ret = qib_user_sdma_page_to_frags(dd, pq, pkt,
                                        pages[i], 1, fofs, flen, NULL);
                        if (ret < 0) {
                                /*
                                 * The current page has been taken
                                 * care of inside the above call.
                                 */
                                i++;
                                goto free_pages;
                        }

                        addr += flen;
                        tlen -= flen;
                }

                npages -= j;
        }

        goto done;

        /* if error, return all pages not managed by pkt */
free_pages:
        while (i < j)
                put_page(pages[i++]);

done:
        return ret;
}

static int qib_user_sdma_pin_pkt(const struct qib_devdata *dd,
                                 struct qib_user_sdma_queue *pq,
                                 struct qib_user_sdma_pkt *pkt,
                                 const struct iovec *iov,
                                 unsigned long niov)
{
        int ret = 0;
        unsigned long idx;

        for (idx = 0; idx < niov; idx++) {
                const int npages = qib_user_sdma_num_pages(iov + idx);
                const unsigned long addr = (unsigned long) iov[idx].iov_base;

                ret = qib_user_sdma_pin_pages(dd, pq, pkt, addr,
                                              iov[idx].iov_len, npages);
                if (ret < 0)
                        goto free_pkt;
        }

        goto done;

free_pkt:
        /* we need to ignore the first entry here */
        for (idx = 1; idx < pkt->naddr; idx++)
                qib_user_sdma_free_pkt_frag(&dd->pcidev->dev, pq, pkt, idx);

        /*
         * We do need to dma unmap the first entry, to restore it to
         * its original state so that the caller can free the memory
         * on the error path; the caller does not know whether it was
         * dma mapped or not.
         */
        if (pkt->addr[0].dma_mapped) {
                dma_unmap_single(&dd->pcidev->dev,
                                 pkt->addr[0].addr,
                                 pkt->addr[0].dma_length,
                                 DMA_TO_DEVICE);
                pkt->addr[0].addr = 0;
                pkt->addr[0].dma_mapped = 0;
        }

done:
        return ret;
}

static int qib_user_sdma_init_payload(const struct qib_devdata *dd,
                                      struct qib_user_sdma_queue *pq,
                                      struct qib_user_sdma_pkt *pkt,
                                      const struct iovec *iov,
                                      unsigned long niov, int npages)
{
        int ret = 0;

        if (pkt->frag_size == pkt->bytes_togo &&
            npages >= ARRAY_SIZE(pkt->addr))
                ret = qib_user_sdma_coalesce(dd, pq, pkt, iov, niov);
        else
                ret = qib_user_sdma_pin_pkt(dd, pq, pkt, iov, niov);

        return ret;
}

/* free a packet list */
static void qib_user_sdma_free_pkt_list(struct device *dev,
                                        struct qib_user_sdma_queue *pq,
                                        struct list_head *list)
{
        struct qib_user_sdma_pkt *pkt, *pkt_next;

        list_for_each_entry_safe(pkt, pkt_next, list, list) {
                int i;

                for (i = 0; i < pkt->naddr; i++)
                        qib_user_sdma_free_pkt_frag(dev, pq, pkt, i);

                if (pkt->largepkt)
                        kfree(pkt);
                else
                        kmem_cache_free(pq->pkt_slab, pkt);
        }
        INIT_LIST_HEAD(list);
}

/*
 * copy headers, coalesce etc. -- pq->lock must be held
 *
 * we queue all the packets on list, returning the number of iovec
 * entries consumed.  list must be empty initially; if there is an
 * error we clean it up...
 */
static int qib_user_sdma_queue_pkts(const struct qib_devdata *dd,
                                    struct qib_pportdata *ppd,
                                    struct qib_user_sdma_queue *pq,
                                    const struct iovec *iov,
                                    unsigned long niov,
                                    struct list_head *list,
                                    int *maxpkts, int *ndesc)
{
        unsigned long idx = 0;
        int ret = 0;
        int npkts = 0;
        __le32 *pbc;
        dma_addr_t dma_addr;
        struct qib_user_sdma_pkt *pkt = NULL;
        size_t len;
        size_t nw;
        u32 counter = pq->counter;
        u16 frag_size;

        while (idx < niov && npkts < *maxpkts) {
                const unsigned long addr = (unsigned long) iov[idx].iov_base;
                const unsigned long idx_save = idx;
                unsigned pktnw;
                unsigned pktnwc;
                int nfrags = 0;
                int npages = 0;
                int bytes_togo = 0;
                int tiddma = 0;
                int cfur;

                len = iov[idx].iov_len;
                nw = len >> 2;

                if (len < QIB_USER_SDMA_MIN_HEADER_LENGTH ||
                    len > PAGE_SIZE || len & 3 || addr & 3) {
                        ret = -EINVAL;
                        goto free_list;
                }

                pbc = qib_user_sdma_alloc_header(pq, len, &dma_addr);
                if (!pbc) {
                        ret = -ENOMEM;
                        goto free_list;
                }

                cfur = copy_from_user(pbc, iov[idx].iov_base, len);
                if (cfur) {
                        ret = -EFAULT;
                        goto free_pbc;
                }

                /*
                 * This assignment is a bit strange.  It's because the
                 * pbc counts the number of 32 bit words in the full
                 * packet _except_ the first word of the pbc itself...
                 */
                pktnwc = nw - 1;

                /*
                 * pktnw computation yields the number of 32 bit words
                 * that the caller has indicated in the PBC.  note that
                 * this is one less than the total number of words that
                 * goes to the send DMA engine as the first 32 bit word
                 * of the PBC itself is not counted.  Armed with this count,
                 * we can verify that the packet is consistent with the
                 * iovec lengths.
                 */
                pktnw = le32_to_cpu(*pbc) & 0xFFFF;
                if (pktnw < pktnwc) {
                        ret = -EINVAL;
                        goto free_pbc;
                }

                idx++;
                while (pktnwc < pktnw && idx < niov) {
                        const size_t slen = iov[idx].iov_len;
                        const unsigned long faddr =
                                (unsigned long) iov[idx].iov_base;

                        if (slen & 3 || faddr & 3 || !slen) {
                                ret = -EINVAL;
                                goto free_pbc;
                        }

                        npages += qib_user_sdma_num_pages(&iov[idx]);

                        bytes_togo += slen;
                        pktnwc += slen >> 2;
                        idx++;
                        nfrags++;
                }

                if (pktnwc != pktnw) {
                        ret = -EINVAL;
                        goto free_pbc;
                }

                frag_size = ((le32_to_cpu(*pbc))>>16) & 0xFFFF;
                if (((frag_size ? frag_size : bytes_togo) + len) >
                    ppd->ibmaxlen) {
                        ret = -EINVAL;
                        goto free_pbc;
                }

                if (frag_size) {
                        int pktsize, tidsmsize, n;

                        n = npages*((2*PAGE_SIZE/frag_size)+1);
                        pktsize = sizeof(*pkt) + sizeof(pkt->addr[0])*n;

                        /*
                         * Determine if this is tid-sdma or just sdma.
                         */
                        tiddma = (((le32_to_cpu(pbc[7])>>
                                    QLOGIC_IB_I_TID_SHIFT)&
                                   QLOGIC_IB_I_TID_MASK) !=
                                  QLOGIC_IB_I_TID_MASK);

                        if (tiddma)
                                tidsmsize = iov[idx].iov_len;
                        else
                                tidsmsize = 0;

                        pkt = kmalloc(pktsize+tidsmsize, GFP_KERNEL);
                        if (!pkt) {
                                ret = -ENOMEM;
                                goto free_pbc;
                        }
                        pkt->largepkt = 1;
                        pkt->frag_size = frag_size;
                        pkt->addrlimit = n + ARRAY_SIZE(pkt->addr);

                        if (tiddma) {
                                char *tidsm = (char *)pkt + pktsize;

                                cfur = copy_from_user(tidsm,
                                        iov[idx].iov_base, tidsmsize);
                                if (cfur) {
                                        ret = -EFAULT;
                                        goto free_pkt;
                                }
                                pkt->tidsm =
                                        (struct qib_tid_session_member *)tidsm;
                                pkt->tidsmcount = tidsmsize/
                                        sizeof(struct qib_tid_session_member);
                                pkt->tidsmidx = 0;
                                idx++;
                        }

                        /*
                         * The pbc 'fill1' field is borrowed to pass the
                         * frag size; clear it after picking the frag
                         * size out, since the hardware requires this
                         * field to be zero.
                         */
                        *pbc = cpu_to_le32(le32_to_cpu(*pbc) & 0x0000FFFF);
                } else {
                        pkt = kmem_cache_alloc(pq->pkt_slab, GFP_KERNEL);
                        if (!pkt) {
                                ret = -ENOMEM;
                                goto free_pbc;
                        }
                        pkt->largepkt = 0;
                        pkt->frag_size = bytes_togo;
                        pkt->addrlimit = ARRAY_SIZE(pkt->addr);
                }
                pkt->bytes_togo = bytes_togo;
                pkt->payload_size = 0;
                pkt->counter = counter;
                pkt->tiddma = tiddma;

                /* setup the first header */
                qib_user_sdma_init_frag(pkt, 0, /* index */
                                        0, len, /* offset, len */
                                        1, 0,   /* first last desc */
                                        0, 0,   /* put page, dma mapped */
                                        NULL, pbc, /* struct page, virt addr */
                                        dma_addr, len); /* dma addr, dma length */
                pkt->index = 0;
                pkt->naddr = 1;

                if (nfrags) {
                        ret = qib_user_sdma_init_payload(dd, pq, pkt,
                                                         iov + idx_save + 1,
                                                         nfrags, npages);
                        if (ret < 0)
                                goto free_pkt;
                } else {
                        /*
                         * Since there is no payload, mark the
                         * header as the last desc.
                         */
                        pkt->addr[0].last_desc = 1;

                        if (dma_addr == 0) {
                                /*
                                 * The header is not dma mapped yet;
                                 * it should be from kmalloc.
                                 */
                                dma_addr = dma_map_single(&dd->pcidev->dev,
                                                          pbc, len,
                                                          DMA_TO_DEVICE);
                                if (dma_mapping_error(&dd->pcidev->dev,
                                                      dma_addr)) {
                                        ret = -ENOMEM;
                                        goto free_pkt;
                                }
                                pkt->addr[0].addr = dma_addr;
                                pkt->addr[0].dma_mapped = 1;
                        }
                }

                counter++;
                npkts++;
                pkt->pq = pq;
                pkt->index = 0; /* reset index for push on hw */
                *ndesc += pkt->naddr;

                list_add_tail(&pkt->list, list);
        }

        *maxpkts = npkts;
        ret = idx;
        goto done;

free_pkt:
        if (pkt->largepkt)
                kfree(pkt);
        else
                kmem_cache_free(pq->pkt_slab, pkt);
free_pbc:
        if (dma_addr)
                dma_pool_free(pq->header_cache, pbc, dma_addr);
        else
                kfree(pbc);
free_list:
        qib_user_sdma_free_pkt_list(&dd->pcidev->dev, pq, list);
done:
        return ret;
}

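/*
 * Completion accounting, in brief: pq->counter is bumped once per
 * queued packet and each packet remembers the value it was queued
 * with; pq->sent_counter is then advanced to the counter of the last
 * packet freed, so the two exported counter routines at the bottom of
 * this file let a caller judge how far the hardware has progressed.
 */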
static void qib_user_sdma_set_complete_counter(struct qib_user_sdma_queue *pq,
                                               u32 c)
{
        pq->sent_counter = c;
}

/* try to clean out queue -- needs pq->lock */
static int qib_user_sdma_queue_clean(struct qib_pportdata *ppd,
                                     struct qib_user_sdma_queue *pq)
{
        struct qib_devdata *dd = ppd->dd;
        struct list_head free_list;
        struct qib_user_sdma_pkt *pkt;
        struct qib_user_sdma_pkt *pkt_prev;
        unsigned long flags;
        int ret = 0;

        if (!pq->num_sending)
                return 0;

        INIT_LIST_HEAD(&free_list);

        /*
         * We need this spin lock here because the interrupt handler
         * might modify this list in qib_user_sdma_send_desc(); we must
         * also keep interrupts disabled while holding it, otherwise we
         * could deadlock.
         */
        spin_lock_irqsave(&pq->sent_lock, flags);
        list_for_each_entry_safe(pkt, pkt_prev, &pq->sent, list) {
                s64 descd = ppd->sdma_descq_removed - pkt->added;

                if (descd < 0)
                        break;

                list_move_tail(&pkt->list, &free_list);

                /* one more packet cleaned */
                ret++;
                pq->num_sending--;
        }
        spin_unlock_irqrestore(&pq->sent_lock, flags);

        if (!list_empty(&free_list)) {
                u32 counter;

                pkt = list_entry(free_list.prev,
                                 struct qib_user_sdma_pkt, list);
                counter = pkt->counter;

                qib_user_sdma_free_pkt_list(&dd->pcidev->dev, pq, &free_list);
                qib_user_sdma_set_complete_counter(pq, counter);
        }

        return ret;
}

void qib_user_sdma_queue_destroy(struct qib_user_sdma_queue *pq)
{
        if (!pq)
                return;

        kmem_cache_destroy(pq->pkt_slab);
        dma_pool_destroy(pq->header_cache);
        kfree(pq);
}

/* clean descriptor queue, returns > 0 if some elements cleaned */
static int qib_user_sdma_hwqueue_clean(struct qib_pportdata *ppd)
{
        int ret;
        unsigned long flags;

        spin_lock_irqsave(&ppd->sdma_lock, flags);
        ret = qib_sdma_make_progress(ppd);
        spin_unlock_irqrestore(&ppd->sdma_lock, flags);

        return ret;
}

/* we're in close, drain packets so that we can clean up successfully... */
void qib_user_sdma_queue_drain(struct qib_pportdata *ppd,
                               struct qib_user_sdma_queue *pq)
{
        struct qib_devdata *dd = ppd->dd;
        unsigned long flags;
        int i;

        if (!pq)
                return;

        for (i = 0; i < QIB_USER_SDMA_DRAIN_TIMEOUT; i++) {
                mutex_lock(&pq->lock);
                if (!pq->num_pending && !pq->num_sending) {
                        mutex_unlock(&pq->lock);
                        break;
                }
                qib_user_sdma_hwqueue_clean(ppd);
                qib_user_sdma_queue_clean(ppd, pq);
                mutex_unlock(&pq->lock);
                msleep(10);
        }

        if (pq->num_pending || pq->num_sending) {
                struct qib_user_sdma_pkt *pkt;
                struct qib_user_sdma_pkt *pkt_prev;
                struct list_head free_list;

                mutex_lock(&pq->lock);
                spin_lock_irqsave(&ppd->sdma_lock, flags);
                /*
                 * Since we hold sdma_lock, it is safe without sent_lock.
                 */
                if (pq->num_pending) {
                        list_for_each_entry_safe(pkt, pkt_prev,
                                        &ppd->sdma_userpending, list) {
                                if (pkt->pq == pq) {
                                        list_move_tail(&pkt->list, &pq->sent);
                                        pq->num_pending--;
                                        pq->num_sending++;
                                }
                        }
                }
                spin_unlock_irqrestore(&ppd->sdma_lock, flags);

                qib_dev_err(dd, "user sdma lists not empty: forcing!\n");
                INIT_LIST_HEAD(&free_list);
                list_splice_init(&pq->sent, &free_list);
                pq->num_sending = 0;
                qib_user_sdma_free_pkt_list(&dd->pcidev->dev, pq, &free_list);
                mutex_unlock(&pq->lock);
        }
}

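/*
 * Hardware descriptor layout for qword 0, as assembled by the helpers
 * below and their callers (derived from this file's code rather than
 * from a hardware manual):
 *
 *   [63:32] SDmaPhyAddr[31:0]    [15] interrupt request
 *   [31:30] SDmaGeneration[1:0]  [14] part of a large-buffer packet
 *   [26:16] SDmaDwordCount[10:0] [13] dma head
 *   [10:0]  SDmaBufOffset[12:2]  [12] first descriptor
 *                                [11] last descriptor
 *
 * Qword 1 carries SDmaPhyAddr[47:32].
 */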
static inline __le64 qib_sdma_make_desc0(u8 gen,
                                         u64 addr, u64 dwlen, u64 dwoffset)
{
        return cpu_to_le64(/* SDmaPhyAddr[31:0] */
                           ((addr & 0xfffffffcULL) << 32) |
                           /* SDmaGeneration[1:0] */
                           ((gen & 3ULL) << 30) |
                           /* SDmaDwordCount[10:0] */
                           ((dwlen & 0x7ffULL) << 16) |
                           /* SDmaBufOffset[12:2] */
                           (dwoffset & 0x7ffULL));
}

static inline __le64 qib_sdma_make_first_desc0(__le64 descq)
{
        return descq | cpu_to_le64(1ULL << 12);
}

static inline __le64 qib_sdma_make_last_desc0(__le64 descq)
{
        /* last */                    /* dma head */
        return descq | cpu_to_le64(1ULL << 11 | 1ULL << 13);
}

static inline __le64 qib_sdma_make_desc1(u64 addr)
{
        /* SDmaPhyAddr[47:32] */
        return cpu_to_le64(addr >> 32);
}

static void qib_user_sdma_send_frag(struct qib_pportdata *ppd,
                                    struct qib_user_sdma_pkt *pkt, int idx,
                                    unsigned ofs, u16 tail, u8 gen)
{
        const u64 addr = (u64) pkt->addr[idx].addr +
                (u64) pkt->addr[idx].offset;
        const u64 dwlen = (u64) pkt->addr[idx].length / 4;
        __le64 *descqp;
        __le64 descq0;

        descqp = &ppd->sdma_descq[tail].qw[0];

        descq0 = qib_sdma_make_desc0(gen, addr, dwlen, ofs);
        if (pkt->addr[idx].first_desc)
                descq0 = qib_sdma_make_first_desc0(descq0);
        if (pkt->addr[idx].last_desc) {
                descq0 = qib_sdma_make_last_desc0(descq0);
                if (ppd->sdma_intrequest) {
                        descq0 |= cpu_to_le64(1ULL << 15);
                        ppd->sdma_intrequest = 0;
                }
        }

        descqp[0] = descq0;
        descqp[1] = qib_sdma_make_desc1(addr);
}

void qib_user_sdma_send_desc(struct qib_pportdata *ppd,
                             struct list_head *pktlist)
{
        struct qib_devdata *dd = ppd->dd;
        u16 nfree, nsent;
        u16 tail, tail_c;
        u8 gen, gen_c;

        nfree = qib_sdma_descq_freecnt(ppd);
        if (!nfree)
                return;

retry:
        nsent = 0;
        tail_c = tail = ppd->sdma_descq_tail;
        gen_c = gen = ppd->sdma_generation;
        while (!list_empty(pktlist)) {
                struct qib_user_sdma_pkt *pkt =
                        list_entry(pktlist->next, struct qib_user_sdma_pkt,
                                   list);
                int i, j, c = 0;
                unsigned ofs = 0;
                u16 dtail = tail;

                for (i = pkt->index; i < pkt->naddr && nfree; i++) {
                        qib_user_sdma_send_frag(ppd, pkt, i, ofs, tail, gen);
                        ofs += pkt->addr[i].length >> 2;

                        if (++tail == ppd->sdma_descq_cnt) {
                                tail = 0;
                                ++gen;
                                ppd->sdma_intrequest = 1;
                        } else if (tail == (ppd->sdma_descq_cnt>>1)) {
                                ppd->sdma_intrequest = 1;
                        }
                        nfree--;
                        if (pkt->addr[i].last_desc == 0)
                                continue;

                        /*
                         * If the packet is >= 2KB mtu equivalent, we
                         * have to use the large buffers, and have to
                         * mark each descriptor as part of a large
                         * buffer packet.
                         */
                        if (ofs > dd->piosize2kmax_dwords) {
                                for (j = pkt->index; j <= i; j++) {
                                        ppd->sdma_descq[dtail].qw[0] |=
                                                cpu_to_le64(1ULL << 14);
                                        if (++dtail == ppd->sdma_descq_cnt)
                                                dtail = 0;
                                }
                        }
                        c += i + 1 - pkt->index;
                        pkt->index = i + 1; /* index for next first */
                        tail_c = dtail = tail;
                        gen_c = gen;
                        ofs = 0;  /* reset for next packet */
                }

                ppd->sdma_descq_added += c;
                nsent += c;
                if (pkt->index == pkt->naddr) {
                        pkt->added = ppd->sdma_descq_added;
                        pkt->pq->added = pkt->added;
                        pkt->pq->num_pending--;
                        spin_lock(&pkt->pq->sent_lock);
                        pkt->pq->num_sending++;
                        list_move_tail(&pkt->list, &pkt->pq->sent);
                        spin_unlock(&pkt->pq->sent_lock);
                }
                if (!nfree || (nsent<<2) > ppd->sdma_descq_cnt)
                        break;
        }

        /* advance the tail on the chip if necessary */
        if (ppd->sdma_descq_tail != tail_c) {
                ppd->sdma_generation = gen_c;
                dd->f_sdma_update_tail(ppd, tail_c);
        }

        if (nfree && !list_empty(pktlist))
                goto retry;
}

/* pq->lock must be held, get packets on the wire... */
static int qib_user_sdma_push_pkts(struct qib_pportdata *ppd,
                                   struct qib_user_sdma_queue *pq,
                                   struct list_head *pktlist, int count)
{
        int ret = 0;
        unsigned long flags;

        if (unlikely(!(ppd->lflags & QIBL_LINKACTIVE)))
                return -ECOMM;

        spin_lock_irqsave(&ppd->sdma_lock, flags);

        if (unlikely(!__qib_sdma_running(ppd))) {
                ret = -ECOMM;
                goto unlock;
        }

        pq->num_pending += count;
        list_splice_tail_init(pktlist, &ppd->sdma_userpending);
        qib_user_sdma_send_desc(ppd, &ppd->sdma_userpending);

unlock:
        spin_unlock_irqrestore(&ppd->sdma_lock, flags);
        return ret;
}

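/*
 * Entry point for userspace sdma writes, reached from the driver's
 * file write path.  In outline: opportunistically reap completed
 * packets, then repeatedly parse up to 8 packets out of the iovec via
 * qib_user_sdma_queue_pkts() and hand them to
 * qib_user_sdma_push_pkts(), cleaning the hardware queue lazily when
 * descriptor space runs short.
 */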
int qib_user_sdma_writev(struct qib_ctxtdata *rcd,
                         struct qib_user_sdma_queue *pq,
                         const struct iovec *iov,
                         unsigned long dim)
{
        struct qib_devdata *dd = rcd->dd;
        struct qib_pportdata *ppd = rcd->ppd;
        int ret = 0;
        struct list_head list;
        int npkts = 0;

        INIT_LIST_HEAD(&list);

        mutex_lock(&pq->lock);

        /* why not -ECOMM like qib_user_sdma_push_pkts() below? */
        if (!qib_sdma_running(ppd))
                goto done_unlock;

        /* if there are packets that are not yet complete */
        if (pq->added > ppd->sdma_descq_removed)
                qib_user_sdma_hwqueue_clean(ppd);
        /* if there are complete packets to be freed */
        if (pq->num_sending)
                qib_user_sdma_queue_clean(ppd, pq);

        while (dim) {
                int mxp = 8;
                int ndesc = 0;

                ret = qib_user_sdma_queue_pkts(dd, ppd, pq,
                                               iov, dim, &list, &mxp, &ndesc);
                if (ret < 0)
                        goto done_unlock;
                else {
                        dim -= ret;
                        iov += ret;
                }

                /* force packets onto the sdma hw queue... */
                if (!list_empty(&list)) {
                        /*
                         * Lazily clean hw queue.
                         */
                        if (qib_sdma_descq_freecnt(ppd) < ndesc) {
                                qib_user_sdma_hwqueue_clean(ppd);
                                if (pq->num_sending)
                                        qib_user_sdma_queue_clean(ppd, pq);
                        }

                        ret = qib_user_sdma_push_pkts(ppd, pq, &list, mxp);
                        if (ret < 0)
                                goto done_unlock;
                        else {
                                npkts += mxp;
                                pq->counter += mxp;
                        }
                }
        }

done_unlock:
        if (!list_empty(&list))
                qib_user_sdma_free_pkt_list(&dd->pcidev->dev, pq, &list);
        mutex_unlock(&pq->lock);

        return (ret < 0) ? ret : npkts;
}

int qib_user_sdma_make_progress(struct qib_pportdata *ppd,
                                struct qib_user_sdma_queue *pq)
{
        int ret = 0;

        mutex_lock(&pq->lock);
        qib_user_sdma_hwqueue_clean(ppd);
        ret = qib_user_sdma_queue_clean(ppd, pq);
        mutex_unlock(&pq->lock);

        return ret;
}

u32 qib_user_sdma_complete_counter(const struct qib_user_sdma_queue *pq)
{
        return pq ? pq->sent_counter : 0;
}

u32 qib_user_sdma_inflight_counter(struct qib_user_sdma_queue *pq)
{
        return pq ? pq->counter : 0;
}