/*
 * Copyright(c) 2015, 2016 Intel Corporation.
 *
 * This file is provided under a dual BSD/GPLv2 license.  When using or
 * redistributing this file, you may do so under either license.
 *
 * GPL LICENSE SUMMARY
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of version 2 of the GNU General Public License as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License for more details.
 *
 * BSD LICENSE
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 *  - Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 *  - Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in
 *    the documentation and/or other materials provided with the
 *    distribution.
 *  - Neither the name of Intel Corporation nor the names of its
 *    contributors may be used to endorse or promote products derived
 *    from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 *
 */
#include <linux/mm.h>
#include <linux/types.h>
#include <linux/device.h>
#include <linux/dmapool.h>
#include <linux/slab.h>
#include <linux/list.h>
#include <linux/highmem.h>
#include <linux/io.h>
#include <linux/uio.h>
#include <linux/rbtree.h>
#include <linux/spinlock.h>
#include <linux/delay.h>
#include <linux/kthread.h>
#include <linux/mmu_context.h>
#include <linux/module.h>
#include <linux/vmalloc.h>

#include "hfi.h"
#include "sdma.h"
#include "user_sdma.h"
#include "verbs.h"  /* for the headers */
#include "common.h" /* for struct hfi1_tid_info */
#include "trace.h"
#include "mmu_rb.h"

static uint hfi1_sdma_comp_ring_size = 128;
module_param_named(sdma_comp_size, hfi1_sdma_comp_ring_size, uint, S_IRUGO);
MODULE_PARM_DESC(sdma_comp_size, "Size of User SDMA completion ring. Default: 128");

/* The maximum number of data I/O vectors per message/request */
#define MAX_VECTORS_PER_REQ 8
/*
 * Maximum number of packets to send from each message/request
 * before moving on to the next one.
 */
#define MAX_PKTS_PER_QUEUE 16

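/* Number of pages needed to hold x bytes (x > 0), i.e. DIV_ROUND_UP(x, PAGE_SIZE) */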
#define num_pages(x) (1 + ((((x) - 1) & PAGE_MASK) >> PAGE_SHIFT))

#define req_opcode(x) \
	(((x) >> HFI1_SDMA_REQ_OPCODE_SHIFT) & HFI1_SDMA_REQ_OPCODE_MASK)
#define req_version(x) \
	(((x) >> HFI1_SDMA_REQ_VERSION_SHIFT) & HFI1_SDMA_REQ_VERSION_MASK)
#define req_iovcnt(x) \
	(((x) >> HFI1_SDMA_REQ_IOVCNT_SHIFT) & HFI1_SDMA_REQ_IOVCNT_MASK)

/* Number of BTH.PSN bits used for sequence number in expected rcvs */
#define BTH_SEQ_MASK 0x7ffull

/*
 * Define fields in the KDETH header so we can update the header
 * template.
 */
#define KDETH_OFFSET_SHIFT 0
#define KDETH_OFFSET_MASK 0x7fff
#define KDETH_OM_SHIFT 15
#define KDETH_OM_MASK 0x1
#define KDETH_TID_SHIFT 16
#define KDETH_TID_MASK 0x3ff
#define KDETH_TIDCTRL_SHIFT 26
#define KDETH_TIDCTRL_MASK 0x3
#define KDETH_INTR_SHIFT 28
#define KDETH_INTR_MASK 0x1
#define KDETH_SH_SHIFT 29
#define KDETH_SH_MASK 0x1
#define KDETH_HCRC_UPPER_SHIFT 16
#define KDETH_HCRC_UPPER_MASK 0xff
#define KDETH_HCRC_LOWER_SHIFT 24
#define KDETH_HCRC_LOWER_MASK 0xff

#define PBC2LRH(x) ((((x) & 0xfff) << 2) - 4)
#define LRH2PBC(x) ((((x) >> 2) + 1) & 0xfff)
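/*
 * PBC2LRH()/LRH2PBC() convert between the PBC packet-length field (a
 * dword count) and the packet byte length described by the LRH; the
 * +1/-4 account for the one-dword difference between the two encodings.
 */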

#define KDETH_GET(val, field) \
	(((le32_to_cpu((val))) >> KDETH_##field##_SHIFT) & KDETH_##field##_MASK)
#define KDETH_SET(dw, field, val) do { \
		u32 dwval = le32_to_cpu(dw); \
		dwval &= ~(KDETH_##field##_MASK << KDETH_##field##_SHIFT); \
		dwval |= (((val) & KDETH_##field##_MASK) << \
			  KDETH_##field##_SHIFT); \
		dw = cpu_to_le32(dwval); \
	} while (0)
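/*
 * Illustrative use (the header dwords are little-endian; the macros do
 * the byte-order conversion):
 *
 *	u32 off = KDETH_GET(hdr.kdeth.ver_tid_offset, OFFSET);
 *	KDETH_SET(hdr.kdeth.ver_tid_offset, OFFSET, off / KDETH_OM_LARGE);
 */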

#define AHG_HEADER_SET(arr, idx, dw, bit, width, value)			\
	do {								\
		if ((idx) < ARRAY_SIZE((arr)))				\
			(arr)[(idx++)] = sdma_build_ahg_descriptor(	\
				(__force u16)(value), (dw), (bit),	\
				(width));				\
		else							\
			return -ERANGE;					\
	} while (0)
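/*
 * Note: AHG_HEADER_SET() returns -ERANGE from the *enclosing* function
 * when the descriptor array is full, so it may only be used inside
 * functions that return int.
 */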

/* KDETH OM multipliers and switch over point */
#define KDETH_OM_SMALL 4
#define KDETH_OM_LARGE 64
#define KDETH_OM_MAX_SIZE (1 << ((KDETH_OM_LARGE / KDETH_OM_SMALL) + 1))
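/*
 * With the small (4-byte) multiplier, the 15-bit KDETH offset field can
 * express offsets up to roughly KDETH_OM_MAX_SIZE (128K); larger
 * mappings need the 64-byte multiplier.
 */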

/* Last packet in the request */
#define TXREQ_FLAGS_REQ_LAST_PKT BIT(0)

/* SDMA request flag bits */
#define SDMA_REQ_FOR_THREAD 1
#define SDMA_REQ_SEND_DONE 2
#define SDMA_REQ_HAVE_AHG 3
#define SDMA_REQ_HAS_ERROR 4
#define SDMA_REQ_DONE_ERROR 5

#define SDMA_PKT_Q_INACTIVE BIT(0)
#define SDMA_PKT_Q_ACTIVE BIT(1)
#define SDMA_PKT_Q_DEFERRED BIT(2)

/*
 * Maximum retry attempts to submit a TX request
 * before putting the process to sleep.
 */
#define MAX_DEFER_RETRY_COUNT 1

static unsigned initial_pkt_count = 8;

#define SDMA_IOWAIT_TIMEOUT 1000 /* in milliseconds */

struct sdma_mmu_node;

struct user_sdma_iovec {
	struct list_head list;
	struct iovec iov;
	/* number of pages in this vector */
	unsigned npages;
	/* array of pinned pages for this vector */
	struct page **pages;
	/*
	 * offset into the virtual address space of the vector at
	 * which we last left off.
	 */
	u64 offset;
	struct sdma_mmu_node *node;
};

#define SDMA_CACHE_NODE_EVICT 0

struct sdma_mmu_node {
	struct mmu_rb_node rb;
	struct list_head list;
	struct hfi1_user_sdma_pkt_q *pq;
	atomic_t refcount;
	struct page **pages;
	unsigned npages;
	unsigned long flags;
};

struct user_sdma_request {
	struct sdma_req_info info;
	struct hfi1_user_sdma_pkt_q *pq;
	struct hfi1_user_sdma_comp_q *cq;
	/* This is the original header from user space */
	struct hfi1_pkt_header hdr;
	/*
	 * Pointer to the SDMA engine for this request.
	 * Since different requests could be on different VLs,
	 * each request needs its own engine pointer.
	 */
	struct sdma_engine *sde;
	u8 ahg_idx;
	u32 ahg[9];
	/*
	 * KDETH.Offset (Eager) field
	 * We need to remember the initial value so the headers
	 * can be updated properly.
	 */
	u32 koffset;
	/*
	 * KDETH.OFFSET (TID) field
	 * The offset can cover multiple packets, depending on the
	 * size of the TID entry.
	 */
	u32 tidoffset;
	/*
	 * KDETH.OM
	 * Remember this because the header template always sets it
	 * to 0.
	 */
	u8 omfactor;
	/*
	 * We copy the iovs for this request (based on
	 * info.iovcnt). These are only the data vectors.
	 */
	unsigned data_iovs;
	/* total length of the data in the request */
	u32 data_len;
	/* progress index moving along the iovs array */
	unsigned iov_idx;
	struct user_sdma_iovec iovs[MAX_VECTORS_PER_REQ];
	/* number of elements copied to the tids array */
	u16 n_tids;
	/* TID array values copied from the tid_iov vector */
	u32 *tids;
	u16 tididx;
	u32 sent;
	u64 seqnum;
	u64 seqcomp;
	u64 seqsubmitted;
	struct list_head txps;
	unsigned long flags;
	/* status of the last txreq completed */
	int status;
};

/*
 * A single txreq could span up to 3 physical pages when the MTU
 * is sufficiently large (> 4K). Each of the IOV pointers also
 * needs its own set of flags so the vectors can be handled
 * independently of each other.
 */
struct user_sdma_txreq {
	/* Packet header for the txreq */
	struct hfi1_pkt_header hdr;
	struct sdma_txreq txreq;
	struct list_head list;
	struct user_sdma_request *req;
	u16 flags;
	unsigned busycount;
	u64 seqnum;
};

#define SDMA_DBG(req, fmt, ...)				     \
	hfi1_cdbg(SDMA, "[%u:%u:%u:%u] " fmt, (req)->pq->dd->unit, \
		  (req)->pq->ctxt, (req)->pq->subctxt, (req)->info.comp_idx, \
		  ##__VA_ARGS__)
#define SDMA_Q_DBG(pq, fmt, ...)			 \
	hfi1_cdbg(SDMA, "[%u:%u:%u] " fmt, (pq)->dd->unit, (pq)->ctxt, \
		  (pq)->subctxt, ##__VA_ARGS__)

static int user_sdma_send_pkts(struct user_sdma_request *, unsigned);
static int num_user_pages(const struct iovec *);
static void user_sdma_txreq_cb(struct sdma_txreq *, int);
static inline void pq_update(struct hfi1_user_sdma_pkt_q *);
static void user_sdma_free_request(struct user_sdma_request *, bool);
static int pin_vector_pages(struct user_sdma_request *,
			    struct user_sdma_iovec *);
static void unpin_vector_pages(struct mm_struct *, struct page **, unsigned,
			       unsigned);
static int check_header_template(struct user_sdma_request *,
				 struct hfi1_pkt_header *, u32, u32);
static int set_txreq_header(struct user_sdma_request *,
			    struct user_sdma_txreq *, u32);
static int set_txreq_header_ahg(struct user_sdma_request *,
				struct user_sdma_txreq *, u32);
static inline void set_comp_state(struct hfi1_user_sdma_pkt_q *,
				  struct hfi1_user_sdma_comp_q *,
				  u16, enum hfi1_sdma_comp_state, int);
static inline u32 set_pkt_bth_psn(__be32, u8, u32);
static inline u32 get_lrh_len(struct hfi1_pkt_header, u32 len);

static int defer_packet_queue(
	struct sdma_engine *,
	struct iowait *,
	struct sdma_txreq *,
	unsigned seq);
static void activate_packet_queue(struct iowait *, int);
static bool sdma_rb_filter(struct mmu_rb_node *, unsigned long, unsigned long);
static int sdma_rb_insert(struct rb_root *, struct mmu_rb_node *);
static void sdma_rb_remove(struct rb_root *, struct mmu_rb_node *,
			   struct mm_struct *);
static int sdma_rb_invalidate(struct rb_root *, struct mmu_rb_node *);

static struct mmu_rb_ops sdma_rb_ops = {
	.filter = sdma_rb_filter,
	.insert = sdma_rb_insert,
	.remove = sdma_rb_remove,
	.invalidate = sdma_rb_invalidate
};

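/*
 * iowait "sleep" callback: called when the SDMA ring cannot accept more
 * descriptors for this queue. Returning -EAGAIN lets the submitter retry
 * while the engine is still making progress (bounded by
 * MAX_DEFER_RETRY_COUNT); otherwise the queue is marked deferred and
 * parked on the engine's dmawait list until activate_packet_queue()
 * wakes it back up.
 */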
static int defer_packet_queue(
	struct sdma_engine *sde,
	struct iowait *wait,
	struct sdma_txreq *txreq,
	unsigned seq)
{
	struct hfi1_user_sdma_pkt_q *pq =
		container_of(wait, struct hfi1_user_sdma_pkt_q, busy);
	struct hfi1_ibdev *dev = &pq->dd->verbs_dev;
	struct user_sdma_txreq *tx =
		container_of(txreq, struct user_sdma_txreq, txreq);

	if (sdma_progress(sde, seq, txreq)) {
		if (tx->busycount++ < MAX_DEFER_RETRY_COUNT)
			goto eagain;
	}
	/*
	 * We are assuming that if the list is enqueued somewhere, it
	 * is to the dmawait list since that is the only place where
	 * it is supposed to be enqueued.
	 */
	xchg(&pq->state, SDMA_PKT_Q_DEFERRED);
	write_seqlock(&dev->iowait_lock);
	if (list_empty(&pq->busy.list))
		list_add_tail(&pq->busy.list, &sde->dmawait);
	write_sequnlock(&dev->iowait_lock);
	return -EBUSY;
eagain:
	return -EAGAIN;
}

static void activate_packet_queue(struct iowait *wait, int reason)
{
	struct hfi1_user_sdma_pkt_q *pq =
		container_of(wait, struct hfi1_user_sdma_pkt_q, busy);
	xchg(&pq->state, SDMA_PKT_Q_ACTIVE);
	wake_up(&wait->wait_dma);
}

static void sdma_kmem_cache_ctor(void *obj)
{
	struct user_sdma_txreq *tx = obj;

	memset(tx, 0, sizeof(*tx));
}

int hfi1_user_sdma_alloc_queues(struct hfi1_ctxtdata *uctxt, struct file *fp)
{
	struct hfi1_filedata *fd;
	int ret = 0;
	unsigned memsize;
	char buf[64];
	struct hfi1_devdata *dd;
	struct hfi1_user_sdma_comp_q *cq;
	struct hfi1_user_sdma_pkt_q *pq;
	unsigned long flags;

	if (!uctxt || !fp) {
		ret = -EBADF;
		goto done;
	}

	fd = fp->private_data;

	if (!hfi1_sdma_comp_ring_size) {
		ret = -EINVAL;
		goto done;
	}

	dd = uctxt->dd;

	pq = kzalloc(sizeof(*pq), GFP_KERNEL);
	if (!pq)
		goto pq_nomem;

	memsize = sizeof(*pq->reqs) * hfi1_sdma_comp_ring_size;
	pq->reqs = kzalloc(memsize, GFP_KERNEL);
	if (!pq->reqs)
		goto pq_reqs_nomem;

	memsize = BITS_TO_LONGS(hfi1_sdma_comp_ring_size) * sizeof(long);
	pq->req_in_use = kzalloc(memsize, GFP_KERNEL);
	if (!pq->req_in_use)
		goto pq_reqs_no_in_use;

	INIT_LIST_HEAD(&pq->list);
	pq->dd = dd;
	pq->ctxt = uctxt->ctxt;
	pq->subctxt = fd->subctxt;
	pq->n_max_reqs = hfi1_sdma_comp_ring_size;
	pq->state = SDMA_PKT_Q_INACTIVE;
	atomic_set(&pq->n_reqs, 0);
	init_waitqueue_head(&pq->wait);
	pq->sdma_rb_root = RB_ROOT;
	INIT_LIST_HEAD(&pq->evict);
	spin_lock_init(&pq->evict_lock);
	pq->mm = fd->mm;

	iowait_init(&pq->busy, 0, NULL, defer_packet_queue,
		    activate_packet_queue, NULL);
	pq->reqidx = 0;
	snprintf(buf, 64, "txreq-kmem-cache-%u-%u-%u", dd->unit, uctxt->ctxt,
		 fd->subctxt);
	pq->txreq_cache = kmem_cache_create(buf,
					    sizeof(struct user_sdma_txreq),
					    L1_CACHE_BYTES,
					    SLAB_HWCACHE_ALIGN,
					    sdma_kmem_cache_ctor);
	if (!pq->txreq_cache) {
		dd_dev_err(dd, "[%u] Failed to allocate TxReq cache\n",
			   uctxt->ctxt);
		goto pq_txreq_nomem;
	}
	fd->pq = pq;
	cq = kzalloc(sizeof(*cq), GFP_KERNEL);
	if (!cq)
		goto cq_nomem;

	memsize = PAGE_ALIGN(sizeof(*cq->comps) * hfi1_sdma_comp_ring_size);
	cq->comps = vmalloc_user(memsize);
	if (!cq->comps)
		goto cq_comps_nomem;

	cq->nentries = hfi1_sdma_comp_ring_size;
	fd->cq = cq;

	ret = hfi1_mmu_rb_register(pq->mm, &pq->sdma_rb_root, &sdma_rb_ops);
	if (ret) {
		dd_dev_err(dd, "Failed to register with MMU %d", ret);
		goto done;
	}

	spin_lock_irqsave(&uctxt->sdma_qlock, flags);
	list_add(&pq->list, &uctxt->sdma_queues);
	spin_unlock_irqrestore(&uctxt->sdma_qlock, flags);
	goto done;

cq_comps_nomem:
	kfree(cq);
cq_nomem:
	kmem_cache_destroy(pq->txreq_cache);
pq_txreq_nomem:
	kfree(pq->req_in_use);
pq_reqs_no_in_use:
	kfree(pq->reqs);
pq_reqs_nomem:
	kfree(pq);
	fd->pq = NULL;
pq_nomem:
	ret = -ENOMEM;
done:
	return ret;
}

int hfi1_user_sdma_free_queues(struct hfi1_filedata *fd)
{
	struct hfi1_ctxtdata *uctxt = fd->uctxt;
	struct hfi1_user_sdma_pkt_q *pq;
	unsigned long flags;

	hfi1_cdbg(SDMA, "[%u:%u:%u] Freeing user SDMA queues", uctxt->dd->unit,
		  uctxt->ctxt, fd->subctxt);
	pq = fd->pq;
	if (pq) {
		hfi1_mmu_rb_unregister(&pq->sdma_rb_root);
		spin_lock_irqsave(&uctxt->sdma_qlock, flags);
		if (!list_empty(&pq->list))
			list_del_init(&pq->list);
		spin_unlock_irqrestore(&uctxt->sdma_qlock, flags);
		iowait_sdma_drain(&pq->busy);
		/* Wait until all requests have been freed. */
		wait_event_interruptible(
			pq->wait,
			(ACCESS_ONCE(pq->state) == SDMA_PKT_Q_INACTIVE));
		kfree(pq->reqs);
		kfree(pq->req_in_use);
		kmem_cache_destroy(pq->txreq_cache);
		kfree(pq);
		fd->pq = NULL;
	}
	if (fd->cq) {
		vfree(fd->cq->comps);
		kfree(fd->cq);
		fd->cq = NULL;
	}
	return 0;
}

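/*
 * Map a DLID to a small, stable selector used to spread traffic for
 * different destinations across SDMA engines. Selectors are handed out
 * lazily, in order, the first time a given 8-bit hash of the DLID is
 * seen.
 */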
static u8 dlid_to_selector(u16 dlid)
{
	static u8 mapping[256];
	static int initialized;
	static u8 next;
	int hash;

	if (!initialized) {
		memset(mapping, 0xFF, 256);
		initialized = 1;
	}

	hash = ((dlid >> 8) ^ dlid) & 0xFF;
	if (mapping[hash] == 0xFF) {
		mapping[hash] = next;
		next = (next + 1) & 0x7F;
	}

	return mapping[hash];
}

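/*
 * Top-level entry point for a user SDMA request: validate the request
 * info and header, claim a completion-ring slot, pin the payload
 * iovecs, pick an SDMA engine, and submit packets. The call blocks
 * until all packets have been queued to the engine, but does not wait
 * for their completions.
 */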
int hfi1_user_sdma_process_request(struct file *fp, struct iovec *iovec,
				   unsigned long dim, unsigned long *count)
{
	int ret = 0, i;
	struct hfi1_filedata *fd = fp->private_data;
	struct hfi1_ctxtdata *uctxt = fd->uctxt;
	struct hfi1_user_sdma_pkt_q *pq = fd->pq;
	struct hfi1_user_sdma_comp_q *cq = fd->cq;
	struct hfi1_devdata *dd = pq->dd;
	unsigned long idx = 0;
	u8 pcount = initial_pkt_count;
	struct sdma_req_info info;
	struct user_sdma_request *req;
	u8 opcode, sc, vl;
	int req_queued = 0;
	u16 dlid;
	u8 selector;

	if (iovec[idx].iov_len < sizeof(info) + sizeof(req->hdr)) {
		hfi1_cdbg(
		   SDMA,
		   "[%u:%u:%u] First vector not big enough for header %lu/%lu",
		   dd->unit, uctxt->ctxt, fd->subctxt,
		   iovec[idx].iov_len, sizeof(info) + sizeof(req->hdr));
		return -EINVAL;
	}
	ret = copy_from_user(&info, iovec[idx].iov_base, sizeof(info));
	if (ret) {
		hfi1_cdbg(SDMA, "[%u:%u:%u] Failed to copy info QW (%d)",
			  dd->unit, uctxt->ctxt, fd->subctxt, ret);
		return -EFAULT;
	}

	trace_hfi1_sdma_user_reqinfo(dd, uctxt->ctxt, fd->subctxt,
				     (u16 *)&info);

	if (info.comp_idx >= hfi1_sdma_comp_ring_size) {
		hfi1_cdbg(SDMA,
			  "[%u:%u:%u:%u] Invalid comp index",
			  dd->unit, uctxt->ctxt, fd->subctxt, info.comp_idx);
		return -EINVAL;
	}

	/*
	 * Sanity check the header io vector count.  Need at least 1 vector
	 * (header) and cannot be larger than the actual io vector count.
	 */
	if (req_iovcnt(info.ctrl) < 1 || req_iovcnt(info.ctrl) > dim) {
		hfi1_cdbg(SDMA,
			  "[%u:%u:%u:%u] Invalid iov count %d, dim %ld",
			  dd->unit, uctxt->ctxt, fd->subctxt, info.comp_idx,
			  req_iovcnt(info.ctrl), dim);
		return -EINVAL;
	}

	if (!info.fragsize) {
		hfi1_cdbg(SDMA,
			  "[%u:%u:%u:%u] Request does not specify fragsize",
			  dd->unit, uctxt->ctxt, fd->subctxt, info.comp_idx);
		return -EINVAL;
	}

	/* Try to claim the request. */
	if (test_and_set_bit(info.comp_idx, pq->req_in_use)) {
		hfi1_cdbg(SDMA, "[%u:%u:%u] Entry %u is in use",
			  dd->unit, uctxt->ctxt, fd->subctxt,
			  info.comp_idx);
		return -EBADSLT;
	}
	/*
	 * All safety checks have been done and this request has been claimed.
	 */
	hfi1_cdbg(SDMA, "[%u:%u:%u] Using req/comp entry %u\n", dd->unit,
		  uctxt->ctxt, fd->subctxt, info.comp_idx);
	req = pq->reqs + info.comp_idx;
	memset(req, 0, sizeof(*req));
	req->data_iovs = req_iovcnt(info.ctrl) - 1; /* subtract header vector */
	req->pq = pq;
	req->cq = cq;
	req->status = -1;
	INIT_LIST_HEAD(&req->txps);

	memcpy(&req->info, &info, sizeof(info));

	if (req_opcode(info.ctrl) == EXPECTED) {
		/* expected must have a TID info and at least one data vector */
		if (req->data_iovs < 2) {
			SDMA_DBG(req,
				 "Not enough vectors for expected request");
			ret = -EINVAL;
			goto free_req;
		}
		req->data_iovs--;
	}

	if (!info.npkts || req->data_iovs > MAX_VECTORS_PER_REQ) {
		SDMA_DBG(req, "Too many vectors (%u/%u)", req->data_iovs,
			 MAX_VECTORS_PER_REQ);
		ret = -EINVAL;
		goto free_req;
	}
	/* Copy the header from the user buffer */
	ret = copy_from_user(&req->hdr, iovec[idx].iov_base + sizeof(info),
			     sizeof(req->hdr));
	if (ret) {
		SDMA_DBG(req, "Failed to copy header template (%d)", ret);
		ret = -EFAULT;
		goto free_req;
	}

	/* If Static rate control is not enabled, sanitize the header. */
	if (!HFI1_CAP_IS_USET(STATIC_RATE_CTRL))
		req->hdr.pbc[2] = 0;

	/* Validate the opcode. Do not trust packets from user space blindly. */
	opcode = (be32_to_cpu(req->hdr.bth[0]) >> 24) & 0xff;
	if ((opcode & USER_OPCODE_CHECK_MASK) !=
	    USER_OPCODE_CHECK_VAL) {
		SDMA_DBG(req, "Invalid opcode (%d)", opcode);
		ret = -EINVAL;
		goto free_req;
	}
	/*
	 * Validate the vl. Do not trust packets from user space blindly.
	 * VL comes from PBC, SC comes from LRH, and the VL needs to
	 * match the SC look up.
	 */
	vl = (le16_to_cpu(req->hdr.pbc[0]) >> 12) & 0xF;
	sc = (((be16_to_cpu(req->hdr.lrh[0]) >> 12) & 0xF) |
	      (((le16_to_cpu(req->hdr.pbc[1]) >> 14) & 0x1) << 4));
	if (vl >= dd->pport->vls_operational ||
	    vl != sc_to_vlt(dd, sc)) {
		SDMA_DBG(req, "Invalid SC(%u)/VL(%u)", sc, vl);
		ret = -EINVAL;
		goto free_req;
	}

	/* Checking P_KEY for requests from user-space */
	if (egress_pkey_check(dd->pport, req->hdr.lrh, req->hdr.bth, sc,
			      PKEY_CHECK_INVALID)) {
		ret = -EINVAL;
		goto free_req;
	}

	/*
	 * We should also check BTH.lnh. If it says the next header is a GRH,
	 * then the RXE parsing will be off and will land in the middle of
	 * the KDETH or miss it entirely.
	 */
	if ((be16_to_cpu(req->hdr.lrh[0]) & 0x3) == HFI1_LRH_GRH) {
		SDMA_DBG(req, "User tried to pass in a GRH");
		ret = -EINVAL;
		goto free_req;
	}

	req->koffset = le32_to_cpu(req->hdr.kdeth.swdata[6]);
	/*
	 * Calculate the initial TID offset based on the values of
	 * KDETH.OFFSET and KDETH.OM that are passed in.
	 */
	req->tidoffset = KDETH_GET(req->hdr.kdeth.ver_tid_offset, OFFSET) *
		(KDETH_GET(req->hdr.kdeth.ver_tid_offset, OM) ?
		 KDETH_OM_LARGE : KDETH_OM_SMALL);
	SDMA_DBG(req, "Initial TID offset %u", req->tidoffset);
	idx++;

	/* Save all the IO vector structures */
	for (i = 0; i < req->data_iovs; i++) {
		INIT_LIST_HEAD(&req->iovs[i].list);
		memcpy(&req->iovs[i].iov, iovec + idx++, sizeof(struct iovec));
		ret = pin_vector_pages(req, &req->iovs[i]);
		if (ret) {
			req->status = ret;
			goto free_req;
		}
		req->data_len += req->iovs[i].iov.iov_len;
	}
	SDMA_DBG(req, "total data length %u", req->data_len);

	if (pcount > req->info.npkts)
		pcount = req->info.npkts;
	/*
	 * Copy any TID info
	 * User space will provide the TID info only when the
	 * request type is EXPECTED. This is true even if there is
	 * only one packet in the request and the header is already
	 * set up. The reason for the singular TID case is that the
	 * driver needs to perform safety checks.
	 */
	if (req_opcode(req->info.ctrl) == EXPECTED) {
		u16 ntids = iovec[idx].iov_len / sizeof(*req->tids);

		if (!ntids || ntids > MAX_TID_PAIR_ENTRIES) {
			ret = -EINVAL;
			goto free_req;
		}
		req->tids = kcalloc(ntids, sizeof(*req->tids), GFP_KERNEL);
		if (!req->tids) {
			ret = -ENOMEM;
			goto free_req;
		}
		/*
		 * We have to copy all of the tids because they may vary
		 * in size and, therefore, the TID count might not be
		 * equal to the pkt count. However, there is no way to
		 * tell at this point.
		 */
		ret = copy_from_user(req->tids, iovec[idx].iov_base,
				     ntids * sizeof(*req->tids));
		if (ret) {
			SDMA_DBG(req, "Failed to copy %d TIDs (%d)",
				 ntids, ret);
			ret = -EFAULT;
			goto free_req;
		}
		req->n_tids = ntids;
		idx++;
	}

	dlid = be16_to_cpu(req->hdr.lrh[1]);
	selector = dlid_to_selector(dlid);

	/* Have to select the engine */
	req->sde = sdma_select_engine_vl(dd,
					 (u32)(uctxt->ctxt + fd->subctxt +
					       selector),
					 vl);
	if (!req->sde || !sdma_running(req->sde)) {
		ret = -ECOMM;
		goto free_req;
	}

	/* We don't need an AHG entry if the request contains only one packet */
	if (req->info.npkts > 1 && HFI1_CAP_IS_USET(SDMA_AHG)) {
		int ahg = sdma_ahg_alloc(req->sde);

		if (likely(ahg >= 0)) {
			req->ahg_idx = (u8)ahg;
			set_bit(SDMA_REQ_HAVE_AHG, &req->flags);
		}
	}

	set_comp_state(pq, cq, info.comp_idx, QUEUED, 0);
	atomic_inc(&pq->n_reqs);
	req_queued = 1;
	/* Send the first N packets in the request to buy us some time */
	ret = user_sdma_send_pkts(req, pcount);
	if (unlikely(ret < 0 && ret != -EBUSY)) {
		req->status = ret;
		goto free_req;
	}

	/*
	 * It is possible that the SDMA engine would have processed all the
	 * submitted packets by the time we get here. Therefore, only set
	 * packet queue state to ACTIVE if there are still uncompleted
	 * requests.
	 */
	if (atomic_read(&pq->n_reqs))
		xchg(&pq->state, SDMA_PKT_Q_ACTIVE);

	/*
	 * This is a somewhat blocking send implementation.
	 * The driver will block the caller until all packets of the
	 * request have been submitted to the SDMA engine. However, it
	 * will not wait for send completions.
	 */
	while (!test_bit(SDMA_REQ_SEND_DONE, &req->flags)) {
		ret = user_sdma_send_pkts(req, pcount);
		if (ret < 0) {
			if (ret != -EBUSY) {
				req->status = ret;
				set_bit(SDMA_REQ_DONE_ERROR, &req->flags);
				if (ACCESS_ONCE(req->seqcomp) ==
				    req->seqsubmitted - 1)
					goto free_req;
				return ret;
			}
			wait_event_interruptible_timeout(
				pq->busy.wait_dma,
				(pq->state == SDMA_PKT_Q_ACTIVE),
				msecs_to_jiffies(
					SDMA_IOWAIT_TIMEOUT));
		}
	}
	*count += idx;
	return 0;
free_req:
	user_sdma_free_request(req, true);
	if (req_queued)
		pq_update(pq);
	set_comp_state(pq, cq, info.comp_idx, ERROR, req->status);
	return ret;
}

static inline u32 compute_data_length(struct user_sdma_request *req,
				      struct user_sdma_txreq *tx)
{
	/*
	 * Determine the proper size of the packet data.
	 * The size of the data of the first packet is in the header
	 * template. However, it includes the header and ICRC, which need
	 * to be subtracted.
	 * The minimum representable packet data length in a header is 4
	 * bytes; therefore, when the requested data length is less than 4
	 * bytes, there is only one packet, and the packet data length is
	 * equal to the request data length.
	 * The size of the remaining packets is the minimum of the frag
	 * size (MTU) or remaining data in the request.
	 */
	u32 len;

	if (!req->seqnum) {
		if (req->data_len < sizeof(u32))
			len = req->data_len;
		else
			len = ((be16_to_cpu(req->hdr.lrh[2]) << 2) -
			       (sizeof(tx->hdr) - 4));
	} else if (req_opcode(req->info.ctrl) == EXPECTED) {
		u32 tidlen = EXP_TID_GET(req->tids[req->tididx], LEN) *
			PAGE_SIZE;
		/*
		 * Get the data length based on the remaining space in the
		 * TID pair.
		 */
		len = min(tidlen - req->tidoffset, (u32)req->info.fragsize);
		/* If we've filled up the TID pair, move to the next one. */
		if (unlikely(!len) && ++req->tididx < req->n_tids &&
		    req->tids[req->tididx]) {
			tidlen = EXP_TID_GET(req->tids[req->tididx],
					     LEN) * PAGE_SIZE;
			req->tidoffset = 0;
			len = min_t(u32, tidlen, req->info.fragsize);
		}
		/*
		 * Since the TID pairs map entire pages, make sure that we
		 * are not going to try to send more data than we have
		 * remaining.
		 */
		len = min(len, req->data_len - req->sent);
	} else {
		len = min(req->data_len - req->sent, (u32)req->info.fragsize);
	}
	SDMA_DBG(req, "Data Length = %u", len);
	return len;
}

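/* Round a byte length up to the next multiple of 4 (dword padding). */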
static inline u32 pad_len(u32 len)
{
	if (len & (sizeof(u32) - 1))
		len += sizeof(u32) - (len & (sizeof(u32) - 1));
	return len;
}

static inline u32 get_lrh_len(struct hfi1_pkt_header hdr, u32 len)
{
	/* (Size of complete header - size of PBC) + 4B ICRC + data length */
	return ((sizeof(hdr) - sizeof(hdr.pbc)) + 4 + len);
}

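/*
 * Build and submit up to @maxpkts packets for @req: allocate a txreq,
 * size the payload, set up the packet header (via AHG or a full header
 * copy), attach the pinned payload pages, and hand the resulting list
 * to the SDMA engine.
 */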
| 889 | static int user_sdma_send_pkts(struct user_sdma_request *req, unsigned maxpkts) |
| 890 | { |
| 891 | int ret = 0; |
| 892 | unsigned npkts = 0; |
| 893 | struct user_sdma_txreq *tx = NULL; |
| 894 | struct hfi1_user_sdma_pkt_q *pq = NULL; |
| 895 | struct user_sdma_iovec *iovec = NULL; |
| 896 | |
Mitko Haralanov | faa98b8 | 2015-12-08 17:10:11 -0500 | [diff] [blame] | 897 | if (!req->pq) |
| 898 | return -EINVAL; |
Mike Marciniszyn | 7724105 | 2015-07-30 15:17:43 -0400 | [diff] [blame] | 899 | |
| 900 | pq = req->pq; |
| 901 | |
Mitko Haralanov | 6a5464f | 2015-12-08 17:10:12 -0500 | [diff] [blame] | 902 | /* If tx completion has reported an error, we are done. */ |
| 903 | if (test_bit(SDMA_REQ_HAS_ERROR, &req->flags)) { |
| 904 | set_bit(SDMA_REQ_DONE_ERROR, &req->flags); |
| 905 | return -EFAULT; |
| 906 | } |
| 907 | |
Mike Marciniszyn | 7724105 | 2015-07-30 15:17:43 -0400 | [diff] [blame] | 908 | /* |
| 909 | * Check if we might have sent the entire request already |
| 910 | */ |
| 911 | if (unlikely(req->seqnum == req->info.npkts)) { |
| 912 | if (!list_empty(&req->txps)) |
| 913 | goto dosend; |
Mitko Haralanov | faa98b8 | 2015-12-08 17:10:11 -0500 | [diff] [blame] | 914 | return ret; |
Mike Marciniszyn | 7724105 | 2015-07-30 15:17:43 -0400 | [diff] [blame] | 915 | } |
| 916 | |
| 917 | if (!maxpkts || maxpkts > req->info.npkts - req->seqnum) |
| 918 | maxpkts = req->info.npkts - req->seqnum; |
| 919 | |
| 920 | while (npkts < maxpkts) { |
| 921 | u32 datalen = 0, queued = 0, data_sent = 0; |
| 922 | u64 iov_offset = 0; |
| 923 | |
| 924 | /* |
| 925 | * Check whether any of the completions have come back |
| 926 | * with errors. If so, we are not going to process any |
| 927 | * more packets from this request. |
| 928 | */ |
| 929 | if (test_bit(SDMA_REQ_HAS_ERROR, &req->flags)) { |
| 930 | set_bit(SDMA_REQ_DONE_ERROR, &req->flags); |
Mitko Haralanov | faa98b8 | 2015-12-08 17:10:11 -0500 | [diff] [blame] | 931 | return -EFAULT; |
Mike Marciniszyn | 7724105 | 2015-07-30 15:17:43 -0400 | [diff] [blame] | 932 | } |
| 933 | |
| 934 | tx = kmem_cache_alloc(pq->txreq_cache, GFP_KERNEL); |
Mitko Haralanov | faa98b8 | 2015-12-08 17:10:11 -0500 | [diff] [blame] | 935 | if (!tx) |
| 936 | return -ENOMEM; |
| 937 | |
Mike Marciniszyn | 7724105 | 2015-07-30 15:17:43 -0400 | [diff] [blame] | 938 | tx->flags = 0; |
| 939 | tx->req = req; |
| 940 | tx->busycount = 0; |
Mitko Haralanov | a0d4069 | 2015-12-08 17:10:13 -0500 | [diff] [blame] | 941 | INIT_LIST_HEAD(&tx->list); |
Mike Marciniszyn | 7724105 | 2015-07-30 15:17:43 -0400 | [diff] [blame] | 942 | |
| 943 | if (req->seqnum == req->info.npkts - 1) |
Mitko Haralanov | b9fb6318 | 2015-10-26 10:28:37 -0400 | [diff] [blame] | 944 | tx->flags |= TXREQ_FLAGS_REQ_LAST_PKT; |
Mike Marciniszyn | 7724105 | 2015-07-30 15:17:43 -0400 | [diff] [blame] | 945 | |
| 946 | /* |
| 947 | * Calculate the payload size - this is min of the fragment |
| 948 | * (MTU) size or the remaining bytes in the request but only |
| 949 | * if we have payload data. |
| 950 | */ |
| 951 | if (req->data_len) { |
| 952 | iovec = &req->iovs[req->iov_idx]; |
| 953 | if (ACCESS_ONCE(iovec->offset) == iovec->iov.iov_len) { |
| 954 | if (++req->iov_idx == req->data_iovs) { |
| 955 | ret = -EFAULT; |
| 956 | goto free_txreq; |
| 957 | } |
| 958 | iovec = &req->iovs[req->iov_idx]; |
| 959 | WARN_ON(iovec->offset); |
| 960 | } |
| 961 | |
Mike Marciniszyn | 7724105 | 2015-07-30 15:17:43 -0400 | [diff] [blame] | 962 | datalen = compute_data_length(req, tx); |
| 963 | if (!datalen) { |
| 964 | SDMA_DBG(req, |
| 965 | "Request has data but pkt len is 0"); |
| 966 | ret = -EFAULT; |
| 967 | goto free_tx; |
| 968 | } |
| 969 | } |
| 970 | |
| 971 | if (test_bit(SDMA_REQ_HAVE_AHG, &req->flags)) { |
| 972 | if (!req->seqnum) { |
| 973 | u16 pbclen = le16_to_cpu(req->hdr.pbc[0]); |
Ira Weiny | c492980 | 2016-07-27 21:08:42 -0400 | [diff] [blame] | 974 | u32 lrhlen = get_lrh_len(req->hdr, |
| 975 | pad_len(datalen)); |
Mike Marciniszyn | 7724105 | 2015-07-30 15:17:43 -0400 | [diff] [blame] | 976 | /* |
| 977 | * Copy the request header into the tx header |
| 978 | * because the HW needs a cacheline-aligned |
| 979 | * address. |
| 980 | * This copy can be optimized out if the hdr |
| 981 | * member of user_sdma_request were also |
| 982 | * cacheline aligned. |
| 983 | */ |
| 984 | memcpy(&tx->hdr, &req->hdr, sizeof(tx->hdr)); |
| 985 | if (PBC2LRH(pbclen) != lrhlen) { |
| 986 | pbclen = (pbclen & 0xf000) | |
| 987 | LRH2PBC(lrhlen); |
| 988 | tx->hdr.pbc[0] = cpu_to_le16(pbclen); |
| 989 | } |
| 990 | ret = sdma_txinit_ahg(&tx->txreq, |
| 991 | SDMA_TXREQ_F_AHG_COPY, |
| 992 | sizeof(tx->hdr) + datalen, |
| 993 | req->ahg_idx, 0, NULL, 0, |
| 994 | user_sdma_txreq_cb); |
| 995 | if (ret) |
| 996 | goto free_tx; |
| 997 | ret = sdma_txadd_kvaddr(pq->dd, &tx->txreq, |
| 998 | &tx->hdr, |
| 999 | sizeof(tx->hdr)); |
| 1000 | if (ret) |
| 1001 | goto free_txreq; |
| 1002 | } else { |
| 1003 | int changes; |
| 1004 | |
| 1005 | changes = set_txreq_header_ahg(req, tx, |
| 1006 | datalen); |
| 1007 | if (changes < 0) |
| 1008 | goto free_tx; |
| 1009 | sdma_txinit_ahg(&tx->txreq, |
| 1010 | SDMA_TXREQ_F_USE_AHG, |
| 1011 | datalen, req->ahg_idx, changes, |
| 1012 | req->ahg, sizeof(req->hdr), |
| 1013 | user_sdma_txreq_cb); |
| 1014 | } |
| 1015 | } else { |
| 1016 | ret = sdma_txinit(&tx->txreq, 0, sizeof(req->hdr) + |
| 1017 | datalen, user_sdma_txreq_cb); |
| 1018 | if (ret) |
| 1019 | goto free_tx; |
| 1020 | /* |
| 1021 | * Modify the header for this packet. This only needs |
| 1022 | * to be done if we are not going to use AHG. Otherwise, |
| 1023 | * the HW will do it based on the changes we gave it |
| 1024 | * during sdma_txinit_ahg(). |
| 1025 | */ |
| 1026 | ret = set_txreq_header(req, tx, datalen); |
| 1027 | if (ret) |
| 1028 | goto free_txreq; |
| 1029 | } |
| 1030 | |
| 1031 | /* |
| 1032 | * If the request contains any data vectors, add up to |
| 1033 | * fragsize bytes to the descriptor. |
| 1034 | */ |
| 1035 | while (queued < datalen && |
| 1036 | (req->sent + data_sent) < req->data_len) { |
| 1037 | unsigned long base, offset; |
| 1038 | unsigned pageidx, len; |
| 1039 | |
| 1040 | base = (unsigned long)iovec->iov.iov_base; |
Amitoj Kaur Chawla | 72a5f6a | 2016-02-20 19:08:02 +0530 | [diff] [blame] | 1041 | offset = offset_in_page(base + iovec->offset + |
| 1042 | iov_offset); |
Mike Marciniszyn | 7724105 | 2015-07-30 15:17:43 -0400 | [diff] [blame] | 1043 | pageidx = (((iovec->offset + iov_offset + |
| 1044 | base) - (base & PAGE_MASK)) >> PAGE_SHIFT); |
| 1045 | len = offset + req->info.fragsize > PAGE_SIZE ? |
| 1046 | PAGE_SIZE - offset : req->info.fragsize; |
| 1047 | len = min((datalen - queued), len); |
| 1048 | ret = sdma_txadd_page(pq->dd, &tx->txreq, |
| 1049 | iovec->pages[pageidx], |
| 1050 | offset, len); |
| 1051 | if (ret) { |
Mitko Haralanov | a0d4069 | 2015-12-08 17:10:13 -0500 | [diff] [blame] | 1052 | SDMA_DBG(req, "SDMA txreq add page failed %d\n", |
| 1053 | ret); |
Mike Marciniszyn | 7724105 | 2015-07-30 15:17:43 -0400 | [diff] [blame] | 1054 | goto free_txreq; |
| 1055 | } |
| 1056 | iov_offset += len; |
| 1057 | queued += len; |
| 1058 | data_sent += len; |
| 1059 | if (unlikely(queued < datalen && |
| 1060 | pageidx == iovec->npages && |
Mitko Haralanov | 5cd3a88d | 2016-03-08 11:15:22 -0800 | [diff] [blame] | 1061 | req->iov_idx < req->data_iovs - 1)) { |
Mike Marciniszyn | 7724105 | 2015-07-30 15:17:43 -0400 | [diff] [blame] | 1062 | iovec->offset += iov_offset; |
| 1063 | iovec = &req->iovs[++req->iov_idx]; |
Mike Marciniszyn | 7724105 | 2015-07-30 15:17:43 -0400 | [diff] [blame] | 1064 | iov_offset = 0; |
Mike Marciniszyn | 7724105 | 2015-07-30 15:17:43 -0400 | [diff] [blame] | 1065 | } |
| 1066 | } |
| 1067 | /* |
| 1068 | * The txreq was submitted successfully so we can update |
| 1069 | * the counters. |
| 1070 | */ |
| 1071 | req->koffset += datalen; |
| 1072 | if (req_opcode(req->info.ctrl) == EXPECTED) |
| 1073 | req->tidoffset += datalen; |
| 1074 | req->sent += data_sent; |
Mitko Haralanov | 5cd3a88d | 2016-03-08 11:15:22 -0800 | [diff] [blame] | 1075 | if (req->data_len) |
| 1076 | iovec->offset += iov_offset; |
Mitko Haralanov | c7cbf2f | 2016-02-03 14:35:23 -0800 | [diff] [blame] | 1077 | list_add_tail(&tx->txreq.list, &req->txps); |
Mike Marciniszyn | 7724105 | 2015-07-30 15:17:43 -0400 | [diff] [blame] | 1078 | /* |
| 1079 | * It is important to increment this here as it is used to |
| 1080 | * generate the BTH.PSN and, therefore, can't be bulk-updated |
| 1081 | * outside of the loop. |
| 1082 | */ |
| 1083 | tx->seqnum = req->seqnum++; |
Mike Marciniszyn | 7724105 | 2015-07-30 15:17:43 -0400 | [diff] [blame] | 1084 | npkts++; |
| 1085 | } |
| 1086 | dosend: |
| 1087 | ret = sdma_send_txlist(req->sde, &pq->busy, &req->txps); |
Mitko Haralanov | c7cbf2f | 2016-02-03 14:35:23 -0800 | [diff] [blame] | 1088 | if (list_empty(&req->txps)) { |
| 1089 | req->seqsubmitted = req->seqnum; |
Mike Marciniszyn | 7724105 | 2015-07-30 15:17:43 -0400 | [diff] [blame] | 1090 | if (req->seqnum == req->info.npkts) { |
| 1091 | set_bit(SDMA_REQ_SEND_DONE, &req->flags); |
| 1092 | /* |
| 1093 | * The txreq has already been submitted to the HW queue |
| 1094 | * so we can free the AHG entry now. Corruption will not |
| 1095 | * happen due to the sequential manner in which |
| 1096 | * descriptors are processed. |
| 1097 | */ |
| 1098 | if (test_bit(SDMA_REQ_HAVE_AHG, &req->flags)) |
| 1099 | sdma_ahg_free(req->sde, req->ahg_idx); |
| 1100 | } |
Mitko Haralanov | c7cbf2f | 2016-02-03 14:35:23 -0800 | [diff] [blame] | 1101 | } else if (ret > 0) { |
| 1102 | req->seqsubmitted += ret; |
| 1103 | ret = 0; |
| 1104 | } |
Mitko Haralanov | faa98b8 | 2015-12-08 17:10:11 -0500 | [diff] [blame] | 1105 | return ret; |
| 1106 | |
Mike Marciniszyn | 7724105 | 2015-07-30 15:17:43 -0400 | [diff] [blame] | 1107 | free_txreq: |
| 1108 | sdma_txclean(pq->dd, &tx->txreq); |
| 1109 | free_tx: |
| 1110 | kmem_cache_free(pq->txreq_cache, tx); |
Mike Marciniszyn | 7724105 | 2015-07-30 15:17:43 -0400 | [diff] [blame] | 1111 | return ret; |
| 1112 | } |
| 1113 | |
| 1114 | /* |
| 1115 | * How many pages in this iovec element? |
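| | * Worked example (illustrative, assuming 4 KiB pages): |
| | * iov_base = 0x1ffc and iov_len = 8 give spage = 0x1000 and |
| | * epage = 0x2000, so the result is 1 + (0x1000 >> 12) = 2, |
| | * since the 8 bytes straddle a page boundary. |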
| 1116 | */ |
| 1117 | static inline int num_user_pages(const struct iovec *iov) |
| 1118 | { |
Jubin John | 50e5dcb | 2016-02-14 20:19:41 -0800 | [diff] [blame] | 1119 | const unsigned long addr = (unsigned long)iov->iov_base; |
Mike Marciniszyn | 7724105 | 2015-07-30 15:17:43 -0400 | [diff] [blame] | 1120 | const unsigned long len = iov->iov_len; |
| 1121 | const unsigned long spage = addr & PAGE_MASK; |
| 1122 | const unsigned long epage = (addr + len - 1) & PAGE_MASK; |
| 1123 | |
| 1124 | return 1 + ((epage - spage) >> PAGE_SHIFT); |
| 1125 | } |
| 1126 | |
Mitko Haralanov | 5511d78 | 2016-03-08 11:15:44 -0800 | [diff] [blame] | 1127 | static u32 sdma_cache_evict(struct hfi1_user_sdma_pkt_q *pq, u32 npages) |
| 1128 | { |
| 1129 | u32 cleared = 0; |
| 1130 | struct sdma_mmu_node *node, *ptr; |
Mitko Haralanov | e88c927 | 2016-04-12 10:46:53 -0700 | [diff] [blame] | 1131 | LIST_HEAD(to_evict); |
Mike Marciniszyn | 7724105 | 2015-07-30 15:17:43 -0400 | [diff] [blame] | 1132 | |
Mitko Haralanov | e88c927 | 2016-04-12 10:46:53 -0700 | [diff] [blame] | 1133 | spin_lock(&pq->evict_lock); |
Mitko Haralanov | 5511d78 | 2016-03-08 11:15:44 -0800 | [diff] [blame] | 1134 | list_for_each_entry_safe_reverse(node, ptr, &pq->evict, list) { |
| 1135 | /* Make sure that no one is still using the node. */ |
| 1136 | if (!atomic_read(&node->refcount)) { |
Mitko Haralanov | e88c927 | 2016-04-12 10:46:53 -0700 | [diff] [blame] | 1137 | set_bit(SDMA_CACHE_NODE_EVICT, &node->flags); |
| 1138 | list_move(&node->list, &to_evict); |
Mitko Haralanov | 5511d78 | 2016-03-08 11:15:44 -0800 | [diff] [blame] | 1140 | cleared += node->npages; |
Mitko Haralanov | 5511d78 | 2016-03-08 11:15:44 -0800 | [diff] [blame] | 1141 | if (cleared >= npages) |
| 1142 | break; |
| 1143 | } |
Mike Marciniszyn | 7724105 | 2015-07-30 15:17:43 -0400 | [diff] [blame] | 1144 | } |
Mitko Haralanov | e88c927 | 2016-04-12 10:46:53 -0700 | [diff] [blame] | 1145 | spin_unlock(&pq->evict_lock); |
| 1146 | |
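| | /* |
| | * Do the RB-tree removal outside of evict_lock; presumably |
| | * hfi1_mmu_rb_remove() ends up in the remove callback, which |
| | * re-acquires evict_lock and frees the node, so calling it |
| | * with evict_lock held would deadlock. |
| | */ |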
| 1147 | list_for_each_entry_safe(node, ptr, &to_evict, list) |
| 1148 | hfi1_mmu_rb_remove(&pq->sdma_rb_root, &node->rb); |
| 1149 | |
Mitko Haralanov | 5511d78 | 2016-03-08 11:15:44 -0800 | [diff] [blame] | 1150 | return cleared; |
Mike Marciniszyn | 7724105 | 2015-07-30 15:17:43 -0400 | [diff] [blame] | 1151 | } |
| 1152 | |
Mike Marciniszyn | 7724105 | 2015-07-30 15:17:43 -0400 | [diff] [blame] | 1153 | static int pin_vector_pages(struct user_sdma_request *req, |
Ira Weiny | 72720dd | 2016-07-28 12:27:25 -0400 | [diff] [blame] | 1154 | struct user_sdma_iovec *iovec) |
| 1155 | { |
Mitko Haralanov | 5511d78 | 2016-03-08 11:15:44 -0800 | [diff] [blame] | 1156 | int ret = 0, pinned, npages, cleared; |
Mitko Haralanov | 5cd3a88d | 2016-03-08 11:15:22 -0800 | [diff] [blame] | 1157 | struct page **pages; |
| 1158 | struct hfi1_user_sdma_pkt_q *pq = req->pq; |
| 1159 | struct sdma_mmu_node *node = NULL; |
| 1160 | struct mmu_rb_node *rb_node; |
Mike Marciniszyn | 7724105 | 2015-07-30 15:17:43 -0400 | [diff] [blame] | 1161 | |
Mitko Haralanov | f53af85 | 2016-04-12 10:46:47 -0700 | [diff] [blame] | 1162 | rb_node = hfi1_mmu_rb_extract(&pq->sdma_rb_root, |
| 1163 | (unsigned long)iovec->iov.iov_base, |
| 1164 | iovec->iov.iov_len); |
Mitko Haralanov | f19bd64 | 2016-04-12 10:45:57 -0700 | [diff] [blame] | 1165 | if (rb_node && !IS_ERR(rb_node)) |
Mitko Haralanov | 5cd3a88d | 2016-03-08 11:15:22 -0800 | [diff] [blame] | 1166 | node = container_of(rb_node, struct sdma_mmu_node, rb); |
Mitko Haralanov | f19bd64 | 2016-04-12 10:45:57 -0700 | [diff] [blame] | 1167 | else |
| 1168 | rb_node = NULL; |
Mitko Haralanov | a0d4069 | 2015-12-08 17:10:13 -0500 | [diff] [blame] | 1169 | |
Mitko Haralanov | 5cd3a88d | 2016-03-08 11:15:22 -0800 | [diff] [blame] | 1170 | if (!node) { |
| 1171 | node = kzalloc(sizeof(*node), GFP_KERNEL); |
| 1172 | if (!node) |
| 1173 | return -ENOMEM; |
| 1174 | |
| 1175 | node->rb.addr = (unsigned long)iovec->iov.iov_base; |
Mitko Haralanov | 5511d78 | 2016-03-08 11:15:44 -0800 | [diff] [blame] | 1176 | node->pq = pq; |
Mitko Haralanov | 5cd3a88d | 2016-03-08 11:15:22 -0800 | [diff] [blame] | 1177 | atomic_set(&node->refcount, 0); |
Mitko Haralanov | 5511d78 | 2016-03-08 11:15:44 -0800 | [diff] [blame] | 1178 | INIT_LIST_HEAD(&node->list); |
Mike Marciniszyn | 7724105 | 2015-07-30 15:17:43 -0400 | [diff] [blame] | 1179 | } |
Mitko Haralanov | a0d4069 | 2015-12-08 17:10:13 -0500 | [diff] [blame] | 1180 | |
Mike Marciniszyn | 7724105 | 2015-07-30 15:17:43 -0400 | [diff] [blame] | 1181 | npages = num_user_pages(&iovec->iov); |
Mitko Haralanov | 5cd3a88d | 2016-03-08 11:15:22 -0800 | [diff] [blame] | 1182 | if (node->npages < npages) { |
| 1183 | pages = kcalloc(npages, sizeof(*pages), GFP_KERNEL); |
| 1184 | if (!pages) { |
| 1185 | SDMA_DBG(req, "Failed page array alloc"); |
| 1186 | ret = -ENOMEM; |
| 1187 | goto bail; |
| 1188 | } |
| 1189 | memcpy(pages, node->pages, node->npages * sizeof(*pages)); |
| 1190 | |
| 1191 | npages -= node->npages; |
Mitko Haralanov | e88c927 | 2016-04-12 10:46:53 -0700 | [diff] [blame] | 1192 | |
| 1193 | /* |
| 1194 | * If rb_node is NULL, it means that this is a brand new node |
| 1195 | * and, therefore, not on the eviction list. |
| 1196 | * If, however, the rb_node is non-NULL, it means that the |
| 1197 | * node is already in the RB tree and, therefore, on the |
| 1198 | * eviction list (nodes are unconditionally inserted in the |
| 1199 | * eviction list). In that case, we have to remove the node |
| 1200 | * prior to calling the eviction function in order to prevent |
| 1201 | * the eviction function from freeing this node. |
| 1202 | */ |
| 1203 | if (rb_node) { |
| 1204 | spin_lock(&pq->evict_lock); |
| 1205 | list_del_init(&node->list); |
| 1206 | spin_unlock(&pq->evict_lock); |
| 1207 | } |
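| | /* |
| | * If pinning npages more would exceed the pinned-page limit, |
| | * evict idle cached entries first; retry the check only if |
| | * eviction freed enough pages, otherwise fall through and |
| | * attempt the pinning anyway. |
| | */ |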
Mitko Haralanov | 5511d78 | 2016-03-08 11:15:44 -0800 | [diff] [blame] | 1208 | retry: |
Ira Weiny | 3faa3d9 | 2016-07-28 15:21:19 -0400 | [diff] [blame^] | 1209 | if (!hfi1_can_pin_pages(pq->dd, pq->mm, pq->n_locked, npages)) { |
Mitko Haralanov | 5511d78 | 2016-03-08 11:15:44 -0800 | [diff] [blame] | 1210 | cleared = sdma_cache_evict(pq, npages); |
Mitko Haralanov | 5511d78 | 2016-03-08 11:15:44 -0800 | [diff] [blame] | 1211 | if (cleared >= npages) |
| 1212 | goto retry; |
| 1213 | } |
Ira Weiny | 3faa3d9 | 2016-07-28 15:21:19 -0400 | [diff] [blame^] | 1214 | pinned = hfi1_acquire_user_pages(pq->mm, |
Mitko Haralanov | 5cd3a88d | 2016-03-08 11:15:22 -0800 | [diff] [blame] | 1215 | ((unsigned long)iovec->iov.iov_base + |
| 1216 | (node->npages * PAGE_SIZE)), npages, 0, |
| 1217 | pages + node->npages); |
| 1218 | if (pinned < 0) { |
| 1219 | kfree(pages); |
| 1220 | ret = pinned; |
| 1221 | goto bail; |
| 1222 | } |
| 1223 | if (pinned != npages) { |
Ira Weiny | 3faa3d9 | 2016-07-28 15:21:19 -0400 | [diff] [blame^] | 1224 | unpin_vector_pages(pq->mm, pages, node->npages, |
Mitko Haralanov | 849e3e9 | 2016-04-12 10:46:16 -0700 | [diff] [blame] | 1225 | pinned); |
Mitko Haralanov | 5cd3a88d | 2016-03-08 11:15:22 -0800 | [diff] [blame] | 1226 | ret = -EFAULT; |
| 1227 | goto bail; |
| 1228 | } |
| 1229 | kfree(node->pages); |
Mitko Haralanov | de79093 | 2016-04-12 10:46:41 -0700 | [diff] [blame] | 1230 | node->rb.len = iovec->iov.iov_len; |
Mitko Haralanov | 5cd3a88d | 2016-03-08 11:15:22 -0800 | [diff] [blame] | 1231 | node->pages = pages; |
| 1232 | node->npages += pinned; |
| 1233 | npages = node->npages; |
Mitko Haralanov | 5511d78 | 2016-03-08 11:15:44 -0800 | [diff] [blame] | 1234 | spin_lock(&pq->evict_lock); |
Mitko Haralanov | e88c927 | 2016-04-12 10:46:53 -0700 | [diff] [blame] | 1235 | list_add(&node->list, &pq->evict); |
Mitko Haralanov | 5511d78 | 2016-03-08 11:15:44 -0800 | [diff] [blame] | 1236 | pq->n_locked += pinned; |
| 1237 | spin_unlock(&pq->evict_lock); |
Mike Marciniszyn | 7724105 | 2015-07-30 15:17:43 -0400 | [diff] [blame] | 1238 | } |
Mitko Haralanov | 5cd3a88d | 2016-03-08 11:15:22 -0800 | [diff] [blame] | 1239 | iovec->pages = node->pages; |
| 1240 | iovec->npages = npages; |
Mitko Haralanov | 9565c6a | 2016-05-19 05:21:18 -0700 | [diff] [blame] | 1241 | iovec->node = node; |
Mike Marciniszyn | 7724105 | 2015-07-30 15:17:43 -0400 | [diff] [blame] | 1242 | |
Mitko Haralanov | f53af85 | 2016-04-12 10:46:47 -0700 | [diff] [blame] | 1243 | ret = hfi1_mmu_rb_insert(&req->pq->sdma_rb_root, &node->rb); |
| 1244 | if (ret) { |
| 1245 | spin_lock(&pq->evict_lock); |
| 1246 | if (!list_empty(&node->list)) |
| 1247 | list_del(&node->list); |
| 1248 | pq->n_locked -= node->npages; |
| 1249 | spin_unlock(&pq->evict_lock); |
Dean Luick | a383f8e | 2016-07-28 15:21:16 -0400 | [diff] [blame] | 1250 | iovec->node = NULL; |
Mitko Haralanov | f53af85 | 2016-04-12 10:46:47 -0700 | [diff] [blame] | 1251 | goto bail; |
Mike Marciniszyn | 7724105 | 2015-07-30 15:17:43 -0400 | [diff] [blame] | 1252 | } |
| 1253 | return 0; |
Mitko Haralanov | 5cd3a88d | 2016-03-08 11:15:22 -0800 | [diff] [blame] | 1254 | bail: |
Mitko Haralanov | f53af85 | 2016-04-12 10:46:47 -0700 | [diff] [blame] | 1255 | if (rb_node) |
Ira Weiny | 3faa3d9 | 2016-07-28 15:21:19 -0400 | [diff] [blame^] | 1256 | unpin_vector_pages(pq->mm, node->pages, 0, node->npages); |
Mitko Haralanov | f53af85 | 2016-04-12 10:46:47 -0700 | [diff] [blame] | 1257 | kfree(node); |
Mitko Haralanov | 5cd3a88d | 2016-03-08 11:15:22 -0800 | [diff] [blame] | 1258 | return ret; |
Mike Marciniszyn | 7724105 | 2015-07-30 15:17:43 -0400 | [diff] [blame] | 1259 | } |
| 1260 | |
Mitko Haralanov | bd3a894 | 2016-03-08 11:15:33 -0800 | [diff] [blame] | 1261 | static void unpin_vector_pages(struct mm_struct *mm, struct page **pages, |
Mitko Haralanov | 849e3e9 | 2016-04-12 10:46:16 -0700 | [diff] [blame] | 1262 | unsigned start, unsigned npages) |
Mike Marciniszyn | 7724105 | 2015-07-30 15:17:43 -0400 | [diff] [blame] | 1263 | { |
Ira Weiny | 639297b | 2016-07-28 12:27:33 -0400 | [diff] [blame] | 1264 | hfi1_release_user_pages(mm, pages + start, npages, false); |
Mitko Haralanov | 5cd3a88d | 2016-03-08 11:15:22 -0800 | [diff] [blame] | 1265 | kfree(pages); |
Mike Marciniszyn | 7724105 | 2015-07-30 15:17:43 -0400 | [diff] [blame] | 1266 | } |
| 1267 | |
| 1268 | static int check_header_template(struct user_sdma_request *req, |
| 1269 | struct hfi1_pkt_header *hdr, u32 lrhlen, |
| 1270 | u32 datalen) |
| 1271 | { |
| 1272 | /* |
| 1273 | * Perform safety checks for any type of packet: |
| 1274 | * - transfer size is a multiple of 64 bytes |
Ira Weiny | c492980 | 2016-07-27 21:08:42 -0400 | [diff] [blame] | 1275 | * - packet length is a multiple of 4 bytes |
Mike Marciniszyn | 7724105 | 2015-07-30 15:17:43 -0400 | [diff] [blame] | 1276 | * - packet length is not larger than the MTU size |
| 1277 | * |
| 1278 | * These checks are only done for the first packet of the |
| 1279 | * transfer since the header is "given" to us by user space. |
| 1280 | * For the remainder of the packets we compute the values. |
| 1281 | */ |
Ira Weiny | c492980 | 2016-07-27 21:08:42 -0400 | [diff] [blame] | 1282 | if (req->info.fragsize % PIO_BLOCK_SIZE || lrhlen & 0x3 || |
Mike Marciniszyn | 7724105 | 2015-07-30 15:17:43 -0400 | [diff] [blame] | 1283 | lrhlen > get_lrh_len(*hdr, req->info.fragsize)) |
| 1284 | return -EINVAL; |
| 1285 | |
| 1286 | if (req_opcode(req->info.ctrl) == EXPECTED) { |
| 1287 | /* |
| 1288 | * The header is checked only on the first packet. Furthermore, |
| 1289 | * we ensure that at least one TID entry is copied when the |
| 1290 | * request is submitted. Therefore, we don't have to verify that |
| 1291 | * tididx points to something sane. |
| 1292 | */ |
| 1293 | u32 tidval = req->tids[req->tididx], |
| 1294 | tidlen = EXP_TID_GET(tidval, LEN) * PAGE_SIZE, |
| 1295 | tididx = EXP_TID_GET(tidval, IDX), |
| 1296 | tidctrl = EXP_TID_GET(tidval, CTRL), |
| 1297 | tidoff; |
| 1298 | __le32 kval = hdr->kdeth.ver_tid_offset; |
| 1299 | |
| 1300 | tidoff = KDETH_GET(kval, OFFSET) * |
| 1301 | (KDETH_GET(req->hdr.kdeth.ver_tid_offset, OM) ? |
| 1302 | KDETH_OM_LARGE : KDETH_OM_SMALL); |
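| | /* |
| | * Worked example (assuming KDETH_OM_SMALL = 4 and |
| | * KDETH_OM_LARGE = 64, the values used elsewhere in this |
| | * driver): an OFFSET field of 16 decodes to 64 bytes in |
| | * small-OM mode and to 1024 bytes in large-OM mode. |
| | */ |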
| 1303 | /* |
| 1304 | * Expected receive packets have the following |
| 1305 | * additional checks: |
| 1306 | * - offset is not larger than the TID size |
| 1307 | * - TIDCtrl values match between header and TID array |
| 1308 | * - TID indexes match between header and TID array |
| 1309 | */ |
| 1310 | if ((tidoff + datalen > tidlen) || |
| 1311 | KDETH_GET(kval, TIDCTRL) != tidctrl || |
| 1312 | KDETH_GET(kval, TID) != tididx) |
| 1313 | return -EINVAL; |
| 1314 | } |
| 1315 | return 0; |
| 1316 | } |
| 1317 | |
| 1318 | /* |
| 1319 | * Correctly set the BTH.PSN field based on the type of |
| 1320 | * transfer: eager packets can simply increment the PSN, but |
| 1321 | * expected packets encode generation and sequence in the |
| 1322 | * BTH.PSN field, so simply incrementing would produce errors. |
| 1323 | */ |
| 1324 | static inline u32 set_pkt_bth_psn(__be32 bthpsn, u8 expct, u32 frags) |
| 1325 | { |
| 1326 | u32 val = be32_to_cpu(bthpsn), |
| 1327 | mask = (HFI1_CAP_IS_KSET(EXTENDED_PSN) ? 0x7fffffffull : |
| 1328 | 0xffffffull), |
| 1329 | psn = val & mask; |
| 1330 | if (expct) |
| 1331 | psn = (psn & ~BTH_SEQ_MASK) | ((psn + frags) & BTH_SEQ_MASK); |
| 1332 | else |
| 1333 | psn = psn + frags; |
| 1334 | return psn & mask; |
| 1335 | } |
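| | /* |
| | * Worked example (assuming BTH_SEQ_MASK covers only the low |
| | * sequence bits, e.g. 0x7ff): for an expected transfer with |
| | * psn = 0x123ffe and frags = 3, the generation bits 0x123800 |
| | * are preserved and the sequence wraps, giving 0x123801; an |
| | * eager transfer would simply yield 0x124001. |
| | */ |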
| 1336 | |
| 1337 | static int set_txreq_header(struct user_sdma_request *req, |
| 1338 | struct user_sdma_txreq *tx, u32 datalen) |
| 1339 | { |
| 1340 | struct hfi1_user_sdma_pkt_q *pq = req->pq; |
| 1341 | struct hfi1_pkt_header *hdr = &tx->hdr; |
| 1342 | u16 pbclen; |
| 1343 | int ret; |
Ira Weiny | c492980 | 2016-07-27 21:08:42 -0400 | [diff] [blame] | 1344 | u32 tidval = 0, lrhlen = get_lrh_len(*hdr, pad_len(datalen)); |
Mike Marciniszyn | 7724105 | 2015-07-30 15:17:43 -0400 | [diff] [blame] | 1345 | |
| 1346 | /* Copy the header template to the request before modification */ |
| 1347 | memcpy(hdr, &req->hdr, sizeof(*hdr)); |
| 1348 | |
| 1349 | /* |
| 1350 | * Check if the PBC and LRH lengths are mismatched. If so, |
| 1351 | * adjust both in the header. |
| 1352 | */ |
| 1353 | pbclen = le16_to_cpu(hdr->pbc[0]); |
| 1354 | if (PBC2LRH(pbclen) != lrhlen) { |
| 1355 | pbclen = (pbclen & 0xf000) | LRH2PBC(lrhlen); |
| 1356 | hdr->pbc[0] = cpu_to_le16(pbclen); |
| 1357 | hdr->lrh[2] = cpu_to_be16(lrhlen >> 2); |
| 1358 | /* |
| 1359 | * Third packet (i.e. seqnum == 2): |
| 1360 | * this is the first packet in the sequence whose size |
| 1361 | * is "static" and can be reused for the rest of the |
| 1362 | * packets (besides the last one). |
| 1363 | */ |
| 1364 | if (unlikely(req->seqnum == 2)) { |
| 1365 | /* |
| 1366 | * From this point on the lengths in both the |
| 1367 | * PBC and LRH are the same until the last |
| 1368 | * packet. |
| 1369 | * Adjust the template so we don't have to update |
| 1370 | * every packet |
| 1371 | * every packet. |
| 1372 | req->hdr.pbc[0] = hdr->pbc[0]; |
| 1373 | req->hdr.lrh[2] = hdr->lrh[2]; |
| 1374 | } |
| 1375 | } |
| 1376 | /* |
| 1377 | * We only have to modify the header if this is not the |
| 1378 | * first packet in the request. Otherwise, we use the |
| 1379 | * header given to us. |
| 1380 | */ |
| 1381 | if (unlikely(!req->seqnum)) { |
| 1382 | ret = check_header_template(req, hdr, lrhlen, datalen); |
| 1383 | if (ret) |
| 1384 | return ret; |
| 1385 | goto done; |
Mike Marciniszyn | 7724105 | 2015-07-30 15:17:43 -0400 | [diff] [blame] | 1386 | } |
| 1387 | |
| 1388 | hdr->bth[2] = cpu_to_be32( |
| 1389 | set_pkt_bth_psn(hdr->bth[2], |
| 1390 | (req_opcode(req->info.ctrl) == EXPECTED), |
| 1391 | req->seqnum)); |
| 1392 | |
| 1393 | /* Set ACK request on last packet */ |
Mitko Haralanov | b9fb6318 | 2015-10-26 10:28:37 -0400 | [diff] [blame] | 1394 | if (unlikely(tx->flags & TXREQ_FLAGS_REQ_LAST_PKT)) |
Jubin John | 8638b77 | 2016-02-14 20:19:24 -0800 | [diff] [blame] | 1395 | hdr->bth[2] |= cpu_to_be32(1UL << 31); |
Mike Marciniszyn | 7724105 | 2015-07-30 15:17:43 -0400 | [diff] [blame] | 1396 | |
| 1397 | /* Set the new offset */ |
| 1398 | hdr->kdeth.swdata[6] = cpu_to_le32(req->koffset); |
| 1399 | /* Expected packets have to fill in the new TID information */ |
| 1400 | if (req_opcode(req->info.ctrl) == EXPECTED) { |
| 1401 | tidval = req->tids[req->tididx]; |
| 1402 | /* |
| 1403 | * If the offset puts us at the end of the current TID, |
| 1404 | * advance everything. |
| 1405 | */ |
| 1406 | if ((req->tidoffset) == (EXP_TID_GET(tidval, LEN) * |
| 1407 | PAGE_SIZE)) { |
| 1408 | req->tidoffset = 0; |
Jubin John | 4d114fd | 2016-02-14 20:21:43 -0800 | [diff] [blame] | 1409 | /* |
| 1410 | * Since we don't copy all the TIDs all at once, |
| 1411 | * we have to check again. |
| 1412 | */ |
Mike Marciniszyn | 7724105 | 2015-07-30 15:17:43 -0400 | [diff] [blame] | 1413 | if (++req->tididx > req->n_tids - 1 || |
| 1414 | !req->tids[req->tididx]) { |
| 1415 | return -EINVAL; |
| 1416 | } |
| 1417 | tidval = req->tids[req->tididx]; |
| 1418 | } |
| 1419 | req->omfactor = EXP_TID_GET(tidval, LEN) * PAGE_SIZE >= |
| 1420 | KDETH_OM_MAX_SIZE ? KDETH_OM_LARGE : KDETH_OM_SMALL; |
| 1421 | /* Set KDETH.TIDCtrl based on value for this TID. */ |
| 1422 | KDETH_SET(hdr->kdeth.ver_tid_offset, TIDCTRL, |
| 1423 | EXP_TID_GET(tidval, CTRL)); |
| 1424 | /* Set KDETH.TID based on value for this TID */ |
| 1425 | KDETH_SET(hdr->kdeth.ver_tid_offset, TID, |
| 1426 | EXP_TID_GET(tidval, IDX)); |
| 1427 | /* Clear KDETH.SH only on the last packet */ |
Mitko Haralanov | b9fb6318 | 2015-10-26 10:28:37 -0400 | [diff] [blame] | 1428 | if (unlikely(tx->flags & TXREQ_FLAGS_REQ_LAST_PKT)) |
Mike Marciniszyn | 7724105 | 2015-07-30 15:17:43 -0400 | [diff] [blame] | 1429 | KDETH_SET(hdr->kdeth.ver_tid_offset, SH, 0); |
| 1430 | /* |
| 1431 | * Set the KDETH.OFFSET and KDETH.OM based on the size of |
| 1432 | * the transfer. |
| 1433 | */ |
| 1434 | SDMA_DBG(req, "TID offset %ubytes %uunits om%u", |
| 1435 | req->tidoffset, req->tidoffset / req->omfactor, |
Bart Van Assche | 55c40648 | 2016-06-03 12:11:16 -0700 | [diff] [blame] | 1436 | req->omfactor != KDETH_OM_SMALL); |
Mike Marciniszyn | 7724105 | 2015-07-30 15:17:43 -0400 | [diff] [blame] | 1437 | KDETH_SET(hdr->kdeth.ver_tid_offset, OFFSET, |
| 1438 | req->tidoffset / req->omfactor); |
| 1439 | KDETH_SET(hdr->kdeth.ver_tid_offset, OM, |
Bart Van Assche | 55c40648 | 2016-06-03 12:11:16 -0700 | [diff] [blame] | 1440 | req->omfactor != KDETH_OM_SMALL); |
Mike Marciniszyn | 7724105 | 2015-07-30 15:17:43 -0400 | [diff] [blame] | 1441 | } |
| 1442 | done: |
| 1443 | trace_hfi1_sdma_user_header(pq->dd, pq->ctxt, pq->subctxt, |
| 1444 | req->info.comp_idx, hdr, tidval); |
| 1445 | return sdma_txadd_kvaddr(pq->dd, &tx->txreq, hdr, sizeof(*hdr)); |
| 1446 | } |
| 1447 | |
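| | /* |
| | * A hedged note on AHG (Automatic Header Generation): rather |
| | * than DMA-ing a full header for every packet, the engine |
| | * patches selected header words in place. Each |
| | * AHG_HEADER_SET() below appends one patch descriptor to |
| | * req->ahg and advances diff, so the return value is the |
| | * number of descriptors built. |
| | */ |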
| 1448 | static int set_txreq_header_ahg(struct user_sdma_request *req, |
| 1449 | struct user_sdma_txreq *tx, u32 len) |
| 1450 | { |
| 1451 | int diff = 0; |
| 1452 | struct hfi1_user_sdma_pkt_q *pq = req->pq; |
| 1453 | struct hfi1_pkt_header *hdr = &req->hdr; |
| 1454 | u16 pbclen = le16_to_cpu(hdr->pbc[0]); |
Ira Weiny | c492980 | 2016-07-27 21:08:42 -0400 | [diff] [blame] | 1455 | u32 val32, tidval = 0, lrhlen = get_lrh_len(*hdr, pad_len(len)); |
Mike Marciniszyn | 7724105 | 2015-07-30 15:17:43 -0400 | [diff] [blame] | 1456 | |
| 1457 | if (PBC2LRH(pbclen) != lrhlen) { |
| 1458 | /* PBC.PbcLengthDWs */ |
| 1459 | AHG_HEADER_SET(req->ahg, diff, 0, 0, 12, |
| 1460 | cpu_to_le16(LRH2PBC(lrhlen))); |
| 1461 | /* LRH.PktLen (we need the full 16 bits due to byte swap) */ |
| 1462 | AHG_HEADER_SET(req->ahg, diff, 3, 0, 16, |
| 1463 | cpu_to_be16(lrhlen >> 2)); |
| 1464 | } |
| 1465 | |
| 1466 | /* |
| 1467 | * Do the common updates |
| 1468 | */ |
| 1469 | /* BTH.PSN and BTH.A */ |
| 1470 | val32 = (be32_to_cpu(hdr->bth[2]) + req->seqnum) & |
| 1471 | (HFI1_CAP_IS_KSET(EXTENDED_PSN) ? 0x7fffffff : 0xffffff); |
Mitko Haralanov | b9fb6318 | 2015-10-26 10:28:37 -0400 | [diff] [blame] | 1472 | if (unlikely(tx->flags & TXREQ_FLAGS_REQ_LAST_PKT)) |
Mike Marciniszyn | 7724105 | 2015-07-30 15:17:43 -0400 | [diff] [blame] | 1473 | val32 |= 1UL << 31; |
| 1474 | AHG_HEADER_SET(req->ahg, diff, 6, 0, 16, cpu_to_be16(val32 >> 16)); |
| 1475 | AHG_HEADER_SET(req->ahg, diff, 6, 16, 16, cpu_to_be16(val32 & 0xffff)); |
| 1476 | /* KDETH.Offset */ |
| 1477 | AHG_HEADER_SET(req->ahg, diff, 15, 0, 16, |
| 1478 | cpu_to_le16(req->koffset & 0xffff)); |
| 1479 | AHG_HEADER_SET(req->ahg, diff, 15, 16, 16, |
| 1480 | cpu_to_le16(req->koffset >> 16)); |
| 1481 | if (req_opcode(req->info.ctrl) == EXPECTED) { |
| 1482 | __le16 val; |
| 1483 | |
| 1484 | tidval = req->tids[req->tididx]; |
| 1485 | |
| 1486 | /* |
| 1487 | * If the offset puts us at the end of the current TID, |
| 1488 | * advance everything. |
| 1489 | */ |
| 1490 | if ((req->tidoffset) == (EXP_TID_GET(tidval, LEN) * |
| 1491 | PAGE_SIZE)) { |
| 1492 | req->tidoffset = 0; |
Jubin John | 4d114fd | 2016-02-14 20:21:43 -0800 | [diff] [blame] | 1493 | /* |
| 1494 | * Since we don't copy all the TIDs all at once, |
| 1495 | * we have to check again. |
| 1496 | */ |
Mike Marciniszyn | 7724105 | 2015-07-30 15:17:43 -0400 | [diff] [blame] | 1497 | if (++req->tididx > req->n_tids - 1 || |
| 1498 | !req->tids[req->tididx]) { |
| 1499 | return -EINVAL; |
| 1500 | } |
| 1501 | tidval = req->tids[req->tididx]; |
| 1502 | } |
| 1503 | req->omfactor = ((EXP_TID_GET(tidval, LEN) * |
| 1504 | PAGE_SIZE) >= |
| 1505 | KDETH_OM_MAX_SIZE) ? KDETH_OM_LARGE : |
| 1506 | KDETH_OM_SMALL; |
| 1507 | /* KDETH.OM and KDETH.OFFSET (TID) */ |
| 1508 | AHG_HEADER_SET(req->ahg, diff, 7, 0, 16, |
| 1509 | ((!!(req->omfactor - KDETH_OM_SMALL)) << 15 | |
| 1510 | ((req->tidoffset / req->omfactor) & 0x7fff))); |
| 1511 | /* KDETH.TIDCtrl, KDETH.TID */ |
| 1512 | val = cpu_to_le16(((EXP_TID_GET(tidval, CTRL) & 0x3) << 10) | |
| 1513 | (EXP_TID_GET(tidval, IDX) & 0x3ff)); |
| 1514 | /* Clear KDETH.SH on last packet */ |
Mitko Haralanov | b9fb6318 | 2015-10-26 10:28:37 -0400 | [diff] [blame] | 1515 | if (unlikely(tx->flags & TXREQ_FLAGS_REQ_LAST_PKT)) { |
Mike Marciniszyn | 7724105 | 2015-07-30 15:17:43 -0400 | [diff] [blame] | 1516 | val |= cpu_to_le16(KDETH_GET(hdr->kdeth.ver_tid_offset, |
| 1517 | INTR) >> 16); |
| 1518 | val &= cpu_to_le16(~(1U << 13)); |
| 1519 | AHG_HEADER_SET(req->ahg, diff, 7, 16, 14, val); |
Jubin John | e490974 | 2016-02-14 20:22:00 -0800 | [diff] [blame] | 1520 | } else { |
Mike Marciniszyn | 7724105 | 2015-07-30 15:17:43 -0400 | [diff] [blame] | 1521 | AHG_HEADER_SET(req->ahg, diff, 7, 16, 12, val); |
Jubin John | e490974 | 2016-02-14 20:22:00 -0800 | [diff] [blame] | 1522 | } |
Mike Marciniszyn | 7724105 | 2015-07-30 15:17:43 -0400 | [diff] [blame] | 1523 | } |
| 1524 | |
| 1525 | trace_hfi1_sdma_user_header_ahg(pq->dd, pq->ctxt, pq->subctxt, |
| 1526 | req->info.comp_idx, req->sde->this_idx, |
| 1527 | req->ahg_idx, req->ahg, diff, tidval); |
| 1528 | return diff; |
| 1529 | } |
| 1530 | |
Mitko Haralanov | a0d4069 | 2015-12-08 17:10:13 -0500 | [diff] [blame] | 1531 | /* |
| 1532 | * SDMA tx request completion callback. Called when the SDMA progress |
| 1533 | * state machine gets notification that the SDMA descriptors for this |
| 1534 | * tx request have been processed by the DMA engine. Called in |
| 1535 | * interrupt context. |
| 1536 | */ |
Mike Marciniszyn | a545f53 | 2016-02-14 12:45:53 -0800 | [diff] [blame] | 1537 | static void user_sdma_txreq_cb(struct sdma_txreq *txreq, int status) |
Mike Marciniszyn | 7724105 | 2015-07-30 15:17:43 -0400 | [diff] [blame] | 1538 | { |
| 1539 | struct user_sdma_txreq *tx = |
| 1540 | container_of(txreq, struct user_sdma_txreq, txreq); |
Mitko Haralanov | a0d4069 | 2015-12-08 17:10:13 -0500 | [diff] [blame] | 1541 | struct user_sdma_request *req; |
Mitko Haralanov | 0f2d87d | 2016-02-03 14:35:06 -0800 | [diff] [blame] | 1542 | struct hfi1_user_sdma_pkt_q *pq; |
| 1543 | struct hfi1_user_sdma_comp_q *cq; |
| 1544 | u16 idx; |
Mike Marciniszyn | 7724105 | 2015-07-30 15:17:43 -0400 | [diff] [blame] | 1545 | |
Mitko Haralanov | a0d4069 | 2015-12-08 17:10:13 -0500 | [diff] [blame] | 1546 | if (!tx->req) |
Mike Marciniszyn | 7724105 | 2015-07-30 15:17:43 -0400 | [diff] [blame] | 1547 | return; |
| 1548 | |
Mitko Haralanov | a0d4069 | 2015-12-08 17:10:13 -0500 | [diff] [blame] | 1549 | req = tx->req; |
Mitko Haralanov | 0f2d87d | 2016-02-03 14:35:06 -0800 | [diff] [blame] | 1550 | pq = req->pq; |
| 1551 | cq = req->cq; |
Mitko Haralanov | b9fb6318 | 2015-10-26 10:28:37 -0400 | [diff] [blame] | 1552 | |
Mike Marciniszyn | 7724105 | 2015-07-30 15:17:43 -0400 | [diff] [blame] | 1553 | if (status != SDMA_TXREQ_S_OK) { |
Mitko Haralanov | a0d4069 | 2015-12-08 17:10:13 -0500 | [diff] [blame] | 1554 | SDMA_DBG(req, "SDMA completion with error %d", |
| 1555 | status); |
Mike Marciniszyn | 7724105 | 2015-07-30 15:17:43 -0400 | [diff] [blame] | 1556 | set_bit(SDMA_REQ_HAS_ERROR, &req->flags); |
Mitko Haralanov | a0d4069 | 2015-12-08 17:10:13 -0500 | [diff] [blame] | 1557 | } |
| 1558 | |
Mitko Haralanov | 0f2d87d | 2016-02-03 14:35:06 -0800 | [diff] [blame] | 1559 | req->seqcomp = tx->seqnum; |
| 1560 | kmem_cache_free(pq->txreq_cache, tx); |
| 1561 | tx = NULL; |
| 1562 | |
| 1563 | idx = req->info.comp_idx; |
| 1564 | if (req->status == -1 && status == SDMA_TXREQ_S_OK) { |
| 1565 | if (req->seqcomp == req->info.npkts - 1) { |
| 1566 | req->status = 0; |
| 1567 | user_sdma_free_request(req, false); |
| 1568 | pq_update(pq); |
| 1569 | set_comp_state(pq, cq, idx, COMPLETE, 0); |
| 1570 | } |
Mike Marciniszyn | 7724105 | 2015-07-30 15:17:43 -0400 | [diff] [blame] | 1571 | } else { |
Mitko Haralanov | 0f2d87d | 2016-02-03 14:35:06 -0800 | [diff] [blame] | 1572 | if (status != SDMA_TXREQ_S_OK) |
| 1573 | req->status = status; |
Mitko Haralanov | c7cbf2f | 2016-02-03 14:35:23 -0800 | [diff] [blame] | 1574 | if (req->seqcomp == (ACCESS_ONCE(req->seqsubmitted) - 1) && |
| 1575 | (test_bit(SDMA_REQ_SEND_DONE, &req->flags) || |
| 1576 | test_bit(SDMA_REQ_DONE_ERROR, &req->flags))) { |
Mitko Haralanov | 0f2d87d | 2016-02-03 14:35:06 -0800 | [diff] [blame] | 1577 | user_sdma_free_request(req, false); |
| 1578 | pq_update(pq); |
| 1579 | set_comp_state(pq, cq, idx, ERROR, req->status); |
| 1580 | } |
Mitko Haralanov | a0d4069 | 2015-12-08 17:10:13 -0500 | [diff] [blame] | 1581 | } |
| 1582 | } |
| 1583 | |
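| | /* |
| | * Drop the packet queue's in-flight request count; when the last |
| | * request completes, mark the queue inactive and wake whichever |
| | * thread (presumably the submitting write path) is waiting for |
| | * the queue to drain. |
| | */ |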
Mitko Haralanov | 0f2d87d | 2016-02-03 14:35:06 -0800 | [diff] [blame] | 1584 | static inline void pq_update(struct hfi1_user_sdma_pkt_q *pq) |
Mitko Haralanov | a0d4069 | 2015-12-08 17:10:13 -0500 | [diff] [blame] | 1585 | { |
Mitko Haralanov | 0f2d87d | 2016-02-03 14:35:06 -0800 | [diff] [blame] | 1586 | if (atomic_dec_and_test(&pq->n_reqs)) { |
Mike Marciniszyn | 7724105 | 2015-07-30 15:17:43 -0400 | [diff] [blame] | 1587 | xchg(&pq->state, SDMA_PKT_Q_INACTIVE); |
Mitko Haralanov | a0d4069 | 2015-12-08 17:10:13 -0500 | [diff] [blame] | 1588 | wake_up(&pq->wait); |
| 1589 | } |
Mike Marciniszyn | 7724105 | 2015-07-30 15:17:43 -0400 | [diff] [blame] | 1590 | } |
| 1591 | |
Mitko Haralanov | 0f2d87d | 2016-02-03 14:35:06 -0800 | [diff] [blame] | 1592 | static void user_sdma_free_request(struct user_sdma_request *req, bool unpin) |
Mike Marciniszyn | 7724105 | 2015-07-30 15:17:43 -0400 | [diff] [blame] | 1593 | { |
| 1594 | if (!list_empty(&req->txps)) { |
| 1595 | struct sdma_txreq *t, *p; |
| 1596 | |
| 1597 | list_for_each_entry_safe(t, p, &req->txps, list) { |
| 1598 | struct user_sdma_txreq *tx = |
| 1599 | container_of(t, struct user_sdma_txreq, txreq); |
| 1600 | list_del_init(&t->list); |
| 1601 | sdma_txclean(req->pq->dd, t); |
| 1602 | kmem_cache_free(req->pq->txreq_cache, tx); |
| 1603 | } |
| 1604 | } |
| 1605 | if (req->data_iovs) { |
Mitko Haralanov | 5cd3a88d | 2016-03-08 11:15:22 -0800 | [diff] [blame] | 1606 | struct sdma_mmu_node *node; |
Mike Marciniszyn | 7724105 | 2015-07-30 15:17:43 -0400 | [diff] [blame] | 1607 | int i; |
| 1608 | |
Mitko Haralanov | 5cd3a88d | 2016-03-08 11:15:22 -0800 | [diff] [blame] | 1609 | for (i = 0; i < req->data_iovs; i++) { |
Mitko Haralanov | 9565c6a | 2016-05-19 05:21:18 -0700 | [diff] [blame] | 1610 | node = req->iovs[i].node; |
| 1611 | if (!node) |
Mitko Haralanov | 5cd3a88d | 2016-03-08 11:15:22 -0800 | [diff] [blame] | 1612 | continue; |
| 1613 | |
Mitko Haralanov | 5cd3a88d | 2016-03-08 11:15:22 -0800 | [diff] [blame] | 1614 | if (unpin) |
| 1615 | hfi1_mmu_rb_remove(&req->pq->sdma_rb_root, |
| 1616 | &node->rb); |
| 1617 | else |
| 1618 | atomic_dec(&node->refcount); |
| 1619 | } |
Mike Marciniszyn | 7724105 | 2015-07-30 15:17:43 -0400 | [diff] [blame] | 1620 | } |
Mike Marciniszyn | 7724105 | 2015-07-30 15:17:43 -0400 | [diff] [blame] | 1621 | kfree(req->tids); |
Dean Luick | 7b3256e | 2016-07-28 15:21:18 -0400 | [diff] [blame] | 1622 | clear_bit(req->info.comp_idx, req->pq->req_in_use); |
Mike Marciniszyn | 7724105 | 2015-07-30 15:17:43 -0400 | [diff] [blame] | 1623 | } |
| 1624 | |
Mitko Haralanov | 0f2d87d | 2016-02-03 14:35:06 -0800 | [diff] [blame] | 1625 | static inline void set_comp_state(struct hfi1_user_sdma_pkt_q *pq, |
| 1626 | struct hfi1_user_sdma_comp_q *cq, |
| 1627 | u16 idx, enum hfi1_sdma_comp_state state, |
| 1628 | int ret) |
Mike Marciniszyn | 7724105 | 2015-07-30 15:17:43 -0400 | [diff] [blame] | 1629 | { |
Mitko Haralanov | 0f2d87d | 2016-02-03 14:35:06 -0800 | [diff] [blame] | 1630 | hfi1_cdbg(SDMA, "[%u:%u:%u:%u] Setting completion status %u %d", |
| 1631 | pq->dd->unit, pq->ctxt, pq->subctxt, idx, state, ret); |
| 1632 | cq->comps[idx].status = state; |
Mike Marciniszyn | 7724105 | 2015-07-30 15:17:43 -0400 | [diff] [blame] | 1633 | if (state == ERROR) |
Mitko Haralanov | 0f2d87d | 2016-02-03 14:35:06 -0800 | [diff] [blame] | 1634 | cq->comps[idx].errcode = -ret; |
| 1635 | trace_hfi1_sdma_user_completion(pq->dd, pq->ctxt, pq->subctxt, |
| 1636 | idx, state, ret); |
Mike Marciniszyn | 7724105 | 2015-07-30 15:17:43 -0400 | [diff] [blame] | 1637 | } |
Mitko Haralanov | 5cd3a88d | 2016-03-08 11:15:22 -0800 | [diff] [blame] | 1638 | |
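| | /* |
| | * Note: this filter matches on the exact start address only, so a |
| | * cached node is reused only when user space resubmits the same |
| | * iov_base; the len argument goes unused here. |
| | */ |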
| 1639 | static bool sdma_rb_filter(struct mmu_rb_node *node, unsigned long addr, |
| 1640 | unsigned long len) |
| 1641 | { |
| 1642 | return node->addr == addr; |
| 1643 | } |
| 1644 | |
| 1645 | static int sdma_rb_insert(struct rb_root *root, struct mmu_rb_node *mnode) |
| 1646 | { |
| 1647 | struct sdma_mmu_node *node = |
| 1648 | container_of(mnode, struct sdma_mmu_node, rb); |
| 1649 | |
| 1650 | atomic_inc(&node->refcount); |
| 1651 | return 0; |
| 1652 | } |
| 1653 | |
| 1654 | static void sdma_rb_remove(struct rb_root *root, struct mmu_rb_node *mnode, |
Mitko Haralanov | f19bd64 | 2016-04-12 10:45:57 -0700 | [diff] [blame] | 1655 | struct mm_struct *mm) |
Mitko Haralanov | 5cd3a88d | 2016-03-08 11:15:22 -0800 | [diff] [blame] | 1656 | { |
| 1657 | struct sdma_mmu_node *node = |
| 1658 | container_of(mnode, struct sdma_mmu_node, rb); |
| 1659 | |
Mitko Haralanov | 5511d78 | 2016-03-08 11:15:44 -0800 | [diff] [blame] | 1660 | spin_lock(&node->pq->evict_lock); |
Mitko Haralanov | e88c927 | 2016-04-12 10:46:53 -0700 | [diff] [blame] | 1661 | /* |
| 1662 | * If we've been called by the MMU notifier but this node has |
| 1663 | * already been scheduled for eviction, the eviction function |
| 1664 | * will take care of freeing it, so bail out here. |
| 1665 | * We have to take the above lock first because we are racing |
| 1666 | * against the setting of the bit in the eviction function. |
| 1667 | */ |
| 1668 | if (mm && test_bit(SDMA_CACHE_NODE_EVICT, &node->flags)) { |
| 1669 | spin_unlock(&node->pq->evict_lock); |
| 1670 | return; |
| 1671 | } |
| 1672 | |
Mitko Haralanov | 4787bc5 | 2016-04-12 10:46:23 -0700 | [diff] [blame] | 1673 | if (!list_empty(&node->list)) |
| 1674 | list_del(&node->list); |
Mitko Haralanov | 5511d78 | 2016-03-08 11:15:44 -0800 | [diff] [blame] | 1675 | node->pq->n_locked -= node->npages; |
| 1676 | spin_unlock(&node->pq->evict_lock); |
| 1677 | |
Mitko Haralanov | f19bd64 | 2016-04-12 10:45:57 -0700 | [diff] [blame] | 1678 | /* |
| 1679 | * If mm is set, we are being called by the MMU notifier and we |
| 1680 | * should not pass a mm_struct to unpin_vector_pages(). This is to |
| 1681 | * prevent a deadlock when hfi1_release_user_pages() attempts to |
| 1682 | * take the mmap_sem, which the MMU notifier has already taken. |
| 1683 | */ |
Mitko Haralanov | 849e3e9 | 2016-04-12 10:46:16 -0700 | [diff] [blame] | 1684 | unpin_vector_pages(mm ? NULL : current->mm, node->pages, 0, |
| 1685 | node->npages); |
Mitko Haralanov | bd3a894 | 2016-03-08 11:15:33 -0800 | [diff] [blame] | 1686 | /* |
| 1687 | * If called by the MMU notifier, we have to adjust the pinned |
| 1688 | * page count ourselves. |
| 1689 | */ |
Mitko Haralanov | f19bd64 | 2016-04-12 10:45:57 -0700 | [diff] [blame] | 1690 | if (mm) |
| 1691 | mm->pinned_vm -= node->npages; |
Mitko Haralanov | 5cd3a88d | 2016-03-08 11:15:22 -0800 | [diff] [blame] | 1692 | kfree(node); |
| 1693 | } |
| 1694 | |
| 1695 | static int sdma_rb_invalidate(struct rb_root *root, struct mmu_rb_node *mnode) |
| 1696 | { |
| 1697 | struct sdma_mmu_node *node = |
| 1698 | container_of(mnode, struct sdma_mmu_node, rb); |
| 1699 | |
| 1700 | if (!atomic_read(&node->refcount)) |
| 1701 | return 1; |
| 1702 | return 0; |
Mike Marciniszyn | 7724105 | 2015-07-30 15:17:43 -0400 | [diff] [blame] | 1703 | } |
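| | |
| | /* |
| | * For reference, a minimal sketch (not verbatim from this file) of |
| | * how these callbacks would be wired into the MMU RB-tree cache. |
| | * The mmu_rb_ops field names are assumptions inferred from the |
| | * callback signatures above; the real registration happens during |
| | * queue setup elsewhere in this file: |
| | * |
| | *	static struct mmu_rb_ops sdma_rb_ops = { |
| | *		.filter = sdma_rb_filter, |
| | *		.insert = sdma_rb_insert, |
| | *		.remove = sdma_rb_remove, |
| | *		.invalidate = sdma_rb_invalidate, |
| | *	}; |
| | * |
| | *	ret = hfi1_mmu_rb_register(&pq->sdma_rb_root, &sdma_rb_ops); |
| | */ |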