/*
 * Copyright(c) 2015-2017 Intel Corporation.
 *
 * This file is provided under a dual BSD/GPLv2 license.  When using or
 * redistributing this file, you may do so under either license.
 *
 * GPL LICENSE SUMMARY
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of version 2 of the GNU General Public License as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License for more details.
 *
 * BSD LICENSE
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 *  - Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 *  - Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in
 *    the documentation and/or other materials provided with the
 *    distribution.
 *  - Neither the name of Intel Corporation nor the names of its
 *    contributors may be used to endorse or promote products derived
 *    from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 *
 */
#include <asm/page.h>
#include <linux/string.h>

#include "user_exp_rcv.h"
#include "trace.h"
#include "mmu_rb.h"

struct tid_rb_node {
	struct mmu_rb_node mmu;
	unsigned long phys;
	struct tid_group *grp;
	u32 rcventry;
	dma_addr_t dma_addr;
	bool freed;
	unsigned npages;
	struct page *pages[0];
};

struct tid_pageset {
	u16 idx;
	u16 count;
};

#define num_user_pages(vaddr, len)				       \
	(1 + (((((unsigned long)(vaddr) +			       \
		 (unsigned long)(len) - 1) & PAGE_MASK) -	       \
	       ((unsigned long)vaddr & PAGE_MASK)) >> PAGE_SHIFT))
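
/*
 * Illustrative only (not part of the driver): a worked example of the
 * macro above, assuming a 4 KiB PAGE_SIZE. A buffer starting at
 * vaddr 0x10000804 with len 0x2000 ends at byte 0x10002803, so it
 * touches pages 0x10000000, 0x10001000, and 0x10002000:
 *
 *   ((0x10002803 & PAGE_MASK) - (0x10000804 & PAGE_MASK)) >> PAGE_SHIFT
 *     = (0x10002000 - 0x10000000) >> 12 = 2, plus 1 -> 3 pages.
 */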

static void unlock_exp_tids(struct hfi1_ctxtdata *uctxt,
			    struct exp_tid_set *set,
			    struct hfi1_filedata *fd);
static u32 find_phys_blocks(struct page **pages, unsigned npages,
			    struct tid_pageset *list);
static int set_rcvarray_entry(struct hfi1_filedata *fd, unsigned long vaddr,
			      u32 rcventry, struct tid_group *grp,
			      struct page **pages, unsigned npages);
static int tid_rb_insert(void *arg, struct mmu_rb_node *node);
static void cacheless_tid_rb_remove(struct hfi1_filedata *fdata,
				    struct tid_rb_node *tnode);
static void tid_rb_remove(void *arg, struct mmu_rb_node *node);
static int tid_rb_invalidate(void *arg, struct mmu_rb_node *mnode);
static int program_rcvarray(struct hfi1_filedata *fd, unsigned long vaddr,
			    struct tid_group *grp, struct tid_pageset *sets,
			    unsigned start, u16 count, struct page **pages,
			    u32 *tidlist, unsigned *tididx, unsigned *pmapped);
static int unprogram_rcvarray(struct hfi1_filedata *fd, u32 tidinfo,
			      struct tid_group **grp);
static void clear_tid_node(struct hfi1_filedata *fd, struct tid_rb_node *node);

static struct mmu_rb_ops tid_rb_ops = {
	.insert = tid_rb_insert,
	.remove = tid_rb_remove,
	.invalidate = tid_rb_invalidate
};

/*
 * Initialize context and file private data needed for Expected
 * receive caching. This needs to be done after the context has
 * been configured with the eager/expected RcvEntry counts.
 */
int hfi1_user_exp_rcv_init(struct hfi1_filedata *fd)
{
	struct hfi1_ctxtdata *uctxt = fd->uctxt;
	struct hfi1_devdata *dd = uctxt->dd;
	int ret = 0;

	spin_lock_init(&fd->tid_lock);
	spin_lock_init(&fd->invalid_lock);

	fd->entry_to_rb = kcalloc(uctxt->expected_count,
				  sizeof(struct rb_node *),
				  GFP_KERNEL);
	if (!fd->entry_to_rb)
		return -ENOMEM;

	if (!HFI1_CAP_UGET_MASK(uctxt->flags, TID_UNMAP)) {
		fd->invalid_tid_idx = 0;
		fd->invalid_tids = kcalloc(uctxt->expected_count,
					   sizeof(*fd->invalid_tids),
					   GFP_KERNEL);
		if (!fd->invalid_tids) {
			kfree(fd->entry_to_rb);
			fd->entry_to_rb = NULL;
			return -ENOMEM;
		}

		/*
		 * Register MMU notifier callbacks. If the registration
		 * fails, continue without TID caching for this context.
		 */
		ret = hfi1_mmu_rb_register(fd, fd->mm, &tid_rb_ops,
					   dd->pport->hfi1_wq,
					   &fd->handler);
		if (ret) {
			dd_dev_info(dd,
				    "Failed MMU notifier registration %d\n",
				    ret);
			ret = 0;
		}
	}

	/*
	 * PSM does not have a good way to separate, count, and
	 * effectively enforce a limit on RcvArray entries used by
	 * subctxts (when context sharing is used) when TID caching
	 * is enabled. To help with that, we calculate a per-process
	 * RcvArray entry share and enforce that.
	 * If TID caching is not in use, PSM deals with usage on its
	 * own. In that case, we allow any subctxt to take all of the
	 * entries.
	 *
	 * Make sure that we set the tid counts only after successful
	 * init.
	 */
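	/*
	 * Illustrative only: with expected_count = 2048 and
	 * subctxt_cnt = 3, the division below gives tid_limit = 682
	 * with remainder = 2, so subctxts 0 and 1 are bumped to 683
	 * and subctxt 2 keeps 682 (683 + 683 + 682 = 2048, i.e. the
	 * whole RcvArray share is accounted for).
	 */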
	spin_lock(&fd->tid_lock);
	if (uctxt->subctxt_cnt && fd->handler) {
		u16 remainder;

		fd->tid_limit = uctxt->expected_count / uctxt->subctxt_cnt;
		remainder = uctxt->expected_count % uctxt->subctxt_cnt;
		if (remainder && fd->subctxt < remainder)
			fd->tid_limit++;
	} else {
		fd->tid_limit = uctxt->expected_count;
	}
	spin_unlock(&fd->tid_lock);

	return ret;
}

void hfi1_user_exp_rcv_free(struct hfi1_filedata *fd)
{
	struct hfi1_ctxtdata *uctxt = fd->uctxt;

	/*
	 * The notifier would have been removed when the process's mm
	 * was freed.
	 */
	if (fd->handler) {
		hfi1_mmu_rb_unregister(fd->handler);
	} else {
		if (!EXP_TID_SET_EMPTY(uctxt->tid_full_list))
			unlock_exp_tids(uctxt, &uctxt->tid_full_list, fd);
		if (!EXP_TID_SET_EMPTY(uctxt->tid_used_list))
			unlock_exp_tids(uctxt, &uctxt->tid_used_list, fd);
	}

	kfree(fd->invalid_tids);
	fd->invalid_tids = NULL;

	kfree(fd->entry_to_rb);
	fd->entry_to_rb = NULL;
}

/*
 * RcvArray entry allocation for Expected Receives is done by the
 * following algorithm:
 *
 * The context keeps 3 lists of groups of RcvArray entries:
 *   1. List of empty groups - tid_group_list
 *      This list is created during user context creation and
 *      contains elements which describe sets (of 8) of empty
 *      RcvArray entries.
 *   2. List of partially used groups - tid_used_list
 *      This list contains sets of RcvArray entries which are
 *      not completely used up. Another mapping request could
 *      use some or all of the remaining entries.
 *   3. List of full groups - tid_full_list
 *      This is the list where sets that are completely used
 *      up go.
 *
 * An attempt to optimize the usage of RcvArray entries is
 * made by finding all sets of physically contiguous pages in a
 * user's buffer.
 * These physically contiguous sets are further split into
 * sizes supported by the receive engine of the HFI. The
 * resulting sets of pages are stored in struct tid_pageset,
 * which describes the sets as:
 *    * .count - number of pages in this set
 *    * .idx - starting index into struct page ** array
 *             of this set
 *
 * From this point on, the algorithm deals with the page sets
 * described above. The number of pagesets is divided by the
 * RcvArray group size to produce the number of full groups
 * needed.
 *
 * Groups from the 3 lists are manipulated using the following
 * rules:
 *   1. For each set of 8 pagesets, a complete group from
 *      tid_group_list is taken, programmed, and moved to
 *      the tid_full_list.
 *   2. For all remaining pagesets:
 *      2.1 If the tid_used_list is empty and the tid_group_list
 *          is empty, stop processing pagesets and return only
 *          what has been programmed up to this point.
 *      2.2 If the tid_used_list is empty and the tid_group_list
 *          is not empty, move a group from tid_group_list to
 *          tid_used_list.
 *      2.3 For each group in tid_used_list, program as much as
 *          can fit into the group. If the group becomes fully
 *          used, move it to tid_full_list.
 */
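/*
 * Illustrative only: with a group size of 8, a request that yields 21
 * pagesets needs ngroups = 21 / 8 = 2 complete groups. Rule 1 programs
 * 16 pagesets into two groups popped from tid_group_list (both end up
 * on tid_full_list); rule 2 then places the remaining 5 pagesets into
 * whatever partially used groups sit on tid_used_list, pulling a fresh
 * group over from tid_group_list if none are available.
 */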
int hfi1_user_exp_rcv_setup(struct hfi1_filedata *fd,
			    struct hfi1_tid_info *tinfo)
{
	int ret = 0, need_group = 0, pinned;
	struct hfi1_ctxtdata *uctxt = fd->uctxt;
	struct hfi1_devdata *dd = uctxt->dd;
	unsigned npages, ngroups, pageidx = 0, pageset_count, npagesets,
		tididx = 0, mapped, mapped_pages = 0;
	unsigned long vaddr = tinfo->vaddr;
	struct page **pages = NULL;
	u32 *tidlist = NULL;
	struct tid_pageset *pagesets = NULL;

	/* Get the number of pages the user buffer spans */
	npages = num_user_pages(vaddr, tinfo->length);
	if (!npages)
		return -EINVAL;

	if (npages > uctxt->expected_count) {
		dd_dev_err(dd, "Expected buffer too big\n");
		return -EINVAL;
	}

	/* Verify that access is OK for the user buffer */
	if (!access_ok(VERIFY_WRITE, (void __user *)vaddr,
		       npages * PAGE_SIZE)) {
		dd_dev_err(dd, "Fail vaddr %p, %u pages, !access_ok\n",
			   (void *)vaddr, npages);
		return -EFAULT;
	}

	pagesets = kcalloc(uctxt->expected_count, sizeof(*pagesets),
			   GFP_KERNEL);
	if (!pagesets)
		return -ENOMEM;

	/* Allocate the array of struct page pointers needed for pinning */
	pages = kcalloc(npages, sizeof(*pages), GFP_KERNEL);
	if (!pages) {
		ret = -ENOMEM;
		goto bail;
	}

	/*
	 * Pin all the pages of the user buffer. If we can't pin all the
	 * pages, accept the amount pinned so far and program only that.
	 * User space knows how to deal with partially programmed buffers.
	 */
	if (!hfi1_can_pin_pages(dd, fd->mm, fd->tid_n_pinned, npages)) {
		ret = -ENOMEM;
		goto bail;
	}

	pinned = hfi1_acquire_user_pages(fd->mm, vaddr, npages, true, pages);
	if (pinned <= 0) {
		ret = pinned;
		goto bail;
	}
	fd->tid_n_pinned += npages;

	/* Find sets of physically contiguous pages */
	npagesets = find_phys_blocks(pages, pinned, pagesets);

	/*
	 * We don't need to access this under a lock since tid_used is per
	 * process and the same process cannot be in hfi1_user_exp_rcv_clear()
	 * and hfi1_user_exp_rcv_setup() at the same time.
	 */
	spin_lock(&fd->tid_lock);
	if (fd->tid_used + npagesets > fd->tid_limit)
		pageset_count = fd->tid_limit - fd->tid_used;
	else
		pageset_count = npagesets;
	spin_unlock(&fd->tid_lock);

	if (!pageset_count)
		goto bail;

	ngroups = pageset_count / dd->rcv_entries.group_size;
	tidlist = kcalloc(pageset_count, sizeof(*tidlist), GFP_KERNEL);
	if (!tidlist) {
		ret = -ENOMEM;
		goto nomem;
	}

	tididx = 0;

	/*
	 * From this point on, we are going to be using shared (between master
	 * and subcontexts) context resources. We need to take the lock.
	 */
	mutex_lock(&uctxt->exp_lock);
	/*
	 * The first step is to program the RcvArray entries which are complete
	 * groups.
	 */
	while (ngroups && uctxt->tid_group_list.count) {
		struct tid_group *grp =
			tid_group_pop(&uctxt->tid_group_list);

		ret = program_rcvarray(fd, vaddr, grp, pagesets,
				       pageidx, dd->rcv_entries.group_size,
				       pages, tidlist, &tididx, &mapped);
		/*
		 * If there was a failure to program the RcvArray
		 * entries for the entire group, reset the grp fields
		 * and add the grp back to the free group list.
		 */
		if (ret <= 0) {
			tid_group_add_tail(grp, &uctxt->tid_group_list);
			hfi1_cdbg(TID,
				  "Failed to program RcvArray group %d", ret);
			goto unlock;
		}

		tid_group_add_tail(grp, &uctxt->tid_full_list);
		ngroups--;
		pageidx += ret;
		mapped_pages += mapped;
	}

	while (pageidx < pageset_count) {
		struct tid_group *grp, *ptr;
		/*
		 * If we don't have any partially used tid groups, check
		 * if we have empty groups. If so, take one from there and
		 * put in the partially used list.
		 */
		if (!uctxt->tid_used_list.count || need_group) {
			if (!uctxt->tid_group_list.count)
				goto unlock;

			grp = tid_group_pop(&uctxt->tid_group_list);
			tid_group_add_tail(grp, &uctxt->tid_used_list);
			need_group = 0;
		}
		/*
		 * There is an optimization opportunity here - instead of
		 * fitting as many page sets as we can, check for a group
		 * later on in the list that could fit all of them.
		 */
		list_for_each_entry_safe(grp, ptr, &uctxt->tid_used_list.list,
					 list) {
			unsigned use = min_t(unsigned, pageset_count - pageidx,
					     grp->size - grp->used);

			ret = program_rcvarray(fd, vaddr, grp, pagesets,
					       pageidx, use, pages, tidlist,
					       &tididx, &mapped);
			if (ret < 0) {
				hfi1_cdbg(TID,
					  "Failed to program RcvArray entries %d",
					  ret);
				ret = -EFAULT;
				goto unlock;
			} else if (ret > 0) {
				if (grp->used == grp->size)
					tid_group_move(grp,
						       &uctxt->tid_used_list,
						       &uctxt->tid_full_list);
				pageidx += ret;
				mapped_pages += mapped;
				need_group = 0;
				/* Check if we are done so we break out early */
				if (pageidx >= pageset_count)
					break;
			} else if (WARN_ON(ret == 0)) {
				/*
				 * If ret is 0, we did not program any entries
				 * into this group, which can only happen if
				 * we've screwed up the accounting somewhere.
				 * Warn and try to continue.
				 */
				need_group = 1;
			}
		}
	}
unlock:
	mutex_unlock(&uctxt->exp_lock);
nomem:
	hfi1_cdbg(TID, "total mapped: tidpairs:%u pages:%u (%d)", tididx,
		  mapped_pages, ret);
	if (tididx) {
		spin_lock(&fd->tid_lock);
		fd->tid_used += tididx;
		spin_unlock(&fd->tid_lock);
		tinfo->tidcnt = tididx;
		tinfo->length = mapped_pages * PAGE_SIZE;

		if (copy_to_user((void __user *)(unsigned long)tinfo->tidlist,
				 tidlist, sizeof(tidlist[0]) * tididx)) {
			/*
			 * On failure to copy to the user level, we need to undo
			 * everything done so far so we don't leak resources.
			 */
			tinfo->tidlist = (unsigned long)&tidlist;
			hfi1_user_exp_rcv_clear(fd, tinfo);
			tinfo->tidlist = 0;
			ret = -EFAULT;
			goto bail;
		}
	}

	/*
	 * If not everything was mapped (due to insufficient RcvArray entries,
	 * for example), unpin all unmapped pages so we can pin them next time.
	 */
	if (mapped_pages != pinned) {
		hfi1_release_user_pages(fd->mm, &pages[mapped_pages],
					pinned - mapped_pages,
					false);
		fd->tid_n_pinned -= pinned - mapped_pages;
	}
bail:
	kfree(pagesets);
	kfree(pages);
	kfree(tidlist);
	return ret > 0 ? 0 : ret;
}

int hfi1_user_exp_rcv_clear(struct hfi1_filedata *fd,
			    struct hfi1_tid_info *tinfo)
{
	int ret = 0;
	struct hfi1_ctxtdata *uctxt = fd->uctxt;
	u32 *tidinfo;
	unsigned tididx;

	if (unlikely(tinfo->tidcnt > fd->tid_used))
		return -EINVAL;

	tidinfo = memdup_user((void __user *)(unsigned long)tinfo->tidlist,
			      sizeof(tidinfo[0]) * tinfo->tidcnt);
	if (IS_ERR(tidinfo))
		return PTR_ERR(tidinfo);

	mutex_lock(&uctxt->exp_lock);
	for (tididx = 0; tididx < tinfo->tidcnt; tididx++) {
		ret = unprogram_rcvarray(fd, tidinfo[tididx], NULL);
		if (ret) {
			hfi1_cdbg(TID, "Failed to unprogram rcv array %d",
				  ret);
			break;
		}
	}
	spin_lock(&fd->tid_lock);
	fd->tid_used -= tididx;
	spin_unlock(&fd->tid_lock);
	tinfo->tidcnt = tididx;
	mutex_unlock(&uctxt->exp_lock);

	kfree(tidinfo);
	return ret;
}

int hfi1_user_exp_rcv_invalid(struct hfi1_filedata *fd,
			      struct hfi1_tid_info *tinfo)
{
	struct hfi1_ctxtdata *uctxt = fd->uctxt;
	unsigned long *ev = uctxt->dd->events +
		(((uctxt->ctxt - uctxt->dd->first_dyn_alloc_ctxt) *
		  HFI1_MAX_SHARED_CTXTS) + fd->subctxt);
	u32 *array;
	int ret = 0;

	if (!fd->invalid_tids)
		return -EINVAL;

	/*
	 * copy_to_user() can sleep, which will leave the invalid_lock
	 * locked and cause the MMU notifier to be blocked on the lock
	 * for a long time.
	 * Copy the data to a local buffer so we can release the lock.
	 */
	array = kcalloc(uctxt->expected_count, sizeof(*array), GFP_KERNEL);
	if (!array)
		return -EFAULT;

	spin_lock(&fd->invalid_lock);
	if (fd->invalid_tid_idx) {
		memcpy(array, fd->invalid_tids, sizeof(*array) *
		       fd->invalid_tid_idx);
		memset(fd->invalid_tids, 0, sizeof(*fd->invalid_tids) *
		       fd->invalid_tid_idx);
		tinfo->tidcnt = fd->invalid_tid_idx;
		fd->invalid_tid_idx = 0;
		/*
		 * Reset the user flag while still holding the lock.
		 * Otherwise, PSM can miss events.
		 */
		clear_bit(_HFI1_EVENT_TID_MMU_NOTIFY_BIT, ev);
	} else {
		tinfo->tidcnt = 0;
	}
	spin_unlock(&fd->invalid_lock);

	if (tinfo->tidcnt) {
		if (copy_to_user((void __user *)tinfo->tidlist,
				 array, sizeof(*array) * tinfo->tidcnt))
			ret = -EFAULT;
	}
	kfree(array);

	return ret;
}

static u32 find_phys_blocks(struct page **pages, unsigned npages,
			    struct tid_pageset *list)
{
	unsigned pagecount, pageidx, setcount = 0, i;
	unsigned long pfn, this_pfn;

	if (!npages)
		return 0;

	/*
	 * Look for sets of physically contiguous pages in the user buffer.
	 * This will allow us to optimize Expected RcvArray entry usage by
	 * using the bigger supported sizes.
	 */
	pfn = page_to_pfn(pages[0]);
	for (pageidx = 0, pagecount = 1, i = 1; i <= npages; i++) {
		this_pfn = i < npages ? page_to_pfn(pages[i]) : 0;

		/*
		 * If the pfn's are not sequential, pages are not physically
		 * contiguous.
		 */
		if (this_pfn != ++pfn) {
			/*
			 * At this point we have to loop over the set of
			 * physically contiguous pages and break them down
			 * into sizes supported by the HW.
			 * There are two main constraints:
			 * 1. The max buffer size is MAX_EXPECTED_BUFFER.
			 *    If the total set size is bigger than that
			 *    program only a MAX_EXPECTED_BUFFER chunk.
			 * 2. The buffer size has to be a power of two. If
			 *    it is not, round down to the closest power of
			 *    2 and program that size.
			 */
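			/*
			 * Illustrative only: with 4 KiB pages and a run of
			 * 13 contiguous pages (52 KiB, not a power of two,
			 * assumed below MAX_EXPECTED_BUFFER), the loop below
			 * emits a 32 KiB set (8 pages), then a 16 KiB set
			 * (4 pages), then a single-page set: 13 = 8 + 4 + 1.
			 */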
			while (pagecount) {
				int maxpages = pagecount;
				u32 bufsize = pagecount * PAGE_SIZE;

				if (bufsize > MAX_EXPECTED_BUFFER)
					maxpages =
						MAX_EXPECTED_BUFFER >>
						PAGE_SHIFT;
				else if (!is_power_of_2(bufsize))
					maxpages =
						rounddown_pow_of_two(bufsize) >>
						PAGE_SHIFT;

				list[setcount].idx = pageidx;
				list[setcount].count = maxpages;
				pagecount -= maxpages;
				pageidx += maxpages;
				setcount++;
			}
			pageidx = i;
			pagecount = 1;
			pfn = this_pfn;
		} else {
			pagecount++;
		}
	}
	return setcount;
}

/**
 * program_rcvarray() - program an RcvArray group with receive buffers
 * @fd: filedata pointer
 * @vaddr: starting user virtual address
 * @grp: RcvArray group
 * @sets: array of struct tid_pageset holding information on physically
 *        contiguous chunks from the user buffer
 * @start: starting index into sets array
 * @count: number of struct tid_pageset's to program
 * @pages: an array of struct page * for the user buffer
 * @tidlist: the array of u32 elements where the information about the
 *           programmed RcvArray entries is to be encoded.
 * @tididx: starting offset into tidlist
 * @pmapped: (output parameter) number of pages programmed into the RcvArray
 *           entries.
 *
 * This function will program up to 'count' number of RcvArray entries from the
 * group 'grp'. To make best use of write-combining writes, the function will
 * perform writes to the unused RcvArray entries which will be ignored by the
 * HW. Each RcvArray entry will be programmed with a physically contiguous
 * buffer chunk from the user's virtual buffer.
 *
 * Return:
 * -EINVAL if the requested count is larger than the size of the group,
 * -ENOMEM or -EFAULT on error from set_rcvarray_entry(), or
 * number of RcvArray entries programmed.
 */
static int program_rcvarray(struct hfi1_filedata *fd, unsigned long vaddr,
			    struct tid_group *grp,
			    struct tid_pageset *sets,
			    unsigned start, u16 count, struct page **pages,
			    u32 *tidlist, unsigned *tididx, unsigned *pmapped)
{
	struct hfi1_ctxtdata *uctxt = fd->uctxt;
	struct hfi1_devdata *dd = uctxt->dd;
	u16 idx;
	u32 tidinfo = 0, rcventry, useidx = 0;
	int mapped = 0;

	/* Count should never be larger than the group size */
	if (count > grp->size)
		return -EINVAL;

	/* Find the first unused entry in the group */
	for (idx = 0; idx < grp->size; idx++) {
		if (!(grp->map & (1 << idx))) {
			useidx = idx;
			break;
		}
		rcv_array_wc_fill(dd, grp->base + idx);
	}

	idx = 0;
	while (idx < count) {
		u16 npages, pageidx, setidx = start + idx;
		int ret = 0;

		/*
		 * If this entry in the group is used, move to the next one.
		 * If we go past the end of the group, exit the loop.
		 */
		if (useidx >= grp->size) {
			break;
		} else if (grp->map & (1 << useidx)) {
			rcv_array_wc_fill(dd, grp->base + useidx);
			useidx++;
			continue;
		}

		rcventry = grp->base + useidx;
		npages = sets[setidx].count;
		pageidx = sets[setidx].idx;

		ret = set_rcvarray_entry(fd, vaddr + (pageidx * PAGE_SIZE),
					 rcventry, grp, pages + pageidx,
					 npages);
		if (ret)
			return ret;
		mapped += npages;

		tidinfo = rcventry2tidinfo(rcventry - uctxt->expected_base) |
			EXP_TID_SET(LEN, npages);
		tidlist[(*tididx)++] = tidinfo;
		grp->used++;
		grp->map |= 1 << useidx++;
		idx++;
	}

	/* Fill the rest of the group with "blank" writes */
	for (; useidx < grp->size; useidx++)
		rcv_array_wc_fill(dd, grp->base + useidx);
	*pmapped = mapped;
	return idx;
}

static int set_rcvarray_entry(struct hfi1_filedata *fd, unsigned long vaddr,
			      u32 rcventry, struct tid_group *grp,
			      struct page **pages, unsigned npages)
{
	int ret;
	struct hfi1_ctxtdata *uctxt = fd->uctxt;
	struct tid_rb_node *node;
	struct hfi1_devdata *dd = uctxt->dd;
	dma_addr_t phys;

	/*
	 * Allocate the node first so we can handle a potential
	 * failure before we've programmed anything.
	 */
	node = kzalloc(sizeof(*node) + (sizeof(struct page *) * npages),
		       GFP_KERNEL);
	if (!node)
		return -ENOMEM;

	phys = pci_map_single(dd->pcidev,
			      __va(page_to_phys(pages[0])),
			      npages * PAGE_SIZE, PCI_DMA_FROMDEVICE);
	if (dma_mapping_error(&dd->pcidev->dev, phys)) {
		dd_dev_err(dd, "Failed to DMA map Exp Rcv pages 0x%llx\n",
			   phys);
		kfree(node);
		return -EFAULT;
	}

	node->mmu.addr = vaddr;
	node->mmu.len = npages * PAGE_SIZE;
	node->phys = page_to_phys(pages[0]);
	node->npages = npages;
	node->rcventry = rcventry;
	node->dma_addr = phys;
	node->grp = grp;
	node->freed = false;
	memcpy(node->pages, pages, sizeof(struct page *) * npages);

	if (!fd->handler)
		ret = tid_rb_insert(fd, &node->mmu);
	else
		ret = hfi1_mmu_rb_insert(fd->handler, &node->mmu);

	if (ret) {
		hfi1_cdbg(TID, "Failed to insert RB node %u 0x%lx, 0x%lx %d",
			  node->rcventry, node->mmu.addr, node->phys, ret);
		pci_unmap_single(dd->pcidev, phys, npages * PAGE_SIZE,
				 PCI_DMA_FROMDEVICE);
		kfree(node);
		return -EFAULT;
	}
	hfi1_put_tid(dd, rcventry, PT_EXPECTED, phys, ilog2(npages) + 1);
	trace_hfi1_exp_tid_reg(uctxt->ctxt, fd->subctxt, rcventry, npages,
			       node->mmu.addr, node->phys, phys);
	return 0;
}

static int unprogram_rcvarray(struct hfi1_filedata *fd, u32 tidinfo,
			      struct tid_group **grp)
{
	struct hfi1_ctxtdata *uctxt = fd->uctxt;
	struct hfi1_devdata *dd = uctxt->dd;
	struct tid_rb_node *node;
	u8 tidctrl = EXP_TID_GET(tidinfo, CTRL);
	u32 tididx = EXP_TID_GET(tidinfo, IDX) << 1, rcventry;

	if (tididx >= uctxt->expected_count) {
		dd_dev_err(dd, "Invalid RcvArray entry (%u) index for ctxt %u\n",
			   tididx, uctxt->ctxt);
		return -EINVAL;
	}

	if (tidctrl == 0x3)
		return -EINVAL;

	rcventry = tididx + (tidctrl - 1);
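
	/*
	 * Illustrative only: each IDX value covers a pair of RcvArray
	 * entries, and tidctrl (1 or 2; 0x3 is rejected above) selects
	 * one of the pair. For example, IDX = 5 with tidctrl = 2 decodes
	 * to tididx = 5 << 1 = 10 and rcventry = 10 + (2 - 1) = 11, the
	 * context-relative entry used to look up the cached node below.
	 */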
	node = fd->entry_to_rb[rcventry];
	if (!node || node->rcventry != (uctxt->expected_base + rcventry))
		return -EBADF;

	if (grp)
		*grp = node->grp;

	if (!fd->handler)
		cacheless_tid_rb_remove(fd, node);
	else
		hfi1_mmu_rb_remove(fd->handler, &node->mmu);

	return 0;
}

static void clear_tid_node(struct hfi1_filedata *fd, struct tid_rb_node *node)
{
	struct hfi1_ctxtdata *uctxt = fd->uctxt;
	struct hfi1_devdata *dd = uctxt->dd;

	trace_hfi1_exp_tid_unreg(uctxt->ctxt, fd->subctxt, node->rcventry,
				 node->npages, node->mmu.addr, node->phys,
				 node->dma_addr);

	/*
	 * Make sure device has seen the write before we unpin the
	 * pages.
	 */
	hfi1_put_tid(dd, node->rcventry, PT_INVALID_FLUSH, 0, 0);

	pci_unmap_single(dd->pcidev, node->dma_addr, node->mmu.len,
			 PCI_DMA_FROMDEVICE);
	hfi1_release_user_pages(fd->mm, node->pages, node->npages, true);
	fd->tid_n_pinned -= node->npages;

	node->grp->used--;
	node->grp->map &= ~(1 << (node->rcventry - node->grp->base));

	if (node->grp->used == node->grp->size - 1)
		tid_group_move(node->grp, &uctxt->tid_full_list,
			       &uctxt->tid_used_list);
	else if (!node->grp->used)
		tid_group_move(node->grp, &uctxt->tid_used_list,
			       &uctxt->tid_group_list);
	kfree(node);
}

/*
 * As a simple helper for hfi1_user_exp_rcv_free, this function deals with
 * clearing nodes in the non-cached case.
 */
static void unlock_exp_tids(struct hfi1_ctxtdata *uctxt,
			    struct exp_tid_set *set,
			    struct hfi1_filedata *fd)
{
	struct tid_group *grp, *ptr;
	int i;

	list_for_each_entry_safe(grp, ptr, &set->list, list) {
		list_del_init(&grp->list);

		for (i = 0; i < grp->size; i++) {
			if (grp->map & (1 << i)) {
				u16 rcventry = grp->base + i;
				struct tid_rb_node *node;

				node = fd->entry_to_rb[rcventry -
						       uctxt->expected_base];
				if (!node || node->rcventry != rcventry)
					continue;

				cacheless_tid_rb_remove(fd, node);
			}
		}
	}
}

/*
 * Always return 0 from this function. A non-zero return indicates that the
 * remove operation will be called and that memory should be unpinned.
 * However, the driver cannot unpin out from under PSM. Instead, retain the
 * memory (by returning 0) and inform PSM that the memory is going away. PSM
 * will call back later when it has removed the memory from its list.
 */
static int tid_rb_invalidate(void *arg, struct mmu_rb_node *mnode)
{
	struct hfi1_filedata *fdata = arg;
	struct hfi1_ctxtdata *uctxt = fdata->uctxt;
	struct tid_rb_node *node =
		container_of(mnode, struct tid_rb_node, mmu);

	if (node->freed)
		return 0;

	trace_hfi1_exp_tid_inval(uctxt->ctxt, fdata->subctxt, node->mmu.addr,
				 node->rcventry, node->npages, node->dma_addr);
	node->freed = true;

	spin_lock(&fdata->invalid_lock);
	if (fdata->invalid_tid_idx < uctxt->expected_count) {
		fdata->invalid_tids[fdata->invalid_tid_idx] =
			rcventry2tidinfo(node->rcventry - uctxt->expected_base);
		fdata->invalid_tids[fdata->invalid_tid_idx] |=
			EXP_TID_SET(LEN, node->npages);
		if (!fdata->invalid_tid_idx) {
			unsigned long *ev;

			/*
			 * hfi1_set_uevent_bits() sets a user event flag
			 * for all processes. Because calling into the
			 * driver to process TID cache invalidations is
			 * expensive and TID cache invalidations are
			 * handled on a per-process basis, we can
			 * optimize this to set the flag only for the
			 * process in question.
			 */
			ev = uctxt->dd->events +
				(((uctxt->ctxt - uctxt->dd->first_dyn_alloc_ctxt) *
				  HFI1_MAX_SHARED_CTXTS) + fdata->subctxt);
			set_bit(_HFI1_EVENT_TID_MMU_NOTIFY_BIT, ev);
		}
		fdata->invalid_tid_idx++;
	}
	spin_unlock(&fdata->invalid_lock);
	return 0;
}

static int tid_rb_insert(void *arg, struct mmu_rb_node *node)
{
	struct hfi1_filedata *fdata = arg;
	struct tid_rb_node *tnode =
		container_of(node, struct tid_rb_node, mmu);
	u32 base = fdata->uctxt->expected_base;

	fdata->entry_to_rb[tnode->rcventry - base] = tnode;
	return 0;
}

static void cacheless_tid_rb_remove(struct hfi1_filedata *fdata,
				    struct tid_rb_node *tnode)
{
	u32 base = fdata->uctxt->expected_base;

	fdata->entry_to_rb[tnode->rcventry - base] = NULL;
	clear_tid_node(fdata, tnode);
}

static void tid_rb_remove(void *arg, struct mmu_rb_node *node)
{
	struct hfi1_filedata *fdata = arg;
	struct tid_rb_node *tnode =
		container_of(node, struct tid_rb_node, mmu);

	cacheless_tid_rb_remove(fdata, tnode);
}